Commit c36f6e6d authored by Hugh Dickins, committed by Andrew Morton

mempolicy trivia: slightly more consistent naming

Before getting down to work, do a little cleanup, mainly of inconsistent
variable naming.  I gave up trying to rationalize mpol versus pol versus
policy, and node versus nid, but let's avoid p and nd.  Remove a few
superfluous blank lines, but add one; and here prefer vma->vm_policy to
vma_policy(vma) - the latter being appropriate in other sources, which
have to allow for !CONFIG_NUMA.  That intriguing line about KERNEL_DS? 
should have gone in v2.6.15, when numa_policy_init() stopped using
set_mempolicy(2)'s system call handler.
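
For readers wondering why vma_policy(vma) exists at all: roughly, the
wrapper in include/linux/mempolicy.h compiles away when NUMA is disabled,
so sources shared with !CONFIG_NUMA builds must go through it, while
mm/mempolicy.c (built only for CONFIG_NUMA) can dereference
vma->vm_policy directly.  A sketch of the wrapper's shape (illustrative,
not quoted from this tree):

	#ifdef CONFIG_NUMA
	#define vma_policy(vma)	((vma)->vm_policy)	/* real per-VMA policy */
	#else
	#define vma_policy(vma)	NULL			/* no NUMA: no policy */
	#endif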

Link: https://lkml.kernel.org/r/68287974-b6ae-7df-4ba-d19ddd69cbf@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 7f1ee4e2
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -124,10 +124,9 @@ struct shared_policy {
 
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
-int mpol_set_shared_policy(struct shared_policy *info,
-				struct vm_area_struct *vma,
-				struct mempolicy *new);
-void mpol_free_shared_policy(struct shared_policy *p);
+int mpol_set_shared_policy(struct shared_policy *sp,
+				struct vm_area_struct *vma, struct mempolicy *mpol);
+void mpol_free_shared_policy(struct shared_policy *sp);
 struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 				unsigned long idx);
 
@@ -191,7 +190,7 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
 	return true;
 }
 
-static inline void mpol_put(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *pol)
 {
 }
 
@@ -210,7 +209,7 @@ static inline void mpol_shared_policy_init(struct shared_policy *sp,
 {
 }
 
-static inline void mpol_free_shared_policy(struct shared_policy *p)
+static inline void mpol_free_shared_policy(struct shared_policy *sp)
 {
 }
 
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -52,7 +52,7 @@
  * on systems with highmem kernel lowmem allocation don't get policied.
  * Same with GFP_DMA allocations.
  *
- * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
+ * For shmem/tmpfs shared memory the policy is shared between
  * all users and remembered even when nobody has memory mapped.
  */
 
@@ -291,6 +291,7 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 			return ERR_PTR(-EINVAL);
 	} else if (nodes_empty(*nodes))
 		return ERR_PTR(-EINVAL);
+
 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 	if (!policy)
 		return ERR_PTR(-ENOMEM);
@@ -303,11 +304,11 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 }
 
 /* Slow path of a mpol destructor. */
-void __mpol_put(struct mempolicy *p)
+void __mpol_put(struct mempolicy *pol)
 {
-	if (!atomic_dec_and_test(&p->refcnt))
+	if (!atomic_dec_and_test(&pol->refcnt))
 		return;
-	kmem_cache_free(policy_cache, p);
+	kmem_cache_free(policy_cache, pol);
 }
 
 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
@@ -364,7 +365,6 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
  *
  * Called with task's alloc_lock held.
  */
-
 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
 {
 	mpol_rebind_policy(tsk->mempolicy, new);
@@ -375,7 +375,6 @@ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
  *
  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
  */
-
 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 {
 	struct vm_area_struct *vma;
@@ -800,7 +799,7 @@ static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		vmstart = vma->vm_start;
 	}
 
-	if (mpol_equal(vma_policy(vma), new_pol)) {
+	if (mpol_equal(vma->vm_policy, new_pol)) {
 		*prev = vma;
 		return 0;
 	}
@@ -855,18 +854,18 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
  *
  * Called with task's alloc_lock held
  */
-static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
+static void get_policy_nodemask(struct mempolicy *pol, nodemask_t *nodes)
 {
 	nodes_clear(*nodes);
-	if (p == &default_policy)
+	if (pol == &default_policy)
 		return;
 
-	switch (p->mode) {
+	switch (pol->mode) {
 	case MPOL_BIND:
 	case MPOL_INTERLEAVE:
 	case MPOL_PREFERRED:
 	case MPOL_PREFERRED_MANY:
-		*nodes = p->nodes;
+		*nodes = pol->nodes;
 		break;
 	case MPOL_LOCAL:
 		/* return empty node mask for local allocation */
@@ -1634,7 +1633,6 @@ static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
 out_put:
 	put_task_struct(task);
 	goto out;
-
 }
 
 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
@@ -1644,7 +1642,6 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
 }
 
-
 /* Retrieve NUMA policy */
 static int kernel_get_mempolicy(int __user *policy,
 				unsigned long __user *nmask,
@@ -1827,10 +1824,10 @@ nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
  * policy_node() is always coupled with policy_nodemask(), which
  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
  */
-static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
+static int policy_node(gfp_t gfp, struct mempolicy *policy, int nid)
 {
 	if (policy->mode == MPOL_PREFERRED) {
-		nd = first_node(policy->nodes);
+		nid = first_node(policy->nodes);
 	} else {
 		/*
 		 * __GFP_THISNODE shouldn't even be used with the bind policy
@@ -1845,19 +1842,18 @@ static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
 	    policy->home_node != NUMA_NO_NODE)
 		return policy->home_node;
 
-	return nd;
+	return nid;
 }
 
 /* Do dynamic interleaving for a process */
-static unsigned interleave_nodes(struct mempolicy *policy)
+static unsigned int interleave_nodes(struct mempolicy *policy)
 {
-	unsigned next;
-	struct task_struct *me = current;
+	unsigned int nid;
 
-	next = next_node_in(me->il_prev, policy->nodes);
-	if (next < MAX_NUMNODES)
-		me->il_prev = next;
-	return next;
+	nid = next_node_in(current->il_prev, policy->nodes);
+	if (nid < MAX_NUMNODES)
+		current->il_prev = nid;
+	return nid;
 }
 
 /*
@@ -2347,7 +2343,7 @@ unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
 
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 {
-	struct mempolicy *pol = mpol_dup(vma_policy(src));
+	struct mempolicy *pol = mpol_dup(src->vm_policy);
 
 	if (IS_ERR(pol))
 		return PTR_ERR(pol);
@@ -2771,40 +2767,40 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 	}
 }
 
-int mpol_set_shared_policy(struct shared_policy *info,
-			struct vm_area_struct *vma, struct mempolicy *npol)
+int mpol_set_shared_policy(struct shared_policy *sp,
+			struct vm_area_struct *vma, struct mempolicy *pol)
 {
 	int err;
 	struct sp_node *new = NULL;
 	unsigned long sz = vma_pages(vma);
 
-	if (npol) {
-		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
+	if (pol) {
+		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, pol);
 		if (!new)
 			return -ENOMEM;
 	}
-	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
+	err = shared_policy_replace(sp, vma->vm_pgoff, vma->vm_pgoff + sz, new);
 	if (err && new)
 		sp_free(new);
 	return err;
 }
 
 /* Free a backing policy store on inode delete. */
-void mpol_free_shared_policy(struct shared_policy *p)
+void mpol_free_shared_policy(struct shared_policy *sp)
 {
 	struct sp_node *n;
 	struct rb_node *next;
 
-	if (!p->root.rb_node)
+	if (!sp->root.rb_node)
 		return;
-	write_lock(&p->lock);
-	next = rb_first(&p->root);
+	write_lock(&sp->lock);
+	next = rb_first(&sp->root);
 	while (next) {
 		n = rb_entry(next, struct sp_node, nd);
 		next = rb_next(&n->nd);
-		sp_delete(p, n);
+		sp_delete(sp, n);
 	}
-	write_unlock(&p->lock);
+	write_unlock(&sp->lock);
 }
 
 #ifdef CONFIG_NUMA_BALANCING
@@ -2854,7 +2850,6 @@ static inline void __init check_numabalancing_enable(void)
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
-/* assumes fs == KERNEL_DS */
 void __init numa_policy_init(void)
 {
 	nodemask_t interleave_nodes;
@@ -2917,7 +2912,6 @@ void numa_default_policy(void)
 /*
  * Parse and format mempolicy from/to strings
  */
-
 static const char * const policy_modes[] =
 {
 	[MPOL_DEFAULT] = "default",
@@ -2928,7 +2922,6 @@ static const char * const policy_modes[] =
 	[MPOL_PREFERRED_MANY] = "prefer (many)",
 };
 
-
 #ifdef CONFIG_TMPFS
 /**
  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.