Commit 21abb147 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] Remove old node based policy interface from mempolicy.c

mempolicy.c contains a provisional interface for huge page allocation based on
node numbers.  It is in use in SLES9 but was never used (AFAIK) in upstream
versions of Linux.

Huge page allocations now use zonelists to figure out where to allocate pages.
Since zonelists are ordered by NUMA distance, they let us find the closest
suitable huge page, which is what the node-based interface's NUMA-distance
handling was there for.

Remove the obsolete functions.
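
For illustration, a minimal sketch of the zonelist-based lookup that replaces
the node-number interface, assuming the huge_zonelist() helper introduced by
the parent commit; hugepage_freelists[] stands in for hugetlbfs' per-node free
lists, and this is a sketch rather than the exact upstream code:

/*
 * Sketch: walk the policy-aware zonelist in NUMA-distance order and
 * return a huge page from the first node that has one queued.
 */
static struct page *dequeue_huge_page_sketch(struct vm_area_struct *vma,
					     unsigned long addr)
{
	struct zonelist *zonelist = huge_zonelist(vma, addr);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		int nid = (*z)->zone_pgdat->node_id;

		/* Closest node with an available huge page wins. */
		if (!list_empty(&hugepage_freelists[nid]))
			return list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
	}
	return NULL;
}

Because the zonelist is already sorted by distance from the allocating node,
no separate "first node" / "node valid" queries are needed.
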
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@muc.de>
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Cc: Adam Litke <agl@us.ibm.com>
Acked-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 5da7ca86
@@ -109,14 +109,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
 
 #define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
 
-/*
- * Hugetlb policy. i386 hugetlb so far works with node numbers
- * instead of zone lists, so give it special interfaces for now.
- */
-extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
-extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
-		unsigned long addr);
-
 /*
  * Tree of shared policies for a shared memory region.
  * Maintain the policies in a pseudo mm that contains vmas. The vmas
@@ -184,17 +176,6 @@ static inline struct mempolicy *mpol_copy(struct mempolicy *old)
 	return NULL;
 }
 
-static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
-{
-	return numa_node_id();
-}
-
-static inline int
-mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
-{
-	return 1;
-}
-
 struct shared_policy {};
 
 static inline int mpol_set_shared_policy(struct shared_policy *info,
...
@@ -960,54 +960,6 @@ void __mpol_free(struct mempolicy *p)
 	kmem_cache_free(policy_cache, p);
 }
 
-/*
- * Hugetlb policy. Same as above, just works with node numbers instead of
- * zonelists.
- */
-
-/* Find first node suitable for an allocation */
-int mpol_first_node(struct vm_area_struct *vma, unsigned long addr)
-{
-	struct mempolicy *pol = get_vma_policy(current, vma, addr);
-
-	switch (pol->policy) {
-	case MPOL_DEFAULT:
-		return numa_node_id();
-	case MPOL_BIND:
-		return pol->v.zonelist->zones[0]->zone_pgdat->node_id;
-	case MPOL_INTERLEAVE:
-		return interleave_nodes(pol);
-	case MPOL_PREFERRED:
-		return pol->v.preferred_node >= 0 ?
-				pol->v.preferred_node : numa_node_id();
-	}
-	BUG();
-	return 0;
-}
-
-/* Find secondary valid nodes for an allocation */
-int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr)
-{
-	struct mempolicy *pol = get_vma_policy(current, vma, addr);
-
-	switch (pol->policy) {
-	case MPOL_PREFERRED:
-	case MPOL_DEFAULT:
-	case MPOL_INTERLEAVE:
-		return 1;
-	case MPOL_BIND: {
-		struct zone **z;
-		for (z = pol->v.zonelist->zones; *z; z++)
-			if ((*z)->zone_pgdat->node_id == nid)
-				return 1;
-		return 0;
-	}
-	default:
-		BUG();
-		return 0;
-	}
-}
-
 /*
  * Shared memory backing store policy support.
  *
...