Commit 511aaca8 authored by Davidlohr Bueso, committed by Ingo Molnar

x86/mm/pat: Drop the rbt_ prefix from external memtype calls

Drop the rbt_ prefix from the rbt_memtype_*() calls, as we no longer
use an rbtree directly.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lkml.kernel.org/r/20191121011601.20611-4-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6a9930b1
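
For quick orientation, the externally visible memtype API after this rename, as declared in the header change below, consists of:

    int memtype_check_insert(struct memtype *new, enum page_cache_mode *new_type);
    struct memtype *memtype_erase(u64 start, u64 end);
    struct memtype *memtype_lookup(u64 addr);
    int memtype_copy_nth_element(struct memtype *out, loff_t pos);

All four are the old rbt_memtype_*() entry points with the prefix dropped; their behavior is unchanged.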
@@ -603,7 +603,7 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
         spin_lock(&memtype_lock);
-        err = rbt_memtype_check_insert(new, new_type);
+        err = memtype_check_insert(new, new_type);
         if (err) {
                 pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
                         start, end - 1,
@@ -650,7 +650,7 @@ int free_memtype(u64 start, u64 end)
         }
         spin_lock(&memtype_lock);
-        entry = rbt_memtype_erase(start, end);
+        entry = memtype_erase(start, end);
         spin_unlock(&memtype_lock);
         if (IS_ERR(entry)) {
@@ -693,7 +693,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
         spin_lock(&memtype_lock);
-        entry = rbt_memtype_lookup(paddr);
+        entry = memtype_lookup(paddr);
         if (entry != NULL)
                 rettype = entry->type;
         else
@@ -1109,7 +1109,7 @@ static struct memtype *memtype_get_idx(loff_t pos)
                 return NULL;
         spin_lock(&memtype_lock);
-        ret = rbt_memtype_copy_nth_element(print_entry, pos);
+        ret = memtype_copy_nth_element(print_entry, pos);
         spin_unlock(&memtype_lock);
         if (!ret) {
@@ -29,20 +29,20 @@ static inline char *cattr_name(enum page_cache_mode pcm)
 }
 #ifdef CONFIG_X86_PAT
-extern int rbt_memtype_check_insert(struct memtype *new,
-                                    enum page_cache_mode *new_type);
-extern struct memtype *rbt_memtype_erase(u64 start, u64 end);
-extern struct memtype *rbt_memtype_lookup(u64 addr);
-extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos);
+extern int memtype_check_insert(struct memtype *new,
+                                enum page_cache_mode *new_type);
+extern struct memtype *memtype_erase(u64 start, u64 end);
+extern struct memtype *memtype_lookup(u64 addr);
+extern int memtype_copy_nth_element(struct memtype *out, loff_t pos);
 #else
-static inline int rbt_memtype_check_insert(struct memtype *new,
-                                           enum page_cache_mode *new_type)
+static inline int memtype_check_insert(struct memtype *new,
+                                       enum page_cache_mode *new_type)
 { return 0; }
-static inline struct memtype *rbt_memtype_erase(u64 start, u64 end)
+static inline struct memtype *memtype_erase(u64 start, u64 end)
 { return NULL; }
-static inline struct memtype *rbt_memtype_lookup(u64 addr)
+static inline struct memtype *memtype_lookup(u64 addr)
 { return NULL; }
-static inline int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
+static inline int memtype_copy_nth_element(struct memtype *out, loff_t pos)
 { return 0; }
 #endif
@@ -109,8 +109,8 @@ static int memtype_check_conflict(u64 start, u64 end,
         return -EBUSY;
 }
-int rbt_memtype_check_insert(struct memtype *new,
-                             enum page_cache_mode *ret_type)
+int memtype_check_insert(struct memtype *new,
+                         enum page_cache_mode *ret_type)
 {
         int err = 0;
@@ -125,13 +125,13 @@ int rbt_memtype_check_insert(struct memtype *new,
         return 0;
 }
-struct memtype *rbt_memtype_erase(u64 start, u64 end)
+struct memtype *memtype_erase(u64 start, u64 end)
 {
         struct memtype *data;
         /*
          * Since the memtype_rbroot tree allows overlapping ranges,
-         * rbt_memtype_erase() checks with EXACT_MATCH first, i.e. free
+         * memtype_erase() checks with EXACT_MATCH first, i.e. free
          * a whole node for the munmap case. If no such entry is found,
          * it then checks with END_MATCH, i.e. shrink the size of a node
          * from the end for the mremap case.
@@ -157,14 +157,14 @@ struct memtype *rbt_memtype_erase(u64 start, u64 end)
         return data;
 }
-struct memtype *rbt_memtype_lookup(u64 addr)
+struct memtype *memtype_lookup(u64 addr)
 {
         return memtype_interval_iter_first(&memtype_rbroot, addr,
                                            addr + PAGE_SIZE);
 }
 #if defined(CONFIG_DEBUG_FS)
-int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
+int memtype_copy_nth_element(struct memtype *out, loff_t pos)
 {
         struct memtype *match;
         int i = 1;
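
The comment in memtype_erase() above describes a two-step matching policy: try an exact [start, end) match first (the munmap case), and only then accept an entry that merely ends at the requested end, shrinking it in place (the mremap case). The following standalone sketch illustrates that policy in isolation; it is not the kernel implementation, and the struct range / erase_range names, the array-based table, and the demo values exist only for this example.

    #include <stdbool.h>
    #include <stdio.h>

    /* A toy stand-in for struct memtype: just a [start, end) range. */
    struct range {
        unsigned long long start;
        unsigned long long end;
        bool in_use;
    };

    /*
     * Erase [start, end) using the policy described in the diff comment:
     * pass 1 frees an entry that matches exactly (munmap), pass 2 shrinks
     * an entry that only matches at 'end' (mremap). Returns the index of
     * the affected entry, or -1 if nothing matched.
     */
    static int erase_range(struct range *tbl, int n,
                           unsigned long long start, unsigned long long end)
    {
        for (int i = 0; i < n; i++) {
            if (tbl[i].in_use && tbl[i].start == start && tbl[i].end == end) {
                tbl[i].in_use = false;  /* EXACT_MATCH: drop the whole entry */
                return i;
            }
        }

        for (int i = 0; i < n; i++) {
            if (tbl[i].in_use && tbl[i].start < start && tbl[i].end == end) {
                tbl[i].end = start;     /* END_MATCH: shrink from the end */
                return i;
            }
        }

        return -1;
    }

    int main(void)
    {
        struct range tbl[] = {
            { 0x1000, 0x3000, true },
            { 0x4000, 0x8000, true },
        };

        /* munmap-like case: the whole first entry goes away. */
        printf("exact match -> entry %d\n", erase_range(tbl, 2, 0x1000, 0x3000));

        /* mremap-like case: the second entry is shrunk to [0x4000, 0x6000). */
        printf("end match   -> entry %d\n", erase_range(tbl, 2, 0x6000, 0x8000));
        printf("entry 1 now ends at %#llx\n", tbl[1].end);

        return 0;
    }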