Commit ecdd6ee7 authored by Ingo Molnar

x86/mm/pat: Standardize on memtype_*() prefix for APIs

Half of our memtype APIs are memtype_ prefixed, while the other half are _memtype suffixed:

	reserve_memtype()
	free_memtype()
	kernel_map_sync_memtype()
	io_reserve_memtype()
	io_free_memtype()

	memtype_check_insert()
	memtype_erase()
	memtype_lookup()
	memtype_copy_nth_element()

Use prefixes consistently, like most other modern kernel APIs:

	reserve_memtype()		=> memtype_reserve()
	free_memtype()			=> memtype_free()
	kernel_map_sync_memtype()	=> memtype_kernel_map_sync()
	io_reserve_memtype()		=> memtype_reserve_io()
	io_free_memtype()		=> memtype_free_io()

	memtype_check_insert()		=> memtype_check_insert()
	memtype_erase()			=> memtype_erase()
	memtype_lookup()		=> memtype_lookup()
	memtype_copy_nth_element()	=> memtype_copy_nth_element()

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f9b57cf8
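
The rename is mechanical and the call pairing is unchanged: every successful memtype_reserve() must eventually be undone by memtype_free(), with memtype_kernel_map_sync() keeping the kernel identity mapping coherent with the cache mode that was actually granted. A minimal sketch of that flow under the new names, modeled on memtype_reserve_io() in the diff below (the helper name example_track_wc() is hypothetical and not part of this commit):

	/*
	 * Illustrative helper only (not in this commit): track the physical
	 * range [start, end) as write-combining via the renamed APIs,
	 * mirroring the logic of memtype_reserve_io() below.
	 */
	static int example_track_wc(u64 start, u64 end)
	{
		enum page_cache_mode new_pcm;
		int ret;

		/* Request WC; PAT may grant a different compatible mode. */
		ret = memtype_reserve(start, end, _PAGE_CACHE_MODE_WC, &new_pcm);
		if (ret)
			return ret;

		/* Sync the kernel identity mapping to the granted mode. */
		if (memtype_kernel_map_sync(start, end - start, new_pcm) < 0) {
			/* A successful reserve must be paired with a free. */
			memtype_free(start, end);
			return -EINVAL;
		}

		return 0;
	}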
@@ -10,17 +10,17 @@ void pat_disable(const char *reason);
 extern void pat_init(void);
 extern void init_cache_modes(void);

-extern int reserve_memtype(u64 start, u64 end,
+extern int memtype_reserve(u64 start, u64 end,
 			   enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
-extern int free_memtype(u64 start, u64 end);
-extern int kernel_map_sync_memtype(u64 base, unsigned long size,
+extern int memtype_free(u64 start, u64 end);
+extern int memtype_kernel_map_sync(u64 base, unsigned long size,
 				   enum page_cache_mode pcm);

-int io_reserve_memtype(resource_size_t start, resource_size_t end,
+int memtype_reserve_io(resource_size_t start, resource_size_t end,
 			enum page_cache_mode *pcm);

-void io_free_memtype(resource_size_t start, resource_size_t end);
+void memtype_free_io(resource_size_t start, resource_size_t end);

 bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn);
...
@@ -26,7 +26,7 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
 	if (!is_io_mapping_possible(base, size))
 		return -EINVAL;

-	ret = io_reserve_memtype(base, base + size, &pcm);
+	ret = memtype_reserve_io(base, base + size, &pcm);
 	if (ret)
 		return ret;
@@ -40,7 +40,7 @@ EXPORT_SYMBOL_GPL(iomap_create_wc);

 void iomap_free(resource_size_t base, unsigned long size)
 {
-	io_free_memtype(base, base + size);
+	memtype_free_io(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);
...
@@ -196,10 +196,10 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
 	phys_addr &= PHYSICAL_PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;

-	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
+	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
 						pcm, &new_pcm);
 	if (retval) {
-		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
+		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
 		return NULL;
 	}
@@ -255,7 +255,7 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
 	area->phys_addr = phys_addr;
 	vaddr = (unsigned long) area->addr;

-	if (kernel_map_sync_memtype(phys_addr, size, pcm))
+	if (memtype_kernel_map_sync(phys_addr, size, pcm))
 		goto err_free_area;

 	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
@@ -275,7 +275,7 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
 err_free_area:
 	free_vm_area(area);
 err_free_memtype:
-	free_memtype(phys_addr, phys_addr + size);
+	memtype_free(phys_addr, phys_addr + size);
 	return NULL;
 }
@@ -451,7 +451,7 @@ void iounmap(volatile void __iomem *addr)
 		return;
 	}

-	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
+	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

 	/* Finally remove it */
 	o = remove_vm_area((void __force *)addr);
...
@@ -575,7 +575,7 @@ static u64 sanitize_phys(u64 address)
  * available type in new_type in case of no error. In case of any error
  * it will return a negative return value.
  */
-int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
+int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
 		    enum page_cache_mode *new_type)
 {
 	struct memtype *entry_new;
@@ -638,7 +638,7 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 	err = memtype_check_insert(entry_new, new_type);
 	if (err) {
-		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
+		pr_info("x86/PAT: memtype_reserve failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
 			start, end - 1,
 			cattr_name(entry_new->type), cattr_name(req_type));
 		kfree(entry_new);
@@ -649,14 +649,14 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,

 	spin_unlock(&memtype_lock);

-	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
+	dprintk("memtype_reserve added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
 		start, end - 1, cattr_name(entry_new->type), cattr_name(req_type),
 		new_type ? cattr_name(*new_type) : "-");

 	return err;
 }

-int free_memtype(u64 start, u64 end)
+int memtype_free(u64 start, u64 end)
 {
 	int is_range_ram;
 	struct memtype *entry_old;
@@ -689,7 +689,7 @@ int free_memtype(u64 start, u64 end)
 	kfree(entry_old);

-	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
+	dprintk("memtype_free request [mem %#010Lx-%#010Lx]\n", start, end - 1);

 	return 0;
 }
@@ -752,7 +752,7 @@ bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
 EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);

 /**
- * io_reserve_memtype - Request a memory type mapping for a region of memory
+ * memtype_reserve_io - Request a memory type mapping for a region of memory
  * @start: start (physical address) of the region
  * @end: end (physical address) of the region
  * @type: A pointer to memtype, with requested type. On success, requested
@@ -761,7 +761,7 @@ EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);
  * On success, returns 0
  * On failure, returns non-zero
  */
-int io_reserve_memtype(resource_size_t start, resource_size_t end,
+int memtype_reserve_io(resource_size_t start, resource_size_t end,
 			enum page_cache_mode *type)
 {
 	resource_size_t size = end - start;
@@ -771,47 +771,47 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,

 	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

-	ret = reserve_memtype(start, end, req_type, &new_type);
+	ret = memtype_reserve(start, end, req_type, &new_type);
 	if (ret)
 		goto out_err;

 	if (!is_new_memtype_allowed(start, size, req_type, new_type))
 		goto out_free;

-	if (kernel_map_sync_memtype(start, size, new_type) < 0)
+	if (memtype_kernel_map_sync(start, size, new_type) < 0)
 		goto out_free;

 	*type = new_type;

 	return 0;

 out_free:
-	free_memtype(start, end);
+	memtype_free(start, end);
 	ret = -EBUSY;
 out_err:
 	return ret;
 }

 /**
- * io_free_memtype - Release a memory type mapping for a region of memory
+ * memtype_free_io - Release a memory type mapping for a region of memory
  * @start: start (physical address) of the region
  * @end: end (physical address) of the region
  */
-void io_free_memtype(resource_size_t start, resource_size_t end)
+void memtype_free_io(resource_size_t start, resource_size_t end)
 {
-	free_memtype(start, end);
+	memtype_free(start, end);
 }

 int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
 {
 	enum page_cache_mode type = _PAGE_CACHE_MODE_WC;

-	return io_reserve_memtype(start, start + size, &type);
+	return memtype_reserve_io(start, start + size, &type);
 }
 EXPORT_SYMBOL(arch_io_reserve_memtype_wc);

 void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
 {
-	io_free_memtype(start, start + size);
+	memtype_free_io(start, start + size);
 }
 EXPORT_SYMBOL(arch_io_free_memtype_wc);
@@ -871,7 +871,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
  * Change the memory type for the physical address range in kernel identity
  * mapping space if that range is a part of identity map.
  */
-int kernel_map_sync_memtype(u64 base, unsigned long size,
+int memtype_kernel_map_sync(u64 base, unsigned long size,
 			    enum page_cache_mode pcm)
 {
 	unsigned long id_sz;
@@ -901,7 +901,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,

 /*
  * Internal interface to reserve a range of physical memory with prot.
- * Reserved non RAM regions only and after successful reserve_memtype,
+ * Reserved non RAM regions only and after successful memtype_reserve,
  * this func also keeps identity mapping (if any) in sync with this new prot.
  */
 static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
@@ -938,14 +938,14 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 		return 0;
 	}

-	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
+	ret = memtype_reserve(paddr, paddr + size, want_pcm, &pcm);
 	if (ret)
 		return ret;

 	if (pcm != want_pcm) {
 		if (strict_prot ||
 		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
-			free_memtype(paddr, paddr + size);
+			memtype_free(paddr, paddr + size);
 			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
 				cattr_name(want_pcm),
@@ -963,8 +963,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 					cachemode2protval(pcm));
 	}

-	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
-		free_memtype(paddr, paddr + size);
+	if (memtype_kernel_map_sync(paddr, size, pcm) < 0) {
+		memtype_free(paddr, paddr + size);
 		return -EINVAL;
 	}
 	return 0;
@@ -980,7 +980,7 @@ static void free_pfn_range(u64 paddr, unsigned long size)

 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 	if (is_ram == 0)
-		free_memtype(paddr, paddr + size);
+		memtype_free(paddr, paddr + size);
 }

 /*
...
@@ -1801,7 +1801,7 @@ int set_memory_uc(unsigned long addr, int numpages)
 	/*
 	 * for now UC MINUS. see comments in ioremap()
 	 */
-	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+	ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 			      _PAGE_CACHE_MODE_UC_MINUS, NULL);
 	if (ret)
 		goto out_err;
@@ -1813,7 +1813,7 @@ int set_memory_uc(unsigned long addr, int numpages)
 	return 0;

 out_free:
-	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+	memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
 out_err:
 	return ret;
 }
@@ -1839,14 +1839,14 @@ int set_memory_wc(unsigned long addr, int numpages)
 {
 	int ret;

-	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+	ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 		_PAGE_CACHE_MODE_WC, NULL);
 	if (ret)
 		return ret;

 	ret = _set_memory_wc(addr, numpages);
 	if (ret)
-		free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+		memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);

 	return ret;
 }
@@ -1873,7 +1873,7 @@ int set_memory_wb(unsigned long addr, int numpages)
 	if (ret)
 		return ret;

-	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+	memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);

 	return 0;
 }
 EXPORT_SYMBOL(set_memory_wb);
@@ -2014,7 +2014,7 @@ static int _set_pages_array(struct page **pages, int numpages,
 			continue;
 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
-		if (reserve_memtype(start, end, new_type, NULL))
+		if (memtype_reserve(start, end, new_type, NULL))
 			goto err_out;
 	}
@@ -2040,7 +2040,7 @@ static int _set_pages_array(struct page **pages, int numpages,
 			continue;
 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
-		free_memtype(start, end);
+		memtype_free(start, end);
 	}

 	return -EINVAL;
 }
@@ -2089,7 +2089,7 @@ int set_pages_array_wb(struct page **pages, int numpages)
 			continue;
 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
-		free_memtype(start, end);
+		memtype_free(start, end);
 	}

 	return 0;
...