Commit 4707a341 authored by Thierry Reding

/dev/mem: Use more consistent data types

The xlate_dev_{kmem,mem}_ptr() functions take either a physical address
or a kernel virtual address, so data types should be phys_addr_t and
void *. They both return a kernel virtual address which is only ever
used in calls to copy_{from,to}_user(), so make variables that store it
void * rather than char * for consistency.

Also only define a weak unxlate_dev_mem_ptr() function if architectures
haven't overridden them in the asm/io.h header file.
Signed-off-by: Thierry Reding <treding@nvidia.com>
parent dc012014
...@@ -13,9 +13,10 @@ ...@@ -13,9 +13,10 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/pci_io.h> #include <asm/pci_io.h>
void *xlate_dev_mem_ptr(unsigned long phys);
#define xlate_dev_mem_ptr xlate_dev_mem_ptr #define xlate_dev_mem_ptr xlate_dev_mem_ptr
void unxlate_dev_mem_ptr(unsigned long phys, void *addr); void *xlate_dev_mem_ptr(phys_addr_t phys);
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
/* /*
* Convert a virtual cached pointer to an uncached pointer * Convert a virtual cached pointer to an uncached pointer
......
...@@ -176,7 +176,7 @@ static int is_swapped(unsigned long addr) ...@@ -176,7 +176,7 @@ static int is_swapped(unsigned long addr)
* For swapped prefix pages a new buffer is returned that contains a copy of * For swapped prefix pages a new buffer is returned that contains a copy of
* the absolute memory. The buffer size is maximum one page large. * the absolute memory. The buffer size is maximum one page large.
*/ */
void *xlate_dev_mem_ptr(unsigned long addr) void *xlate_dev_mem_ptr(phys_addr_t addr)
{ {
void *bounce = (void *) addr; void *bounce = (void *) addr;
unsigned long size; unsigned long size;
...@@ -197,7 +197,7 @@ void *xlate_dev_mem_ptr(unsigned long addr) ...@@ -197,7 +197,7 @@ void *xlate_dev_mem_ptr(unsigned long addr)
/* /*
* Free converted buffer for /dev/mem access (if necessary) * Free converted buffer for /dev/mem access (if necessary)
*/ */
void unxlate_dev_mem_ptr(unsigned long addr, void *buf) void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{ {
if ((void *) addr != buf) if ((void *) addr != buf)
free_page((unsigned long) buf); free_page((unsigned long) buf);
......
...@@ -310,8 +310,8 @@ BUILDIO(b, b, char) ...@@ -310,8 +310,8 @@ BUILDIO(b, b, char)
BUILDIO(w, w, short) BUILDIO(w, w, short)
BUILDIO(l, , int) BUILDIO(l, , int)
extern void *xlate_dev_mem_ptr(unsigned long phys); extern void *xlate_dev_mem_ptr(phys_addr_t phys);
extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr); extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
unsigned long prot_val); unsigned long prot_val);
......
...@@ -327,7 +327,7 @@ EXPORT_SYMBOL(iounmap); ...@@ -327,7 +327,7 @@ EXPORT_SYMBOL(iounmap);
* Convert a physical pointer to a virtual kernel pointer for /dev/mem * Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access * access
*/ */
void *xlate_dev_mem_ptr(unsigned long phys) void *xlate_dev_mem_ptr(phys_addr_t phys)
{ {
void *addr; void *addr;
unsigned long start = phys & PAGE_MASK; unsigned long start = phys & PAGE_MASK;
...@@ -343,7 +343,7 @@ void *xlate_dev_mem_ptr(unsigned long phys) ...@@ -343,7 +343,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
return addr; return addr;
} }
void unxlate_dev_mem_ptr(unsigned long phys, void *addr) void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{ {
if (page_is_ram(phys >> PAGE_SHIFT)) if (page_is_ram(phys >> PAGE_SHIFT))
return; return;
......
...@@ -84,9 +84,12 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) ...@@ -84,9 +84,12 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
} }
#endif #endif
void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr) #ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{ {
} }
#endif
/* /*
* This funcion reads the *physical* memory. The f_pos points directly to the * This funcion reads the *physical* memory. The f_pos points directly to the
...@@ -97,7 +100,7 @@ static ssize_t read_mem(struct file *file, char __user *buf, ...@@ -97,7 +100,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
{ {
phys_addr_t p = *ppos; phys_addr_t p = *ppos;
ssize_t read, sz; ssize_t read, sz;
char *ptr; void *ptr;
if (p != *ppos) if (p != *ppos)
return 0; return 0;
...@@ -400,7 +403,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf, ...@@ -400,7 +403,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
* uncached, then it must also be accessed uncached * uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur * by the kernel or data corruption may occur
*/ */
kbuf = xlate_dev_kmem_ptr((char *)p); kbuf = xlate_dev_kmem_ptr((void *)p);
if (copy_to_user(buf, kbuf, sz)) if (copy_to_user(buf, kbuf, sz))
return -EFAULT; return -EFAULT;
...@@ -461,7 +464,7 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf, ...@@ -461,7 +464,7 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
#endif #endif
while (count > 0) { while (count > 0) {
char *ptr; void *ptr;
sz = size_inside_page(p, count); sz = size_inside_page(p, count);
...@@ -470,7 +473,7 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf, ...@@ -470,7 +473,7 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
* it must also be accessed uncached by the kernel or data * it must also be accessed uncached by the kernel or data
* corruption may occur. * corruption may occur.
*/ */
ptr = xlate_dev_kmem_ptr((char *)p); ptr = xlate_dev_kmem_ptr((void *)p);
copied = copy_from_user(ptr, buf, sz); copied = copy_from_user(ptr, buf, sz);
if (copied) { if (copied) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment