Commit 0c3c8a18 authored by Suresh Siddha's avatar Suresh Siddha Committed by Ingo Molnar

x86, PAT: Remove duplicate memtype reserve in devmem mmap

/dev/mem mmap code was doing memtype reserve/free for a while now.
Recently we added memtype tracking in remap_pfn_range, and /dev/mem mmap
uses it indirectly. So, we don't need separate tracking in /dev/mem code
any more. That means another ~100 lines of code removed :-).
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
LKML-Reference: <20090409212709.085210000@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b6ff32d9
...@@ -18,9 +18,5 @@ extern int free_memtype(u64 start, u64 end); ...@@ -18,9 +18,5 @@ extern int free_memtype(u64 start, u64 end);
extern int kernel_map_sync_memtype(u64 base, unsigned long size, extern int kernel_map_sync_memtype(u64 base, unsigned long size,
unsigned long flag); unsigned long flag);
extern void map_devmem(unsigned long pfn, unsigned long size,
struct pgprot vma_prot);
extern void unmap_devmem(unsigned long pfn, unsigned long size,
struct pgprot vma_prot);
#endif /* _ASM_X86_PAT_H */ #endif /* _ASM_X86_PAT_H */
...@@ -536,9 +536,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) ...@@ -536,9 +536,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t *vma_prot) unsigned long size, pgprot_t *vma_prot)
{ {
u64 offset = ((u64) pfn) << PAGE_SHIFT; unsigned long flags = _PAGE_CACHE_WB;
unsigned long flags = -1;
int retval;
if (!range_is_allowed(pfn, size)) if (!range_is_allowed(pfn, size))
return 0; return 0;
...@@ -566,65 +564,11 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, ...@@ -566,65 +564,11 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
} }
#endif #endif
/*
* With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
*
* Without O_SYNC, we want to get
* - WB for WB-able memory and no other conflicting mappings
* - UC_MINUS for non-WB-able memory with no other conflicting mappings
* - Inherit from conflicting mappings otherwise
*/
if (flags != -1) {
retval = reserve_memtype(offset, offset + size, flags, NULL);
} else {
retval = reserve_memtype(offset, offset + size,
_PAGE_CACHE_WB, &flags);
}
if (retval < 0)
return 0;
if (((pfn < max_low_pfn_mapped) ||
(pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
free_memtype(offset, offset + size);
printk(KERN_INFO
"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
current->comm, current->pid,
cattr_name(flags),
offset, (unsigned long long)(offset + size));
return 0;
}
*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
flags); flags);
return 1; return 1;
} }
/*
 * map_devmem - reserve a PAT memtype for a /dev/mem mapping
 * @pfn: first page frame of the physical range being mapped
 * @size: length of the range in bytes
 * @vma_prot: page protection requested by the mapper; only the
 *            cache-attribute bits (_PAGE_CACHE_MASK) are used
 *
 * Reserves the requested cache attribute for the physical range via
 * reserve_memtype().  If the tracker hands back a different attribute
 * than requested, the mismatch is logged; the reservation is kept
 * either way (no free on mismatch here).
 */
void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
/* Cache-attribute bits the caller asked for. */
unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
/* Physical base address of the range. */
u64 addr = (u64)pfn << PAGE_SHIFT;
unsigned long flags;
/* NOTE(review): return value of reserve_memtype() is ignored here;
 * on failure 'flags' may be unreliable — TODO confirm against the
 * reserve_memtype() contract. */
reserve_memtype(addr, addr + size, want_flags, &flags);
if (flags != want_flags) {
printk(KERN_INFO
"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
current->comm, current->pid,
cattr_name(want_flags),
addr, (unsigned long long)(addr + size),
cattr_name(flags));
}
}
/*
 * unmap_devmem - release the PAT memtype reservation for a /dev/mem mapping
 * @pfn: first page frame of the physical range being unmapped
 * @size: length of the range in bytes
 * @vma_prot: unused; kept to mirror map_devmem()'s signature
 *
 * Frees the memtype reservation taken by map_devmem() for the range.
 */
void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
u64 addr = (u64)pfn << PAGE_SHIFT;
free_memtype(addr, addr + size);
}
/* /*
* Change the memory type for the physical address range in kernel identity * mapping space if that range is a part of identity map. * Change the memory type for the physical address range in kernel identity * mapping space if that range is a part of identity map.
* mapping space if that range is a part of identity map. * mapping space if that range is a part of identity map.
...@@ -662,8 +606,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, ...@@ -662,8 +606,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
{ {
int is_ram = 0; int is_ram = 0;
int ret; int ret;
unsigned long flags;
unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
unsigned long flags = want_flags;
is_ram = pat_pagerange_is_ram(paddr, paddr + size); is_ram = pat_pagerange_is_ram(paddr, paddr + size);
......
...@@ -301,33 +301,7 @@ static inline int private_mapping_ok(struct vm_area_struct *vma) ...@@ -301,33 +301,7 @@ static inline int private_mapping_ok(struct vm_area_struct *vma)
} }
#endif #endif
/*
 * Default no-op map_devmem(): weak so an architecture (e.g. x86 PAT)
 * can override it with real memtype tracking for /dev/mem mappings.
 */
void __attribute__((weak))
map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
/* nothing. architectures can override. */
}
/*
 * Default no-op unmap_devmem(): weak counterpart of map_devmem(),
 * overridable by architectures that track /dev/mem memtypes.
 */
void __attribute__((weak))
unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
/* nothing. architectures can override. */
}
/*
 * VMA open callback for /dev/mem mappings: re-registers the memtype
 * for the mapped range (runs when a VMA is duplicated, e.g. on fork).
 */
static void mmap_mem_open(struct vm_area_struct *vma)
{
/* vm_pgoff of a /dev/mem mapping is the physical pfn being mapped. */
map_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
/*
 * VMA close callback for /dev/mem mappings: drops the memtype
 * registration taken in mmap_mem_open()/mmap_mem().
 */
static void mmap_mem_close(struct vm_area_struct *vma)
{
unmap_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
static struct vm_operations_struct mmap_mem_ops = { static struct vm_operations_struct mmap_mem_ops = {
.open = mmap_mem_open,
.close = mmap_mem_close,
#ifdef CONFIG_HAVE_IOREMAP_PROT #ifdef CONFIG_HAVE_IOREMAP_PROT
.access = generic_access_phys .access = generic_access_phys
#endif #endif
...@@ -362,7 +336,6 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma) ...@@ -362,7 +336,6 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
vma->vm_pgoff, vma->vm_pgoff,
size, size,
vma->vm_page_prot)) { vma->vm_page_prot)) {
unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
return -EAGAIN; return -EAGAIN;
} }
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment