Commit 1dbe77d3 authored by Linus Torvalds

Merge

parents 68d6275b c8712aeb
@@ -9,6 +9,7 @@
 O_TARGET := mm.o
-obj-y    := init.o fault.o ioremap.o extable.o
+obj-y    := init.o fault.o ioremap.o extable.o pageattr.o
+export-objs := pageattr.o
 include $(TOPDIR)/Rules.make
@@ -10,12 +10,13 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
+#include <linux/slab.h>
 #include <asm/io.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/pgtable.h>
 
 static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
 	unsigned long phys_addr, unsigned long flags)
@@ -155,6 +156,7 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
+	area->phys_addr = phys_addr;
 	addr = area->addr;
 	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
 		vfree(addr);
@@ -163,10 +165,71 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
 	return (void *) (offset + (char *)addr);
 }
 
+/**
+ * ioremap_nocache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * on the CPU as well as honouring existing caching rules from things like
+ * the PCI bus. Note that there are other caches and buffers on many
+ * busses. In particular driver authors should read up on PCI writes.
+ *
+ * It's useful if some control registers are in such an area and
+ * write combining or read caching is not desirable.
+ *
+ * Must be freed with iounmap.
+ */
+void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+	void *p = __ioremap(phys_addr, size, _PAGE_PCD);
+	if (!p)
+		return p;
+
+	if (phys_addr + size < virt_to_phys(high_memory)) {
+		struct page *ppage = virt_to_page(__va(phys_addr));
+		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+		BUG_ON(phys_addr+size > (unsigned long)high_memory);
+		BUG_ON(phys_addr + size < phys_addr);
+
+		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
+			iounmap(p);
+			p = NULL;
+		}
+	}
+
+	return p;
+}
+
 void iounmap(void *addr)
 {
-	if (addr > high_memory)
-		return vfree((void *) (PAGE_MASK & (unsigned long) addr));
+	struct vm_struct *p;
+
+	if (addr < high_memory)
+		return;
+	p = remove_kernel_area(addr);
+	if (!p) {
+		printk("__iounmap: bad address %p\n", addr);
+		return;
+	}
+	BUG_ON(p->phys_addr == 0); /* not allocated with ioremap */
+	vmfree_area_pages(VMALLOC_VMADDR(p->addr), p->size);
+	if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
+		change_page_attr(virt_to_page(__va(p->phys_addr)),
+				 p->size >> PAGE_SHIFT,
+				 PAGE_KERNEL);
+	}
+	kfree(p);
 }
 
 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
......
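
For context, here is a minimal usage sketch of the new ioremap_nocache()/iounmap() pair. It is not part of the commit: the EXAMPLE_MMIO_* constants and the example_* names are invented, and the sketch assumes the 2.5-era i386 API shown above.

/*
 * Hypothetical driver sketch -- not part of the commit. EXAMPLE_MMIO_BASE,
 * EXAMPLE_MMIO_SIZE and the example_* names are made up for illustration.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/io.h>

#define EXAMPLE_MMIO_BASE	0xfebf0000UL	/* made-up bus address */
#define EXAMPLE_MMIO_SIZE	0x1000UL

static void *example_regs;

static int __init example_init(void)
{
	/* Uncached mapping; with this commit the kernel linear mapping is
	 * also switched to uncached via change_page_attr() when the range
	 * happens to lie in low RAM. */
	example_regs = ioremap_nocache(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!example_regs)
		return -ENOMEM;
	printk(KERN_INFO "example: reg0 = 0x%08x\n", readl(example_regs));
	return 0;
}

static void __exit example_exit(void)
{
	iounmap(example_regs);		/* must pair with ioremap_nocache */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
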
/*
* Copyright 2002 Andi Kleen, SuSE Labs.
* Thanks to Ben LaHaise for precious feedback.
*/
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>

static inline pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pmd_t *pmd = pmd_offset(pgd, address);
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, address);
}

static struct page *split_large_page(unsigned long address, pgprot_t prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;
	if (!base)
		return NULL;
	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : PAGE_KERNEL);
	}
	return base;
}

static void flush_kernel_map(void *dummy)
{
	/* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
	if (boot_cpu_data.x86_model >= 4)
		asm volatile("wbinvd":::"memory");
	/* Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();
}

static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	set_pte_atomic(kpte, pte);	/* change init_mm */
#ifndef CONFIG_X86_PAE
	{
		struct list_head *l;
		spin_lock(&mmlist_lock);
		list_for_each(l, &init_mm.mmlist) {
			struct mm_struct *mm = list_entry(l, struct mm_struct, mmlist);
			pmd_t *pmd = pmd_offset(pgd_offset(mm, address), address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
		spin_unlock(&mmlist_lock);
	}
#endif
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
	pte_t *linear = (pte_t *)
		pmd_offset(pgd_offset(&init_mm, address), address);
	set_pmd_pte(linear, address,
		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
			    PAGE_KERNEL_LARGE));
}

static int
__change_page_attr(struct page *page, pgprot_t prot, struct page **oldpage)
{
	pte_t *kpte;
	unsigned long address;
	struct page *kpte_page;

#ifdef CONFIG_HIGHMEM
	if (page >= highmem_start_page)
		BUG();
#endif
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
			pte_t old = *kpte;
			pte_t standard = mk_pte(page, PAGE_KERNEL);
			set_pte_atomic(kpte, mk_pte(page, prot));
			if (pte_same(old,standard))
				atomic_inc(&kpte_page->count);
		} else {
			struct page *split = split_large_page(address, prot);
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
		}
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		atomic_dec(&kpte_page->count);
	}

	if (cpu_has_pse && (atomic_read(&kpte_page->count) == 1)) {
		*oldpage = kpte_page;
		revert_page(kpte_page, address);
	}
	return 0;
}

static inline void flush_map(void)
{
#ifdef CONFIG_SMP
	smp_call_function(flush_kernel_map, NULL, 1, 1);
#endif
	flush_kernel_map(NULL);
}

struct deferred_page {
	struct deferred_page *next;
	struct page *fpage;
};
static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * The caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0;
	struct page *fpage;
	int i;

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, page++) {
		fpage = NULL;
		err = __change_page_attr(page, prot, &fpage);
		if (err)
			break;
		if (fpage) {
			struct deferred_page *df;
			df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
			if (!df) {
				flush_map();
				__free_page(fpage);
			} else {
				df->next = df_list;
				df->fpage = fpage;
				df_list = df;
			}
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}

void global_flush_tlb(void)
{
	struct deferred_page *df, *next_df;

	down_read(&init_mm.mmap_sem);
	df = xchg(&df_list, NULL);
	up_read(&init_mm.mmap_sem);
	flush_map();
	for (; df; df = next_df) {
		next_df = df->next;
		if (df->fpage)
			__free_page(df->fpage);
		kfree(df);
	}
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
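
A short sketch of how callers are expected to use this interface may help; it is hypothetical (the example_* helpers are invented) and assumes the i386 declarations this commit adds to <asm/cacheflush.h>: change the attribute, flush with global_flush_tlb() before relying on the new mapping, and revert to PAGE_KERNEL before freeing the page.

/*
 * Hypothetical usage sketch -- not part of the commit. The example_*
 * helpers are invented; the interface is the one exported above.
 */
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

static struct page *example_alloc_uncached_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	/* Make the kernel linear mapping of this page uncacheable... */
	if (change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) < 0) {
		__free_page(page);
		return NULL;
	}
	/* ...and flush stale TLB/cache state before relying on it. */
	global_flush_tlb();
	return page;
}

static void example_free_uncached_page(struct page *page)
{
	/* Revert to the default write-back attribute before freeing. */
	change_page_attr(page, 1, PAGE_KERNEL);
	global_flush_tlb();
	__free_page(page);
}
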
@@ -118,8 +118,8 @@ struct agp_bridge_data {
 	int (*remove_memory) (agp_memory *, off_t, int);
 	agp_memory *(*alloc_by_type) (size_t, int);
 	void (*free_by_type) (agp_memory *);
-	unsigned long (*agp_alloc_page) (void);
-	void (*agp_destroy_page) (unsigned long);
+	void *(*agp_alloc_page) (void);
+	void (*agp_destroy_page) (void *);
 	int (*suspend)(void);
 	void (*resume)(void);
......
#ifndef AGP_H
#define AGP_H 1
/* dummy for now */
#define map_page_into_agp(page)
#define unmap_page_from_agp(page)
#define flush_agp_mappings()
#define flush_agp_cache() mb()
#endif
#ifndef AGP_H
#define AGP_H 1

#include <asm/pgtable.h>

/*
 * Functions to keep the agpgart mappings coherent with the MMU.
 * The GART gives the CPU a physical alias of pages in memory. The alias region is
 * mapped uncacheable. Make sure there are no conflicting mappings
 * with different cachability attributes for the same page. This avoids
 * data corruption on some CPUs.
 */

#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
#define flush_agp_mappings() global_flush_tlb()

/* Could use CLFLUSH here if the cpu supports it. But then it would
   need to be called for each cacheline of the whole page so it may not be
   worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory")

#endif
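
To illustrate the intent of these macros, here is a hypothetical sketch of how a GART driver's page hooks might use them. The example_agp_* names are invented; only map_page_into_agp(), unmap_page_from_agp() and flush_agp_mappings() come from the header above.

/*
 * Hypothetical sketch -- not part of the commit.
 */
#include <linux/mm.h>
#include <asm/agp.h>

static void *example_agp_alloc_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	/* The GART will alias this page uncached, so take the kernel
	 * linear mapping out of write-back mode as well. */
	map_page_into_agp(page);
	return page_address(page);
}

static void example_agp_destroy_page(void *addr)
{
	struct page *page = virt_to_page(addr);
	unmap_page_from_agp(page);
	__free_page(page);
}

A driver would typically batch a number of these calls and then issue a single flush_agp_mappings() (a global TLB flush on i386, with flush_agp_cache() used where a full cache flush is required) before the aperture is used.
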
@@ -15,4 +15,7 @@
 #define flush_icache_page(vma,pg) do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
 
+void global_flush_tlb(void);
+int change_page_attr(struct page *page, int numpages, pgprot_t prot);
+
 #endif /* _I386_CACHEFLUSH_H */
@@ -121,31 +121,7 @@ static inline void * ioremap (unsigned long offset, unsigned long size)
 	return __ioremap(offset, size, 0);
 }
 
-/**
- * ioremap_nocache - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many
- * busses. In paticular driver authors should read up on PCI writes
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- */
-static inline void * ioremap_nocache (unsigned long offset, unsigned long size)
-{
-	return __ioremap(offset, size, _PAGE_PCD);
-}
+extern void * ioremap_nocache (unsigned long offset, unsigned long size);
 
 extern void iounmap(void *addr);
 
 /*
......
@@ -6,6 +6,9 @@
 #define PAGE_SIZE (1UL << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
 
+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
+
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
......
@@ -40,6 +40,7 @@ static inline int pgd_present(pgd_t pgd) { return 1; }
  * hook is made available.
  */
 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
 
 /*
  * (pmds are folded into pgds so this doesnt get actually called,
  * but the define is needed for a generic inline function.)
......
@@ -49,6 +49,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	smp_wmb();
 	ptep->pte_low = pte.pte_low;
 }
+#define set_pte_atomic(pteptr,pteval) \
+		set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
+
 #define set_pmd(pmdptr,pmdval) \
 		set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
 #define set_pgd(pgdptr,pgdval) \
......
@@ -237,6 +237,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pmd_page(pmd) \
 	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
 
+#define pmd_large(pmd) \
+	((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
+
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
......
#ifndef AGP_H
#define AGP_H 1
/* dummy for now */
#define map_page_into_agp(page)
#define unmap_page_from_agp(page)
#define flush_agp_mappings()
#define flush_agp_cache() mb()
#endif
#ifndef AGP_H
#define AGP_H 1
/* dummy for now */
#define map_page_into_agp(page)
#define unmap_page_from_agp(page)
#define flush_agp_mappings()
#define flush_agp_cache() mb()
#endif
#ifndef AGP_H
#define AGP_H 1

#include <asm/cacheflush.h>

/*
 * Functions to keep the agpgart mappings coherent.
 * The GART gives the CPU a physical alias of memory. The alias is
 * mapped uncacheable. Make sure there are no conflicting mappings
 * with different cachability attributes for the same page.
 */

#define map_page_into_agp(page) \
	change_page_attr(page, __pgprot(__PAGE_KERNEL | _PAGE_PCD))
#define unmap_page_from_agp(page) change_page_attr(page, PAGE_KERNEL)
#define flush_agp_mappings() global_flush_tlb()

/* Could use CLFLUSH here if the cpu supports it. But then it would
   need to be called for each cacheline of the whole page so it may not be
   worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory")

#endif
@@ -15,4 +15,7 @@
 #define flush_icache_page(vma,pg) do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
 
+void global_flush_tlb(void);
+int change_page_attr(struct page *page, int numpages, pgprot_t prot);
+
 #endif /* _I386_CACHEFLUSH_H */
@@ -13,6 +13,7 @@ struct vm_struct {
 	unsigned long flags;
 	void * addr;
 	unsigned long size;
+	unsigned long phys_addr;
 	struct vm_struct * next;
 };
@@ -23,6 +24,8 @@ extern long vread(char *buf, char *addr, unsigned long count);
 extern void vmfree_area_pages(unsigned long address, unsigned long size);
 extern int vmalloc_area_pages(unsigned long address, unsigned long size,
                               int gfp_mask, pgprot_t prot);
+extern struct vm_struct *remove_kernel_area(void *addr);
+
 /*
  * Various ways to allocate pages.
  */
......
@@ -195,6 +195,7 @@ struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
 		if (addr > VMALLOC_END-size)
 			goto out;
 	}
+	area->phys_addr = 0;
 	area->flags = flags;
 	area->addr = (void *)addr;
 	area->size = size;
@@ -209,9 +210,25 @@ struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
 	return NULL;
 }
 
+struct vm_struct *remove_kernel_area(void *addr)
+{
+	struct vm_struct **p, *tmp;
+	write_lock(&vmlist_lock);
+	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
+		if (tmp->addr == addr) {
+			*p = tmp->next;
+			write_unlock(&vmlist_lock);
+			return tmp;
+		}
+	}
+	write_unlock(&vmlist_lock);
+	return NULL;
+}
+
 void vfree(void * addr)
 {
-	struct vm_struct **p, *tmp;
+	struct vm_struct *tmp;
 
 	if (!addr)
 		return;
@@ -219,17 +236,12 @@ void vfree(void * addr)
 		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
 		return;
 	}
-	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if (tmp->addr == addr) {
-			*p = tmp->next;
-			vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
-			write_unlock(&vmlist_lock);
-			kfree(tmp);
-			return;
-		}
-	}
-	write_unlock(&vmlist_lock);
+	tmp = remove_kernel_area(addr);
+	if (tmp) {
+		vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
+		kfree(tmp);
+		return;
+	}
 	printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
 }
......