Commit 34dc307a authored by Russell King

2.5.14 updates for the new memory management pfn() macros.

Also fix ARM720T support: this CPU has unified writethrough caches only, so
we can't use the Harvard cache operations when copying pages, and we don't
have to evict cache entries during copypage.
parent 3bcf06b7
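The core of the pfn() scheme is plain offset arithmetic between mem_map indices and page frame numbers, with PFN 0 corresponding to physical address 0. The following is a minimal userspace sketch (not kernel code) of the flat-memory, non-discontigmem macros added below in include/asm-arm/memory.h; the PHYS_OFFSET value and the mem_map/max_mapnr definitions here are made-up stand-ins for the real kernel symbols, and pfn_to_page() is written as array indexing purely to keep the toy version well defined.

/* Sketch only -- illustrative values, not a real platform. */
#include <assert.h>

#define PAGE_SHIFT      12
#define PHYS_OFFSET     0xc0000000UL            /* example: RAM at 3GB physical */
#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)

struct page { unsigned long flags; };
static struct page mem_map[1024];               /* one entry per page of RAM */
static unsigned long max_mapnr = PHYS_PFN_OFFSET + 1024;

/* same shape as the new macros below */
#define page_to_pfn(p)      ((unsigned long)((p) - mem_map) + PHYS_PFN_OFFSET)
#define pfn_to_page(pfn)    (&mem_map[(pfn) - PHYS_PFN_OFFSET])
#define pfn_valid(pfn)      ((pfn) >= PHYS_PFN_OFFSET && (pfn) < max_mapnr)
#define page_to_phys(page)  (page_to_pfn(page) << PAGE_SHIFT)

int main(void)
{
        unsigned long pfn = PHYS_PFN_OFFSET + 5;        /* sixth page of RAM */

        assert(pfn_valid(pfn));
        assert(page_to_pfn(pfn_to_page(pfn)) == pfn);   /* round trip */
        assert(page_to_phys(pfn_to_page(pfn)) == PHYS_OFFSET + 5 * 4096);
        return 0;
}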
@@ -29,15 +29,21 @@ obj-$(CONFIG_DISCONTIGMEM) += discontig.o
 # Select the processor-specific files
 p-$(CONFIG_CPU_26) += proc-arm2,3.o
+# ARMv3
 p-$(CONFIG_CPU_ARM610) += proc-arm6,7.o tlb-v3.o copypage-v3.o
 p-$(CONFIG_CPU_ARM710) += proc-arm6,7.o tlb-v3.o copypage-v3.o
-p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o copypage-v4.o abort-lv4t.o
-p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wb.o copypage-v4.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wb.o copypage-v4.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM926T) += proc-arm926.o tlb-v4wb.o copypage-v4.o abort-ev5ej.o
-p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wb.o copypage-v4.o abort-ev4t.o
-p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o copypage-v4.o copypage-v4mc.o abort-ev4.o minicache.o
-p-$(CONFIG_CPU_SA1100) += proc-sa110.o tlb-v4wb.o copypage-v4.o copypage-v4mc.o abort-ev4.o minicache.o
+# ARMv4
+p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o copypage-v4wt.o abort-lv4t.o
+p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wb.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wb.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wb.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o copypage-v4wb.o abort-ev4.o minicache.o
+p-$(CONFIG_CPU_SA1100) += proc-sa110.o tlb-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
+# ARMv5
+p-$(CONFIG_CPU_ARM926T) += proc-arm926.o tlb-v4wb.o copypage-v4wb.o abort-ev5ej.o
 p-$(CONFIG_CPU_XSCALE) += proc-xscale.o tlb-v4wb.o copypage-v5te.o abort-ev4t.o minicache.o
 obj-y += $(sort $(p-y))
@@ -52,7 +52,8 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
 		goto no_page;

 	*dma_handle = page_to_bus(page);
-	ret = __ioremap(page_to_phys(page), size, 0);
+	ret = __ioremap(page_to_pfn(page) << PAGE_SHIFT, size, 0,
+			PAGE_SIZE << order);
 	if (!ret)
 		goto no_remap;
@@ -26,7 +26,7 @@
  * instruction. If your processor does not supply this, you have to write your
  * own copy_user_page that does the right thing.
  */
-ENTRY(v4_copy_user_page)
+ENTRY(v4wb_copy_user_page)
 	stmfd sp!, {r4, lr} @ 2
 	mov r2, #PAGE_SZ/64 @ 1
 	ldmia r1!, {r3, r4, ip, lr} @ 4
@@ -51,7 +51,7 @@ ENTRY(v4_copy_user_page)
  *
  * Same story as above.
  */
-ENTRY(v4_clear_user_page)
+ENTRY(v4wb_clear_user_page)
 	str lr, [sp, #-4]!
 	mov r1, #PAGE_SZ/64 @ 1
 	mov r2, #0 @ 1
@@ -71,7 +71,7 @@ ENTRY(v4_clear_user_page)
 	.section ".text.init", #alloc, #execinstr
-ENTRY(v4_user_fns)
-	.long v4_clear_user_page
-	.long v4_copy_user_page
+ENTRY(v4wb_user_fns)
+	.long v4wb_clear_user_page
+	.long v4wb_copy_user_page
/*
* linux/arch/arm/lib/copypage-v4.S
*
* Copyright (C) 1995-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ASM optimised string functions
*
* This is for CPUs with a writethrough cache and 'flush ID cache' is
* the only supported cache operation.
*/
#include <linux/linkage.h>
#include <asm/constants.h>
.text
.align 5
/*
* ARMv4 optimised copy_user_page
*
* Since we have writethrough caches, we don't have to worry about
* dirty data in the cache. However, we do have to ensure that
* subsequent reads are up to date.
*/
ENTRY(v4wt_copy_user_page)
stmfd sp!, {r4, lr} @ 2
mov r2, #PAGE_SZ/64 @ 1
ldmia r1!, {r3, r4, ip, lr} @ 4
1: stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4+1
stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4
stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4
subs r2, r2, #1 @ 1
stmia r0!, {r3, r4, ip, lr} @ 4
ldmneia r1!, {r3, r4, ip, lr} @ 4
bne 1b @ 1
mcr p15, 0, r2, c7, c7, 0 @ flush ID cache
ldmfd sp!, {r4, pc} @ 3
.align 5
/*
* ARMv4 optimised clear_user_page
*
* Same story as above.
*/
ENTRY(v4wt_clear_user_page)
str lr, [sp, #-4]!
mov r1, #PAGE_SZ/64 @ 1
mov r2, #0 @ 1
mov r3, #0 @ 1
mov ip, #0 @ 1
mov lr, #0 @ 1
1: stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
subs r1, r1, #1 @ 1
bne 1b @ 1
mcr p15, 0, r2, c7, c7, 0 @ flush ID cache
ldr pc, [sp], #4
.section ".text.init", #alloc, #execinstr
ENTRY(v4wt_user_fns)
.long v4wt_clear_user_page
.long v4wt_copy_user_page
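In C terms, the two new v4wt entry points above reduce to a plain copy or clear followed by a single "flush ID cache" coprocessor write: with a writethrough cache there is never dirty data to write back, so invalidating is enough to keep later reads coherent. A rough, illustrative equivalent (not the kernel code; flush_id_cache() is a hypothetical stand-in for the mcr p15, 0, rX, c7, c7, 0 at the end of each routine):

#include <string.h>

#define PAGE_SZ 4096

/* hypothetical stand-in for "mcr p15, 0, rX, c7, c7, 0" (flush ID cache) */
static void flush_id_cache(void)
{
}

static void v4wt_copy_user_page_c(void *to, const void *from)
{
        memcpy(to, from, PAGE_SZ);      /* the assembly does this with ldmia/stmia bursts */
        flush_id_cache();               /* discard stale lines; nothing to clean */
}

static void v4wt_clear_user_page_c(void *addr)
{
        memset(addr, 0, PAGE_SZ);
        flush_id_cache();
}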
@@ -557,5 +557,5 @@ __arm1020_proc_info:
 	.long cpu_arm1020_info
 	.long arm1020_processor_functions
 	.long v4wbi_tlb_fns
-	.long v4_user_fns
+	.long v4wb_user_fns
 	.size __arm1020_proc_info, . - __arm1020_proc_info

@@ -261,5 +261,5 @@ __arm720_proc_info:
 	.long cpu_arm720_info @ info
 	.long arm720_processor_functions
 	.long v4_tlb_fns
-	.long v4_user_fns
+	.long v4wt_user_fns
 	.size __arm720_proc_info, . - __arm720_proc_info

@@ -543,5 +543,5 @@ __arm920_proc_info:
 	.long cpu_arm920_info
 	.long arm920_processor_functions
 	.long v4wbi_tlb_fns
-	.long v4_user_fns
+	.long v4wb_user_fns
 	.size __arm920_proc_info, . - __arm920_proc_info

@@ -544,5 +544,5 @@ __arm922_proc_info:
 	.long cpu_arm922_info
 	.long arm922_processor_functions
 	.long v4wbi_tlb_fns
-	.long v4_user_fns
+	.long v4wb_user_fns
 	.size __arm922_proc_info, . - __arm922_proc_info

@@ -542,5 +542,5 @@ __arm926_proc_info:
 	.long cpu_arm926_info
 	.long arm926_processor_functions
 	.long v4wbi_tlb_fns
-	.long v4_user_fns
+	.long v4wb_user_fns
 	.size __arm926_proc_info, . - __arm926_proc_info

@@ -611,6 +611,7 @@ cpu_elf_name:
 	.section ".proc.info", #alloc, #execinstr
+#ifdef CONFIG_CPU_SA110
 	.type __sa110_proc_info,#object
 __sa110_proc_info:
 	.long 0x4401a100
@@ -623,9 +624,11 @@ __sa110_proc_info:
 	.long cpu_sa110_info
 	.long sa110_processor_functions
 	.long v4wb_tlb_fns
-	.long v4_user_fns
+	.long v4wb_user_fns
 	.size __sa110_proc_info, . - __sa110_proc_info
+#endif
+#ifdef CONFIG_CPU_SA1100
 	.type __sa1100_proc_info,#object
 __sa1100_proc_info:
 	.long 0x4401a110
@@ -655,3 +658,4 @@ __sa1110_proc_info:
 	.long v4wb_tlb_fns
 	.long v4_mc_user_fns
 	.size __sa1110_proc_info, . - __sa1110_proc_info
+#endif
@@ -55,14 +55,13 @@ ENTRY(v4_flush_user_tlb_range)
 	eors r3, ip, r3 @ == mm ?
 	movne pc, lr @ no, we dont do anything
 	vma_vm_flags ip, r2
+.v4_flush_kern_tlb_range:
 	bic r0, r0, #0x0ff
 	bic r0, r0, #0xf00
-1:	mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
+1:	mcr p15, 0, r0, c8, c7, 1 @ invalidate TLB entry
 	add r0, r0, #PAGE_SZ
 	cmp r0, r1
 	blo 1b
-	tst ip, #VM_EXEC
-	mcrne p15, 0, r3, c8, c5, 0 @ invalidate I TLB
 	mov pc, lr
 /*
@@ -80,9 +79,8 @@ ENTRY(v4_flush_user_tlb_page)
 	teq r2, r3 @ equal
 	movne pc, lr @ no
 	vma_vm_flags r2, r1
-	mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
-	tst r2, #VM_EXEC
-	mcrne p15, 0, r3, c8, c5, 0 @ invalidate I TLB
+.v4_flush_kern_tlb_page:
+	mcr p15, 0, r0, c8, c7, 1 @ invalidate TLB entry
 	mov pc, lr
 /*
@@ -94,16 +92,8 @@ ENTRY(v4_flush_user_tlb_page)
  * - start - virtual address (may not be aligned)
  * - end - virtual address (may not be aligned)
  */
-	.align 5
-ENTRY(v4_flush_kern_tlb_range)
-	bic r0, r0, #0x0ff
-	bic r0, r0, #0xf00
-1:	mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
-	add r0, r0, #PAGE_SZ
-	cmp r0, r1
-	blo 1b
-	mcr p15, 0, r3, c8, c5, 0 @ invalidate I TLB
-	mov pc, lr
+	.globl v4_flush_kern_tlb_range
+	.equ v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range
 /*
@@ -115,9 +105,8 @@ ENTRY(v4_flush_kern_tlb_range)
  *
  * - kaddr - Kernel virtual memory address
  */
-ENTRY(v4_flush_kern_tlb_page)
-	mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
-	mov pc, lr
+	.globl v4_flush_kern_tlb_page
+	.equ v4_flush_kern_tlb_page, .v4_flush_kern_tlb_page
 	.section ".text.init", #alloc, #execinstr
@@ -120,9 +120,10 @@
 	(((unsigned long)(addr) - PAGE_OFFSET) >> NODE_MAX_MEM_SHIFT)
 /*
- * Given a physical address, convert it to a node id.
+ * Given a page frame number, convert it to a node id.
  */
-#define PHYS_TO_NID(addr) KVADDR_TO_NID(__phys_to_virt(addr))
+#define PFN_TO_NID(pfn) \
+	(((pfn) - PHYS_PFN_OFFSET) >> (NODE_MAX_MEM_SHIFT - PAGE_SHIFT))
 /*
  * Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
@@ -131,30 +132,15 @@
 #define ADDR_TO_MAPBASE(kaddr) \
 	NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(kaddr)))
+#define PFN_TO_MAPBASE(pfn) NODE_MEM_MAP(PFN_TO_NID(pfn))
 /*
  * Given a kaddr, LOCAL_MAR_NR finds the owning node of the memory
  * and returns the index corresponding to the appropriate page in the
  * node's mem_map.
  */
-#define LOCAL_MAP_NR(kaddr) \
-	(((unsigned long)(kaddr)-LOCAL_BASE_ADDR((kaddr))) >> PAGE_SHIFT)
-/*
- * Given a kaddr, virt_to_page returns a pointer to the corresponding
- * mem_map entry.
- */
-#define virt_to_page(kaddr) \
-	(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
-/*
- * VALID_PAGE returns a non-zero value if given page pointer is valid.
- * This assumes all node's mem_maps are stored within the node they refer to.
- */
-#define VALID_PAGE(page) \
-({ unsigned int node = KVADDR_TO_NID(page); \
-   ( (node < NR_NODES) && \
-     ((unsigned)((page) - NODE_MEM_MAP(node)) < NODE_DATA(node)->node_size) ); \
-})
+#define LOCAL_MAP_NR(addr) \
+	(((unsigned long)(addr) & (NODE_MAX_MEM_SIZE - 1)) >> PAGE_SHIFT)
 /*
  * The PS7211 allows up to 256MB max per DRAM bank, but the EDB7211
@@ -167,40 +153,13 @@
 #define NODE_MAX_MEM_SHIFT	24
 #define NODE_MAX_MEM_SIZE	(1<<NODE_MAX_MEM_SHIFT)
-/*
- * Given a mem_map_t, LOCAL_MAP_BASE finds the owning node for the
- * physical page and returns the kaddr for the mem_map of that node.
- */
-#define LOCAL_MAP_BASE(page) \
-	NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(page)))
-/*
- * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
- * and returns the kaddr corresponding to first physical page in the
- * node's mem_map.
- */
-#define LOCAL_BASE_ADDR(kaddr) ((unsigned long)(kaddr) & ~(NODE_MAX_MEM_SIZE-1))
-/*
- * With discontigmem, the conceptual mem_map array starts from PAGE_OFFSET.
- * Given a kaddr, MAP_NR returns the appropriate global mem_map index so
- * it matches the corresponding node's local mem_map.
- */
-#define MAP_NR(kaddr) (LOCAL_MAP_NR((kaddr)) + \
-	(((unsigned long)ADDR_TO_MAPBASE((kaddr)) - PAGE_OFFSET) / \
-	sizeof(mem_map_t)))
 #else
-#define PHYS_TO_NID(addr) (0)
+#define PFN_TO_NID(pfn) (0)
 #endif /* CONFIG_DISCONTIGMEM */
 #endif /* CONFIG_ARCH_EDB7211 */
-#ifndef PHYS_TO_NID
-#define PHYS_TO_NID(addr) (0)
-#endif
 #endif
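For the EDB7211 values above (NODE_MAX_MEM_SHIFT = 24, and assuming 4KB pages so PAGE_SHIFT = 12), each node covers 2^(24-12) = 4096 page frames, so PFN_TO_NID() is just the RAM-relative pfn shifted right by 12. A small standalone check, using a made-up PHYS_PFN_OFFSET purely for illustration:

#include <assert.h>

#define PAGE_SHIFT              12
#define NODE_MAX_MEM_SHIFT      24
#define PHYS_PFN_OFFSET         0xc0000UL       /* illustrative value only */

#define PFN_TO_NID(pfn) \
        (((pfn) - PHYS_PFN_OFFSET) >> (NODE_MAX_MEM_SHIFT - PAGE_SHIFT))

int main(void)
{
        assert(PFN_TO_NID(PHYS_PFN_OFFSET + 0) == 0);           /* first page of node 0 */
        assert(PFN_TO_NID(PHYS_PFN_OFFSET + 4095) == 0);        /* last page of node 0 */
        assert(PFN_TO_NID(PHYS_PFN_OFFSET + 4096) == 1);        /* first page of node 1 */
        return 0;
}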
@@ -79,49 +79,36 @@
 /*
  * Given a kernel address, find the home node of the underlying memory.
  */
-#define KVADDR_TO_NID(addr) \
-	(((unsigned long)(addr) - 0xc0000000) >> 27)
+#define KVADDR_TO_NID(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> 27)
 /*
- * Given a physical address, convert it to a node id.
+ * Given a page frame number, convert it to a node id.
  */
-#define PHYS_TO_NID(addr) KVADDR_TO_NID(__phys_to_virt(addr))
+#define PFN_TO_NID(pfn) (((pfn) - PHYS_PFN_OFFSET) >> (27 - PAGE_SHIFT))
 /*
  * Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
- * and returns the mem_map of that node.
+ * and return the mem_map of that node.
  */
-#define ADDR_TO_MAPBASE(kaddr) \
-	NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(kaddr)))
+#define ADDR_TO_MAPBASE(kaddr) NODE_MEM_MAP(KVADDR_TO_NID(kaddr))
+/*
+ * Given a page frame number, find the owning node of the memory
+ * and return the mem_map of that node.
+ */
+#define PFN_TO_MAPBASE(pfn) NODE_MEM_MAP(PFN_TO_NID(pfn))
 /*
  * Given a kaddr, LOCAL_MEM_MAP finds the owning node of the memory
  * and returns the index corresponding to the appropriate page in the
  * node's mem_map.
  */
-#define LOCAL_MAP_NR(kvaddr) \
-	(((unsigned long)(kvaddr) & 0x07ffffff) >> PAGE_SHIFT)
-/*
- * Given a kaddr, virt_to_page returns a pointer to the corresponding
- * mem_map entry.
- */
-#define virt_to_page(kaddr) \
-	(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
-/*
- * VALID_PAGE returns a non-zero value if given page pointer is valid.
- * This assumes all node's mem_maps are stored within the node they refer to.
- */
-#define VALID_PAGE(page) \
-({ unsigned int node = KVADDR_TO_NID(page); \
-   ( (node < NR_NODES) && \
-     ((unsigned)((page) - NODE_MEM_MAP(node)) < NODE_DATA(node)->node_size) ); \
-})
+#define LOCAL_MAP_NR(addr) \
+	(((unsigned long)(addr) & 0x07ffffff) >> PAGE_SHIFT)
 #else
-#define PHYS_TO_NID(addr) (0)
+#define PFN_TO_NID(addr) (0)
 #endif
@@ -158,7 +158,8 @@
 *
 * We have the following to choose from:
 *	v3		- ARMv3
-*	v4		- ARMv4 without minicache
+*	v4wt		- ARMv4 with writethrough cache, without minicache
+*	v4wb		- ARMv4 with writeback cache, without minicache
 *	v4_mc		- ARMv4 with minicache
 *	v5te_mc		- ARMv5TE with minicache
 */
@@ -173,13 +174,21 @@
 # endif
 #endif
-#if defined(CONFIG_CPU_ARM720T) || defined(CONFIG_CPU_ARM920T) || \
-    defined(CONFIG_CPU_ARM922T) || defined(CONFIG_CPU_ARM926T) || \
-    defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_ARM1020)
+#if defined(CONFIG_CPU_ARM720T)
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER v4wt
+# endif
+#endif
+#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
+    defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_SA110) || \
+    defined(CONFIG_CPU_ARM1020)
 # ifdef _USER
 #  define MULTI_USER 1
 # else
-#  define _USER v4
+#  define _USER v4wb
 # endif
 #endif
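The ARM720T now gets its own block because, after the split into v4wt and v4wb, it is the only ARMv4 part here that wants the writethrough flavour. The #ifdef dance follows the usual ARM single-vs-multi selection pattern: if exactly one flavour is configured, _USER names it and calls can bind directly to, say, v4wt_copy_user_page; if a second block also matches, _USER is already defined, MULTI_USER is set instead, and calls go through a function table that the proc_info entries (v4wt_user_fns, v4wb_user_fns, v4_mc_user_fns, ...) supply at boot. A generic illustration of that pattern, using illustrative names rather than the kernel's own glue macros:

/* Illustration only -- a generic compile-time vs. run-time dispatch selector. */
#define __glue1(x,y)    x##y
#define __glue(x,y)     __glue1(x,y)

struct user_fns {
        void (*clear_user_page)(void *page, unsigned long vaddr);
        void (*copy_user_page)(void *to, const void *from, unsigned long vaddr);
};

#ifdef MULTI_USER
extern struct user_fns cpu_user;        /* filled in from the proc_info entry at boot */
#define do_clear_user_page      cpu_user.clear_user_page
#define do_copy_user_page       cpu_user.copy_user_page
#else
/* _USER is a token such as v4wt or v4wb; this pastes e.g. v4wt_copy_user_page */
#define do_clear_user_page      __glue(_USER, _clear_user_page)
#define do_copy_user_page       __glue(_USER, _copy_user_page)
#endif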
@@ -269,24 +269,6 @@ extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
 extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
 extern void consistent_sync(void *vaddr, size_t size, int rw);
-/*
- * Change "struct page" to physical address.
- */
-#ifdef CONFIG_DISCONTIGMEM
-#define page_to_phys(page) \
-	((((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
-	+ page_zone(page)->zone_start_paddr)
-#else
-#define page_to_phys(page) \
-	(PHYS_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#endif
-/*
- * We should really eliminate virt_to_bus() here - it's depreciated.
- */
-#define page_to_bus(page) \
-	(virt_to_bus(page_address(page)))
 /*
  * can the hardware map this into one segment or not, given no other
  * constraints.
 /*
  * linux/include/asm-arm/memory.h
  *
- * Copyright (C) 2000 Russell King
+ * Copyright (C) 2000-2002 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
  * Note: this file should not be included by non-asm/.h files
- *
- * Modifications:
  */
 #ifndef __ASM_ARM_MEMORY_H
 #define __ASM_ARM_MEMORY_H
+#include <linux/config.h>
 #include <asm/arch/memory.h>
-static inline unsigned long virt_to_phys(volatile void *x)
+/*
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
+ *
+ * This is the PFN of the first RAM page in the kernel
+ * direct-mapped view. We assume this is the first page
+ * of RAM in the mem_map as well.
+ */
+#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+/*
+ * These are *only* valid on the kernel direct mapped RAM memory.
+ */
+static inline unsigned long virt_to_phys(void *x)
 {
 	return __virt_to_phys((unsigned long)(x));
 }
@@ -26,10 +38,77 @@ static inline void *phys_to_virt(unsigned long x)
 	return (void *)(__phys_to_virt((unsigned long)(x)));
 }
+#define __pa(x) __virt_to_phys((unsigned long)(x))
+#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
 /*
  * Virtual <-> DMA view memory address translations
+ * Again, these are *only* valid on the kernel direct mapped RAM
+ * memory. Use of these is *depreciated*.
  */
 #define virt_to_bus(x) (__virt_to_bus((unsigned long)(x)))
 #define bus_to_virt(x) ((void *)(__bus_to_virt((unsigned long)(x))))
+/*
+ * Conversion between a struct page and a physical address.
+ *
+ * Note: when converting an unknown physical address to a
+ * struct page, the resulting pointer must be validated
+ * using VALID_PAGE(). It must return an invalid struct page
+ * for any physical address not corresponding to a system
+ * RAM address.
+ *
+ * page_to_pfn(page)	convert a struct page * to a PFN number
+ * pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
+ * pfn_valid(pfn)	indicates whether a PFN number is valid
+ *
+ * virt_to_page(k)	convert a _valid_ virtual address to struct page *
+ * virt_addr_valid(k)	indicates whether a virtual address is valid
+ */
+#ifndef CONFIG_DISCONTIGMEM
+#define page_to_pfn(page)	(((page) - mem_map) + PHYS_PFN_OFFSET)
+#define pfn_to_page(pfn)	((mem_map + (pfn)) - PHYS_PFN_OFFSET)
+#define pfn_valid(pfn)		((pfn) >= PHYS_PFN_OFFSET && (pfn) < max_mapnr)
+#define virt_to_page(kaddr)	(pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
+#define virt_addr_valid(kaddr)	((kaddr) >= PAGE_OFFSET && (kaddr) < (unsigned long)high_memory)
+#else
+/*
+ * This is more complex. We have a set of mem_map arrays spread
+ * around in memory.
+ */
+#define page_to_pfn(page) \
+	(((page) - page_zone(page)->zone_mem_map) \
+	+ (page_zone(page)->zone_start_paddr >> PAGE_SHIFT))
+#define pfn_to_page(pfn) \
+	(PFN_TO_MAPBASE(pfn) + LOCAL_MAP_NR((pfn) << PAGE_SHIFT))
+#define pfn_valid(pfn)		(PFN_TO_NID(pfn) < NR_NODES)
+#define virt_to_page(kaddr) \
+	(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
+#define virt_addr_valid(kaddr)	(KVADDR_TO_NID(kaddr) < NR_NODES)
+/*
+ * Common discontigmem stuff.
+ * PHYS_TO_NID is used by the ARM kernel/setup.c
+ */
+#define PHYS_TO_NID(addr)	PFN_TO_NID((addr) >> PAGE_SHIFT)
+#endif
+/*
+ * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die.
+ */
+#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+/*
+ * We should really eliminate virt_to_bus() here - it's depreciated.
+ */
+#define page_to_bus(page)	(virt_to_bus(page_address(page)))
 #endif
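On the discontigmem path, pfn_to_page() decomposes into "which node owns this pfn" (PFN_TO_MAPBASE) plus "index within that node's mem_map" (LOCAL_MAP_NR of the corresponding physical address). Hand-expanding it for an SA1100-style layout (each node spans 1 << 27 bytes, 4KB pages) gives roughly the following; node_mem_map[] and the PHYS_PFN_OFFSET value are illustrative stand-ins, not kernel symbols:

#define PAGE_SHIFT      12
#define NODE_SHIFT      27                      /* 128MB per node, as in arch-sa1100 */
#define PHYS_PFN_OFFSET 0xc0000UL               /* illustrative value only */

struct page { unsigned long flags; };
extern struct page *node_mem_map[];             /* stand-in for NODE_MEM_MAP(nid) */

static struct page *example_pfn_to_page(unsigned long pfn)
{
        unsigned long nid = (pfn - PHYS_PFN_OFFSET) >> (NODE_SHIFT - PAGE_SHIFT);
        unsigned long idx = ((pfn << PAGE_SHIFT) & ((1UL << NODE_SHIFT) - 1)) >> PAGE_SHIFT;

        /* PFN_TO_MAPBASE(pfn) + LOCAL_MAP_NR(pfn << PAGE_SHIFT) */
        return node_mem_map[nid] + idx;
}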
@@ -125,18 +125,9 @@ static inline int get_order(unsigned long size)
 	return order;
 }
-#endif /* !__ASSEMBLY__ */
-#include <asm/arch/memory.h>
-#define __pa(x) __virt_to_phys((unsigned long)(x))
-#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
-#ifndef CONFIG_DISCONTIGMEM
-#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT) - \
-	(PHYS_OFFSET >> PAGE_SHIFT))
-#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
-#endif
+#include <asm/memory.h>
+#endif /* !__ASSEMBLY__ */
 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
@@ -11,7 +11,7 @@
 #define _ASMARM_PGTABLE_H
 #include <linux/config.h>
-#include <asm/arch/memory.h>
+#include <asm/memory.h>
 #include <asm/arch/vmalloc.h>
 /*
@@ -79,21 +79,12 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 extern struct page *empty_zero_page;
 #define ZERO_PAGE(vaddr)	(empty_zero_page)
+#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 #define pte_none(pte)		(!pte_val(pte))
 #define pte_clear(ptep)		set_pte((ptep), __pte(0))
-#ifndef CONFIG_DISCONTIGMEM
-#define pte_page(x) (mem_map + (pte_val((x)) >> PAGE_SHIFT) - \
-	(PHYS_OFFSET >> PAGE_SHIFT))
-#else
-/*
- * I'm not happy with this - we needlessly convert a physical address
- * to a virtual one, and then immediately back to a physical address,
- * which, if __va and __pa are expensive causes twice the expense for
- * zero gain. --rmk
- */
-#define pte_page(x) (virt_to_page(__va(pte_val((x)))))
-#endif
+#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_present(pmd)	(pmd_val(pmd))
@@ -107,12 +98,7 @@ extern struct page *empty_zero_page;
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
-static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
-{
-	return __pte(physpage | pgprot_val(pgprot));
-}
-#define mk_pte(page,pgprot)	mk_pte_phys(__pa(page_address(page)), pgprot)
+#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
 /*
  * The "pgd_xxx()" functions here are trivial for a folded two-level
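The pte helpers now compose entirely in pfn space: mk_pte() goes struct page -> pfn -> pte via page_to_pfn() and pfn_pte(), and pte_page() goes back via pte_pfn() and pfn_to_page(), so neither direction needs __va()/__pa() any more, which is exactly the complaint in the removed comment. A simplified standalone sketch of the round trip (not kernel code; toy pte layout and example values):

#include <assert.h>

#define PAGE_SHIFT      12
#define PHYS_PFN_OFFSET 16UL                    /* toy value so the array below stays small */

typedef unsigned long pte_t;
typedef unsigned long pgprot_t;

struct page { unsigned long flags; };
static struct page mem_map[64];

#define page_to_pfn(p)          ((unsigned long)((p) - mem_map) + PHYS_PFN_OFFSET)
#define pfn_to_page(pfn)        (&mem_map[(pfn) - PHYS_PFN_OFFSET])

#define pfn_pte(pfn,prot)       (((pfn) << PAGE_SHIFT) | (prot))
#define pte_pfn(pte)            ((pte) >> PAGE_SHIFT)
#define mk_pte(page,prot)       pfn_pte(page_to_pfn(page), prot)
#define pte_page(pte)           pfn_to_page(pte_pfn(pte))

int main(void)
{
        pgprot_t prot = 0x3;                    /* pretend protection bits */
        struct page *pg = &mem_map[7];
        pte_t pte = mk_pte(pg, prot);

        assert(pte_pfn(pte) == PHYS_PFN_OFFSET + 7);
        assert(pte_page(pte) == pg);            /* back to the same struct page */
        return 0;
}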