Commit 62f7176e authored by David S. Miller

[SPARC64]: No longer set WANT_PAGE_VIRTUAL.

Also uninline pfn_to_page and page_to_pfn.
With the cached virtual field gone, struct page is now 8 bytes smaller.
parent f4f988e2
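
For context, a minimal sketch (paraphrased, not copied from the mm headers of this era) of what WANT_PAGE_VIRTUAL controls: when an architecture defines it, core mm adds a cached kernel virtual address to every struct page, which is the 8 bytes this commit saves. Without it, and with no highmem on sparc64, page_address() reduces to linear-map arithmetic:

/* Sketch, not kernel source: what WANT_PAGE_VIRTUAL adds to struct page. */
struct page {
        /* ... flags, _count, mapping, lru, etc. ... */
#if defined(WANT_PAGE_VIRTUAL)
        void *virtual;          /* cached kernel virtual address */
#endif
};

/* With the field gone and no highmem on sparc64, page_address() amounts
 * to arithmetic on the linear mapping (simplified): */
static inline void *page_address_sketch(struct page *page)
{
        return __va(page_to_pfn(page) << PAGE_SHIFT);
}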
@@ -654,13 +654,13 @@ extern atomic_t dcpage_flushes_xcall;
 static __inline__ void __local_flush_dcache_page(struct page *page)
 {
 #if (L1DCACHE_SIZE > PAGE_SIZE)
-	__flush_dcache_page(page->virtual,
+	__flush_dcache_page(page_address(page),
 			    ((tlb_type == spitfire) &&
 			     page_mapping(page) != NULL));
 #else
 	if (page_mapping(page) != NULL &&
 	    tlb_type == spitfire)
-		__flush_icache_page(__pa(page->virtual));
+		__flush_icache_page(__pa(page_address(page)));
 #endif
 }
@@ -675,6 +675,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 	if (cpu == this_cpu) {
 		__local_flush_dcache_page(page);
 	} else if (cpu_online(cpu)) {
+		void *pg_addr = page_address(page);
 		u64 data0;
 
 		if (tlb_type == spitfire) {
@@ -683,14 +684,14 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 			if (page_mapping(page) != NULL)
 				data0 |= ((u64)1 << 32);
 			spitfire_xcall_deliver(data0,
-					       __pa(page->virtual),
-					       (u64) page->virtual,
+					       __pa(pg_addr),
+					       (u64) pg_addr,
 					       mask);
 		} else {
 			data0 =
 				((u64)&xcall_flush_dcache_page_cheetah);
 			cheetah_xcall_deliver(data0,
-					      __pa(page->virtual),
+					      __pa(pg_addr),
 					      0, mask);
 		}
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -703,6 +704,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 
 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 {
+	void *pg_addr = page_address(page);
 	cpumask_t mask = cpu_online_map;
 	u64 data0;
 	int this_cpu = get_cpu();
@@ -719,13 +721,13 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 		if (page_mapping(page) != NULL)
 			data0 |= ((u64)1 << 32);
 		spitfire_xcall_deliver(data0,
-				       __pa(page->virtual),
-				       (u64) page->virtual,
+				       __pa(pg_addr),
+				       (u64) pg_addr,
 				       mask);
 	} else {
 		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
 		cheetah_xcall_deliver(data0,
-				      __pa(page->virtual),
+				      __pa(pg_addr),
 				      0, mask);
 	}
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -360,6 +360,8 @@ EXPORT_SYMBOL(__bzero_noasi);
 EXPORT_SYMBOL(phys_base);
 EXPORT_SYMBOL(pfn_base);
 EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
+EXPORT_SYMBOL(page_to_pfn);
+EXPORT_SYMBOL(pfn_to_page);
 
 /* No version information on this, heavily used in inline asm,
  * and will always be 'void __ret_efault(void)'.
@@ -137,13 +137,13 @@ __inline__ void flush_dcache_page_impl(struct page *page)
 #endif
 
 #if (L1DCACHE_SIZE > PAGE_SIZE)
-	__flush_dcache_page(page->virtual,
+	__flush_dcache_page(page_address(page),
 			    ((tlb_type == spitfire) &&
 			     page_mapping(page) != NULL));
 #else
 	if (page_mapping(page) != NULL &&
 	    tlb_type == spitfire)
-		__flush_icache_page(__pa(page->virtual));
+		__flush_icache_page(__pa(page_address(page)));
 #endif
 }
@@ -344,6 +344,16 @@ void flush_icache_range(unsigned long start, unsigned long end)
 	}
 }
 
+unsigned long page_to_pfn(struct page *page)
+{
+	return (unsigned long) ((page - mem_map) + pfn_base);
+}
+
+struct page *pfn_to_page(unsigned long pfn)
+{
+	return (mem_map + (pfn - pfn_base));
+}
+
 void show_mem(void)
 {
 	printk("Mem-info:\n");
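A hypothetical worked example of the pfn_base offset these new out-of-line helpers preserve, using the partitioned E10000 case described in the page.h comment further down (first physical page at 4 GB with 8 KB pages, so pfn_base = 0x100000000 >> 13 = 0x80000; the concrete values are illustrative only):

	struct page *p = pfn_to_page(0x80001);	/* == &mem_map[1] */
	unsigned long pfn = page_to_pfn(p);	/* == 0x80001, not 1 */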
@@ -19,8 +19,8 @@ extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
 #define bus_to_virt bus_to_virt_not_defined_use_pci_map
 
 /* BIO layer definitions. */
-extern unsigned long phys_base, kern_base, kern_size;
-#define page_to_phys(page)	((((page) - mem_map) << PAGE_SHIFT)+phys_base)
+extern unsigned long kern_base, kern_size;
+#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 #define BIO_VMERGE_BOUNDARY	8192
 
 /* Different PCI controllers we support have their PCI MEM space
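The old and new page_to_phys() expansions agree as long as pfn_base is phys_base expressed in pages; a standalone sanity sketch under that assumption (PAGE_SHIFT is 13 on sparc64; the 4 GB phys_base is made up):

#include <assert.h>

#define PAGE_SHIFT	13
static unsigned long phys_base = 0x100000000UL;			/* hypothetical */
static unsigned long pfn_base = 0x100000000UL >> PAGE_SHIFT;	/* phys_base in pages */

int main(void)
{
	/* old: ((page - mem_map) << PAGE_SHIFT) + phys_base
	 * new: ((page - mem_map) + pfn_base) << PAGE_SHIFT  */
	for (unsigned long n = 0; n < 1024; n++)
		assert(((n << PAGE_SHIFT) + phys_base) ==
		       ((n + pfn_base) << PAGE_SHIFT));
	return 0;
}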
@@ -14,9 +14,6 @@
 #ifndef __ASSEMBLY__
 
-/* Sparc64 is slow at multiplication, we prefer to use some extra space. */
-#define WANT_PAGE_VIRTUAL	1
-
 extern void _clear_page(void *page);
 #define clear_page(X)	_clear_page((void *)(X))
 
 struct page;
@@ -111,17 +108,19 @@ typedef unsigned long iopgprot_t;
  */
 #define PAGE_OFFSET		_AC(0xFFFFF80000000000,UL)
 
 #ifndef __ASSEMBLY__
 
 #define __pa(x)			((unsigned long)(x) - PAGE_OFFSET)
 #define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
 
 /* PFNs are real physical page numbers.  However, mem_map only begins to record
  * per-page information starting at pfn_base.  This is to handle systems where
- * the first physical page in the machine is at some huge physical address, such
- * as 4GB.   This is common on a partitioned E10000, for example.
+ * the first physical page in the machine is at some huge physical address,
+ * such as 4GB.  This is common on a partitioned E10000, for example.
  */
-#define pfn_to_page(pfn)	(mem_map + ((pfn)-(pfn_base)))
-#define page_to_pfn(page)	((unsigned long)(((page) - mem_map) + pfn_base))
+extern struct page *pfn_to_page(unsigned long pfn);
+extern unsigned long page_to_pfn(struct page *);
+
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
 #define pfn_valid(pfn)		(((pfn)-(pfn_base)) < max_mapnr)
@@ -130,8 +129,6 @@ typedef unsigned long iopgprot_t;
 #define virt_to_phys __pa
 #define phys_to_virt __va
 
 #ifndef __ASSEMBLY__
 
 /* The following structure is used to hold the physical
  * memory configuration of the machine.  This is filled in
  * probe_memory() and is later used by mem_init() to set up