Commit 219edaf1 authored by David S. Miller

Merge davem@nuts.davemloft.net:/disk1/BK/sparc-2.6

into kernel.bkbits.net:/home/davem/sparc-2.6
parents ed407526 c0ddb45e
@@ -67,7 +67,8 @@ include/asm-$(ARCH)/asm_offsets.h: arch/$(ARCH)/kernel/asm-offsets.s
 	$(call filechk,gen-asm-offsets)
 
 CLEAN_FILES += include/asm-$(ARCH)/asm_offsets.h \
-		arch/$(ARCH)/kernel/asm-offsets.s
+		arch/$(ARCH)/kernel/asm-offsets.s \
+		arch/$(ARCH)/boot/System.map
 
 # Don't use tabs in echo arguments.
 define archhelp
...
@@ -32,6 +32,3 @@ $(obj)/tftpboot.img: $(obj)/piggyback $(obj)/System.map $(obj)/image FORCE
 $(obj)/btfix.s: $(obj)/btfixupprep vmlinux FORCE
 	$(call if_changed,btfix)
-
-clean:
-	rm $(obj)/System.map
...
@@ -32,6 +32,7 @@
 #include <asm/obio.h>
 #include <asm/mxcc.h>
 #include <asm/thread_info.h>
+#include <asm/param.h>
 #include <asm/asmmacro.h>
@@ -1808,36 +1809,37 @@ fpload:
 	retl
 	 nop
 
-	.globl	ndelay
-ndelay:
+/* __ndelay and __udelay take two arguments:
+ * 0 - nsecs or usecs to delay
+ * 1 - per_cpu udelay_val (loops per jiffy)
+ *
+ * Note that ndelay gives HZ times higher resolution but has a 10ms
+ * limit. udelay can handle up to 1s.
+ */
+	.globl	__ndelay
+__ndelay:
 	save	%sp, -STACKFRAME_SZ, %sp
 	mov	%i0, %o0
 	call	.umul
-	 mov	5, %o1
+	 mov	0x1ad, %o1	! 2**32 / (1 000 000 000 / HZ)
+	call	.umul
+	 mov	%i1, %o1	! udelay_val
 	ba	delay_continue
-	 nop
+	 mov	%o1, %o0	! >>32 later for better resolution
 
-	.globl	udelay
-udelay:
+	.globl	__udelay
+__udelay:
 	save	%sp, -STACKFRAME_SZ, %sp
 	mov	%i0, %o0
 	sethi	%hi(0x10c6), %o1
 	call	.umul
-	 or	%o1, %lo(0x10c6), %o1
-delay_continue:
-#ifndef CONFIG_SMP
-	sethi	%hi(loops_per_jiffy), %o3
+	 or	%o1, %lo(0x10c6), %o1	! 2**32 / 1 000 000
 	call	.umul
-	 ld	[%o3 + %lo(loops_per_jiffy)], %o1
-#else
-	GET_PROCESSOR_OFFSET(o4, o2)
-	set	cpu_data, %o3
-	call	.umul
-	 ld	[%o3 + %o4], %o1
-#endif
+	 mov	%i1, %o1	! udelay_val
 	call	.umul
-	 mov	100, %o0
+	 mov	HZ, %o0		! >>32 earlier for wider range
+delay_continue:
 	cmp	%o0, 0x0
 1:
 	bne	1b
...
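For reference, here is a minimal C model of the fixed-point arithmetic the new __udelay/__ndelay assembly performs. This is an illustration only, not part of the patch: the function names are hypothetical, HZ=100 is assumed (matching the 0x1ad constant), and .umul's 32x32->64 multiply is modelled with uint64_t.

#include <stdint.h>
#include <stdio.h>

#define HZ 100	/* assumed; 0x1ad ~= 2^32 / (10^9 / HZ) */

/* loops to spin for a microsecond delay: ~ usecs * lpj * HZ / 10^6 */
static unsigned long udelay_loops(uint32_t usecs, uint32_t lpj)
{
	uint32_t scaled = (uint32_t)((uint64_t)usecs * 0x10c6);		/* usecs * 2^32/10^6, low word */
	return (unsigned long)((((uint64_t)scaled * lpj) >> 32) * HZ);	/* ">>32 earlier for wider range" */
}

/* loops to spin for a nanosecond delay: ~ nsecs * lpj * HZ / 10^9 */
static unsigned long ndelay_loops(uint32_t nsecs, uint32_t lpj)
{
	uint32_t scaled = (uint32_t)((uint64_t)nsecs * 0x1ad);		/* nsecs * 2^32/(10^9/HZ), low word */
	return (unsigned long)(((uint64_t)scaled * lpj) >> 32);		/* ">>32 later for better resolution" */
}

int main(void)
{
	/* lpj = 500000 is a made-up loops-per-jiffy value for illustration */
	printf("udelay(1000) -> %lu loops\n", udelay_loops(1000, 500000));
	printf("ndelay(400)  -> %lu loops\n", ndelay_loops(400, 500000));
	return 0;
}

Because the first product is truncated to 32 bits, nsecs * 0x1ad must stay below 2^32 (about 10ms) and usecs * 0x10c6 below 2^32 (about 1s), which is exactly the limit stated in the new comment in the assembly.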
@@ -331,6 +331,7 @@ void __init setup_arch(char **cmdline_p)
 		if (highest_paddr < top)
 			highest_paddr = top;
 	}
+	pfn_base = phys_base >> PAGE_SHIFT;
 
 	if (!root_flags)
 		root_mountflags &= ~MS_RDONLY;
...
@@ -20,11 +20,11 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/cache.h>
+#include <linux/delay.h>
 
 #include <asm/ptrace.h>
 #include <asm/atomic.h>
-#include <asm/delay.h>
 #include <asm/irq.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
...
@@ -145,6 +145,7 @@ EXPORT_SYMBOL(__down_interruptible);
 EXPORT_SYMBOL(sparc_valid_addr_bitmap);
 EXPORT_SYMBOL(phys_base);
+EXPORT_SYMBOL(pfn_base);
 
 /* Atomic operations. */
 EXPORT_SYMBOL(___atomic24_add);
@@ -164,8 +165,8 @@ EXPORT_SYMBOL(__cpu_number_map);
 EXPORT_SYMBOL(__cpu_logical_map);
 #endif
 
-EXPORT_SYMBOL(udelay);
-EXPORT_SYMBOL(ndelay);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
 EXPORT_SYMBOL(rtc_lock);
 EXPORT_SYMBOL(mostek_lock);
 EXPORT_SYMBOL(mstk48t02_regs);
...
@@ -49,7 +49,6 @@ static int smp_highest_cpu;
 extern int smp_threads_ready;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
 extern struct cpuinfo_sparc cpu_data[NR_CPUS];
-extern unsigned long cpu_offset[NR_CPUS];
 extern unsigned char boot_cpu_id;
 extern int smp_activated;
 extern volatile int __cpu_number_map[NR_CPUS];
@@ -171,9 +170,6 @@ void __init smp4d_boot_cpus(void)
 		printk("Entering SMP Mode...\n");
 
-	for (i = 0; i < NR_CPUS; i++)
-		cpu_offset[i] = (char *)&(cpu_data(i)) - (char *)&(cpu_data(0));
-
 	if (boot_cpu_id)
 		current_set[0] = NULL;
@@ -427,9 +423,6 @@ void smp4d_message_pass(int target, int msg, unsigned long data, int wait)
 extern void sparc_do_profile(unsigned long pc, unsigned long o7);
 
-#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)		cpu_data(__cpu).counter
-
 void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
 {
 	int cpu = hard_smp4d_processor_id();
...
@@ -44,7 +44,6 @@ extern unsigned long cpu_present_map;
 extern int smp_num_cpus;
 extern int smp_threads_ready;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern unsigned long cpu_offset[NR_CPUS];
 extern unsigned char boot_cpu_id;
 extern int smp_activated;
 extern volatile int __cpu_number_map[NR_CPUS];
@@ -152,9 +151,7 @@ void __init smp4m_boot_cpus(void)
 	for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
 		cpu_present_map |= (1<<mid);
 
-	/* XXX cpu_offset is broken -Keith */
 	for(i=0; i < NR_CPUS; i++) {
-		cpu_offset[i] = (char *)&(cpu_data(i)) - (char *)&(cpu_data(0));
 		__cpu_number_map[i] = -1;
 		__cpu_logical_map[i] = -1;
 	}
@@ -409,9 +406,6 @@ void smp4m_cross_call_irq(void)
 extern void sparc_do_profile(unsigned long pc, unsigned long o7);
 
-#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)		cpu_data(__cpu).counter
-
 void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
...
@@ -38,6 +38,7 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long *sparc_valid_addr_bitmap;
 unsigned long phys_base;
+unsigned long pfn_base;
 
 unsigned long page_kernel;
@@ -134,7 +135,7 @@ unsigned long calc_highpages(void)
 unsigned long calc_max_low_pfn(void)
 {
 	int i;
-	unsigned long tmp = (SRMMU_MAXMEM >> PAGE_SHIFT);
+	unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
 	unsigned long curr_pfn, last_pfn;
 
 	last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT;
@@ -189,9 +190,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	 */
 	start_pfn  = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));
 
-	/* Adjust up to the physical address where the kernel begins. */
-	start_pfn += phys_base;
-
 	/* Now shift down to get the real physical page frame number. */
 	start_pfn >>= PAGE_SHIFT;
@@ -202,8 +200,8 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	max_low_pfn = max_pfn;
 	highstart_pfn = highend_pfn = max_pfn;
-	if (max_low_pfn > (SRMMU_MAXMEM >> PAGE_SHIFT)) {
-		highstart_pfn = (SRMMU_MAXMEM >> PAGE_SHIFT);
+	if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {
+		highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
 		max_low_pfn = calc_max_low_pfn();
 		printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
 			calc_highpages() >> (20 - PAGE_SHIFT));
@@ -230,7 +228,8 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	}
 #endif
 	/* Initialize the boot-time allocator. */
-	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, phys_base>>PAGE_SHIFT, max_low_pfn);
+	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base,
+					 max_low_pfn);
 
 	/* Now register the available physical memory with the
 	 * allocator.
@@ -267,8 +266,8 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 		reserve_bootmem(initrd_start, size);
 		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-		initrd_start += PAGE_OFFSET;
-		initrd_end += PAGE_OFFSET;
+		initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
+		initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
 	}
 #endif
 	/* Reserve the kernel text/data/bss. */
@@ -432,7 +431,7 @@ void __init mem_init(void)
 	taint_real_pages();
 
-	max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT);
+	max_mapnr = last_valid_pfn - pfn_base;
 	high_memory = __va(max_low_pfn << PAGE_SHIFT);
 
 	num_physpages = totalram_pages = free_all_bootmem();
@@ -474,11 +473,9 @@ void free_initmem (void)
 	addr = (unsigned long)(&__init_begin);
 	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-		unsigned long page;
 		struct page *p;
 
-		page = addr + phys_base;
-		p = virt_to_page(page);
+		p = virt_to_page(addr);
 
 		ClearPageReserved(p);
 		set_page_count(p, 1);
...
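A quick sanity check of the new mem_map accounting, with hypothetical numbers (not taken from the patch): phys_base = 0x20000000, 64MB of RAM, 4K pages. The point is that max_mapnr and the zone sizes are now counts of pages starting at pfn_base, not absolute page frame numbers.

#include <assert.h>

int main(void)
{
	unsigned long phys_base = 0x20000000UL;			/* assumed, machine-specific */
	unsigned long pfn_base  = phys_base >> 12;		/* 0x20000, as setup_arch() now computes */
	unsigned long last_valid_pfn = pfn_base + ((64UL << 20) >> 12);

	/* mem_map[] only describes pages from pfn_base upward */
	unsigned long max_mapnr = last_valid_pfn - pfn_base;
	assert(max_mapnr == 0x4000);	/* 16384 pages = 64MB */
	return 0;
}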
@@ -213,7 +213,7 @@ static inline pte_t srmmu_pte_mkyoung(pte_t pte)
  * and a page entry and page directory to the page they refer to.
  */
 static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
-{ return __pte(((page - mem_map) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }
+{ return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }
 
 static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
 { return __pte(((page) >> 4) | pgprot_val(pgprot)); }
@@ -245,7 +245,7 @@ static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
 	unsigned long ptp;	/* Physical address, shifted right by 4 */
 	int i;
 
-	ptp = (ptep - mem_map) << (PAGE_SHIFT-4);	/* watch for overflow */
+	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
 	for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++) {
 		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
 		ptp += (SRMMU_PTRS_PER_PTE*sizeof(pte_t) >> 4);
@@ -480,7 +480,7 @@ srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
 		return NULL;
-	return mem_map + (__nocache_pa(pte) >> PAGE_SHIFT);
+	return pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
 }
 
 static void srmmu_free_pte_fast(pte_t *pte)
@@ -495,7 +495,7 @@ static void srmmu_pte_free(struct page *pte)
 	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
 	if (p == 0)
 		BUG();
-	p = ((pte - mem_map) << PAGE_SHIFT);	/* Physical address */
+	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
 	p = (unsigned long) __nocache_va(p);	/* Nocached virtual */
 	srmmu_free_nocache(p, SRMMU_PTE_SZ_SOFT);
 }
@@ -1316,7 +1316,7 @@ void __init srmmu_paging_init(void)
 		for (znum = 0; znum < MAX_NR_ZONES; znum++)
 			zones_size[znum] = zholes_size[znum] = 0;
 
-		npages = max_low_pfn - (phys_base >> PAGE_SHIFT);
+		npages = max_low_pfn - pfn_base;
 
 		zones_size[ZONE_DMA] = npages;
 		zholes_size[ZONE_DMA] = npages - pages_avail;
@@ -1326,13 +1326,9 @@ void __init srmmu_paging_init(void)
 		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
 
 		free_area_init_node(0, &contig_page_data, NULL, zones_size,
-				    phys_base >> PAGE_SHIFT, zholes_size);
+				    pfn_base, zholes_size);
 		mem_map = contig_page_data.node_mem_map;
 	}
-
-	/* P3: easy to fix, todo. Current code is utterly broken, though. */
-	if (phys_base != 0)
-		panic("phys_base nonzero");
 }
 
 static void srmmu_mmu_info(struct seq_file *m)
...
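The reason srmmu_mk_pte() and friends switch from "page - mem_map" to page_to_pfn() is that the pte address field holds the real physical address shifted right by 4, and mem_map[0] now describes pfn_base rather than pfn 0. A small worked example with hypothetical numbers (illustration only, not from the patch):

#include <assert.h>

int main(void)
{
	unsigned long pfn_base = 0x20000UL;		/* hypothetical: phys_base = 0x20000000, 4K pages */
	unsigned long mem_map_index = 1;		/* what "page - mem_map" alone would give */
	unsigned long pfn = mem_map_index + pfn_base;	/* what page_to_pfn(page) now gives */

	/* SRMMU pte address bits = physical address >> 4 = pfn << (PAGE_SHIFT - 4) */
	unsigned long pte_addr_bits = pfn << (12 - 4);
	assert(pte_addr_bits == (0x20001000UL >> 4));	/* correct only with pfn_base included */
	return 0;
}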
@@ -2088,7 +2088,7 @@ void __init sun4c_paging_init(void)
 	for (znum = 0; znum < MAX_NR_ZONES; znum++)
 		zones_size[znum] = zholes_size[znum] = 0;
 
-	npages = max_low_pfn - (phys_base >> PAGE_SHIFT);
+	npages = max_low_pfn - pfn_base;
 
 	zones_size[ZONE_DMA] = npages;
 	zholes_size[ZONE_DMA] = npages - pages_avail;
@@ -2098,7 +2098,7 @@ void __init sun4c_paging_init(void)
 	zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
 
 	free_area_init_node(0, &contig_page_data, NULL, zones_size,
-			    phys_base >> PAGE_SHIFT, zholes_size);
+			    pfn_base, zholes_size);
 	mem_map = contig_page_data.node_mem_map;
 }
...
@@ -44,7 +44,7 @@ config BINFMT_SHARED_FLAT
 config BINFMT_AOUT
 	tristate "Kernel support for a.out and ECOFF binaries"
-	depends on (X86 && !X86_64) || ALPHA || ARM || M68K || MIPS || SPARC
+	depends on (X86 && !X86_64) || ALPHA || ARM || M68K || MIPS || SPARC32
 	---help---
 	  A.out (Assembler.OUTput) is a set of formats for libraries and
 	  executables used in the earliest versions of UNIX.  Linux used
...
@@ -29,13 +29,6 @@
 	srl	%reg, 12, %reg; \
 	and	%reg, 3, %reg;
 
-#define GET_PROCESSOR_OFFSET(reg, tmp) \
-	GET_PROCESSOR_ID(reg) \
-	sethi	%hi(cpu_offset), %tmp; \
-	sll	%reg, 2, %reg; \
-	or	%tmp, %lo(cpu_offset), %tmp; \
-	ld	[%tmp + %reg], %reg;
-
 /* All trap entry points _must_ begin with this macro or else you
  * lose.  It makes sure the kernel has a proper window so that
  * c-code can be called.
...
@@ -7,7 +7,8 @@
 #ifndef __SPARC_DELAY_H
 #define __SPARC_DELAY_H
 
-extern unsigned long loops_per_jiffy;
+#include <linux/config.h>
+#include <asm/cpudata.h>
 
 extern __inline__ void __delay(unsigned long loops)
 {
@@ -20,7 +21,15 @@ extern __inline__ void __delay(unsigned long loops)
 }
 
 /* This is too messy with inline asm on the Sparc. */
-extern void udelay(unsigned long usecs);
-extern void ndelay(unsigned long usecs);
+extern void __udelay(unsigned long usecs, unsigned long lpj);
+extern void __ndelay(unsigned long nsecs, unsigned long lpj);
+
+#ifdef CONFIG_SMP
+#define __udelay_val	cpu_data(smp_processor_id()).udelay_val
+#else /* SMP */
+#define __udelay_val	loops_per_jiffy
+#endif /* SMP */
+#define udelay(__usecs)	__udelay(__usecs, __udelay_val)
+#define ndelay(__nsecs)	__ndelay(__nsecs, __udelay_val)
 
 #endif /* defined(__SPARC_DELAY_H) */
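With this header change, callers keep using udelay()/ndelay() unchanged; the macros simply pass the calling cpu's loops-per-jiffy value down to __udelay()/__ndelay(). A hypothetical driver snippet, for illustration only:

/* Hypothetical caller (not part of the patch). */
#include <linux/delay.h>

static void example_settle(void)
{
	udelay(50);	/* expands to __udelay(50, __udelay_val): per-cpu udelay_val
			 * on SMP, loops_per_jiffy on UP */
	ndelay(400);	/* expands to __ndelay(400, __udelay_val) */
}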
@@ -156,17 +156,22 @@ extern __inline__ int get_order(unsigned long size)
 #define PAGE_ALIGN(addr)  (((addr)+PAGE_SIZE-1)&PAGE_MASK)
 
 #define PAGE_OFFSET	0xf0000000
-#define __pa(x)			((unsigned long)(x) - PAGE_OFFSET)
-#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
+#ifndef __ASSEMBLY__
+extern unsigned long phys_base;
+extern unsigned long pfn_base;
+#endif
+#define __pa(x)			((unsigned long)(x) - PAGE_OFFSET + phys_base)
+#define __va(x)			((void *)((unsigned long) (x) - phys_base + PAGE_OFFSET))
 
-#define virt_to_phys(x)		__pa((unsigned long)(x))
-#define phys_to_virt(x)		__va((unsigned long)(x))
+#define virt_to_phys		__pa
+#define phys_to_virt		__va
 
-#define pfn_to_page(pfn)	(mem_map + (pfn))
-#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
-#define virt_to_page(kaddr)	(mem_map + (__pa(kaddr) >> PAGE_SHIFT))
+#define pfn_to_page(pfn)	(mem_map + ((pfn)-(pfn_base)))
+#define page_to_pfn(page)	((unsigned long)(((page) - mem_map) + pfn_base))
+#define virt_to_page(kaddr)	(mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT)))
 
-#define pfn_valid(pfn)		((pfn) < max_mapnr)
-#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_valid(pfn)		(((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
+#define virt_addr_valid(kaddr)	((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
...
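A worked example of the new address macros, with a hypothetical phys_base (illustration only; the helper names my_pa/my_va mirror the __pa/__va macros above):

#include <assert.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xf0000000UL

static unsigned long phys_base = 0x20000000UL;	/* assumed, machine-specific */
static unsigned long pfn_base;			/* = phys_base >> PAGE_SHIFT */

static unsigned long my_pa(unsigned long v) { return v - PAGE_OFFSET + phys_base; }
static unsigned long my_va(unsigned long p) { return p - phys_base + PAGE_OFFSET; }

int main(void)
{
	pfn_base = phys_base >> PAGE_SHIFT;	/* as setup_arch() now does */

	/* kernel virtual 0xf0001000 is physical 0x20001000 on this machine */
	assert(my_pa(PAGE_OFFSET + 0x1000) == 0x20001000UL);
	assert(my_va(0x20001000UL) == PAGE_OFFSET + 0x1000);

	/* pfn_to_page(): mem_map[0] corresponds to pfn_base, not to pfn 0,
	 * so this address lands at mem_map index 1 */
	unsigned long pfn = my_pa(PAGE_OFFSET + 0x1000) >> PAGE_SHIFT;
	assert(pfn - pfn_base == 1);
	return 0;
}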
@@ -181,6 +181,7 @@ extern int num_contexts;
  * hit for all __pa()/__va() operations.
  */
 extern unsigned long phys_base;
+extern unsigned long pfn_base;
 
 /*
  * BAD_PAGETABLE is used when we need a bogus page-table, while
...
@@ -148,7 +148,10 @@ extern __inline__ int hard_smp_processor_id(void)
 }
 #endif
 
-#define smp_processor_id() hard_smp_processor_id()
+#define smp_processor_id() (current_thread_info()->cpu)
+
+#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
+#define prof_counter(__cpu)		cpu_data(__cpu).counter
 
 #endif /* !(__ASSEMBLY__) */
...