Commit 48a975d3 authored by Keith M. Wesolowski

[SPARC32]: Down with our cpu_offset. Use regular per_cpu instead.

The only users of this were udelay/ndelay.  These now look more
like sparc64, except ours are too weird and too big to inline.
parent 92c06c66
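In rough C terms, the per-CPU lookup changes like this (an illustrative sketch with stand-in names and a fixed CPU count, not the kernel's declarations; the real accessor is cpu_data() from asm/cpudata.h, and the only consumers are udelay()/ndelay()):

/* Sketch only: hypothetical types and tables for illustration. */
struct cpuinfo_sketch { unsigned long udelay_val; };
static struct cpuinfo_sketch cpu_info[4];
static unsigned long cpu_offset_tbl[4];	/* byte offsets, like the removed cpu_offset[];
					 * filled as (char *)&cpu_info[i] - (char *)&cpu_info[0] */

/* Old scheme: assembly fetched cpu_offset[cpu] via GET_PROCESSOR_OFFSET()
 * and added it to the base of the cpu_data array. */
static unsigned long lpj_via_offset(int cpu)
{
	return *(unsigned long *)((char *)&cpu_info[0] + cpu_offset_tbl[cpu]);
}

/* New scheme: C code reads the per-CPU value directly and passes it to
 * __udelay()/__ndelay() as a plain argument. */
static unsigned long lpj_per_cpu(int cpu)
{
	return cpu_info[cpu].udelay_val;
}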
@@ -32,6 +32,7 @@
 #include <asm/obio.h>
 #include <asm/mxcc.h>
 #include <asm/thread_info.h>
+#include <asm/param.h>
 #include <asm/asmmacro.h>
@@ -1808,36 +1809,37 @@ fpload:
 	retl
 	 nop
 
-	.globl	ndelay
-ndelay:
+	/* __ndelay and __udelay take two arguments:
+	 * 0 - nsecs or usecs to delay
+	 * 1 - per_cpu udelay_val (loops per jiffy)
+	 *
+	 * Note that ndelay gives HZ times higher resolution but has a 10ms
+	 * limit.  udelay can handle up to 1s.
+	 */
+	.globl	__ndelay
+__ndelay:
 	save	%sp, -STACKFRAME_SZ, %sp
 	mov	%i0, %o0
 	call	.umul
-	 mov	5, %o1
+	 mov	0x1ad, %o1		! 2**32 / (1 000 000 000 / HZ)
+	call	.umul
+	 mov	%i1, %o1		! udelay_val
 	ba	delay_continue
-	 nop
+	 mov	%o1, %o0		! >>32 later for better resolution
 
-	.globl	udelay
-udelay:
+	.globl	__udelay
+__udelay:
 	save	%sp, -STACKFRAME_SZ, %sp
 	mov	%i0, %o0
 	sethi	%hi(0x10c6), %o1
 	call	.umul
-	 or	%o1, %lo(0x10c6), %o1
-delay_continue:
-#ifndef CONFIG_SMP
-	sethi	%hi(loops_per_jiffy), %o3
-	call	.umul
-	 ld	[%o3 + %lo(loops_per_jiffy)], %o1
-#else
-	GET_PROCESSOR_OFFSET(o4, o2)
-	set	cpu_data, %o3
-	call	.umul
-	 ld	[%o3 + %o4], %o1
-#endif
+	 or	%o1, %lo(0x10c6), %o1	! 2**32 / 1 000 000
 	call	.umul
-	 mov	100, %o0
+	 mov	%i1, %o1		! udelay_val
+	call	.umul
+	 mov	HZ, %o0			! >>32 earlier for wider range
 
+delay_continue:
 	cmp	%o0, 0x0
 1:
 	bne	1b
...
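The constants in the new __ndelay/__udelay above follow the sparc64-style fixed-point scheme. A rough C model of the same arithmetic (a sketch only; HZ = 100 is assumed here, and __delay_loops() is a hypothetical stand-in for the delay_continue spin loop):

#define HZ 100	/* assumption for this sketch; kernel code gets HZ from <linux/param.h> */

static void __delay_loops(unsigned long loops)	/* stand-in for delay_continue */
{
	volatile unsigned long n = loops;
	while (n--)
		;
}

/* nsecs * 0x1ad (~2**32 / (10**9 / HZ)) is a 0.32 fixed-point fraction of a
 * jiffy; multiply by loops-per-jiffy and keep the high 32 bits.  The first
 * product must fit in 32 bits, hence the ~10 ms limit. */
static void ndelay_model(unsigned long nsecs, unsigned long lpj)
{
	unsigned long jiffy_frac = nsecs * 0x1ad;
	__delay_loops(((unsigned long long)jiffy_frac * lpj) >> 32);
}

/* usecs * 0x10c6 (~2**32 / 10**6) is a 0.32 fixed-point fraction of a second;
 * the >>32 is taken before the HZ multiply ("earlier"), so arguments up to a
 * full second stay in range. */
static void udelay_model(unsigned long usecs, unsigned long lpj)
{
	unsigned long sec_frac = usecs * 0x10c6;
	unsigned long loops = ((unsigned long long)sec_frac * lpj) >> 32;
	__delay_loops(loops * HZ);
}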
@@ -20,11 +20,11 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/cache.h>
+#include <linux/delay.h>
 
 #include <asm/ptrace.h>
 #include <asm/atomic.h>
-#include <asm/delay.h>
 #include <asm/irq.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
...
@@ -164,8 +164,8 @@ EXPORT_SYMBOL(__cpu_number_map);
 EXPORT_SYMBOL(__cpu_logical_map);
 #endif
 
-EXPORT_SYMBOL(udelay);
-EXPORT_SYMBOL(ndelay);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
 EXPORT_SYMBOL(rtc_lock);
 EXPORT_SYMBOL(mostek_lock);
 EXPORT_SYMBOL(mstk48t02_regs);
...
@@ -49,7 +49,6 @@ static int smp_highest_cpu;
 extern int smp_threads_ready;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
 extern struct cpuinfo_sparc cpu_data[NR_CPUS];
-extern unsigned long cpu_offset[NR_CPUS];
 extern unsigned char boot_cpu_id;
 extern int smp_activated;
 extern volatile int __cpu_number_map[NR_CPUS];
@@ -171,9 +170,6 @@ void __init smp4d_boot_cpus(void)
 		printk("Entering SMP Mode...\n");
 
-	for (i = 0; i < NR_CPUS; i++)
-		cpu_offset[i] = (char *)&(cpu_data(i)) - (char *)&(cpu_data(0));
-
 	if (boot_cpu_id)
 		current_set[0] = NULL;
@@ -427,9 +423,6 @@ void smp4d_message_pass(int target, int msg, unsigned long data, int wait)
 extern void sparc_do_profile(unsigned long pc, unsigned long o7);
 
-#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)		cpu_data(__cpu).counter
-
 void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
 {
 	int cpu = hard_smp4d_processor_id();
...
@@ -44,7 +44,6 @@ extern unsigned long cpu_present_map;
 extern int smp_num_cpus;
 extern int smp_threads_ready;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern unsigned long cpu_offset[NR_CPUS];
 extern unsigned char boot_cpu_id;
 extern int smp_activated;
 extern volatile int __cpu_number_map[NR_CPUS];
@@ -152,9 +151,7 @@ void __init smp4m_boot_cpus(void)
 	for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
 		cpu_present_map |= (1<<mid);
 
-	/* XXX cpu_offset is broken -Keith */
 	for(i=0; i < NR_CPUS; i++) {
-		cpu_offset[i] = (char *)&(cpu_data(i)) - (char *)&(cpu_data(0));
 		__cpu_number_map[i] = -1;
 		__cpu_logical_map[i] = -1;
 	}
@@ -409,9 +406,6 @@ void smp4m_cross_call_irq(void)
 extern void sparc_do_profile(unsigned long pc, unsigned long o7);
 
-#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)		cpu_data(__cpu).counter
-
 void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
...
@@ -29,13 +29,6 @@
 		srl	%reg, 12, %reg; \
 		and	%reg, 3, %reg;
 
-#define GET_PROCESSOR_OFFSET(reg, tmp) \
-	GET_PROCESSOR_ID(reg) \
-	sethi	%hi(cpu_offset), %tmp; \
-	sll	%reg, 2, %reg; \
-	or	%tmp, %lo(cpu_offset), %tmp; \
-	ld	[%tmp + %reg], %reg;
-
 /* All trap entry points _must_ begin with this macro or else you
  * lose.  It makes sure the kernel has a proper window so that
  * c-code can be called.
...
@@ -7,7 +7,8 @@
 #ifndef __SPARC_DELAY_H
 #define __SPARC_DELAY_H
 
-extern unsigned long loops_per_jiffy;
+#include <linux/config.h>
+#include <asm/cpudata.h>
 
 extern __inline__ void __delay(unsigned long loops)
 {
@@ -20,7 +21,15 @@ extern __inline__ void __delay(unsigned long loops)
 }
 
 /* This is too messy with inline asm on the Sparc. */
-extern void udelay(unsigned long usecs);
-extern void ndelay(unsigned long usecs);
+extern void __udelay(unsigned long usecs, unsigned long lpj);
+extern void __ndelay(unsigned long nsecs, unsigned long lpj);
+
+#ifdef CONFIG_SMP
+#define __udelay_val	cpu_data(smp_processor_id()).udelay_val
+#else /* SMP */
+#define __udelay_val	loops_per_jiffy
+#endif /* SMP */
+#define udelay(__usecs)	__udelay(__usecs, __udelay_val)
+#define ndelay(__nsecs)	__ndelay(__nsecs, __udelay_val)
 
 #endif /* defined(__SPARC_DELAY_H) */
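Callers are unchanged by the two-argument form; the macros above fill in the per-CPU value. A hypothetical use (illustrative only):

#include <linux/delay.h>	/* pulls in asm/delay.h */

static void wait_for_device(void)	/* hypothetical caller */
{
	udelay(50);	/* __udelay(50, __udelay_val): this CPU's udelay_val under
			 * CONFIG_SMP, loops_per_jiffy otherwise; good up to ~1 s */
	ndelay(200);	/* __ndelay(200, __udelay_val): finer resolution, ~10 ms cap */
}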
@@ -148,7 +148,10 @@ extern __inline__ int hard_smp_processor_id(void)
 }
 #endif
 
-#define smp_processor_id() hard_smp_processor_id()
+#define smp_processor_id() (current_thread_info()->cpu)
+
+#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
+#define prof_counter(__cpu)		cpu_data(__cpu).counter
 
 #endif /* !(__ASSEMBLY__) */
...