Commit 520c380c authored by Keith M. Wesolowski

Merge kernel.bkbits.net:/home/repos/linux-2.5

into kernel.bkbits.net:/home/wesolows/sparc32-2.6
parents 67da11af 7e527026
......@@ -11,21 +11,12 @@
# Uncomment the first CFLAGS if you are doing kgdb source level
# debugging of the kernel to get the proper debugging information.
IS_EGCS := $(shell if $(CC) -m32 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo y; else echo n; fi; )
NEW_GAS := $(shell if $(LD) --version 2>&1 | grep 'elf64_sparc' > /dev/null; then echo y; else echo n; fi)
ifeq ($(NEW_GAS),y)
AS := $(AS) -32
LDFLAGS := -m elf32_sparc
endif
#CFLAGS := $(CFLAGS) -g -pipe -fcall-used-g5 -fcall-used-g7
ifneq ($(IS_EGCS),y)
CFLAGS := $(CFLAGS) -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
else
CFLAGS := $(CFLAGS) -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
AFLAGS := $(AFLAGS) -m32
endif
#LDFLAGS_vmlinux = -N -Ttext 0xf0004000
# Since 2.5.40, the first stage is left not btfix-ed.
......
......@@ -19,7 +19,6 @@
#include <asm/ptrace.h>
#include <asm/asm_offsets.h>
#include <asm/psr.h>
#include <asm/cprefix.h>
#include <asm/vaddrs.h>
#include <asm/memreg.h>
#include <asm/page.h>
......@@ -68,8 +67,8 @@ in_trap_handler:
! available before jumping into C code. It will also restore the world if you
! return from handle_exception.
.globl C_LABEL(trap_low)
C_LABEL(trap_low):
.globl trap_low
trap_low:
rd %wim, %l3
SAVE_ALL
......@@ -104,7 +103,7 @@ C_LABEL(trap_low):
wr %l0, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(handle_exception)
call handle_exception
add %sp, STACKFRAME_SZ, %o0 ! Pass address of registers
/* Load new kgdb register set. */
......@@ -134,8 +133,8 @@ C_LABEL(trap_low):
#ifdef CONFIG_BLK_DEV_FD
.text
.align 4
.globl C_LABEL(floppy_hardint)
C_LABEL(floppy_hardint):
.globl floppy_hardint
floppy_hardint:
/*
* This code cannot touch registers %l0 %l1 and %l2
* because SAVE_ALL depends on their values. It depends
......@@ -149,21 +148,21 @@ C_LABEL(floppy_hardint):
*/
/* Do we have work to do? */
sethi %hi(C_LABEL(doing_pdma)), %l7
ld [%l7 + %lo(C_LABEL(doing_pdma))], %l7
sethi %hi(doing_pdma), %l7
ld [%l7 + %lo(doing_pdma)], %l7
cmp %l7, 0
be floppy_dosoftint
nop
/* Load fdc register base */
sethi %hi(C_LABEL(fdc_status)), %l3
ld [%l3 + %lo(C_LABEL(fdc_status))], %l3
sethi %hi(fdc_status), %l3
ld [%l3 + %lo(fdc_status)], %l3
/* Setup register addresses */
sethi %hi(C_LABEL(pdma_vaddr)), %l5 ! transfer buffer
ld [%l5 + %lo(C_LABEL(pdma_vaddr))], %l4
sethi %hi(C_LABEL(pdma_size)), %l5 ! bytes to go
ld [%l5 + %lo(C_LABEL(pdma_size))], %l6
sethi %hi(pdma_vaddr), %l5 ! transfer buffer
ld [%l5 + %lo(pdma_vaddr)], %l4
sethi %hi(pdma_size), %l5 ! bytes to go
ld [%l5 + %lo(pdma_size)], %l6
next_byte:
ldub [%l3], %l7
......@@ -195,15 +194,15 @@ floppy_write:
/* fall through... */
floppy_tdone:
sethi %hi(C_LABEL(pdma_vaddr)), %l5
st %l4, [%l5 + %lo(C_LABEL(pdma_vaddr))]
sethi %hi(C_LABEL(pdma_size)), %l5
st %l6, [%l5 + %lo(C_LABEL(pdma_size))]
sethi %hi(pdma_vaddr), %l5
st %l4, [%l5 + %lo(pdma_vaddr)]
sethi %hi(pdma_size), %l5
st %l6, [%l5 + %lo(pdma_size)]
/* Flip terminal count pin */
set C_LABEL(auxio_register), %l7
set auxio_register, %l7
ld [%l7], %l7
set C_LABEL(sparc_cpu_model), %l5
set sparc_cpu_model, %l5
ld [%l5], %l5
subcc %l5, 1, %g0 /* enum { sun4c = 1 }; */
be 1f
......@@ -228,9 +227,9 @@ floppy_tdone:
stb %l5, [%l7]
/* Prevent recursion */
sethi %hi(C_LABEL(doing_pdma)), %l7
sethi %hi(doing_pdma), %l7
b floppy_dosoftint
st %g0, [%l7 + %lo(C_LABEL(doing_pdma))]
st %g0, [%l7 + %lo(doing_pdma)]
/* We emptied the FIFO, but we haven't read everything
* as of yet. Store the current transfer address and
......@@ -238,10 +237,10 @@ floppy_tdone:
* fast IRQ comes in.
*/
floppy_fifo_emptied:
sethi %hi(C_LABEL(pdma_vaddr)), %l5
st %l4, [%l5 + %lo(C_LABEL(pdma_vaddr))]
sethi %hi(C_LABEL(pdma_size)), %l7
st %l6, [%l7 + %lo(C_LABEL(pdma_size))]
sethi %hi(pdma_vaddr), %l5
st %l4, [%l5 + %lo(pdma_vaddr)]
sethi %hi(pdma_size), %l7
st %l6, [%l7 + %lo(pdma_size)]
/* Restore condition codes */
wr %l0, 0x0, %psr
......@@ -251,13 +250,13 @@ floppy_fifo_emptied:
rett %l2
floppy_overrun:
sethi %hi(C_LABEL(pdma_vaddr)), %l5
st %l4, [%l5 + %lo(C_LABEL(pdma_vaddr))]
sethi %hi(C_LABEL(pdma_size)), %l5
st %l6, [%l5 + %lo(C_LABEL(pdma_size))]
sethi %hi(pdma_vaddr), %l5
st %l4, [%l5 + %lo(pdma_vaddr)]
sethi %hi(pdma_size), %l5
st %l6, [%l5 + %lo(pdma_size)]
/* Prevent recursion */
sethi %hi(C_LABEL(doing_pdma)), %l7
st %g0, [%l7 + %lo(C_LABEL(doing_pdma))]
sethi %hi(doing_pdma), %l7
st %g0, [%l7 + %lo(doing_pdma)]
/* fall through... */
floppy_dosoftint:
......@@ -273,7 +272,7 @@ floppy_dosoftint:
mov 11, %o0 ! floppy irq level (unused anyway)
mov %g0, %o1 ! devid is not used in fast interrupts
call C_LABEL(sparc_floppy_irq)
call sparc_floppy_irq
add %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs
RESTORE_ALL
......@@ -290,7 +289,7 @@ bad_trap_handler:
mov %l7, %o0 ! trap number
mov %l0, %o1 ! psr
call C_LABEL(do_hw_interrupt)
call do_hw_interrupt
mov %l1, %o2 ! pc
RESTORE_ALL
......@@ -322,7 +321,7 @@ real_irq_continue:
WRITE_PAUSE
mov %l7, %o0 ! irq level
patch_handler_irq:
call C_LABEL(handler_irq)
call handler_irq
add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
or %l0, PSR_PIL, %g2 ! restore PIL after handler_irq
wr %g2, PSR_ET, %psr ! keep ET up
......@@ -339,7 +338,7 @@ smp4m_ticker:
WRITE_PAUSE
wr %g2, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(smp4m_percpu_timer_interrupt)
call smp4m_percpu_timer_interrupt
add %sp, STACKFRAME_SZ, %o0
wr %l0, PSR_ET, %psr
WRITE_PAUSE
......@@ -351,7 +350,7 @@ smp4m_ticker:
*/
maybe_smp4m_msg:
GET_PROCESSOR_MID(o3, o2)
set C_LABEL(sun4m_interrupts), %l5
set sun4m_interrupts, %l5
ld [%l5], %o5
sethi %hi(0x60000000), %o4
sll %o3, 12, %o3
......@@ -378,10 +377,10 @@ maybe_smp4m_msg:
tst %o2
bne 2f
nop
call C_LABEL(smp_reschedule_irq)
call smp_reschedule_irq
add %o7, 8, %o7
2:
call C_LABEL(smp_stop_cpu_irq)
call smp_stop_cpu_irq
nop
RESTORE_ALL
......@@ -391,7 +390,7 @@ linux_trap_ipi15_sun4m:
SAVE_ALL
sethi %hi(0x80000000), %o2
GET_PROCESSOR_MID(o0, o1)
set C_LABEL(sun4m_interrupts), %l5
set sun4m_interrupts, %l5
ld [%l5], %o5
sll %o0, 12, %o0
add %o5, %o0, %o5
......@@ -407,7 +406,7 @@ linux_trap_ipi15_sun4m:
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(smp4m_cross_call_irq)
call smp4m_cross_call_irq
nop
b ret_trap_lockless_ipi
clr %l6
......@@ -426,7 +425,7 @@ linux_trap_ipi15_sun4m:
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(sun4m_nmi)
call sun4m_nmi
nop
st %l4, [%l5 + 0x8]
WRITE_PAUSE
......@@ -447,7 +446,7 @@ smp4d_ticker:
WRITE_PAUSE
wr %g2, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(smp4d_percpu_timer_interrupt)
call smp4d_percpu_timer_interrupt
add %sp, STACKFRAME_SZ, %o0
wr %l0, PSR_ET, %psr
WRITE_PAUSE
......@@ -475,7 +474,7 @@ linux_trap_ipi15_sun4d:
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(smp4d_cross_call_irq)
call smp4d_cross_call_irq
nop
b ret_trap_lockless_ipi
clr %l6
......@@ -513,7 +512,7 @@ bad_instruction:
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call C_LABEL(do_illegal_instruction)
call do_illegal_instruction
mov %l0, %o3
RESTORE_ALL
......@@ -533,7 +532,7 @@ priv_instruction:
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call C_LABEL(do_priv_instruction)
call do_priv_instruction
mov %l0, %o3
RESTORE_ALL
......@@ -552,7 +551,7 @@ mna_handler:
WRITE_PAUSE
ld [%l1], %o1
call C_LABEL(kernel_unaligned_trap)
call kernel_unaligned_trap
add %sp, STACKFRAME_SZ, %o0
RESTORE_ALL
......@@ -564,7 +563,7 @@ mna_fromuser:
WRITE_PAUSE
ld [%l1], %o1
call C_LABEL(user_unaligned_trap)
call user_unaligned_trap
add %sp, STACKFRAME_SZ, %o0
RESTORE_ALL
......@@ -581,7 +580,7 @@ fpd_trap_handler:
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call C_LABEL(do_fpd_trap)
call do_fpd_trap
mov %l0, %o3
RESTORE_ALL
......@@ -593,8 +592,8 @@ fpe_trap_handler:
set fpsave_magic, %l5
cmp %l1, %l5
be 1f
sethi %hi(C_LABEL(fpsave)), %l5
or %l5, %lo(C_LABEL(fpsave)), %l5
sethi %hi(fpsave), %l5
or %l5, %lo(fpsave), %l5
cmp %l1, %l5
bne 2f
sethi %hi(fpsave_catch2), %l5
......@@ -620,7 +619,7 @@ fpe_trap_handler:
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call C_LABEL(do_fpe_trap)
call do_fpe_trap
mov %l0, %o3
RESTORE_ALL
......@@ -637,7 +636,7 @@ do_tag_overflow:
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call C_LABEL(handle_tag_overflow)
call handle_tag_overflow
mov %l0, %o3
RESTORE_ALL
......@@ -654,7 +653,7 @@ do_watchpoint:
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call C_LABEL(handle_watchpoint)
call handle_watchpoint
mov %l0, %o3
RESTORE_ALL
......@@ -671,7 +670,7 @@ do_reg_access:
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call C_LABEL(handle_reg_access)
call handle_reg_access
mov %l0, %o3
RESTORE_ALL
......@@ -688,7 +687,7 @@ do_cp_disabled:
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call C_LABEL(handle_cp_disabled)
call handle_cp_disabled
mov %l0, %o3
RESTORE_ALL
......@@ -705,7 +704,7 @@ do_cp_exception:
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call C_LABEL(handle_cp_exception)
call handle_cp_exception
mov %l0, %o3
RESTORE_ALL
......@@ -722,7 +721,7 @@ do_hw_divzero:
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call C_LABEL(handle_hw_divzero)
call handle_hw_divzero
mov %l0, %o3
RESTORE_ALL
......@@ -739,7 +738,7 @@ do_flush_windows:
bne dfw_kernel
nop
call C_LABEL(flush_user_windows)
call flush_user_windows
nop
/* Advance over the trap instruction. */
......@@ -805,8 +804,8 @@ linux_trap_nmi_sun4c:
/* Ugh, we need to clear the IRQ line. This is now
* a very sun4c specific trap handler...
*/
sethi %hi(C_LABEL(interrupt_enable)), %l5
ld [%l5 + %lo(C_LABEL(interrupt_enable))], %l5
sethi %hi(interrupt_enable), %l5
ld [%l5 + %lo(interrupt_enable)], %l5
ldub [%l5], %l6
andn %l6, INTS_ENAB, %l6
stb %l6, [%l5]
......@@ -829,51 +828,51 @@ linux_trap_nmi_sun4c:
lda [%o0] ASI_CONTROL, %o4 ! async vaddr
sub %o0, 0x4, %o0
lda [%o0] ASI_CONTROL, %o3 ! async error
call C_LABEL(sparc_lvl15_nmi)
call sparc_lvl15_nmi
add %sp, STACKFRAME_SZ, %o0
RESTORE_ALL
.align 4
.globl C_LABEL(invalid_segment_patch1_ff)
.globl C_LABEL(invalid_segment_patch2_ff)
C_LABEL(invalid_segment_patch1_ff): cmp %l4, 0xff
C_LABEL(invalid_segment_patch2_ff): mov 0xff, %l3
.globl invalid_segment_patch1_ff
.globl invalid_segment_patch2_ff
invalid_segment_patch1_ff: cmp %l4, 0xff
invalid_segment_patch2_ff: mov 0xff, %l3
.align 4
.globl C_LABEL(invalid_segment_patch1_1ff)
.globl C_LABEL(invalid_segment_patch2_1ff)
C_LABEL(invalid_segment_patch1_1ff): cmp %l4, 0x1ff
C_LABEL(invalid_segment_patch2_1ff): mov 0x1ff, %l3
.globl invalid_segment_patch1_1ff
.globl invalid_segment_patch2_1ff
invalid_segment_patch1_1ff: cmp %l4, 0x1ff
invalid_segment_patch2_1ff: mov 0x1ff, %l3
.align 4
.globl C_LABEL(num_context_patch1_16), C_LABEL(num_context_patch2_16)
C_LABEL(num_context_patch1_16): mov 0x10, %l7
C_LABEL(num_context_patch2_16): mov 0x10, %l7
.globl num_context_patch1_16, num_context_patch2_16
num_context_patch1_16: mov 0x10, %l7
num_context_patch2_16: mov 0x10, %l7
.align 4
.globl C_LABEL(vac_linesize_patch_32)
C_LABEL(vac_linesize_patch_32): subcc %l7, 32, %l7
.globl vac_linesize_patch_32
vac_linesize_patch_32: subcc %l7, 32, %l7
.align 4
.globl C_LABEL(vac_hwflush_patch1_on), C_LABEL(vac_hwflush_patch2_on)
.globl vac_hwflush_patch1_on, vac_hwflush_patch2_on
/*
 * Ugly, but we can't use hardware flushing on the sun4 and we'd require
* two instructions (Anton)
*/
#ifdef CONFIG_SUN4
C_LABEL(vac_hwflush_patch1_on): nop
vac_hwflush_patch1_on: nop
#else
C_LABEL(vac_hwflush_patch1_on): addcc %l7, -PAGE_SIZE, %l7
vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7
#endif
C_LABEL(vac_hwflush_patch2_on): sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG
vac_hwflush_patch2_on: sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG
.globl C_LABEL(invalid_segment_patch1), C_LABEL(invalid_segment_patch2)
.globl C_LABEL(num_context_patch1), C_LABEL(num_context_patch2)
.globl C_LABEL(vac_linesize_patch), C_LABEL(vac_hwflush_patch1)
.globl C_LABEL(vac_hwflush_patch2)
.globl invalid_segment_patch1, invalid_segment_patch2
.globl num_context_patch1, num_context_patch2
.globl vac_linesize_patch, vac_hwflush_patch1
.globl vac_hwflush_patch2
.align 4
.globl sun4c_fault
......@@ -886,8 +885,8 @@ C_LABEL(vac_hwflush_patch2_on): sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG
! We want error in %l5, vaddr in %l6
sun4c_fault:
#ifdef CONFIG_SUN4
sethi %hi(C_LABEL(sun4c_memerr_reg)), %l4
ld [%l4+%lo(C_LABEL(sun4c_memerr_reg))], %l4 ! memerr ctrl reg addr
sethi %hi(sun4c_memerr_reg), %l4
ld [%l4+%lo(sun4c_memerr_reg)], %l4 ! memerr ctrl reg addr
ld [%l4], %l6 ! memerr ctrl reg
ld [%l4 + 4], %l5 ! memerr vaddr reg
andcc %l6, 0x80, %g0 ! check for error type
......@@ -895,7 +894,7 @@ sun4c_fault:
be 0f ! normal error
sethi %hi(AC_BUS_ERROR), %l4 ! bus err reg addr
call C_LABEL(prom_halt) ! something weird happened
call prom_halt ! something weird happened
! what exactly did happen?
! what should we do here?
......@@ -959,12 +958,12 @@ sun4c_fault:
/* Test for NULL pte_t * in vmalloc area. */
sethi %hi(VMALLOC_START), %l4
cmp %l5, %l4
blu,a C_LABEL(invalid_segment_patch1)
blu,a invalid_segment_patch1
lduXa [%l5] ASI_SEGMAP, %l4
sethi %hi(C_LABEL(swapper_pg_dir)), %l4
sethi %hi(swapper_pg_dir), %l4
srl %l5, SUN4C_PGDIR_SHIFT, %l6
or %l4, %lo(C_LABEL(swapper_pg_dir)), %l4
or %l4, %lo(swapper_pg_dir), %l4
sll %l6, 2, %l6
ld [%l4 + %l6], %l4
#ifdef CONFIG_SUN4
......@@ -976,15 +975,15 @@ sun4c_fault:
be sun4c_fault_fromuser
lduXa [%l5] ASI_SEGMAP, %l4
C_LABEL(invalid_segment_patch1):
invalid_segment_patch1:
cmp %l4, 0x7f
bne 1f
sethi %hi(C_LABEL(sun4c_kfree_ring)), %l4
or %l4, %lo(C_LABEL(sun4c_kfree_ring)), %l4
sethi %hi(sun4c_kfree_ring), %l4
or %l4, %lo(sun4c_kfree_ring), %l4
ld [%l4 + 0x18], %l3
deccc %l3 ! do we have a free entry?
bcs,a 2f ! no, unmap one.
sethi %hi(C_LABEL(sun4c_kernel_ring)), %l4
sethi %hi(sun4c_kernel_ring), %l4
st %l3, [%l4 + 0x18] ! sun4c_kfree_ring.num_entries--
......@@ -997,8 +996,8 @@ C_LABEL(invalid_segment_patch1):
st %l7, [%l3 + 0x04] ! next->prev = entry->prev
st %l3, [%l7 + 0x00] ! entry->prev->next = next
sethi %hi(C_LABEL(sun4c_kernel_ring)), %l4
or %l4, %lo(C_LABEL(sun4c_kernel_ring)), %l4
sethi %hi(sun4c_kernel_ring), %l4
or %l4, %lo(sun4c_kernel_ring), %l4
! head = &sun4c_kernel_ring.ringhd
ld [%l4 + 0x00], %l7 ! head->next
......@@ -1016,7 +1015,7 @@ C_LABEL(invalid_segment_patch1):
ld [%l6 + 0x08], %l5
2:
or %l4, %lo(C_LABEL(sun4c_kernel_ring)), %l4
or %l4, %lo(sun4c_kernel_ring), %l4
! head = &sun4c_kernel_ring.ringhd
ld [%l4 + 0x04], %l6 ! entry = head->prev
......@@ -1030,11 +1029,11 @@ C_LABEL(invalid_segment_patch1):
sethi %hi((64 * 1024)), %l7
#endif
9:
C_LABEL(vac_hwflush_patch1):
C_LABEL(vac_linesize_patch):
vac_hwflush_patch1:
vac_linesize_patch:
subcc %l7, 16, %l7
bne 9b
C_LABEL(vac_hwflush_patch2):
vac_hwflush_patch2:
sta %g0, [%l3 + %l7] ASI_FLUSHSEG
st %l5, [%l6 + 0x08] ! entry->vaddr = address
......@@ -1055,7 +1054,7 @@ C_LABEL(vac_hwflush_patch2):
mov %l3, %l5 ! address = tmp
4:
C_LABEL(num_context_patch1):
num_context_patch1:
mov 0x08, %l7
ld [%l6 + 0x08], %l4
......@@ -1072,7 +1071,7 @@ C_LABEL(num_context_patch1):
3: deccc %l7
sethi %hi(AC_CONTEXT), %l3
stba %l7, [%l3] ASI_CONTROL
C_LABEL(invalid_segment_patch2):
invalid_segment_patch2:
mov 0x7f, %l3
stXa %l3, [%l5] ASI_SEGMAP
andn %l4, 0x1ff, %l3
......@@ -1108,12 +1107,12 @@ C_LABEL(invalid_segment_patch2):
add %l5, %l4, %l5
b 7f
sethi %hi(C_LABEL(sun4c_kernel_faults)), %l4
sethi %hi(sun4c_kernel_faults), %l4
1:
srl %l5, SUN4C_PGDIR_SHIFT, %l3
sethi %hi(C_LABEL(swapper_pg_dir)), %l4
or %l4, %lo(C_LABEL(swapper_pg_dir)), %l4
sethi %hi(swapper_pg_dir), %l4
or %l4, %lo(swapper_pg_dir), %l4
sll %l3, 2, %l3
ld [%l4 + %l3], %l4
#ifndef CONFIG_SUN4
......@@ -1137,11 +1136,11 @@ C_LABEL(invalid_segment_patch2):
bne 2b
add %l5, %l4, %l5
sethi %hi(C_LABEL(sun4c_kernel_faults)), %l4
sethi %hi(sun4c_kernel_faults), %l4
7:
ld [%l4 + %lo(C_LABEL(sun4c_kernel_faults))], %l3
ld [%l4 + %lo(sun4c_kernel_faults)], %l3
inc %l3
st %l3, [%l4 + %lo(C_LABEL(sun4c_kernel_faults))]
st %l3, [%l4 + %lo(sun4c_kernel_faults)]
/* Restore condition codes */
wr %l0, 0x0, %psr
......@@ -1163,14 +1162,14 @@ sun4c_fault_fromuser:
wr %l0, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(do_sun4c_fault)
call do_sun4c_fault
add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
RESTORE_ALL
.align 4
.globl C_LABEL(srmmu_fault)
C_LABEL(srmmu_fault):
.globl srmmu_fault
srmmu_fault:
mov 0x400, %l5
mov 0x300, %l4
......@@ -1197,7 +1196,7 @@ C_LABEL(srmmu_fault):
wr %l0, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(do_sparc_fault)
call do_sparc_fault
add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
RESTORE_ALL
......@@ -1207,19 +1206,19 @@ C_LABEL(srmmu_fault):
* like indir_syscall(scall_num, arg0, arg1, arg2...); etc.
* This is complete brain damage.
*/
.globl C_LABEL(sunos_indir)
C_LABEL(sunos_indir):
.globl sunos_indir
sunos_indir:
mov %o7, %l4
cmp %o0, NR_SYSCALLS
blu,a 1f
sll %o0, 0x2, %o0
sethi %hi(C_LABEL(sunos_nosys)), %l6
sethi %hi(sunos_nosys), %l6
b 2f
or %l6, %lo(C_LABEL(sunos_nosys)), %l6
or %l6, %lo(sunos_nosys), %l6
1:
set C_LABEL(sunos_sys_table), %l7
set sunos_sys_table, %l7
ld [%l7 + %o0], %l6
2:
......@@ -1233,17 +1232,17 @@ C_LABEL(sunos_indir):
#endif
.align 4
.globl C_LABEL(sys_nis_syscall)
C_LABEL(sys_nis_syscall):
.globl sys_nis_syscall
sys_nis_syscall:
mov %o7, %l5
add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
call C_LABEL(c_sys_nis_syscall)
call c_sys_nis_syscall
mov %l5, %o7
.align 4
.globl C_LABEL(sys_ptrace)
C_LABEL(sys_ptrace):
call C_LABEL(do_ptrace)
.globl sys_ptrace
sys_ptrace:
call do_ptrace
add %sp, STACKFRAME_SZ, %o0
ld [%curptr + TI_FLAGS], %l5
......@@ -1251,49 +1250,49 @@ C_LABEL(sys_ptrace):
be 1f
nop
call C_LABEL(syscall_trace)
call syscall_trace
nop
1:
RESTORE_ALL
.align 4
.globl C_LABEL(sys_execve)
C_LABEL(sys_execve):
.globl sys_execve
sys_execve:
mov %o7, %l5
add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
call C_LABEL(sparc_execve)
call sparc_execve
mov %l5, %o7
.align 4
.globl C_LABEL(sys_pipe)
C_LABEL(sys_pipe):
.globl sys_pipe
sys_pipe:
mov %o7, %l5
add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
call C_LABEL(sparc_pipe)
call sparc_pipe
mov %l5, %o7
.align 4
.globl C_LABEL(sys_sigaltstack)
C_LABEL(sys_sigaltstack):
.globl sys_sigaltstack
sys_sigaltstack:
mov %o7, %l5
mov %fp, %o2
call C_LABEL(do_sigaltstack)
call do_sigaltstack
mov %l5, %o7
.align 4
.globl C_LABEL(sys_sigstack)
C_LABEL(sys_sigstack):
.globl sys_sigstack
sys_sigstack:
mov %o7, %l5
mov %fp, %o2
call C_LABEL(do_sys_sigstack)
call do_sys_sigstack
mov %l5, %o7
.align 4
.globl C_LABEL(sys_sigpause)
C_LABEL(sys_sigpause):
.globl sys_sigpause
sys_sigpause:
/* Note: %o0 already has correct value... */
call C_LABEL(do_sigpause)
call do_sigpause
add %sp, STACKFRAME_SZ, %o1
ld [%curptr + TI_FLAGS], %l5
......@@ -1301,7 +1300,7 @@ C_LABEL(sys_sigpause):
be 1f
nop
call C_LABEL(syscall_trace)
call syscall_trace
nop
1:
......@@ -1309,9 +1308,9 @@ C_LABEL(sys_sigpause):
RESTORE_ALL
.align 4
.globl C_LABEL(sys_sigsuspend)
C_LABEL(sys_sigsuspend):
call C_LABEL(do_sigsuspend)
.globl sys_sigsuspend
sys_sigsuspend:
call do_sigsuspend
add %sp, STACKFRAME_SZ, %o0
ld [%curptr + TI_FLAGS], %l5
......@@ -1319,7 +1318,7 @@ C_LABEL(sys_sigsuspend):
be 1f
nop
call C_LABEL(syscall_trace)
call syscall_trace
nop
1:
......@@ -1327,10 +1326,10 @@ C_LABEL(sys_sigsuspend):
RESTORE_ALL
.align 4
.globl C_LABEL(sys_rt_sigsuspend)
C_LABEL(sys_rt_sigsuspend):
.globl sys_rt_sigsuspend
sys_rt_sigsuspend:
/* Note: %o0, %o1 already have correct value... */
call C_LABEL(do_rt_sigsuspend)
call do_rt_sigsuspend
add %sp, STACKFRAME_SZ, %o2
ld [%curptr + TI_FLAGS], %l5
......@@ -1338,7 +1337,7 @@ C_LABEL(sys_rt_sigsuspend):
be 1f
nop
call C_LABEL(syscall_trace)
call syscall_trace
nop
1:
......@@ -1346,9 +1345,9 @@ C_LABEL(sys_rt_sigsuspend):
RESTORE_ALL
.align 4
.globl C_LABEL(sys_sigreturn)
C_LABEL(sys_sigreturn):
call C_LABEL(do_sigreturn)
.globl sys_sigreturn
sys_sigreturn:
call do_sigreturn
add %sp, STACKFRAME_SZ, %o0
ld [%curptr + TI_FLAGS], %l5
......@@ -1356,7 +1355,7 @@ C_LABEL(sys_sigreturn):
be 1f
nop
call C_LABEL(syscall_trace)
call syscall_trace
nop
1:
......@@ -1366,9 +1365,9 @@ C_LABEL(sys_sigreturn):
RESTORE_ALL
.align 4
.globl C_LABEL(sys_rt_sigreturn)
C_LABEL(sys_rt_sigreturn):
call C_LABEL(do_rt_sigreturn)
.globl sys_rt_sigreturn
sys_rt_sigreturn:
call do_rt_sigreturn
add %sp, STACKFRAME_SZ, %o0
ld [%curptr + TI_FLAGS], %l5
......@@ -1376,7 +1375,7 @@ C_LABEL(sys_rt_sigreturn):
be 1f
nop
call C_LABEL(syscall_trace)
call syscall_trace
nop
1:
......@@ -1391,8 +1390,8 @@ C_LABEL(sys_rt_sigreturn):
* XXX code just like on sparc64... -DaveM
*/
.align 4
.globl C_LABEL(sys_fork), flush_patch_two
C_LABEL(sys_fork):
.globl sys_fork, flush_patch_two
sys_fork:
mov %o7, %l5
flush_patch_two:
FLUSH_ALL_KERNEL_WINDOWS;
......@@ -1406,12 +1405,12 @@ flush_patch_two:
std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr
mov 0, %o3
call C_LABEL(sparc_do_fork)
call sparc_do_fork
mov %l5, %o7
/* Whee, kernel threads! */
.globl C_LABEL(sys_clone), flush_patch_three
C_LABEL(sys_clone):
.globl sys_clone, flush_patch_three
sys_clone:
mov %o7, %l5
flush_patch_three:
FLUSH_ALL_KERNEL_WINDOWS;
......@@ -1430,12 +1429,12 @@ flush_patch_three:
std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr
mov 0, %o3
call C_LABEL(sparc_do_fork)
call sparc_do_fork
mov %l5, %o7
/* Whee, real vfork! */
.globl C_LABEL(sys_vfork), flush_patch_four
C_LABEL(sys_vfork):
.globl sys_vfork, flush_patch_four
sys_vfork:
flush_patch_four:
FLUSH_ALL_KERNEL_WINDOWS;
ld [%curptr + TI_TASK], %o4
......@@ -1447,16 +1446,16 @@ flush_patch_four:
sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0
mov %fp, %o1
or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
sethi %hi(C_LABEL(sparc_do_fork)), %l1
sethi %hi(sparc_do_fork), %l1
mov 0, %o3
jmpl %l1 + %lo(C_LABEL(sparc_do_fork)), %g0
jmpl %l1 + %lo(sparc_do_fork), %g0
add %sp, STACKFRAME_SZ, %o2
.align 4
linux_sparc_ni_syscall:
sethi %hi(C_LABEL(sys_ni_syscall)), %l7
sethi %hi(sys_ni_syscall), %l7
b syscall_is_too_hard
or %l7, %lo(C_LABEL(sys_ni_syscall)), %l7
or %l7, %lo(sys_ni_syscall), %l7
linux_fast_syscall:
andn %l7, 3, %l7
......@@ -1467,7 +1466,7 @@ linux_fast_syscall:
mov %i3, %o3
linux_syscall_trace:
call C_LABEL(syscall_trace)
call syscall_trace
nop
mov %i0, %o0
mov %i1, %o1
......@@ -1476,11 +1475,11 @@ linux_syscall_trace:
b 2f
mov %i4, %o4
.globl C_LABEL(ret_from_fork)
C_LABEL(ret_from_fork):
.globl ret_from_fork
ret_from_fork:
call schedule_tail
mov %g3, %o0
b C_LABEL(ret_sys_call)
b ret_sys_call
ld [%sp + STACKFRAME_SZ + PT_I0], %o0
/* Linux native and SunOS system calls enter here... */
......@@ -1518,8 +1517,8 @@ syscall_is_too_hard:
st %o0, [%sp + STACKFRAME_SZ + PT_I0]
.globl C_LABEL(ret_sys_call)
C_LABEL(ret_sys_call):
.globl ret_sys_call
ret_sys_call:
ld [%curptr + TI_FLAGS], %l6
cmp %o0, -ENOIOCTLCMD
ld [%sp + STACKFRAME_SZ + PT_PSR], %g3
......@@ -1554,7 +1553,7 @@ C_LABEL(ret_sys_call):
st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
linux_syscall_trace2:
call C_LABEL(syscall_trace)
call syscall_trace
add %l1, 0x4, %l2 /* npc = npc+4 */
st %l1, [%sp + STACKFRAME_SZ + PT_PC]
b ret_trap_entry
......@@ -1595,7 +1594,7 @@ solaris_syscall:
nop
mov %i0, %l5
call C_LABEL(do_solaris_syscall)
call do_solaris_syscall
add %sp, STACKFRAME_SZ, %o0
st %o0, [%sp + STACKFRAME_SZ + PT_I0]
......@@ -1651,7 +1650,7 @@ sunos_syscall:
nop
nop
mov %i0, %l5
call C_LABEL(do_sunos_syscall)
call do_sunos_syscall
add %sp, STACKFRAME_SZ, %o0
#endif
......@@ -1664,7 +1663,7 @@ bsd_syscall:
blu,a 1f
sll %g1, 2, %l4
set C_LABEL(sys_ni_syscall), %l7
set sys_ni_syscall, %l7
b bsd_is_too_hard
nop
......@@ -1707,8 +1706,8 @@ bsd_is_too_hard:
*/
sub %g0, %o0, %o0
#if 0 /* XXX todo XXX */
sethi %hi(C_LABEL(bsd_xlatb_rorl), %o3
or %o3, %lo(C_LABEL(bsd_xlatb_rorl)), %o3
sethi %hi(bsd_xlatb_rorl), %o3
or %o3, %lo(bsd_xlatb_rorl), %o3
sll %o0, 2, %o0
ld [%o3 + %o0], %o0
#endif
......@@ -1731,8 +1730,8 @@ bsd_is_too_hard:
* void *fpqueue, unsigned long *fpqdepth)
*/
.globl C_LABEL(fpsave)
C_LABEL(fpsave):
.globl fpsave
fpsave:
st %fsr, [%o1] ! this can trap on us if fpu is in bogon state
ld [%o1], %g1
set 0x2000, %g4
......@@ -1782,13 +1781,13 @@ fpsave_catch:
st %fsr, [%o1]
fpsave_catch2:
b C_LABEL(fpsave) + 4
b fpsave + 4
st %fsr, [%o1]
/* void fpload(unsigned long *fpregs, unsigned long *fsr); */
.globl C_LABEL(fpload)
C_LABEL(fpload):
.globl fpload
fpload:
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
......@@ -1809,8 +1808,8 @@ C_LABEL(fpload):
retl
nop
.globl C_LABEL(ndelay)
C_LABEL(ndelay):
.globl ndelay
ndelay:
save %sp, -STACKFRAME_SZ, %sp
mov %i0, %o0
call .umul
......@@ -1818,8 +1817,8 @@ C_LABEL(ndelay):
ba delay_continue
nop
.globl C_LABEL(udelay)
C_LABEL(udelay):
.globl udelay
udelay:
save %sp, -STACKFRAME_SZ, %sp
mov %i0, %o0
sethi %hi(0x10c6), %o1
......@@ -1827,12 +1826,12 @@ C_LABEL(udelay):
or %o1, %lo(0x10c6), %o1
delay_continue:
#ifndef CONFIG_SMP
sethi %hi(C_LABEL(loops_per_jiffy)), %o3
sethi %hi(loops_per_jiffy), %o3
call .umul
ld [%o3 + %lo(C_LABEL(loops_per_jiffy))], %o1
ld [%o3 + %lo(loops_per_jiffy)], %o1
#else
GET_PROCESSOR_OFFSET(o4, o2)
set C_LABEL(cpu_data), %o3
set cpu_data, %o3
call .umul
ld [%o3 + %o4], %o1
#endif
......@@ -1858,14 +1857,14 @@ breakpoint_trap:
WRITE_PAUSE
st %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
call C_LABEL(sparc_breakpoint)
call sparc_breakpoint
add %sp, STACKFRAME_SZ, %o0
RESTORE_ALL
.align 4
.globl C_LABEL(__handle_exception), flush_patch_exception
C_LABEL(__handle_exception):
.globl __handle_exception, flush_patch_exception
__handle_exception:
flush_patch_exception:
FLUSH_ALL_KERNEL_WINDOWS;
ldd [%o0], %o6
......@@ -1873,7 +1872,7 @@ flush_patch_exception:
mov 1, %g1 ! signal EFAULT condition
.align 4
.globl C_LABEL(kill_user_windows), kuw_patch1_7win
.globl kill_user_windows, kuw_patch1_7win
.globl kuw_patch1
kuw_patch1_7win: sll %o3, 6, %o3
......@@ -1881,7 +1880,7 @@ kuw_patch1_7win: sll %o3, 6, %o3
 * case scenario, it is several times better than taking the
* traps with the old method of just doing flush_user_windows().
*/
C_LABEL(kill_user_windows):
kill_user_windows:
ld [%g6 + TI_UWINMASK], %o0 ! get current umask
orcc %g0, %o0, %g0 ! if no bits set, we are done
be 3f ! nothing to do
......@@ -1911,8 +1910,8 @@ kuw_patch1:
st %g0, [%g6 + TI_W_SAVED] ! no windows saved
.align 4
.globl C_LABEL(restore_current)
C_LABEL(restore_current):
.globl restore_current
restore_current:
LOAD_CURRENT(g6, o0)
retl
nop
......@@ -1932,8 +1931,8 @@ linux_trap_ipi15_pcic:
* The busy loop is necessary because the PIO error
* sometimes does not go away quickly and we trap again.
*/
sethi %hi(C_LABEL(pcic_regs)), %o1
ld [%o1 + %lo(C_LABEL(pcic_regs))], %o2
sethi %hi(pcic_regs), %o1
ld [%o1 + %lo(pcic_regs)], %o2
! Get pending status for printouts later.
ld [%o2 + PCI_SYS_INT_PENDING], %o0
......@@ -1952,12 +1951,12 @@ linux_trap_ipi15_pcic:
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(pcic_nmi)
call pcic_nmi
add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
RESTORE_ALL
.globl C_LABEL(pcic_nmi_trap_patch)
C_LABEL(pcic_nmi_trap_patch):
.globl pcic_nmi_trap_patch
pcic_nmi_trap_patch:
sethi %hi(linux_trap_ipi15_pcic), %l3
jmpl %l3 + %lo(linux_trap_ipi15_pcic), %g0
rd %psr, %l0
......
......@@ -5,7 +5,6 @@
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/cprefix.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/contregs.h>
......@@ -217,9 +216,9 @@ tsetup_patch6:
/* Call MMU-architecture dependent stack checking
* routine.
*/
.globl C_LABEL(tsetup_mmu_patchme)
C_LABEL(tsetup_mmu_patchme):
b C_LABEL(tsetup_sun4c_stackchk)
.globl tsetup_mmu_patchme
tsetup_mmu_patchme:
b tsetup_sun4c_stackchk
andcc %sp, 0x7, %g0
/* Architecture specific stack checking routines. When either
......@@ -229,8 +228,8 @@ C_LABEL(tsetup_mmu_patchme):
*/
#define glob_tmp g1
.globl C_LABEL(tsetup_sun4c_stackchk)
C_LABEL(tsetup_sun4c_stackchk):
.globl tsetup_sun4c_stackchk
tsetup_sun4c_stackchk:
/* Done by caller: andcc %sp, 0x7, %g0 */
bne trap_setup_user_stack_is_bolixed
sra %sp, 29, %glob_tmp
......@@ -276,8 +275,8 @@ tsetup_sun4c_onepage:
jmpl %t_retpc + 0x8, %g0
mov %t_kstack, %sp
.globl C_LABEL(tsetup_srmmu_stackchk)
C_LABEL(tsetup_srmmu_stackchk):
.globl tsetup_srmmu_stackchk
tsetup_srmmu_stackchk:
/* Check results of callers andcc %sp, 0x7, %g0 */
bne trap_setup_user_stack_is_bolixed
sethi %hi(PAGE_OFFSET), %glob_tmp
......
......@@ -14,7 +14,6 @@
#include <linux/config.h>
#include <linux/init.h>
#include <asm/cprefix.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/contregs.h>
......@@ -34,18 +33,18 @@
*/
.align 4
.globl C_LABEL(cputyp)
C_LABEL(cputyp):
.globl cputyp
cputyp:
.word 1
.align 4
.globl C_LABEL(cputypval)
C_LABEL(cputypval):
.globl cputypval
cputypval:
.asciz "sun4c"
.ascii " "
C_LABEL(cputypvalend):
C_LABEL(cputypvallen) = C_LABEL(cputypvar) - C_LABEL(cputypval)
cputypvalend:
cputypvallen = cputypvar - cputypval
.align 4
/*
......@@ -56,12 +55,12 @@ C_LABEL(cputypvallen) = C_LABEL(cputypvar) - C_LABEL(cputypval)
/* Uh, actually Linus it is I who cannot spell. Too much murky
* Sparc assembly will do this to ya.
*/
C_LABEL(cputypvar):
cputypvar:
.asciz "compatability"
/* Tested on SS-5, SS-10. Probably someone at Sun applied a spell-checker. */
.align 4
C_LABEL(cputypvar_sun4m):
cputypvar_sun4m:
.asciz "compatible"
.align 4
......@@ -88,14 +87,14 @@ sun4e_notsup:
/* The Sparc trap table, bootloader gives us control at _start. */
.text
.globl start, _stext, _start, __stext
.globl C_LABEL(trapbase)
.globl trapbase
_start: /* danger danger */
__stext:
_stext:
start:
C_LABEL(trapbase):
trapbase:
#ifdef CONFIG_SMP
C_LABEL(trapbase_cpu0):
trapbase_cpu0:
#endif
/* We get control passed to us here at t_zero. */
t_zero: b gokernel; nop; nop; nop;
......@@ -202,13 +201,13 @@ t_badfc:BAD_TRAP(0xfc) BAD_TRAP(0xfd)
dbtrap: BAD_TRAP(0xfe) /* Debugger/PROM breakpoint #1 */
dbtrap2:BAD_TRAP(0xff) /* Debugger/PROM breakpoint #2 */
.globl C_LABEL(end_traptable)
C_LABEL(end_traptable):
.globl end_traptable
end_traptable:
#ifdef CONFIG_SMP
/* Trap tables for the other cpus. */
.globl C_LABEL(trapbase_cpu1), C_LABEL(trapbase_cpu2), C_LABEL(trapbase_cpu3)
C_LABEL(trapbase_cpu1):
.globl trapbase_cpu1, trapbase_cpu2, trapbase_cpu3
trapbase_cpu1:
BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
......@@ -276,7 +275,7 @@ C_LABEL(trapbase_cpu1):
BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
C_LABEL(trapbase_cpu2):
trapbase_cpu2:
BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
......@@ -344,7 +343,7 @@ C_LABEL(trapbase_cpu2):
BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
C_LABEL(trapbase_cpu3):
trapbase_cpu3:
BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
......@@ -418,25 +417,25 @@ C_LABEL(trapbase_cpu3):
/* This was the only reasonable way I could think of to properly align
* these page-table data structures.
*/
.globl C_LABEL(pg0), C_LABEL(pg1), C_LABEL(pg2), C_LABEL(pg3)
.globl C_LABEL(empty_bad_page)
.globl C_LABEL(empty_bad_page_table)
.globl C_LABEL(empty_zero_page)
.globl C_LABEL(swapper_pg_dir)
C_LABEL(swapper_pg_dir): .skip PAGE_SIZE
C_LABEL(pg0): .skip PAGE_SIZE
C_LABEL(pg1): .skip PAGE_SIZE
C_LABEL(pg2): .skip PAGE_SIZE
C_LABEL(pg3): .skip PAGE_SIZE
C_LABEL(empty_bad_page): .skip PAGE_SIZE
C_LABEL(empty_bad_page_table): .skip PAGE_SIZE
C_LABEL(empty_zero_page): .skip PAGE_SIZE
.global C_LABEL(root_flags)
.global C_LABEL(ram_flags)
.global C_LABEL(root_dev)
.global C_LABEL(sparc_ramdisk_image)
.global C_LABEL(sparc_ramdisk_size)
.globl pg0, pg1, pg2, pg3
.globl empty_bad_page
.globl empty_bad_page_table
.globl empty_zero_page
.globl swapper_pg_dir
swapper_pg_dir: .skip PAGE_SIZE
pg0: .skip PAGE_SIZE
pg1: .skip PAGE_SIZE
pg2: .skip PAGE_SIZE
pg3: .skip PAGE_SIZE
empty_bad_page: .skip PAGE_SIZE
empty_bad_page_table: .skip PAGE_SIZE
empty_zero_page: .skip PAGE_SIZE
.global root_flags
.global ram_flags
.global root_dev
.global sparc_ramdisk_image
.global sparc_ramdisk_size
/* This stuff has to be in sync with SILO and other potential boot loaders
* Fields should be kept upward compatible and whenever any change is made,
......@@ -445,17 +444,17 @@ C_LABEL(empty_zero_page): .skip PAGE_SIZE
.ascii "HdrS"
.word LINUX_VERSION_CODE
.half 0x0203 /* HdrS version */
C_LABEL(root_flags):
root_flags:
.half 1
C_LABEL(root_dev):
root_dev:
.half 0
C_LABEL(ram_flags):
ram_flags:
.half 0
C_LABEL(sparc_ramdisk_image):
sparc_ramdisk_image:
.word 0
C_LABEL(sparc_ramdisk_size):
sparc_ramdisk_size:
.word 0
.word C_LABEL(reboot_command)
.word reboot_command
.word 0, 0, 0
.word _end
......@@ -517,7 +516,7 @@ copy_prom_lvl14:
/* DJHR
* preserve our linked/calculated instructions
*/
set C_LABEL(lvl14_save), %g1
set lvl14_save, %g1
set t_irq14, %g3
sub %g1, %l6, %g1 ! translate to physical
sub %g3, %l6, %g3 ! translate to physical
......@@ -761,11 +760,11 @@ execute_in_high_mem:
mov %l0, %o0 ! put back romvec
mov %l1, %o1 ! and debug_vec
sethi %hi( C_LABEL(prom_vector_p) ), %g1
st %o0, [%g1 + %lo( C_LABEL(prom_vector_p) )]
sethi %hi(prom_vector_p), %g1
st %o0, [%g1 + %lo(prom_vector_p)]
sethi %hi( C_LABEL(linux_dbvec) ), %g1
st %o1, [%g1 + %lo( C_LABEL(linux_dbvec) )]
sethi %hi(linux_dbvec), %g1
st %o1, [%g1 + %lo(linux_dbvec)]
ld [%o0 + 0x4], %o3
and %o3, 0x3, %o5 ! get the version
......@@ -808,10 +807,10 @@ found_version:
or %g0, %g0, %o0 ! next_node(0) = first_node
or %o0, %g0, %g6
sethi %hi( C_LABEL(cputypvar) ), %o1 ! First node has cpu-arch
or %o1, %lo( C_LABEL(cputypvar) ), %o1
sethi %hi( C_LABEL(cputypval) ), %o2 ! information, the string
or %o2, %lo( C_LABEL(cputypval) ), %o2
sethi %hi(cputypvar), %o1 ! First node has cpu-arch
or %o1, %lo(cputypvar), %o1
sethi %hi(cputypval), %o2 ! information, the string
or %o2, %lo(cputypval), %o2
ld [%l1], %l0 ! 'compatibility' tells
ld [%l0 + 0xc], %l0 ! that we want 'sun4x' where
call %l0 ! x is one of '', 'c', 'm',
......@@ -824,17 +823,17 @@ found_version:
nop
or %g6, %g0, %o0
sethi %hi( C_LABEL(cputypvar_sun4m) ), %o1
or %o1, %lo( C_LABEL(cputypvar_sun4m) ), %o1
sethi %hi( C_LABEL(cputypval) ), %o2
or %o2, %lo( C_LABEL(cputypval) ), %o2
sethi %hi(cputypvar_sun4m), %o1
or %o1, %lo(cputypvar_sun4m), %o1
sethi %hi(cputypval), %o2
or %o2, %lo(cputypval), %o2
ld [%l1], %l0
ld [%l0 + 0xc], %l0
call %l0
nop
got_prop:
set C_LABEL(cputypval), %o2
set cputypval, %o2
ldub [%o2 + 0x4], %l1
cmp %l1, ' '
......@@ -853,7 +852,7 @@ got_prop:
b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
nop
1: set C_LABEL(cputypval), %l1
1: set cputypval, %l1
ldub [%l1 + 0x4], %l1
cmp %l1, 'm' ! Test for sun4d, sun4e ?
be sun4m_init
......@@ -875,8 +874,8 @@ got_prop:
sun4d_init:
/* Need to patch call to handler_irq */
set C_LABEL(patch_handler_irq), %g4
set C_LABEL(sun4d_handler_irq), %g5
set patch_handler_irq, %g4
set sun4d_handler_irq, %g5
sethi %hi(0x40000000), %g3 ! call
sub %g5, %g4, %g5
srl %g5, 2, %g5
......@@ -997,8 +996,8 @@ sun4c_continue_boot:
* show-time!
*/
sethi %hi( C_LABEL(cputyp) ), %o0
st %g4, [%o0 + %lo( C_LABEL(cputyp) )]
sethi %hi(cputyp), %o0
st %g4, [%o0 + %lo(cputyp)]
/* Turn on Supervisor, EnableFloating, and all the PIL bits.
* Also puts us in register window zero with traps off.
......@@ -1008,14 +1007,14 @@ sun4c_continue_boot:
WRITE_PAUSE
/* I want a kernel stack NOW! */
set C_LABEL(init_thread_union), %g1
set init_thread_union, %g1
set (THREAD_SIZE - STACKFRAME_SZ), %g2
add %g1, %g2, %sp
mov 0, %fp /* And for good luck */
/* Zero out our BSS section. */
set C_LABEL(__bss_start) , %o0 ! First address of BSS
set C_LABEL(end) , %o1 ! Last address of BSS
set __bss_start , %o0 ! First address of BSS
set end , %o1 ! Last address of BSS
add %o0, 0x1, %o0
1:
stb %g0, [%o0]
......@@ -1026,11 +1025,11 @@ sun4c_continue_boot:
/* Initialize the uwinmask value for init task just in case.
* But first make current_set[boot_cpu_id] point to something useful.
*/
set C_LABEL(init_thread_union), %g6
set C_LABEL(current_set), %g2
set init_thread_union, %g6
set current_set, %g2
#ifdef CONFIG_SMP
sethi %hi(C_LABEL(boot_cpu_id4)), %g3
ldub [%g3 + %lo(C_LABEL(boot_cpu_id4))], %g3
sethi %hi(boot_cpu_id4), %g3
ldub [%g3 + %lo(boot_cpu_id4)], %g3
st %g6, [%g2]
add %g2, %g3, %g2
#endif
......@@ -1124,14 +1123,14 @@ sun4c_continue_boot:
st %g4, [%g5 + 0x1c]
2:
sethi %hi( C_LABEL(nwindows) ), %g4
st %g3, [%g4 + %lo( C_LABEL(nwindows) )] ! store final value
sethi %hi(nwindows), %g4
st %g3, [%g4 + %lo(nwindows)] ! store final value
sub %g3, 0x1, %g3
sethi %hi( C_LABEL(nwindowsm1) ), %g4
st %g3, [%g4 + %lo( C_LABEL(nwindowsm1) )]
sethi %hi(nwindowsm1), %g4
st %g3, [%g4 + %lo(nwindowsm1)]
/* Here we go, start using Linux's trap table... */
set C_LABEL(trapbase), %g3
set trapbase, %g3
wr %g3, 0x0, %tbr
WRITE_PAUSE
......@@ -1147,12 +1146,12 @@ sun4c_continue_boot:
* off to start_kernel().
*/
sethi %hi( C_LABEL(prom_vector_p) ), %g5
ld [%g5 + %lo( C_LABEL(prom_vector_p) )], %o0
call C_LABEL(prom_init)
sethi %hi(prom_vector_p), %g5
ld [%g5 + %lo(prom_vector_p)], %o0
call prom_init
nop
call C_LABEL(start_kernel)
call start_kernel
nop
/* We should not get here. */
......@@ -1162,7 +1161,7 @@ sun4c_continue_boot:
sun4_init:
#ifdef CONFIG_SUN4
/* There, happy now Adrian? */
set C_LABEL(cputypval), %o2 ! Let everyone know we
set cputypval, %o2 ! Let everyone know we
set ' ', %o0 ! are a "sun4 " architecture
stb %o0, [%o2 + 0x4]
......@@ -1289,8 +1288,8 @@ halt_me:
* gets initialized in c-code so all routines can use it.
*/
.globl C_LABEL(prom_vector_p)
C_LABEL(prom_vector_p):
.globl prom_vector_p
prom_vector_p:
.word 0
/* We calculate the following at boot time, window fills/spills and trap entry
......@@ -1298,25 +1297,25 @@ C_LABEL(prom_vector_p):
*/
.align 4
.globl C_LABEL(nwindows)
.globl C_LABEL(nwindowsm1)
C_LABEL(nwindows):
.globl nwindows
.globl nwindowsm1
nwindows:
.word 8
C_LABEL(nwindowsm1):
nwindowsm1:
.word 7
/* Boot time debugger vector value. We need this later on. */
.align 4
.globl C_LABEL(linux_dbvec)
C_LABEL(linux_dbvec):
.globl linux_dbvec
linux_dbvec:
.word 0
.word 0
.align 8
.globl C_LABEL(lvl14_save)
C_LABEL(lvl14_save):
.globl lvl14_save
lvl14_save:
.word 0
.word 0
.word 0
......
......@@ -4,7 +4,6 @@
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/cprefix.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
......@@ -47,9 +46,9 @@ rtrap_7win_patch5: and %g1, 0x7f, %g1
.globl ret_trap_entry, rtrap_patch1, rtrap_patch2
.globl rtrap_patch3, rtrap_patch4, rtrap_patch5
.globl C_LABEL(ret_trap_lockless_ipi)
.globl ret_trap_lockless_ipi
ret_trap_entry:
C_LABEL(ret_trap_lockless_ipi):
ret_trap_lockless_ipi:
andcc %t_psr, PSR_PS, %g0
be 1f
nop
......@@ -64,7 +63,7 @@ C_LABEL(ret_trap_lockless_ipi):
be signal_p
nop
call C_LABEL(schedule)
call schedule
nop
ld [%curptr + TI_FLAGS], %g2
......@@ -76,7 +75,7 @@ signal_p:
clr %o0
mov %l5, %o2
mov %l6, %o3
call C_LABEL(do_signal)
call do_signal
add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
/* Fall through. */
......@@ -95,7 +94,7 @@ ret_trap_continue:
WRITE_PAUSE
mov 1, %o1
call C_LABEL(try_to_clear_window_buffer)
call try_to_clear_window_buffer
add %sp, STACKFRAME_SZ, %o0
b signal_p
......@@ -131,8 +130,8 @@ rtrap_patch2: and %glob_tmp, 0xff, %glob_tmp
* branch to the user stack checking routine
* for return from traps.
*/
.globl C_LABEL(rtrap_mmu_patchme)
C_LABEL(rtrap_mmu_patchme): b C_LABEL(sun4c_rett_stackchk)
.globl rtrap_mmu_patchme
rtrap_mmu_patchme: b sun4c_rett_stackchk
andcc %fp, 0x7, %g0
ret_trap_userwins_ok:
......@@ -165,7 +164,7 @@ ret_trap_unaligned_pc:
wr %t_psr, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(do_memaccess_unaligned)
call do_memaccess_unaligned
nop
b signal_p
......@@ -215,15 +214,15 @@ ret_trap_user_stack_is_bolixed:
wr %t_psr, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(window_ret_fault)
call window_ret_fault
add %sp, STACKFRAME_SZ, %o0
b signal_p
ld [%curptr + TI_FLAGS], %g2
.globl C_LABEL(sun4c_rett_stackchk)
C_LABEL(sun4c_rett_stackchk):
.globl sun4c_rett_stackchk
sun4c_rett_stackchk:
be 1f
and %fp, 0xfff, %g1 ! delay slot
......@@ -286,8 +285,8 @@ sun4c_rett_onepage:
b ret_trap_userwins_ok
save %g0, %g0, %g0
.globl C_LABEL(srmmu_rett_stackchk)
C_LABEL(srmmu_rett_stackchk):
.globl srmmu_rett_stackchk
srmmu_rett_stackchk:
bne ret_trap_user_stack_is_bolixed
sethi %hi(PAGE_OFFSET), %g1
cmp %g1, %fp
......
......@@ -6,7 +6,6 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/cprefix.h>
#include <asm/ptrace.h>
#include <asm/asm_offsets.h>
#include <asm/errno.h>
......@@ -31,7 +30,7 @@
jmp %l2; \
rett %l2 + 4;
#define LABEL(func) CONCAT(func, _low)
#define LABEL(func) func##_low
.globl LABEL(sunosnop)
LABEL(sunosnop):
......
......@@ -56,9 +56,6 @@ int smp_activated = 0;
volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];
cycles_t cacheflush_time = 0; /* XXX */
spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
[0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
};
/* The only guaranteed locking primitive available on all Sparc
* processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
......
......@@ -298,8 +298,7 @@ EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__strncpy_from_user);
/* Networking helper routines. */
/* XXX This is NOVERS because C_LABEL_STR doesn't get the version number. -DaveM */
EXPORT_SYMBOL_NOVERS(__csum_partial_copy_sparc_generic);
EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
EXPORT_SYMBOL(csum_partial);
/* Cache flushing. */
......
......@@ -9,7 +9,6 @@
* Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
*/
#include <asm/cprefix.h>
#include <asm/ptrace.h>
.text
......@@ -19,50 +18,50 @@
* value as in [%sp + STACKFRAME_SZ + PT_I0] */
/* SunOS getpid() returns pid in %o0 and ppid in %o1 */
.globl C_LABEL(sunos_getpid)
C_LABEL(sunos_getpid):
call C_LABEL(sys_getppid)
.globl sunos_getpid
sunos_getpid:
call sys_getppid
nop
call C_LABEL(sys_getpid)
call sys_getpid
st %o0, [%sp + STACKFRAME_SZ + PT_I1]
b C_LABEL(ret_sys_call)
b ret_sys_call
st %o0, [%sp + STACKFRAME_SZ + PT_I0]
/* SunOS getuid() returns uid in %o0 and euid in %o1 */
.globl C_LABEL(sunos_getuid)
C_LABEL(sunos_getuid):
call C_LABEL(sys_geteuid16)
.globl sunos_getuid
sunos_getuid:
call sys_geteuid16
nop
call C_LABEL(sys_getuid16)
call sys_getuid16
st %o0, [%sp + STACKFRAME_SZ + PT_I1]
b C_LABEL(ret_sys_call)
b ret_sys_call
st %o0, [%sp + STACKFRAME_SZ + PT_I0]
/* SunOS getgid() returns gid in %o0 and egid in %o1 */
.globl C_LABEL(sunos_getgid)
C_LABEL(sunos_getgid):
call C_LABEL(sys_getegid16)
.globl sunos_getgid
sunos_getgid:
call sys_getegid16
nop
call C_LABEL(sys_getgid16)
call sys_getgid16
st %o0, [%sp + STACKFRAME_SZ + PT_I1]
b C_LABEL(ret_sys_call)
b ret_sys_call
st %o0, [%sp + STACKFRAME_SZ + PT_I0]
/* SunOS's execv() call only specifies the argv argument, the
* environment settings are the same as the calling processes.
*/
.globl C_LABEL(sunos_execv)
C_LABEL(sunos_execv):
.globl sunos_execv
sunos_execv:
st %g0, [%sp + STACKFRAME_SZ + PT_I2]
call C_LABEL(sparc_execve)
call sparc_execve
add %sp, STACKFRAME_SZ, %o0
b C_LABEL(ret_sys_call)
b ret_sys_call
ld [%sp + STACKFRAME_SZ + PT_I0], %o0
......@@ -6,7 +6,6 @@
*/
#include <linux/init.h>
#include <asm/cprefix.h>
#include <asm/head.h>
#include <asm/psr.h>
#include <asm/page.h>
......@@ -15,8 +14,8 @@
#include <asm/vaddrs.h>
#include <asm/contregs.h>
.globl C_LABEL(sun4m_cpu_startup), C_LABEL(__smp4m_processor_id)
.globl C_LABEL(sun4d_cpu_startup), C_LABEL(__smp4d_processor_id)
.globl sun4m_cpu_startup, __smp4m_processor_id
.globl sun4d_cpu_startup, __smp4d_processor_id
__INIT
.align 4
......@@ -26,21 +25,21 @@
* in and sets PIL in %psr to 15, no irqs.
*/
C_LABEL(sun4m_cpu_startup):
sun4m_cpu_startup:
cpu1_startup:
sethi %hi(C_LABEL(trapbase_cpu1)), %g3
sethi %hi(trapbase_cpu1), %g3
b 1f
or %g3, %lo(C_LABEL(trapbase_cpu1)), %g3
or %g3, %lo(trapbase_cpu1), %g3
cpu2_startup:
sethi %hi(C_LABEL(trapbase_cpu2)), %g3
sethi %hi(trapbase_cpu2), %g3
b 1f
or %g3, %lo(C_LABEL(trapbase_cpu2)), %g3
or %g3, %lo(trapbase_cpu2), %g3
cpu3_startup:
sethi %hi(C_LABEL(trapbase_cpu3)), %g3
sethi %hi(trapbase_cpu3), %g3
b 1f
or %g3, %lo(C_LABEL(trapbase_cpu3)), %g3
or %g3, %lo(trapbase_cpu3), %g3
1:
/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
......@@ -58,7 +57,7 @@ cpu3_startup:
WRITE_PAUSE
/* Give ourselves a stack and curptr. */
set C_LABEL(current_set), %g5
set current_set, %g5
srl %g3, 10, %g4
and %g4, 0xc, %g4
ld [%g5 + %g4], %g6
......@@ -73,13 +72,13 @@ cpu3_startup:
WRITE_PAUSE
/* Init our caches, etc. */
set C_LABEL(poke_srmmu), %g5
set poke_srmmu, %g5
ld [%g5], %g5
call %g5
nop
/* Start this processor. */
call C_LABEL(smp4m_callin)
call smp4m_callin
nop
b,a smp_do_cpu_idle
......@@ -88,22 +87,22 @@ cpu3_startup:
.align 4
smp_do_cpu_idle:
call C_LABEL(init_idle)
call init_idle
nop
call C_LABEL(cpu_idle)
call cpu_idle
mov 0, %o0
call C_LABEL(cpu_panic)
call cpu_panic
nop
C_LABEL(__smp4m_processor_id):
__smp4m_processor_id:
rd %tbr, %g2
srl %g2, 12, %g2
and %g2, 3, %g2
retl
mov %g1, %o7
C_LABEL(__smp4d_processor_id):
__smp4d_processor_id:
lda [%g0] ASI_M_VIKING_TMP1, %g2
retl
mov %g1, %o7
......@@ -114,7 +113,7 @@ C_LABEL(__smp4d_processor_id):
__INIT
.align 4
C_LABEL(sun4d_cpu_startup):
sun4d_cpu_startup:
/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
set (PSR_PIL | PSR_S | PSR_PS), %g1
wr %g1, 0x0, %psr ! traps off though
......@@ -126,7 +125,7 @@ C_LABEL(sun4d_cpu_startup):
WRITE_PAUSE
/* Set tbr - we use just one trap table. */
set C_LABEL(trapbase), %g1
set trapbase, %g1
wr %g1, 0x0, %tbr
WRITE_PAUSE
......@@ -138,7 +137,7 @@ C_LABEL(sun4d_cpu_startup):
sta %g1, [%g0] ASI_M_VIKING_TMP1
/* Give ourselves a stack and curptr. */
set C_LABEL(current_set), %g5
set current_set, %g5
srl %g3, 1, %g4
ld [%g5 + %g4], %g6
......@@ -152,13 +151,13 @@ C_LABEL(sun4d_cpu_startup):
WRITE_PAUSE
/* Init our caches, etc. */
set C_LABEL(poke_srmmu), %g5
set poke_srmmu, %g5
ld [%g5], %g5
call %g5
nop
/* Start this processor. */
call C_LABEL(smp4d_callin)
call smp4d_callin
nop
b,a smp_do_cpu_idle
......@@ -4,7 +4,6 @@
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/cprefix.h>
#include <asm/contregs.h>
#include <asm/page.h>
#include <asm/ptrace.h>
......@@ -164,8 +163,8 @@ spwin_fromuser:
* the label 'spwin_user_stack_is_bolixed' which will take
* care of things at that point.
*/
.globl C_LABEL(spwin_mmu_patchme)
C_LABEL(spwin_mmu_patchme): b C_LABEL(spwin_sun4c_stackchk)
.globl spwin_mmu_patchme
spwin_mmu_patchme: b spwin_sun4c_stackchk
andcc %sp, 0x7, %g0
spwin_good_ustack:
......@@ -253,7 +252,7 @@ spnwin_patch3: and %twin_tmp, 0xff, %twin_tmp ! patched on 7win Sparcs
/* Turn on traps and call c-code to deal with it. */
wr %t_psr, PSR_ET, %psr
nop
call C_LABEL(window_overflow_fault)
call window_overflow_fault
nop
/* Return from trap if C-code actually fixes things, if it
......@@ -307,8 +306,8 @@ spwin_bad_ustack_from_kernel:
* As noted above %curptr cannot be touched by this routine at all.
*/
.globl C_LABEL(spwin_sun4c_stackchk)
C_LABEL(spwin_sun4c_stackchk):
.globl spwin_sun4c_stackchk
spwin_sun4c_stackchk:
/* LOCATION: Window to be saved on the stack */
/* See if the stack is in the address space hole but first,
......@@ -379,8 +378,8 @@ spwin_sun4c_onepage:
* works for all current v8/srmmu implementations, we'll
* see...
*/
.globl C_LABEL(spwin_srmmu_stackchk)
C_LABEL(spwin_srmmu_stackchk):
.globl spwin_srmmu_stackchk
spwin_srmmu_stackchk:
/* LOCATION: Window to be saved on the stack */
/* Because of SMP concerns and speed we play a trick.
......
......@@ -4,7 +4,6 @@
* Copyright (C) 1995 David S. Miller
*/
#include <asm/cprefix.h>
#include <asm/contregs.h>
#include <asm/page.h>
#include <asm/ptrace.h>
......@@ -135,8 +134,8 @@ fwin_from_user:
/* Branch to the architecture specific stack validation
* routine. They can be found below...
*/
.globl C_LABEL(fwin_mmu_patchme)
C_LABEL(fwin_mmu_patchme): b C_LABEL(sun4c_fwin_stackchk)
.globl fwin_mmu_patchme
fwin_mmu_patchme: b sun4c_fwin_stackchk
andcc %sp, 0x7, %g0
#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
......@@ -190,7 +189,7 @@ fwin_user_stack_is_bolixed:
wr %t_psr, PSR_ET, %psr ! enable traps
nop
call C_LABEL(window_underflow_fault)
call window_underflow_fault
mov %g4, %o0
b ret_trap_entry
......@@ -244,8 +243,8 @@ fwin_user_finish_up:
*/
.align 4
.globl C_LABEL(sun4c_fwin_stackchk)
C_LABEL(sun4c_fwin_stackchk):
.globl sun4c_fwin_stackchk
sun4c_fwin_stackchk:
/* LOCATION: Window 'W' */
/* Caller did 'andcc %sp, 0x7, %g0' */
......@@ -295,8 +294,8 @@ sun4c_fwin_onepage:
/* A page had bad page permissions, losing... */
b,a fwin_user_stack_is_bolixed
.globl C_LABEL(srmmu_fwin_stackchk)
C_LABEL(srmmu_fwin_stackchk):
.globl srmmu_fwin_stackchk
srmmu_fwin_stackchk:
/* LOCATION: Window 'W' */
/* Caller did 'andcc %sp, 0x7, %g0' */
......
......@@ -7,5 +7,5 @@ EXTRA_AFLAGS := -ansi -DST_DIV0=0x02
lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \
strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
copy_user.o locks.o atomic.o bitops.o debuglocks.o lshrdi3.o \
ashldi3.o rwsem.o muldi3.o bitext.o
copy_user.o locks.o atomic.o atomic32.o bitops.o debuglocks.o \
lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
......@@ -5,12 +5,10 @@
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
#include <asm/cprefix.h>
.text
.align 4
.globl C_LABEL(__ashldi3)
C_LABEL(__ashldi3):
.globl __ashldi3
__ashldi3:
cmp %o2, 0
be 9f
mov 0x20, %g2
......
......@@ -5,12 +5,10 @@
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/cprefix.h>
.text
.align 4
.globl C_LABEL(__ashrdi3)
C_LABEL(__ashrdi3):
.globl __ashrdi3
__ashrdi3:
tst %o2
be 3f
or %g0, 32, %g2
......
......@@ -4,7 +4,6 @@
*/
#include <linux/config.h>
#include <asm/cprefix.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
......
/*
* atomic32.c: 32-bit atomic_t implementation
*
* Copyright (C) 2004 Keith M Wesolowski
*
* Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
*/
#include <asm/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
[0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
};
#else /* SMP */
#define ATOMIC_HASH_SIZE 1
#define ATOMIC_HASH(a) 0
#endif /* SMP */
/*
 * __atomic_add_return - add @i to @v and return the resulting value.
 *
 * The read-modify-write of v->counter is serialized against the
 * other atomic operations by taking the hashed spinlock selected by
 * ATOMIC_HASH(v) with local interrupts disabled.
 */
int __atomic_add_return(int i, atomic_t *v)
{
	unsigned long irq_state;
	int new_value;

	spin_lock_irqsave(ATOMIC_HASH(v), irq_state);
	v->counter += i;
	new_value = v->counter;
	spin_unlock_irqrestore(ATOMIC_HASH(v), irq_state);

	return new_value;
}
/*
 * atomic_set - store @i into @v.
 *
 * The store is done under the same hashed spinlock (with interrupts
 * disabled) that __atomic_add_return takes, so it cannot interleave
 * with a concurrent read-modify-write of the same counter.
 */
void atomic_set(atomic_t *v, int i)
{
	unsigned long irq_state;

	spin_lock_irqsave(ATOMIC_HASH(v), irq_state);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), irq_state);
}
EXPORT_SYMBOL(__atomic_add_return);
EXPORT_SYMBOL(atomic_set);
......@@ -4,7 +4,6 @@
*/
#include <linux/config.h>
#include <asm/cprefix.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
......@@ -29,7 +28,7 @@ ___set_bit:
wr %g5, 0x0, %psr
nop; nop; nop
#ifdef CONFIG_SMP
set C_LABEL(bitops_spinlock), %g5
set bitops_spinlock, %g5
2: ldstub [%g5], %g7 ! Spin on the byte lock for SMP.
orcc %g7, 0x0, %g0 ! Did we get it?
bne 2b ! Nope...
......@@ -39,7 +38,7 @@ ___set_bit:
and %g7, %g2, %g2
#ifdef CONFIG_SMP
st %g5, [%g1]
set C_LABEL(bitops_spinlock), %g5
set bitops_spinlock, %g5
stb %g0, [%g5]
#else
st %g5, [%g1]
......@@ -58,7 +57,7 @@ ___clear_bit:
wr %g5, 0x0, %psr
nop; nop; nop
#ifdef CONFIG_SMP
set C_LABEL(bitops_spinlock), %g5
set bitops_spinlock, %g5
2: ldstub [%g5], %g7 ! Spin on the byte lock for SMP.
orcc %g7, 0x0, %g0 ! Did we get it?
bne 2b ! Nope...
......@@ -68,7 +67,7 @@ ___clear_bit:
and %g7, %g2, %g2
#ifdef CONFIG_SMP
st %g5, [%g1]
set C_LABEL(bitops_spinlock), %g5
set bitops_spinlock, %g5
stb %g0, [%g5]
#else
st %g5, [%g1]
......@@ -87,7 +86,7 @@ ___change_bit:
wr %g5, 0x0, %psr
nop; nop; nop
#ifdef CONFIG_SMP
set C_LABEL(bitops_spinlock), %g5
set bitops_spinlock, %g5
2: ldstub [%g5], %g7 ! Spin on the byte lock for SMP.
orcc %g7, 0x0, %g0 ! Did we get it?
bne 2b ! Nope...
......@@ -97,7 +96,7 @@ ___change_bit:
and %g7, %g2, %g2
#ifdef CONFIG_SMP
st %g5, [%g1]
set C_LABEL(bitops_spinlock), %g5
set bitops_spinlock, %g5
stb %g0, [%g5]
#else
st %g5, [%g1]
......
......@@ -4,7 +4,6 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/cprefix.h>
#include <asm/page.h>
/* Zero out 64 bytes of memory at (buf + offset).
......@@ -46,9 +45,9 @@
.text
.align 4
.globl C_LABEL(bzero_1page), C_LABEL(__copy_1page)
.globl bzero_1page, __copy_1page
C_LABEL(bzero_1page):
bzero_1page:
/* NOTE: If you change the number of insns of this routine, please check
* arch/sparc/mm/hypersparc.S */
/* %o0 = buf */
......@@ -67,7 +66,7 @@ C_LABEL(bzero_1page):
retl
nop
C_LABEL(__copy_1page):
__copy_1page:
/* NOTE: If you change the number of insns of this routine, please check
* arch/sparc/mm/hypersparc.S */
/* %o0 = dst, %o1 = src */
......
......@@ -13,7 +13,6 @@
* BSD4.4 portable checksum routine
*/
#include <asm/cprefix.h>
#include <asm/errno.h>
#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \
......@@ -104,8 +103,8 @@ csum_partial_fix_alignment:
/* The common case is to get called with a nicely aligned
* buffer of size 0x20. Follow the code path for that case.
*/
.globl C_LABEL(csum_partial)
C_LABEL(csum_partial): /* %o0=buf, %o1=len, %o2=sum */
.globl csum_partial
csum_partial: /* %o0=buf, %o1=len, %o2=sum */
andcc %o0, 0x7, %g0 ! alignment problems?
bne csum_partial_fix_alignment ! yep, handle it
sethi %hi(cpte - 8), %g7 ! prepare table jmp ptr
......@@ -142,8 +141,8 @@ cpte: bne csum_partial_end_cruft ! yep, handle it
cpout: retl ! get outta here
mov %o2, %o0 ! return computed csum
.globl C_LABEL(__csum_partial_copy_start), C_LABEL(__csum_partial_copy_end)
C_LABEL(__csum_partial_copy_start):
.globl __csum_partial_copy_start, __csum_partial_copy_end
__csum_partial_copy_start:
/* Work around cpp -rob */
#define ALLOC #alloc
......@@ -329,8 +328,8 @@ cc_dword_align:
* out of you, game over, lights out.
*/
.align 8
.globl C_LABEL(__csum_partial_copy_sparc_generic)
C_LABEL(__csum_partial_copy_sparc_generic):
.globl __csum_partial_copy_sparc_generic
__csum_partial_copy_sparc_generic:
/* %o0=src, %o1=dest, %g1=len, %g7=sum */
xor %o0, %o1, %o4 ! get changing bits
andcc %o4, 3, %g0 ! check for mismatched alignment
......@@ -472,7 +471,7 @@ ccslow: cmp %g1, 0
4: addcc %g7, %g5, %g7
retl
addx %g0, %g7, %o0
C_LABEL(__csum_partial_copy_end):
__csum_partial_copy_end:
/* We do these strange calculations for the csum_*_from_user case only, ie.
* we only bother with faults on loads... */
......@@ -551,7 +550,7 @@ C_LABEL(__csum_partial_copy_end):
mov %i5, %o0
mov %i7, %o1
mov %i4, %o2
call C_LABEL(lookup_fault)
call lookup_fault
mov %g7, %i4
cmp %o0, 2
bne 1f
......@@ -561,7 +560,7 @@ C_LABEL(__csum_partial_copy_end):
mov %i0, %o1
mov %i1, %o0
5:
call C_LABEL(__memcpy)
call __memcpy
mov %i2, %o2
tst %o0
bne,a 2f
......@@ -570,7 +569,7 @@ C_LABEL(__csum_partial_copy_end):
2:
mov %i1, %o0
6:
call C_LABEL(__bzero)
call __bzero
mov %i3, %o1
1:
ld [%sp + 168], %o2 ! struct_ptr of parent
......
......@@ -11,7 +11,6 @@
* Returns 0 if successful, otherwise count of bytes not copied yet
*/
#include <asm/cprefix.h>
#include <asm/ptrace.h>
#include <asm/asmmacro.h>
#include <asm/page.h>
......@@ -118,7 +117,7 @@
.globl __copy_user_begin
__copy_user_begin:
.globl C_LABEL(__copy_user)
.globl __copy_user
dword_align:
andcc %o1, 1, %g0
be 4f
......@@ -145,7 +144,7 @@ dword_align:
b 3f
add %o0, 2, %o0
C_LABEL(__copy_user): /* %o0=dst %o1=src %o2=len */
__copy_user: /* %o0=dst %o1=src %o2=len */
xor %o0, %o1, %o4
1:
andcc %o4, 3, %o5
......
......@@ -6,7 +6,6 @@
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <asm/cprefix.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/smp.h>
......
/* $Id: lshrdi3.S,v 1.1 1999/03/21 06:37:45 davem Exp $ */
#include <asm/cprefix.h>
.globl C_LABEL(__lshrdi3)
C_LABEL(__lshrdi3):
.globl __lshrdi3
__lshrdi3:
cmp %o2, 0
be 3f
mov 0x20, %g2
......
#include <asm/cprefix.h>
.text
.align 4
.global C_LABEL(__memcmp), C_LABEL(memcmp)
C_LABEL(__memcmp):
C_LABEL(memcmp):
.global __memcmp, memcmp
__memcmp:
memcmp:
#if 1
cmp %o2, 0
ble L3
......
......@@ -9,13 +9,11 @@
#ifdef __KERNEL__
#include <asm/cprefix.h>
#define FUNC(x) \
.globl C_LABEL(x); \
.type C_LABEL(x),@function; \
.globl x; \
.type x,@function; \
.align 4; \
C_LABEL(x):
x:
#undef FASTER_REVERSE
#undef FASTER_NONALIGNED
......
......@@ -4,8 +4,6 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/cprefix.h>
/* In essence, this is just a fancy strlen. */
#define LO_MAGIC 0x01010101
......@@ -13,9 +11,9 @@
.text
.align 4
.globl C_LABEL(__memscan_zero), C_LABEL(__memscan_generic)
.globl C_LABEL(memscan)
C_LABEL(__memscan_zero):
.globl __memscan_zero, __memscan_generic
.globl memscan
__memscan_zero:
/* %o0 = addr, %o1 = size */
cmp %o1, 0
bne,a 1f
......@@ -114,8 +112,8 @@ mzero_found_it:
retl
sub %o0, 2, %o0
C_LABEL(memscan):
C_LABEL(__memscan_generic):
memscan:
__memscan_generic:
/* %o0 = addr, %o1 = c, %o2 = size */
cmp %o2, 0
bne,a 0f
......
......@@ -7,7 +7,6 @@
* occurs and we were called as clear_user.
*/
#include <asm/cprefix.h>
#include <asm/ptrace.h>
/* Work around cpp -rob */
......@@ -61,12 +60,12 @@
.globl __bzero_begin
__bzero_begin:
.globl C_LABEL(__bzero), C_LABEL(__memset),
.globl C_LABEL(memset)
.globl C_LABEL(__memset_start), C_LABEL(__memset_end)
C_LABEL(__memset_start):
C_LABEL(__memset):
C_LABEL(memset):
.globl __bzero, __memset,
.globl memset
.globl __memset_start, __memset_end
__memset_start:
__memset:
memset:
and %o1, 0xff, %g3
sll %g3, 8, %g2
or %g3, %g2, %g3
......@@ -90,7 +89,7 @@ C_LABEL(memset):
b 4f
sub %o0, %o2, %o0
C_LABEL(__bzero):
__bzero:
mov %g0, %g3
1:
cmp %o1, 7
......@@ -168,7 +167,7 @@ C_LABEL(__bzero):
0:
retl
clr %o0
C_LABEL(__memset_end):
__memset_end:
.section .fixup,#alloc,#execinstr
.align 4
......@@ -195,7 +194,7 @@ C_LABEL(__memset_end):
save %sp, -104, %sp
mov %i5, %o0
mov %i7, %o1
call C_LABEL(lookup_fault)
call lookup_fault
mov %i4, %o2
ret
restore
......
......@@ -5,8 +5,6 @@
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <asm/cprefix.h>
#define LO_MAGIC 0x01010101
#define HI_MAGIC 0x80808080
......@@ -42,8 +40,8 @@
mov 2, %o0
.align 4
.global C_LABEL(strlen)
C_LABEL(strlen):
.global strlen
strlen:
mov %o0, %o1
andcc %o0, 3, %g0
bne 0b
......
......@@ -8,8 +8,6 @@
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <asm/cprefix.h>
#define LO_MAGIC 0x01010101
#define HI_MAGIC 0x80808080
......@@ -47,10 +45,10 @@
mov 3, %o0
.align 4
.global C_LABEL(__strlen_user), C_LABEL(__strnlen_user)
C_LABEL(__strlen_user):
.global __strlen_user, __strnlen_user
__strlen_user:
sethi %hi(32768), %o1
C_LABEL(__strnlen_user):
__strnlen_user:
mov %o1, %g1
mov %o0, %o1
andcc %o0, 3, %g0
......
......@@ -3,13 +3,11 @@
* generic strncmp routine.
*/
#include <asm/cprefix.h>
.text
.align 4
.global C_LABEL(__strncmp), C_LABEL(strncmp)
C_LABEL(__strncmp):
C_LABEL(strncmp):
.global __strncmp, strncmp
__strncmp:
strncmp:
mov %o0, %g3
mov 0, %o3
......
......@@ -3,7 +3,6 @@
* Copyright(C) 1996 David S. Miller
*/
#include <asm/cprefix.h>
#include <asm/ptrace.h>
#include <asm/errno.h>
......@@ -17,8 +16,8 @@
* bytes copied if we hit a null byte
*/
.globl C_LABEL(__strncpy_from_user)
C_LABEL(__strncpy_from_user):
.globl __strncpy_from_user
__strncpy_from_user:
/* %o0=dest, %o1=src, %o2=count */
mov %o2, %o3
1:
......
......@@ -2,7 +2,7 @@
# Makefile for the FPU instruction emulation.
#
obj-y := math.o ashldi3.o
obj-y := math.o
EXTRA_AFLAGS := -ansi
EXTRA_CFLAGS = -I. -I$(TOPDIR)/include/math-emu -w
......@@ -15,7 +15,6 @@
#include <asm/page.h>
#include <asm/pgtsrmmu.h>
#include <asm/viking.h>
#include <asm/cprefix.h>
#include <asm/btfixup.h>
#ifdef CONFIG_SMP
......
......@@ -26,17 +26,17 @@
#define GET_PROCESSOR_MID(reg, tmp) \
rd %tbr, %reg; \
sethi %hi(C_LABEL(mid_xlate)), %tmp; \
sethi %hi(mid_xlate), %tmp; \
srl %reg, 12, %reg; \
or %tmp, %lo(C_LABEL(mid_xlate)), %tmp; \
or %tmp, %lo(mid_xlate), %tmp; \
and %reg, 3, %reg; \
ldub [%tmp + %reg], %reg;
#define GET_PROCESSOR_OFFSET(reg, tmp) \
GET_PROCESSOR_ID(reg) \
sethi %hi(C_LABEL(cpu_offset)), %tmp; \
sethi %hi(cpu_offset), %tmp; \
sll %reg, 2, %reg; \
or %tmp, %lo(C_LABEL(cpu_offset)), %tmp; \
or %tmp, %lo(cpu_offset), %tmp; \
ld [%tmp + %reg], %reg;
/* All trap entry points _must_ begin with this macro or else you
......
......@@ -11,49 +11,16 @@
#define __ARCH_SPARC_ATOMIC__
#include <linux/config.h>
#include <linux/spinlock.h>
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
#else /* SMP */
#define ATOMIC_HASH_SIZE 1
#define ATOMIC_HASH(a) 0
#endif /* SMP */
static inline int __atomic_add_return(int i, atomic_t *v)
{
int ret;
unsigned long flags;
spin_lock_irqsave(ATOMIC_HASH(v), flags);
ret = (v->counter += i);
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
static inline void atomic_set(atomic_t *v, int i)
{
unsigned long flags;
spin_lock_irqsave(ATOMIC_HASH(v), flags);
v->counter = i;
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
#define ATOMIC_INIT(i) { (i) }
extern int __atomic_add_return(int, atomic_t *);
extern void atomic_set(atomic_t *, int);
#define atomic_read(v) ((v)->counter)
#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
......
......@@ -18,7 +18,6 @@
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <asm/cprefix.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
......@@ -51,7 +50,7 @@ csum_partial_copy_nocheck (const char *src, char *dst, int len,
register int l asm("g1") = len;
__asm__ __volatile__ (
"call " C_LABEL_STR(__csum_partial_copy_sparc_generic) "\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" mov %6, %%g7\n"
: "=&r" (ret), "=&r" (d), "=&r" (l)
: "0" (ret), "1" (d), "2" (l), "r" (sum)
......@@ -81,7 +80,7 @@ csum_partial_copy_from_user(const char *src, char *dst, int len,
".word 1f,2\n\t"
".previous\n"
"1:\n\t"
"call " C_LABEL_STR(__csum_partial_copy_sparc_generic) "\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
......@@ -110,7 +109,7 @@ csum_partial_copy_to_user(const char *src, char *dst, int len,
".word 1f,1\n\t"
".previous\n"
"1:\n\t"
"call " C_LABEL_STR(__csum_partial_copy_sparc_generic) "\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
......
/* cprefix.h: This file is included by assembly source which needs
* to know what the c-label prefixes are. The newer versions
* of cpp that come with gcc predefine such things to help
* us out. The reason this stuff is needed is to make
* solaris compiles of the kernel work.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef __SPARC_CPREFIX_H
#define __SPARC_CPREFIX_H
#if defined(__svr4__) || defined(__ELF__)
#define C_LABEL_PREFIX
#define C_LABEL_STR(name) #name
#else
#define C_LABEL_PREFIX _
#define C_LABEL_STR(name) "_" #name
#endif
#define CONCAT(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a##b
#define C_LABEL(name) CONCAT(C_LABEL_PREFIX, name)
#endif /* !(__SPARC_CPREFIX_H) */
......@@ -22,8 +22,8 @@
/* Data/text faults. Defaults to sun4c version at boot time. */
#define SPARC_TFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 1, %l7;
#define SPARC_DFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 0, %l7;
#define SRMMU_TFAULT rd %psr, %l0; rd %wim, %l3; b C_LABEL(srmmu_fault); mov 1, %l7;
#define SRMMU_DFAULT rd %psr, %l0; rd %wim, %l3; b C_LABEL(srmmu_fault); mov 0, %l7;
#define SRMMU_TFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 1, %l7;
#define SRMMU_DFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 0, %l7;
/* This is for traps we should NEVER get. */
#define BAD_TRAP(num) \
......@@ -41,17 +41,17 @@
/* Software trap for Linux system calls. */
#define LINUX_SYSCALL_TRAP \
sethi %hi(C_LABEL(sys_call_table)), %l7; \
or %l7, %lo(C_LABEL(sys_call_table)), %l7; \
sethi %hi(sys_call_table), %l7; \
or %l7, %lo(sys_call_table), %l7; \
b linux_sparc_syscall; \
rd %psr, %l0;
/* Software trap for SunOS4.1.x system calls. */
#define SUNOS_SYSCALL_TRAP \
rd %psr, %l0; \
sethi %hi(C_LABEL(sunos_sys_table)), %l7; \
sethi %hi(sunos_sys_table), %l7; \
b linux_sparc_syscall; \
or %l7, %lo(C_LABEL(sunos_sys_table)), %l7;
or %l7, %lo(sunos_sys_table), %l7;
#define SUNOS_NO_SYSCALL_TRAP \
b sunos_syscall; \
......@@ -80,8 +80,8 @@
/* Software trap for Sparc-netbsd system calls. */
#define NETBSD_SYSCALL_TRAP \
sethi %hi(C_LABEL(sys_call_table)), %l7; \
or %l7, %lo(C_LABEL(sys_call_table)), %l7; \
sethi %hi(sys_call_table), %l7; \
or %l7, %lo(sys_call_table), %l7; \
b bsd_syscall; \
rd %psr, %l0;
......
......@@ -11,16 +11,16 @@
#include <asm/page.h> /* IO address mapping routines need this */
#include <asm/system.h>
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
#define page_to_phys(page) (((page) - mem_map) << PAGE_SHIFT)
static inline u32 flip_dword (u32 d)
static inline u32 flip_dword (u32 l)
{
return ((d&0xff)<<24) | (((d>>8)&0xff)<<16) | (((d>>16)&0xff)<<8)| ((d>>24)&0xff);
return ((l&0xff)<<24) | (((l>>8)&0xff)<<16) | (((l>>16)&0xff)<<8)| ((l>>24)&0xff);
}
static inline u16 flip_word (u16 d)
static inline u16 flip_word (u16 w)
{
return ((d&0xff) << 8) | ((d>>8)&0xff);
return ((w&0xff) << 8) | ((w>>8)&0xff);
}
/*
......@@ -56,14 +56,14 @@ static inline void __raw_writeb(u8 b, unsigned long addr)
*(volatile u8 *)addr = b;
}
static inline void __raw_writew(u16 b, unsigned long addr)
static inline void __raw_writew(u16 w, unsigned long addr)
{
*(volatile u16 *)addr = b;
*(volatile u16 *)addr = w;
}
static inline void __raw_writel(u32 b, unsigned long addr)
static inline void __raw_writel(u32 l, unsigned long addr)
{
*(volatile u32 *)addr = b;
*(volatile u32 *)addr = l;
}
static inline u8 __readb(unsigned long addr)
......@@ -86,26 +86,26 @@ static inline void __writeb(u8 b, unsigned long addr)
*(volatile u8 *)addr = b;
}
static inline void __writew(u16 b, unsigned long addr)
static inline void __writew(u16 w, unsigned long addr)
{
*(volatile u16 *)addr = flip_word(b);
*(volatile u16 *)addr = flip_word(w);
}
static inline void __writel(u32 b, unsigned long addr)
static inline void __writel(u32 l, unsigned long addr)
{
*(volatile u32 *)addr = flip_dword(b);
*(volatile u32 *)addr = flip_dword(l);
}
#define readb(addr) __readb((unsigned long)(addr))
#define readw(addr) __readw((unsigned long)(addr))
#define readl(addr) __readl((unsigned long)(addr))
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
#define readb(__addr) __readb((unsigned long)(__addr))
#define readw(__addr) __readw((unsigned long)(__addr))
#define readl(__addr) __readl((unsigned long)(__addr))
#define readb_relaxed(__addr) readb(__addr)
#define readw_relaxed(__addr) readw(__addr)
#define readl_relaxed(__addr) readl(__addr)
#define writeb(b, addr) __writeb((b),(unsigned long)(addr))
#define writew(b, addr) __writew((b),(unsigned long)(addr))
#define writel(b, addr) __writel((b),(unsigned long)(addr))
#define writeb(__b, __addr) __writeb((__b),(unsigned long)(__addr))
#define writew(__w, __addr) __writew((__w),(unsigned long)(__addr))
#define writel(__l, __addr) __writel((__l),(unsigned long)(__addr))
/*
* I/O space operations
......@@ -124,22 +124,22 @@ static inline void __writel(u32 b, unsigned long addr)
* mapped somewhere into virtual kernel space and we
* can use inb/outb again.
*/
#define inb_local(addr) __readb(addr)
#define inb(addr) __readb(addr)
#define inw(addr) __readw(addr)
#define inl(addr) __readl(addr)
#define outb_local(b, addr) __writeb(b, addr)
#define outb(b, addr) __writeb(b, addr)
#define outw(b, addr) __writew(b, addr)
#define outl(b, addr) __writel(b, addr)
#define inb_p inb
#define outb_p outb
#define inw_p inw
#define outw_p outw
#define inl_p inl
#define outl_p outl
#define inb_local(__addr) __readb((unsigned long)(__addr))
#define inb(__addr) __readb((unsigned long)(__addr))
#define inw(__addr) __readw((unsigned long)(__addr))
#define inl(__addr) __readl((unsigned long)(__addr))
#define outb_local(__b, __addr) __writeb(__b, (unsigned long)(__addr))
#define outb(__b, __addr) __writeb(__b, (unsigned long)(__addr))
#define outw(__w, __addr) __writew(__w, (unsigned long)(__addr))
#define outl(__l, __addr) __writel(__l, (unsigned long)(__addr))
#define inb_p(__addr) inb(__addr)
#define outb_p(__b, __addr) outb(__b, __addr)
#define inw_p(__addr) inw(__addr)
#define outw_p(__w, __addr) outw(__w, __addr)
#define inl_p(__addr) inl(__addr)
#define outl_p(__l, __addr) outl(__l, __addr)
extern void outsb(unsigned long addr, const void *src, unsigned long cnt);
extern void outsw(unsigned long addr, const void *src, unsigned long cnt);
......@@ -176,25 +176,25 @@ static inline void _sbus_writeb(u8 b, unsigned long addr)
*(volatile u8 *)addr = b;
}
static inline void _sbus_writew(u16 b, unsigned long addr)
static inline void _sbus_writew(u16 w, unsigned long addr)
{
*(volatile u16 *)addr = b;
*(volatile u16 *)addr = w;
}
static inline void _sbus_writel(u32 b, unsigned long addr)
static inline void _sbus_writel(u32 l, unsigned long addr)
{
*(volatile u32 *)addr = b;
*(volatile u32 *)addr = l;
}
/*
* The only reason for #define's is to hide casts to unsigned long.
*/
#define sbus_readb(a) _sbus_readb((unsigned long)(a))
#define sbus_readw(a) _sbus_readw((unsigned long)(a))
#define sbus_readl(a) _sbus_readl((unsigned long)(a))
#define sbus_writeb(v, a) _sbus_writeb(v, (unsigned long)(a))
#define sbus_writew(v, a) _sbus_writew(v, (unsigned long)(a))
#define sbus_writel(v, a) _sbus_writel(v, (unsigned long)(a))
#define sbus_readb(__addr) _sbus_readb((unsigned long)(__addr))
#define sbus_readw(__addr) _sbus_readw((unsigned long)(__addr))
#define sbus_readl(__addr) _sbus_readl((unsigned long)(__addr))
#define sbus_writeb(__b, __addr) _sbus_writeb(__b, (unsigned long)(__addr))
#define sbus_writew(__w, __addr) _sbus_writew(__w, (unsigned long)(__addr))
#define sbus_writel(__l, __addr) _sbus_writel(__l, (unsigned long)(__addr))
static inline void *sbus_memset_io(void *__dst, int c, __kernel_size_t n)
{
......
......@@ -106,9 +106,9 @@
#ifdef CONFIG_SMP
#define LOAD_CURRENT4M(dest_reg, idreg) \
rd %tbr, %idreg; \
sethi %hi(C_LABEL(current_set)), %dest_reg; \
sethi %hi(current_set), %dest_reg; \
srl %idreg, 10, %idreg; \
or %dest_reg, %lo(C_LABEL(current_set)), %dest_reg; \
or %dest_reg, %lo(current_set), %dest_reg; \
and %idreg, 0xc, %idreg; \
ld [%idreg + %dest_reg], %dest_reg;
......@@ -119,15 +119,15 @@
/* Blackbox - take care with this... - check smp4m and smp4d before changing this. */
#define LOAD_CURRENT(dest_reg, idreg) \
sethi %hi(___b_load_current), %idreg; \
sethi %hi(C_LABEL(current_set)), %dest_reg; \
sethi %hi(C_LABEL(boot_cpu_id4)), %idreg; \
or %dest_reg, %lo(C_LABEL(current_set)), %dest_reg; \
ldub [%idreg + %lo(C_LABEL(boot_cpu_id4))], %idreg; \
sethi %hi(current_set), %dest_reg; \
sethi %hi(boot_cpu_id4), %idreg; \
or %dest_reg, %lo(current_set), %dest_reg; \
ldub [%idreg + %lo(boot_cpu_id4)], %idreg; \
ld [%idreg + %dest_reg], %dest_reg;
#else
#define LOAD_CURRENT(dest_reg, idreg) \
sethi %hi(C_LABEL(current_set)), %idreg; \
ld [%idreg + %lo(C_LABEL(current_set))], %dest_reg;
sethi %hi(current_set), %idreg; \
ld [%idreg + %lo(current_set)], %dest_reg;
#endif
#endif /* !(_SPARC_WINMACRO_H) */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment