Commit 8adbb371 authored by Nicolas Pitre, committed by Russell King

[ARM] 3152/1: make various assembly local labels actually local (the rest)

Patch from Nicolas Pitre

For assembly labels to actually be local they must start with ".L", not
just "."; otherwise they remain visible in the final link, clutter
kallsyms needlessly, and can make symbolic backtraces unclear. This
patch simply inserts an "L" where appropriate. The code itself is
unchanged.
Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent a9c4814d
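
For context: the GNU assembler treats any label that begins with ".L" as assembler-local, so it is resolved inside the object file and never emitted into the symbol table, whereas a label that merely begins with "." is an ordinary local-binding symbol that survives into the final link, shows up under nm, and gets pulled into kallsyms. A minimal sketch of the difference, using hypothetical labels that are not part of this patch:

@ labels.S -- illustrative only
	.text
	.global	example
example:
	teq	r0, #0
	beq	.Ldone		@ ".L" prefix: purely assembler-local, no symbol emitted
	b	.visible	@ "." alone: still emitted, clutters nm and kallsyms
.visible:
	add	r0, r0, #1
.Ldone:
	mov	pc, lr

Assembling this and running nm on the object would list .visible but not .Ldone, which is exactly the clutter the patch removes from the checksum, delay and findbit routines below.
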
@@ -26,7 +26,7 @@ td1 .req r4 @ save before use
td2 .req r5 @ save before use
td3 .req lr
-.zero: mov r0, sum
+.Lzero: mov r0, sum
add sp, sp, #4
ldr pc, [sp], #4
@@ -34,8 +34,8 @@ td3 .req lr
* Handle 0 to 7 bytes, with any alignment of source and
* destination pointers. Note that when we get here, C = 0
*/
-.less8: teq len, #0 @ check for zero count
-beq .zero
+.Lless8: teq len, #0 @ check for zero count
+beq .Lzero
/* we must have at least one byte. */
tst buf, #1 @ odd address?
@@ -44,12 +44,12 @@ td3 .req lr
subne len, len, #1
adcnes sum, sum, td0, put_byte_1
-.less4: tst len, #6
-beq .less8_byte
+.Lless4: tst len, #6
+beq .Lless8_byte
/* we are now half-word aligned */
-.less8_wordlp:
+.Lless8_wordlp:
#if __LINUX_ARM_ARCH__ >= 4
ldrh td0, [buf], #2
sub len, len, #2
@@ -65,19 +65,19 @@ td3 .req lr
#endif
adcs sum, sum, td0
tst len, #6
-bne .less8_wordlp
+bne .Lless8_wordlp
-.less8_byte: tst len, #1 @ odd number of bytes
+.Lless8_byte: tst len, #1 @ odd number of bytes
ldrneb td0, [buf], #1 @ include last byte
adcnes sum, sum, td0, put_byte_0 @ update checksum
-.done: adc r0, sum, #0 @ collect up the last carry
+.Ldone: adc r0, sum, #0 @ collect up the last carry
ldr td0, [sp], #4
tst td0, #1 @ check buffer alignment
movne r0, r0, ror #8 @ rotate checksum by 8 bits
ldr pc, [sp], #4 @ return
-.not_aligned: tst buf, #1 @ odd address
+.Lnot_aligned: tst buf, #1 @ odd address
ldrneb td0, [buf], #1 @ make even
subne len, len, #1
adcnes sum, sum, td0, put_byte_1 @ update checksum
@@ -102,14 +102,14 @@ td3 .req lr
ENTRY(csum_partial)
stmfd sp!, {buf, lr}
cmp len, #8 @ Ensure that we have at least
-blo .less8 @ 8 bytes to copy.
+blo .Lless8 @ 8 bytes to copy.
tst buf, #1
movne sum, sum, ror #8
adds sum, sum, #0 @ C = 0
tst buf, #3 @ Test destination alignment
-blne .not_aligned @ aligh destination, return here
+blne .Lnot_aligned @ align destination, return here
1: bics ip, len, #31
beq 3f
@@ -131,11 +131,11 @@ ENTRY(csum_partial)
ldmfd sp!, {r4 - r5}
3: tst len, #0x1c @ should not change C
-beq .less4
+beq .Lless4
4: ldr td0, [buf], #4
sub len, len, #4
adcs sum, sum, td0
tst len, #0x1c
bne 4b
-b .less4
+b .Lless4
@@ -22,7 +22,7 @@ dst .req r1
len .req r2
sum .req r3
-.zero: mov r0, sum
+.Lzero: mov r0, sum
load_regs ea
/*
@@ -31,8 +31,9 @@ sum .req r3
* the length. Note that the source pointer hasn't been
* aligned yet.
*/
-.dst_unaligned: tst dst, #1
-beq .dst_16bit
+.Ldst_unaligned:
+tst dst, #1
+beq .Ldst_16bit
load1b ip
sub len, len, #1
@@ -41,7 +42,7 @@ sum .req r3
tst dst, #2
moveq pc, lr @ dst is now 32bit aligned
-.dst_16bit: load2b r8, ip
+.Ldst_16bit: load2b r8, ip
sub len, len, #2
adcs sum, sum, r8, put_byte_0
strb r8, [dst], #1
@@ -53,12 +54,12 @@ sum .req r3
* Handle 0 to 7 bytes, with any alignment of source and
* destination pointers. Note that when we get here, C = 0
*/
-.less8: teq len, #0 @ check for zero count
-beq .zero
+.Lless8: teq len, #0 @ check for zero count
+beq .Lzero
/* we must have at least one byte. */
tst dst, #1 @ dst 16-bit aligned
-beq .less8_aligned
+beq .Lless8_aligned
/* Align dst */
load1b ip
@@ -66,7 +67,7 @@ sum .req r3
adcs sum, sum, ip, put_byte_1 @ update checksum
strb ip, [dst], #1
tst len, #6
-beq .less8_byteonly
+beq .Lless8_byteonly
1: load2b r8, ip
sub len, len, #2
@@ -74,15 +75,16 @@ sum .req r3
strb r8, [dst], #1
adcs sum, sum, ip, put_byte_1
strb ip, [dst], #1
-.less8_aligned: tst len, #6
+.Lless8_aligned:
+tst len, #6
bne 1b
-.less8_byteonly:
+.Lless8_byteonly:
tst len, #1
-beq .done
+beq .Ldone
load1b r8
adcs sum, sum, r8, put_byte_0 @ update checksum
strb r8, [dst], #1
-b .done
+b .Ldone
FN_ENTRY
mov ip, sp
@@ -90,11 +92,11 @@ FN_ENTRY
sub fp, ip, #4
cmp len, #8 @ Ensure that we have at least
-blo .less8 @ 8 bytes to copy.
+blo .Lless8 @ 8 bytes to copy.
adds sum, sum, #0 @ C = 0
tst dst, #3 @ Test destination alignment
-blne .dst_unaligned @ align destination, return here
+blne .Ldst_unaligned @ align destination, return here
/*
* Ok, the dst pointer is now 32bit aligned, and we know
@@ -103,7 +105,7 @@ FN_ENTRY
*/
tst src, #3 @ Test source alignment
-bne .src_not_aligned
+bne .Lsrc_not_aligned
/* Routine for src & dst aligned */
@@ -136,17 +138,17 @@ FN_ENTRY
adcs sum, sum, r4
4: ands len, len, #3
-beq .done
+beq .Ldone
load1l r4
tst len, #2
mov r5, r4, get_byte_0
-beq .exit
+beq .Lexit
adcs sum, sum, r4, push #16
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
mov r5, r4, get_byte_2
-.exit: tst len, #1
+.Lexit: tst len, #1
strneb r5, [dst], #1
andne r5, r5, #255
adcnes sum, sum, r5, put_byte_0
@@ -157,20 +159,20 @@ FN_ENTRY
* the inefficient byte manipulations in the
* architecture independent code.
*/
-.done: adc r0, sum, #0
+.Ldone: adc r0, sum, #0
ldr sum, [sp, #0] @ dst
tst sum, #1
movne r0, r0, ror #8
load_regs ea
-.src_not_aligned:
+.Lsrc_not_aligned:
adc sum, sum, #0 @ include C from dst alignment
and ip, src, #3
bic src, src, #3
load1l r5
cmp ip, #2
-beq .src2_aligned
-bhi .src3_aligned
+beq .Lsrc2_aligned
+bhi .Lsrc3_aligned
mov r4, r5, pull #8 @ C = 0
bics ip, len, #15
beq 2f
@@ -211,18 +213,18 @@ FN_ENTRY
adcs sum, sum, r4
mov r4, r5, pull #8
4: ands len, len, #3
-beq .done
+beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
-beq .exit
+beq .Lexit
adcs sum, sum, r4, push #16
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
mov r5, r4, get_byte_2
-b .exit
+b .Lexit
-.src2_aligned: mov r4, r5, pull #16
+.Lsrc2_aligned: mov r4, r5, pull #16
adds sum, sum, #0
bics ip, len, #15
beq 2f
@@ -263,20 +265,20 @@ FN_ENTRY
adcs sum, sum, r4
mov r4, r5, pull #16
4: ands len, len, #3
-beq .done
+beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
-beq .exit
+beq .Lexit
adcs sum, sum, r4
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
tst len, #1
-beq .done
+beq .Ldone
load1b r5
-b .exit
+b .Lexit
-.src3_aligned: mov r4, r5, pull #24
+.Lsrc3_aligned: mov r4, r5, pull #24
adds sum, sum, #0
bics ip, len, #15
beq 2f
@@ -317,10 +319,10 @@ FN_ENTRY
adcs sum, sum, r4
mov r4, r5, pull #24
4: ands len, len, #3
-beq .done
+beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
-beq .exit
+beq .Lexit
strb r5, [dst], #1
adcs sum, sum, r4
load1l r4
@@ -328,4 +330,4 @@ FN_ENTRY
strb r5, [dst], #1
adcs sum, sum, r4, push #24
mov r5, r4, get_byte_1
-b .exit
+b .Lexit
@@ -11,7 +11,7 @@
#include <asm/assembler.h>
.text
-LC0: .word loops_per_jiffy
+.LC0: .word loops_per_jiffy
/*
* 0 <= r0 <= 2000
@@ -21,7 +21,7 @@ ENTRY(__udelay)
orr r2, r2, #0x00db
mul r0, r2, r0
ENTRY(__const_udelay) @ 0 <= r0 <= 0x01ffffff
-ldr r2, LC0
+ldr r2, .LC0
ldr r2, [r2] @ max = 0x0fffffff
mov r0, r0, lsr #11 @ max = 0x00003fff
mov r2, r2, lsr #11 @ max = 0x0003ffff
@@ -27,7 +27,7 @@ ENTRY(_find_first_zero_bit_le)
mov r2, #0
1: ldrb r3, [r0, r2, lsr #3]
eors r3, r3, #0xff @ invert bits
-bne .found @ any now set - found zero bit
+bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
@@ -46,7 +46,7 @@ ENTRY(_find_next_zero_bit_le)
ldrb r3, [r0, r2, lsr #3]
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
-bne .found
+bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
@@ -61,7 +61,7 @@ ENTRY(_find_first_bit_le)
mov r2, #0
1: ldrb r3, [r0, r2, lsr #3]
movs r3, r3
-bne .found @ any now set - found zero bit
+bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
@@ -79,7 +79,7 @@ ENTRY(_find_next_bit_le)
beq 1b @ If new byte, goto old routine
ldrb r3, [r0, r2, lsr #3]
movs r3, r3, lsr ip @ shift off unused bits
-bne .found
+bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
@@ -93,7 +93,7 @@ ENTRY(_find_first_zero_bit_be)
1: eor r3, r2, #0x18 @ big endian byte ordering
ldrb r3, [r0, r3, lsr #3]
eors r3, r3, #0xff @ invert bits
-bne .found @ any now set - found zero bit
+bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
@@ -109,7 +109,7 @@ ENTRY(_find_next_zero_bit_be)
ldrb r3, [r0, r3, lsr #3]
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
-bne .found
+bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
@@ -121,7 +121,7 @@ ENTRY(_find_first_bit_be)
1: eor r3, r2, #0x18 @ big endian byte ordering
ldrb r3, [r0, r3, lsr #3]
movs r3, r3
-bne .found @ any now set - found zero bit
+bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
@@ -136,7 +136,7 @@ ENTRY(_find_next_bit_be)
eor r3, r2, #0x18 @ big endian byte ordering
ldrb r3, [r0, r3, lsr #3]
movs r3, r3, lsr ip @ shift off unused bits
-bne .found
+bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
@@ -146,7 +146,7 @@ ENTRY(_find_next_bit_be)
/*
* One or more bits in the LSB of r3 are assumed to be set.
*/
-.found:
+.L_found:
#if __LINUX_ARM_ARCH__ >= 5
rsb r1, r3, #0
and r3, r3, r1