Commit 8adbb3718d6cead304f84f7dd60ad65274df0b15

Authored by Nicolas Pitre
Committed by Russell King
1 parent a9c4814d8d

[ARM] 3152/1: make various assembly local labels actually local (the rest)

Patch from Nicolas Pitre

For assembly labels to actually be local they must start with ".L" and
not only "." otherwise they still remain visible in the final link and
clutter kallsyms needlessly, and possibly make for unclear symbolic
backtraces. This patch simply inserts a "L" where appropriate. The code
itself is unchanged.

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
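
For context, a minimal sketch of the difference (not part of the patch; the
label names below are invented for illustration): with the GNU assembler on
ELF targets, symbols whose names begin with ".L" are treated as local labels
and are not written to the object file's symbol table, while a name that
merely begins with "." is an ordinary symbol that survives the final link.

	.text
	@ Ordinary symbol: kept in the symbol table, so it ends up in
	@ kallsyms and can appear in symbolic backtraces.
.not_local:
	mov	r0, #0

	@ Local label (".L" prefix): resolved by the assembler and then
	@ discarded, so it never clutters the symbol table.
.Lis_local:
	mov	r0, #1

Running "nm" on the assembled object shows .not_local but no trace of
.Lis_local, which is the cleanup this patch applies across the affected files.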

Showing 4 changed files with 61 additions and 59 deletions

arch/arm/lib/csumpartial.S
... ... @@ -26,7 +26,7 @@
26 26 td2 .req r5 @ save before use
27 27 td3 .req lr
28 28  
29   -.zero: mov r0, sum
  29 +.Lzero: mov r0, sum
30 30 add sp, sp, #4
31 31 ldr pc, [sp], #4
32 32  
... ... @@ -34,8 +34,8 @@
34 34 * Handle 0 to 7 bytes, with any alignment of source and
35 35 * destination pointers. Note that when we get here, C = 0
36 36 */
37   -.less8: teq len, #0 @ check for zero count
38   - beq .zero
  37 +.Lless8: teq len, #0 @ check for zero count
  38 + beq .Lzero
39 39  
40 40 /* we must have at least one byte. */
41 41 tst buf, #1 @ odd address?
42 42  
... ... @@ -44,12 +44,12 @@
44 44 subne len, len, #1
45 45 adcnes sum, sum, td0, put_byte_1
46 46  
47   -.less4: tst len, #6
48   - beq .less8_byte
  47 +.Lless4: tst len, #6
  48 + beq .Lless8_byte
49 49  
50 50 /* we are now half-word aligned */
51 51  
52   -.less8_wordlp:
  52 +.Lless8_wordlp:
53 53 #if __LINUX_ARM_ARCH__ >= 4
54 54 ldrh td0, [buf], #2
55 55 sub len, len, #2
56 56  
57 57  
58 58  
... ... @@ -65,19 +65,19 @@
65 65 #endif
66 66 adcs sum, sum, td0
67 67 tst len, #6
68   - bne .less8_wordlp
  68 + bne .Lless8_wordlp
69 69  
70   -.less8_byte: tst len, #1 @ odd number of bytes
  70 +.Lless8_byte: tst len, #1 @ odd number of bytes
71 71 ldrneb td0, [buf], #1 @ include last byte
72 72 adcnes sum, sum, td0, put_byte_0 @ update checksum
73 73  
74   -.done: adc r0, sum, #0 @ collect up the last carry
  74 +.Ldone: adc r0, sum, #0 @ collect up the last carry
75 75 ldr td0, [sp], #4
76 76 tst td0, #1 @ check buffer alignment
77 77 movne r0, r0, ror #8 @ rotate checksum by 8 bits
78 78 ldr pc, [sp], #4 @ return
79 79  
80   -.not_aligned: tst buf, #1 @ odd address
  80 +.Lnot_aligned: tst buf, #1 @ odd address
81 81 ldrneb td0, [buf], #1 @ make even
82 82 subne len, len, #1
83 83 adcnes sum, sum, td0, put_byte_1 @ update checksum
84 84  
... ... @@ -102,14 +102,14 @@
102 102 ENTRY(csum_partial)
103 103 stmfd sp!, {buf, lr}
104 104 cmp len, #8 @ Ensure that we have at least
105   - blo .less8 @ 8 bytes to copy.
  105 + blo .Lless8 @ 8 bytes to copy.
106 106  
107 107 tst buf, #1
108 108 movne sum, sum, ror #8
109 109  
110 110 adds sum, sum, #0 @ C = 0
111 111 tst buf, #3 @ Test destination alignment
112   - blne .not_aligned @ aligh destination, return here
  112 + blne .Lnot_aligned @ align destination, return here
113 113  
114 114 1: bics ip, len, #31
115 115 beq 3f
116 116  
... ... @@ -131,12 +131,12 @@
131 131 ldmfd sp!, {r4 - r5}
132 132  
133 133 3: tst len, #0x1c @ should not change C
134   - beq .less4
  134 + beq .Lless4
135 135  
136 136 4: ldr td0, [buf], #4
137 137 sub len, len, #4
138 138 adcs sum, sum, td0
139 139 tst len, #0x1c
140 140 bne 4b
141   - b .less4
  141 + b .Lless4
arch/arm/lib/csumpartialcopygeneric.S
... ... @@ -22,7 +22,7 @@
22 22 len .req r2
23 23 sum .req r3
24 24  
25   -.zero: mov r0, sum
  25 +.Lzero: mov r0, sum
26 26 load_regs ea
27 27  
28 28 /*
... ... @@ -31,8 +31,9 @@
31 31 * the length. Note that the source pointer hasn't been
32 32 * aligned yet.
33 33 */
34   -.dst_unaligned: tst dst, #1
35   - beq .dst_16bit
  34 +.Ldst_unaligned:
  35 + tst dst, #1
  36 + beq .Ldst_16bit
36 37  
37 38 load1b ip
38 39 sub len, len, #1
... ... @@ -41,7 +42,7 @@
41 42 tst dst, #2
42 43 moveq pc, lr @ dst is now 32bit aligned
43 44  
44   -.dst_16bit: load2b r8, ip
  45 +.Ldst_16bit: load2b r8, ip
45 46 sub len, len, #2
46 47 adcs sum, sum, r8, put_byte_0
47 48 strb r8, [dst], #1
48 49  
... ... @@ -53,12 +54,12 @@
53 54 * Handle 0 to 7 bytes, with any alignment of source and
54 55 * destination pointers. Note that when we get here, C = 0
55 56 */
56   -.less8: teq len, #0 @ check for zero count
57   - beq .zero
  57 +.Lless8: teq len, #0 @ check for zero count
  58 + beq .Lzero
58 59  
59 60 /* we must have at least one byte. */
60 61 tst dst, #1 @ dst 16-bit aligned
61   - beq .less8_aligned
  62 + beq .Lless8_aligned
62 63  
63 64 /* Align dst */
64 65 load1b ip
... ... @@ -66,7 +67,7 @@
66 67 adcs sum, sum, ip, put_byte_1 @ update checksum
67 68 strb ip, [dst], #1
68 69 tst len, #6
69   - beq .less8_byteonly
  70 + beq .Lless8_byteonly
70 71  
71 72 1: load2b r8, ip
72 73 sub len, len, #2
73 74  
74 75  
75 76  
... ... @@ -74,15 +75,16 @@
74 75 strb r8, [dst], #1
75 76 adcs sum, sum, ip, put_byte_1
76 77 strb ip, [dst], #1
77   -.less8_aligned: tst len, #6
  78 +.Lless8_aligned:
  79 + tst len, #6
78 80 bne 1b
79   -.less8_byteonly:
  81 +.Lless8_byteonly:
80 82 tst len, #1
81   - beq .done
  83 + beq .Ldone
82 84 load1b r8
83 85 adcs sum, sum, r8, put_byte_0 @ update checksum
84 86 strb r8, [dst], #1
85   - b .done
  87 + b .Ldone
86 88  
87 89 FN_ENTRY
88 90 mov ip, sp
89 91  
... ... @@ -90,11 +92,11 @@
90 92 sub fp, ip, #4
91 93  
92 94 cmp len, #8 @ Ensure that we have at least
93   - blo .less8 @ 8 bytes to copy.
  95 + blo .Lless8 @ 8 bytes to copy.
94 96  
95 97 adds sum, sum, #0 @ C = 0
96 98 tst dst, #3 @ Test destination alignment
97   - blne .dst_unaligned @ align destination, return here
  99 + blne .Ldst_unaligned @ align destination, return here
98 100  
99 101 /*
100 102 * Ok, the dst pointer is now 32bit aligned, and we know
... ... @@ -103,7 +105,7 @@
103 105 */
104 106  
105 107 tst src, #3 @ Test source alignment
106   - bne .src_not_aligned
  108 + bne .Lsrc_not_aligned
107 109  
108 110 /* Routine for src & dst aligned */
109 111  
110 112  
111 113  
... ... @@ -136,17 +138,17 @@
136 138 adcs sum, sum, r4
137 139  
138 140 4: ands len, len, #3
139   - beq .done
  141 + beq .Ldone
140 142 load1l r4
141 143 tst len, #2
142 144 mov r5, r4, get_byte_0
143   - beq .exit
  145 + beq .Lexit
144 146 adcs sum, sum, r4, push #16
145 147 strb r5, [dst], #1
146 148 mov r5, r4, get_byte_1
147 149 strb r5, [dst], #1
148 150 mov r5, r4, get_byte_2
149   -.exit: tst len, #1
  151 +.Lexit: tst len, #1
150 152 strneb r5, [dst], #1
151 153 andne r5, r5, #255
152 154 adcnes sum, sum, r5, put_byte_0
153 155  
154 156  
... ... @@ -157,20 +159,20 @@
157 159 * the inefficient byte manipulations in the
158 160 * architecture independent code.
159 161 */
160   -.done: adc r0, sum, #0
  162 +.Ldone: adc r0, sum, #0
161 163 ldr sum, [sp, #0] @ dst
162 164 tst sum, #1
163 165 movne r0, r0, ror #8
164 166 load_regs ea
165 167  
166   -.src_not_aligned:
  168 +.Lsrc_not_aligned:
167 169 adc sum, sum, #0 @ include C from dst alignment
168 170 and ip, src, #3
169 171 bic src, src, #3
170 172 load1l r5
171 173 cmp ip, #2
172   - beq .src2_aligned
173   - bhi .src3_aligned
  174 + beq .Lsrc2_aligned
  175 + bhi .Lsrc3_aligned
174 176 mov r4, r5, pull #8 @ C = 0
175 177 bics ip, len, #15
176 178 beq 2f
177 179  
178 180  
179 181  
... ... @@ -211,18 +213,18 @@
211 213 adcs sum, sum, r4
212 214 mov r4, r5, pull #8
213 215 4: ands len, len, #3
214   - beq .done
  216 + beq .Ldone
215 217 mov r5, r4, get_byte_0
216 218 tst len, #2
217   - beq .exit
  219 + beq .Lexit
218 220 adcs sum, sum, r4, push #16
219 221 strb r5, [dst], #1
220 222 mov r5, r4, get_byte_1
221 223 strb r5, [dst], #1
222 224 mov r5, r4, get_byte_2
223   - b .exit
  225 + b .Lexit
224 226  
225   -.src2_aligned: mov r4, r5, pull #16
  227 +.Lsrc2_aligned: mov r4, r5, pull #16
226 228 adds sum, sum, #0
227 229 bics ip, len, #15
228 230 beq 2f
229 231  
230 232  
231 233  
232 234  
... ... @@ -263,20 +265,20 @@
263 265 adcs sum, sum, r4
264 266 mov r4, r5, pull #16
265 267 4: ands len, len, #3
266   - beq .done
  268 + beq .Ldone
267 269 mov r5, r4, get_byte_0
268 270 tst len, #2
269   - beq .exit
  271 + beq .Lexit
270 272 adcs sum, sum, r4
271 273 strb r5, [dst], #1
272 274 mov r5, r4, get_byte_1
273 275 strb r5, [dst], #1
274 276 tst len, #1
275   - beq .done
  277 + beq .Ldone
276 278 load1b r5
277   - b .exit
  279 + b .Lexit
278 280  
279   -.src3_aligned: mov r4, r5, pull #24
  281 +.Lsrc3_aligned: mov r4, r5, pull #24
280 282 adds sum, sum, #0
281 283 bics ip, len, #15
282 284 beq 2f
283 285  
... ... @@ -317,10 +319,10 @@
317 319 adcs sum, sum, r4
318 320 mov r4, r5, pull #24
319 321 4: ands len, len, #3
320   - beq .done
  322 + beq .Ldone
321 323 mov r5, r4, get_byte_0
322 324 tst len, #2
323   - beq .exit
  325 + beq .Lexit
324 326 strb r5, [dst], #1
325 327 adcs sum, sum, r4
326 328 load1l r4
... ... @@ -328,5 +330,5 @@
328 330 strb r5, [dst], #1
329 331 adcs sum, sum, r4, push #24
330 332 mov r5, r4, get_byte_1
331   - b .exit
  333 + b .Lexit
arch/arm/lib/delay.S
... ... @@ -11,7 +11,7 @@
11 11 #include <asm/assembler.h>
12 12 .text
13 13  
14   -LC0: .word loops_per_jiffy
  14 +.LC0: .word loops_per_jiffy
15 15  
16 16 /*
17 17 * 0 <= r0 <= 2000
... ... @@ -21,7 +21,7 @@
21 21 orr r2, r2, #0x00db
22 22 mul r0, r2, r0
23 23 ENTRY(__const_udelay) @ 0 <= r0 <= 0x01ffffff
24   - ldr r2, LC0
  24 + ldr r2, .LC0
25 25 ldr r2, [r2] @ max = 0x0fffffff
26 26 mov r0, r0, lsr #11 @ max = 0x00003fff
27 27 mov r2, r2, lsr #11 @ max = 0x0003ffff
arch/arm/lib/findbit.S
... ... @@ -27,7 +27,7 @@
27 27 mov r2, #0
28 28 1: ldrb r3, [r0, r2, lsr #3]
29 29 eors r3, r3, #0xff @ invert bits
30   - bne .found @ any now set - found zero bit
  30 + bne .L_found @ any now set - found zero bit
31 31 add r2, r2, #8 @ next bit pointer
32 32 2: cmp r2, r1 @ any more?
33 33 blo 1b
... ... @@ -46,7 +46,7 @@
46 46 ldrb r3, [r0, r2, lsr #3]
47 47 eor r3, r3, #0xff @ now looking for a 1 bit
48 48 movs r3, r3, lsr ip @ shift off unused bits
49   - bne .found
  49 + bne .L_found
50 50 orr r2, r2, #7 @ if zero, then no bits here
51 51 add r2, r2, #1 @ align bit pointer
52 52 b 2b @ loop for next bit
... ... @@ -61,7 +61,7 @@
61 61 mov r2, #0
62 62 1: ldrb r3, [r0, r2, lsr #3]
63 63 movs r3, r3
64   - bne .found @ any now set - found zero bit
  64 + bne .L_found @ any now set - found zero bit
65 65 add r2, r2, #8 @ next bit pointer
66 66 2: cmp r2, r1 @ any more?
67 67 blo 1b
... ... @@ -79,7 +79,7 @@
79 79 beq 1b @ If new byte, goto old routine
80 80 ldrb r3, [r0, r2, lsr #3]
81 81 movs r3, r3, lsr ip @ shift off unused bits
82   - bne .found
  82 + bne .L_found
83 83 orr r2, r2, #7 @ if zero, then no bits here
84 84 add r2, r2, #1 @ align bit pointer
85 85 b 2b @ loop for next bit
... ... @@ -93,7 +93,7 @@
93 93 1: eor r3, r2, #0x18 @ big endian byte ordering
94 94 ldrb r3, [r0, r3, lsr #3]
95 95 eors r3, r3, #0xff @ invert bits
96   - bne .found @ any now set - found zero bit
  96 + bne .L_found @ any now set - found zero bit
97 97 add r2, r2, #8 @ next bit pointer
98 98 2: cmp r2, r1 @ any more?
99 99 blo 1b
... ... @@ -109,7 +109,7 @@
109 109 ldrb r3, [r0, r3, lsr #3]
110 110 eor r3, r3, #0xff @ now looking for a 1 bit
111 111 movs r3, r3, lsr ip @ shift off unused bits
112   - bne .found
  112 + bne .L_found
113 113 orr r2, r2, #7 @ if zero, then no bits here
114 114 add r2, r2, #1 @ align bit pointer
115 115 b 2b @ loop for next bit
... ... @@ -121,7 +121,7 @@
121 121 1: eor r3, r2, #0x18 @ big endian byte ordering
122 122 ldrb r3, [r0, r3, lsr #3]
123 123 movs r3, r3
124   - bne .found @ any now set - found zero bit
  124 + bne .L_found @ any now set - found zero bit
125 125 add r2, r2, #8 @ next bit pointer
126 126 2: cmp r2, r1 @ any more?
127 127 blo 1b
... ... @@ -136,7 +136,7 @@
136 136 eor r3, r2, #0x18 @ big endian byte ordering
137 137 ldrb r3, [r0, r3, lsr #3]
138 138 movs r3, r3, lsr ip @ shift off unused bits
139   - bne .found
  139 + bne .L_found
140 140 orr r2, r2, #7 @ if zero, then no bits here
141 141 add r2, r2, #1 @ align bit pointer
142 142 b 2b @ loop for next bit
... ... @@ -146,7 +146,7 @@
146 146 /*
147 147 * One or more bits in the LSB of r3 are assumed to be set.
148 148 */
149   -.found:
  149 +.L_found:
150 150 #if __LINUX_ARM_ARCH__ >= 5
151 151 rsb r1, r3, #0
152 152 and r3, r3, r1