Commit fb5e2b379732e1a6ea32392980bb42e0212db842

Authored by Jan Beulich
Committed by Sam Ravnborg
1 parent 37a4c94074

vmlinux.lds: move __attribute__((__cold__)) functions back into final .text section

Due to the addition of __attribute__((__cold__)) to a few symbols
without adjusting the linker scripts, those symbols currently may end
up outside the [_stext,_etext) range, as they get placed in
.text.unlikely by (at least) gcc 4.3.0. This may confuse not only code
outside of the kernel; symbol_put_addr()'s BUG() could also trigger.
Hence we need to add .text.unlikely (and for future uses of
__attribute__((__hot__)) also .text.hot) to the TEXT_TEXT() macro.

Issue observed by Lukas Lipavsky.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Tested-by: Lukas Lipavsky <llipavsky@suse.cz>
Cc: <stable@kernel.org>
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>

Showing 1 changed file with 3 additions and 1 deletion Inline Diff

include/asm-generic/vmlinux.lds.h
1 #ifndef LOAD_OFFSET 1 #ifndef LOAD_OFFSET
2 #define LOAD_OFFSET 0 2 #define LOAD_OFFSET 0
3 #endif 3 #endif
4 4
5 #ifndef VMLINUX_SYMBOL 5 #ifndef VMLINUX_SYMBOL
6 #define VMLINUX_SYMBOL(_sym_) _sym_ 6 #define VMLINUX_SYMBOL(_sym_) _sym_
7 #endif 7 #endif
8 8
9 /* Align . to a 8 byte boundary equals to maximum function alignment. */ 9 /* Align . to a 8 byte boundary equals to maximum function alignment. */
10 #define ALIGN_FUNCTION() . = ALIGN(8) 10 #define ALIGN_FUNCTION() . = ALIGN(8)
11 11
12 /* The actual configuration determine if the init/exit sections 12 /* The actual configuration determine if the init/exit sections
13 * are handled as text/data or they can be discarded (which 13 * are handled as text/data or they can be discarded (which
14 * often happens at runtime) 14 * often happens at runtime)
15 */ 15 */
16 #ifdef CONFIG_HOTPLUG 16 #ifdef CONFIG_HOTPLUG
17 #define DEV_KEEP(sec) *(.dev##sec) 17 #define DEV_KEEP(sec) *(.dev##sec)
18 #define DEV_DISCARD(sec) 18 #define DEV_DISCARD(sec)
19 #else 19 #else
20 #define DEV_KEEP(sec) 20 #define DEV_KEEP(sec)
21 #define DEV_DISCARD(sec) *(.dev##sec) 21 #define DEV_DISCARD(sec) *(.dev##sec)
22 #endif 22 #endif
23 23
24 #ifdef CONFIG_HOTPLUG_CPU 24 #ifdef CONFIG_HOTPLUG_CPU
25 #define CPU_KEEP(sec) *(.cpu##sec) 25 #define CPU_KEEP(sec) *(.cpu##sec)
26 #define CPU_DISCARD(sec) 26 #define CPU_DISCARD(sec)
27 #else 27 #else
28 #define CPU_KEEP(sec) 28 #define CPU_KEEP(sec)
29 #define CPU_DISCARD(sec) *(.cpu##sec) 29 #define CPU_DISCARD(sec) *(.cpu##sec)
30 #endif 30 #endif
31 31
32 #if defined(CONFIG_MEMORY_HOTPLUG) 32 #if defined(CONFIG_MEMORY_HOTPLUG)
33 #define MEM_KEEP(sec) *(.mem##sec) 33 #define MEM_KEEP(sec) *(.mem##sec)
34 #define MEM_DISCARD(sec) 34 #define MEM_DISCARD(sec)
35 #else 35 #else
36 #define MEM_KEEP(sec) 36 #define MEM_KEEP(sec)
37 #define MEM_DISCARD(sec) *(.mem##sec) 37 #define MEM_DISCARD(sec) *(.mem##sec)
38 #endif 38 #endif
39 39
40 40
41 /* .data section */ 41 /* .data section */
42 #define DATA_DATA \ 42 #define DATA_DATA \
43 *(.data) \ 43 *(.data) \
44 *(.data.init.refok) \ 44 *(.data.init.refok) \
45 *(.ref.data) \ 45 *(.ref.data) \
46 DEV_KEEP(init.data) \ 46 DEV_KEEP(init.data) \
47 DEV_KEEP(exit.data) \ 47 DEV_KEEP(exit.data) \
48 CPU_KEEP(init.data) \ 48 CPU_KEEP(init.data) \
49 CPU_KEEP(exit.data) \ 49 CPU_KEEP(exit.data) \
50 MEM_KEEP(init.data) \ 50 MEM_KEEP(init.data) \
51 MEM_KEEP(exit.data) \ 51 MEM_KEEP(exit.data) \
52 . = ALIGN(8); \ 52 . = ALIGN(8); \
53 VMLINUX_SYMBOL(__start___markers) = .; \ 53 VMLINUX_SYMBOL(__start___markers) = .; \
54 *(__markers) \ 54 *(__markers) \
55 VMLINUX_SYMBOL(__stop___markers) = .; 55 VMLINUX_SYMBOL(__stop___markers) = .;
56 56
57 #define RO_DATA(align) \ 57 #define RO_DATA(align) \
58 . = ALIGN((align)); \ 58 . = ALIGN((align)); \
59 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ 59 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
60 VMLINUX_SYMBOL(__start_rodata) = .; \ 60 VMLINUX_SYMBOL(__start_rodata) = .; \
61 *(.rodata) *(.rodata.*) \ 61 *(.rodata) *(.rodata.*) \
62 *(__vermagic) /* Kernel version magic */ \ 62 *(__vermagic) /* Kernel version magic */ \
63 *(__markers_strings) /* Markers: strings */ \ 63 *(__markers_strings) /* Markers: strings */ \
64 } \ 64 } \
65 \ 65 \
66 .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \ 66 .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
67 *(.rodata1) \ 67 *(.rodata1) \
68 } \ 68 } \
69 \ 69 \
70 BUG_TABLE \ 70 BUG_TABLE \
71 \ 71 \
72 /* PCI quirks */ \ 72 /* PCI quirks */ \
73 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ 73 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
74 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ 74 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
75 *(.pci_fixup_early) \ 75 *(.pci_fixup_early) \
76 VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \ 76 VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
77 VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \ 77 VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
78 *(.pci_fixup_header) \ 78 *(.pci_fixup_header) \
79 VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \ 79 VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
80 VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \ 80 VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
81 *(.pci_fixup_final) \ 81 *(.pci_fixup_final) \
82 VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \ 82 VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
83 VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \ 83 VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
84 *(.pci_fixup_enable) \ 84 *(.pci_fixup_enable) \
85 VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \ 85 VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
86 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \ 86 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
87 *(.pci_fixup_resume) \ 87 *(.pci_fixup_resume) \
88 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \ 88 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
89 VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \ 89 VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
90 *(.pci_fixup_resume_early) \ 90 *(.pci_fixup_resume_early) \
91 VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \ 91 VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
92 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \ 92 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
93 *(.pci_fixup_suspend) \ 93 *(.pci_fixup_suspend) \
94 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \ 94 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
95 } \ 95 } \
96 \ 96 \
97 /* Built-in firmware blobs */ \ 97 /* Built-in firmware blobs */ \
98 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \ 98 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
99 VMLINUX_SYMBOL(__start_builtin_fw) = .; \ 99 VMLINUX_SYMBOL(__start_builtin_fw) = .; \
100 *(.builtin_fw) \ 100 *(.builtin_fw) \
101 VMLINUX_SYMBOL(__end_builtin_fw) = .; \ 101 VMLINUX_SYMBOL(__end_builtin_fw) = .; \
102 } \ 102 } \
103 \ 103 \
104 /* RapidIO route ops */ \ 104 /* RapidIO route ops */ \
105 .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \ 105 .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
106 VMLINUX_SYMBOL(__start_rio_route_ops) = .; \ 106 VMLINUX_SYMBOL(__start_rio_route_ops) = .; \
107 *(.rio_route_ops) \ 107 *(.rio_route_ops) \
108 VMLINUX_SYMBOL(__end_rio_route_ops) = .; \ 108 VMLINUX_SYMBOL(__end_rio_route_ops) = .; \
109 } \ 109 } \
110 \ 110 \
111 TRACEDATA \ 111 TRACEDATA \
112 \ 112 \
113 /* Kernel symbol table: Normal symbols */ \ 113 /* Kernel symbol table: Normal symbols */ \
114 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ 114 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
115 VMLINUX_SYMBOL(__start___ksymtab) = .; \ 115 VMLINUX_SYMBOL(__start___ksymtab) = .; \
116 *(__ksymtab) \ 116 *(__ksymtab) \
117 VMLINUX_SYMBOL(__stop___ksymtab) = .; \ 117 VMLINUX_SYMBOL(__stop___ksymtab) = .; \
118 } \ 118 } \
119 \ 119 \
120 /* Kernel symbol table: GPL-only symbols */ \ 120 /* Kernel symbol table: GPL-only symbols */ \
121 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ 121 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
122 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ 122 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
123 *(__ksymtab_gpl) \ 123 *(__ksymtab_gpl) \
124 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ 124 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
125 } \ 125 } \
126 \ 126 \
127 /* Kernel symbol table: Normal unused symbols */ \ 127 /* Kernel symbol table: Normal unused symbols */ \
128 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ 128 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
129 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ 129 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
130 *(__ksymtab_unused) \ 130 *(__ksymtab_unused) \
131 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ 131 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
132 } \ 132 } \
133 \ 133 \
134 /* Kernel symbol table: GPL-only unused symbols */ \ 134 /* Kernel symbol table: GPL-only unused symbols */ \
135 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ 135 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
136 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ 136 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
137 *(__ksymtab_unused_gpl) \ 137 *(__ksymtab_unused_gpl) \
138 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ 138 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
139 } \ 139 } \
140 \ 140 \
141 /* Kernel symbol table: GPL-future-only symbols */ \ 141 /* Kernel symbol table: GPL-future-only symbols */ \
142 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ 142 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
143 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ 143 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
144 *(__ksymtab_gpl_future) \ 144 *(__ksymtab_gpl_future) \
145 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ 145 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
146 } \ 146 } \
147 \ 147 \
148 /* Kernel symbol table: Normal symbols */ \ 148 /* Kernel symbol table: Normal symbols */ \
149 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ 149 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
150 VMLINUX_SYMBOL(__start___kcrctab) = .; \ 150 VMLINUX_SYMBOL(__start___kcrctab) = .; \
151 *(__kcrctab) \ 151 *(__kcrctab) \
152 VMLINUX_SYMBOL(__stop___kcrctab) = .; \ 152 VMLINUX_SYMBOL(__stop___kcrctab) = .; \
153 } \ 153 } \
154 \ 154 \
155 /* Kernel symbol table: GPL-only symbols */ \ 155 /* Kernel symbol table: GPL-only symbols */ \
156 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ 156 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
157 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ 157 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
158 *(__kcrctab_gpl) \ 158 *(__kcrctab_gpl) \
159 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ 159 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
160 } \ 160 } \
161 \ 161 \
162 /* Kernel symbol table: Normal unused symbols */ \ 162 /* Kernel symbol table: Normal unused symbols */ \
163 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ 163 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
164 VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ 164 VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
165 *(__kcrctab_unused) \ 165 *(__kcrctab_unused) \
166 VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ 166 VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
167 } \ 167 } \
168 \ 168 \
169 /* Kernel symbol table: GPL-only unused symbols */ \ 169 /* Kernel symbol table: GPL-only unused symbols */ \
170 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ 170 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
171 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ 171 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
172 *(__kcrctab_unused_gpl) \ 172 *(__kcrctab_unused_gpl) \
173 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ 173 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
174 } \ 174 } \
175 \ 175 \
176 /* Kernel symbol table: GPL-future-only symbols */ \ 176 /* Kernel symbol table: GPL-future-only symbols */ \
177 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ 177 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
178 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ 178 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
179 *(__kcrctab_gpl_future) \ 179 *(__kcrctab_gpl_future) \
180 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ 180 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
181 } \ 181 } \
182 \ 182 \
183 /* Kernel symbol table: strings */ \ 183 /* Kernel symbol table: strings */ \
184 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ 184 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
185 *(__ksymtab_strings) \ 185 *(__ksymtab_strings) \
186 } \ 186 } \
187 \ 187 \
188 /* __*init sections */ \ 188 /* __*init sections */ \
189 __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ 189 __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
190 *(.ref.rodata) \ 190 *(.ref.rodata) \
191 DEV_KEEP(init.rodata) \ 191 DEV_KEEP(init.rodata) \
192 DEV_KEEP(exit.rodata) \ 192 DEV_KEEP(exit.rodata) \
193 CPU_KEEP(init.rodata) \ 193 CPU_KEEP(init.rodata) \
194 CPU_KEEP(exit.rodata) \ 194 CPU_KEEP(exit.rodata) \
195 MEM_KEEP(init.rodata) \ 195 MEM_KEEP(init.rodata) \
196 MEM_KEEP(exit.rodata) \ 196 MEM_KEEP(exit.rodata) \
197 } \ 197 } \
198 \ 198 \
199 /* Built-in module parameters. */ \ 199 /* Built-in module parameters. */ \
200 __param : AT(ADDR(__param) - LOAD_OFFSET) { \ 200 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
201 VMLINUX_SYMBOL(__start___param) = .; \ 201 VMLINUX_SYMBOL(__start___param) = .; \
202 *(__param) \ 202 *(__param) \
203 VMLINUX_SYMBOL(__stop___param) = .; \ 203 VMLINUX_SYMBOL(__stop___param) = .; \
204 . = ALIGN((align)); \ 204 . = ALIGN((align)); \
205 VMLINUX_SYMBOL(__end_rodata) = .; \ 205 VMLINUX_SYMBOL(__end_rodata) = .; \
206 } \ 206 } \
207 . = ALIGN((align)); 207 . = ALIGN((align));
208 208
209 /* RODATA provided for backward compatibility. 209 /* RODATA provided for backward compatibility.
210 * All archs are supposed to use RO_DATA() */ 210 * All archs are supposed to use RO_DATA() */
211 #define RODATA RO_DATA(4096) 211 #define RODATA RO_DATA(4096)
212 212
213 #define SECURITY_INIT \ 213 #define SECURITY_INIT \
214 .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ 214 .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
215 VMLINUX_SYMBOL(__security_initcall_start) = .; \ 215 VMLINUX_SYMBOL(__security_initcall_start) = .; \
216 *(.security_initcall.init) \ 216 *(.security_initcall.init) \
217 VMLINUX_SYMBOL(__security_initcall_end) = .; \ 217 VMLINUX_SYMBOL(__security_initcall_end) = .; \
218 } 218 }
219 219
220 /* .text section. Map to function alignment to avoid address changes 220 /* .text section. Map to function alignment to avoid address changes
221 * during second ld run in second ld pass when generating System.map */ 221 * during second ld run in second ld pass when generating System.map */
222 #define TEXT_TEXT \ 222 #define TEXT_TEXT \
223 ALIGN_FUNCTION(); \ 223 ALIGN_FUNCTION(); \
224 *(.text.hot) \
224 *(.text) \ 225 *(.text) \
225 *(.ref.text) \ 226 *(.ref.text) \
226 *(.text.init.refok) \ 227 *(.text.init.refok) \
227 *(.exit.text.refok) \ 228 *(.exit.text.refok) \
228 DEV_KEEP(init.text) \ 229 DEV_KEEP(init.text) \
229 DEV_KEEP(exit.text) \ 230 DEV_KEEP(exit.text) \
230 CPU_KEEP(init.text) \ 231 CPU_KEEP(init.text) \
231 CPU_KEEP(exit.text) \ 232 CPU_KEEP(exit.text) \
232 MEM_KEEP(init.text) \ 233 MEM_KEEP(init.text) \
233 MEM_KEEP(exit.text) 234 MEM_KEEP(exit.text) \
235 *(.text.unlikely)
234 236
235 237
236 /* sched.text is aling to function alignment to secure we have same 238 /* sched.text is aling to function alignment to secure we have same
237 * address even at second ld pass when generating System.map */ 239 * address even at second ld pass when generating System.map */
238 #define SCHED_TEXT \ 240 #define SCHED_TEXT \
239 ALIGN_FUNCTION(); \ 241 ALIGN_FUNCTION(); \
240 VMLINUX_SYMBOL(__sched_text_start) = .; \ 242 VMLINUX_SYMBOL(__sched_text_start) = .; \
241 *(.sched.text) \ 243 *(.sched.text) \
242 VMLINUX_SYMBOL(__sched_text_end) = .; 244 VMLINUX_SYMBOL(__sched_text_end) = .;
243 245
244 /* spinlock.text is aling to function alignment to secure we have same 246 /* spinlock.text is aling to function alignment to secure we have same
245 * address even at second ld pass when generating System.map */ 247 * address even at second ld pass when generating System.map */
246 #define LOCK_TEXT \ 248 #define LOCK_TEXT \
247 ALIGN_FUNCTION(); \ 249 ALIGN_FUNCTION(); \
248 VMLINUX_SYMBOL(__lock_text_start) = .; \ 250 VMLINUX_SYMBOL(__lock_text_start) = .; \
249 *(.spinlock.text) \ 251 *(.spinlock.text) \
250 VMLINUX_SYMBOL(__lock_text_end) = .; 252 VMLINUX_SYMBOL(__lock_text_end) = .;
251 253
252 #define KPROBES_TEXT \ 254 #define KPROBES_TEXT \
253 ALIGN_FUNCTION(); \ 255 ALIGN_FUNCTION(); \
254 VMLINUX_SYMBOL(__kprobes_text_start) = .; \ 256 VMLINUX_SYMBOL(__kprobes_text_start) = .; \
255 *(.kprobes.text) \ 257 *(.kprobes.text) \
256 VMLINUX_SYMBOL(__kprobes_text_end) = .; 258 VMLINUX_SYMBOL(__kprobes_text_end) = .;
257 259
258 /* Section used for early init (in .S files) */ 260 /* Section used for early init (in .S files) */
259 #define HEAD_TEXT *(.head.text) 261 #define HEAD_TEXT *(.head.text)
260 262
261 /* init and exit section handling */ 263 /* init and exit section handling */
262 #define INIT_DATA \ 264 #define INIT_DATA \
263 *(.init.data) \ 265 *(.init.data) \
264 DEV_DISCARD(init.data) \ 266 DEV_DISCARD(init.data) \
265 DEV_DISCARD(init.rodata) \ 267 DEV_DISCARD(init.rodata) \
266 CPU_DISCARD(init.data) \ 268 CPU_DISCARD(init.data) \
267 CPU_DISCARD(init.rodata) \ 269 CPU_DISCARD(init.rodata) \
268 MEM_DISCARD(init.data) \ 270 MEM_DISCARD(init.data) \
269 MEM_DISCARD(init.rodata) 271 MEM_DISCARD(init.rodata)
270 272
271 #define INIT_TEXT \ 273 #define INIT_TEXT \
272 *(.init.text) \ 274 *(.init.text) \
273 DEV_DISCARD(init.text) \ 275 DEV_DISCARD(init.text) \
274 CPU_DISCARD(init.text) \ 276 CPU_DISCARD(init.text) \
275 MEM_DISCARD(init.text) 277 MEM_DISCARD(init.text)
276 278
277 #define EXIT_DATA \ 279 #define EXIT_DATA \
278 *(.exit.data) \ 280 *(.exit.data) \
279 DEV_DISCARD(exit.data) \ 281 DEV_DISCARD(exit.data) \
280 DEV_DISCARD(exit.rodata) \ 282 DEV_DISCARD(exit.rodata) \
281 CPU_DISCARD(exit.data) \ 283 CPU_DISCARD(exit.data) \
282 CPU_DISCARD(exit.rodata) \ 284 CPU_DISCARD(exit.rodata) \
283 MEM_DISCARD(exit.data) \ 285 MEM_DISCARD(exit.data) \
284 MEM_DISCARD(exit.rodata) 286 MEM_DISCARD(exit.rodata)
285 287
286 #define EXIT_TEXT \ 288 #define EXIT_TEXT \
287 *(.exit.text) \ 289 *(.exit.text) \
288 DEV_DISCARD(exit.text) \ 290 DEV_DISCARD(exit.text) \
289 CPU_DISCARD(exit.text) \ 291 CPU_DISCARD(exit.text) \
290 MEM_DISCARD(exit.text) 292 MEM_DISCARD(exit.text)
291 293
292 /* DWARF debug sections. 294 /* DWARF debug sections.
293 Symbols in the DWARF debugging sections are relative to 295 Symbols in the DWARF debugging sections are relative to
294 the beginning of the section so we begin them at 0. */ 296 the beginning of the section so we begin them at 0. */
295 #define DWARF_DEBUG \ 297 #define DWARF_DEBUG \
296 /* DWARF 1 */ \ 298 /* DWARF 1 */ \
297 .debug 0 : { *(.debug) } \ 299 .debug 0 : { *(.debug) } \
298 .line 0 : { *(.line) } \ 300 .line 0 : { *(.line) } \
299 /* GNU DWARF 1 extensions */ \ 301 /* GNU DWARF 1 extensions */ \
300 .debug_srcinfo 0 : { *(.debug_srcinfo) } \ 302 .debug_srcinfo 0 : { *(.debug_srcinfo) } \
301 .debug_sfnames 0 : { *(.debug_sfnames) } \ 303 .debug_sfnames 0 : { *(.debug_sfnames) } \
302 /* DWARF 1.1 and DWARF 2 */ \ 304 /* DWARF 1.1 and DWARF 2 */ \
303 .debug_aranges 0 : { *(.debug_aranges) } \ 305 .debug_aranges 0 : { *(.debug_aranges) } \
304 .debug_pubnames 0 : { *(.debug_pubnames) } \ 306 .debug_pubnames 0 : { *(.debug_pubnames) } \
305 /* DWARF 2 */ \ 307 /* DWARF 2 */ \
306 .debug_info 0 : { *(.debug_info \ 308 .debug_info 0 : { *(.debug_info \
307 .gnu.linkonce.wi.*) } \ 309 .gnu.linkonce.wi.*) } \
308 .debug_abbrev 0 : { *(.debug_abbrev) } \ 310 .debug_abbrev 0 : { *(.debug_abbrev) } \
309 .debug_line 0 : { *(.debug_line) } \ 311 .debug_line 0 : { *(.debug_line) } \
310 .debug_frame 0 : { *(.debug_frame) } \ 312 .debug_frame 0 : { *(.debug_frame) } \
311 .debug_str 0 : { *(.debug_str) } \ 313 .debug_str 0 : { *(.debug_str) } \
312 .debug_loc 0 : { *(.debug_loc) } \ 314 .debug_loc 0 : { *(.debug_loc) } \
313 .debug_macinfo 0 : { *(.debug_macinfo) } \ 315 .debug_macinfo 0 : { *(.debug_macinfo) } \
314 /* SGI/MIPS DWARF 2 extensions */ \ 316 /* SGI/MIPS DWARF 2 extensions */ \
315 .debug_weaknames 0 : { *(.debug_weaknames) } \ 317 .debug_weaknames 0 : { *(.debug_weaknames) } \
316 .debug_funcnames 0 : { *(.debug_funcnames) } \ 318 .debug_funcnames 0 : { *(.debug_funcnames) } \
317 .debug_typenames 0 : { *(.debug_typenames) } \ 319 .debug_typenames 0 : { *(.debug_typenames) } \
318 .debug_varnames 0 : { *(.debug_varnames) } \ 320 .debug_varnames 0 : { *(.debug_varnames) } \
319 321
320 /* Stabs debugging sections. */ 322 /* Stabs debugging sections. */
321 #define STABS_DEBUG \ 323 #define STABS_DEBUG \
322 .stab 0 : { *(.stab) } \ 324 .stab 0 : { *(.stab) } \
323 .stabstr 0 : { *(.stabstr) } \ 325 .stabstr 0 : { *(.stabstr) } \
324 .stab.excl 0 : { *(.stab.excl) } \ 326 .stab.excl 0 : { *(.stab.excl) } \
325 .stab.exclstr 0 : { *(.stab.exclstr) } \ 327 .stab.exclstr 0 : { *(.stab.exclstr) } \
326 .stab.index 0 : { *(.stab.index) } \ 328 .stab.index 0 : { *(.stab.index) } \
327 .stab.indexstr 0 : { *(.stab.indexstr) } \ 329 .stab.indexstr 0 : { *(.stab.indexstr) } \
328 .comment 0 : { *(.comment) } 330 .comment 0 : { *(.comment) }
329 331
330 #ifdef CONFIG_GENERIC_BUG 332 #ifdef CONFIG_GENERIC_BUG
331 #define BUG_TABLE \ 333 #define BUG_TABLE \
332 . = ALIGN(8); \ 334 . = ALIGN(8); \
333 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \ 335 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
334 __start___bug_table = .; \ 336 __start___bug_table = .; \
335 *(__bug_table) \ 337 *(__bug_table) \
336 __stop___bug_table = .; \ 338 __stop___bug_table = .; \
337 } 339 }
338 #else 340 #else
339 #define BUG_TABLE 341 #define BUG_TABLE
340 #endif 342 #endif
341 343
342 #ifdef CONFIG_PM_TRACE 344 #ifdef CONFIG_PM_TRACE
343 #define TRACEDATA \ 345 #define TRACEDATA \
344 . = ALIGN(4); \ 346 . = ALIGN(4); \
345 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \ 347 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
346 __tracedata_start = .; \ 348 __tracedata_start = .; \
347 *(.tracedata) \ 349 *(.tracedata) \
348 __tracedata_end = .; \ 350 __tracedata_end = .; \
349 } 351 }
350 #else 352 #else
351 #define TRACEDATA 353 #define TRACEDATA
352 #endif 354 #endif
353 355
354 #define NOTES \ 356 #define NOTES \
355 .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ 357 .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
356 VMLINUX_SYMBOL(__start_notes) = .; \ 358 VMLINUX_SYMBOL(__start_notes) = .; \
357 *(.note.*) \ 359 *(.note.*) \
358 VMLINUX_SYMBOL(__stop_notes) = .; \ 360 VMLINUX_SYMBOL(__stop_notes) = .; \
359 } 361 }
360 362
361 #define INITCALLS \ 363 #define INITCALLS \
362 *(.initcall0.init) \ 364 *(.initcall0.init) \
363 *(.initcall0s.init) \ 365 *(.initcall0s.init) \
364 *(.initcall1.init) \ 366 *(.initcall1.init) \
365 *(.initcall1s.init) \ 367 *(.initcall1s.init) \
366 *(.initcall2.init) \ 368 *(.initcall2.init) \
367 *(.initcall2s.init) \ 369 *(.initcall2s.init) \
368 *(.initcall3.init) \ 370 *(.initcall3.init) \
369 *(.initcall3s.init) \ 371 *(.initcall3s.init) \
370 *(.initcall4.init) \ 372 *(.initcall4.init) \
371 *(.initcall4s.init) \ 373 *(.initcall4s.init) \
372 *(.initcall5.init) \ 374 *(.initcall5.init) \
373 *(.initcall5s.init) \ 375 *(.initcall5s.init) \
374 *(.initcallrootfs.init) \ 376 *(.initcallrootfs.init) \
375 *(.initcall6.init) \ 377 *(.initcall6.init) \
376 *(.initcall6s.init) \ 378 *(.initcall6s.init) \
377 *(.initcall7.init) \ 379 *(.initcall7.init) \
378 *(.initcall7s.init) 380 *(.initcall7s.init)
379 381
380 #define PERCPU(align) \ 382 #define PERCPU(align) \
381 . = ALIGN(align); \ 383 . = ALIGN(align); \
382 __per_cpu_start = .; \ 384 __per_cpu_start = .; \
383 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \ 385 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
384 *(.data.percpu) \ 386 *(.data.percpu) \
385 *(.data.percpu.shared_aligned) \ 387 *(.data.percpu.shared_aligned) \
386 } \ 388 } \
387 __per_cpu_end = .; 389 __per_cpu_end = .;
388 390