nao-ulib.git: kernel/2.6.29.6-aldebaran-rt/include/asm-generic/vmlinux.lds.h
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif
#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif
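/*
 * Illustrative sketch (not part of this header): an architecture whose
 * toolchain prefixes C symbols, or which loads the kernel at an offset,
 * can override these defaults in its own vmlinux.lds.S before including
 * this file, roughly:
 *
 *      #define VMLINUX_SYMBOL(_sym_)   _##_sym_
 *      #define LOAD_OFFSET             PAGE_OFFSET
 *      #include <asm-generic/vmlinux.lds.h>
 */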
/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)
/* The actual configuration determines if the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
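/*
 * Illustrative expansion (spelled out here for clarity, derived from the
 * macros above): with CONFIG_HOTPLUG set, DEV_KEEP(init.text) expands to
 * the input-section pattern *(.devinit.text), so code placed there by the
 * __devinit attribute is kept in the image; without CONFIG_HOTPLUG the
 * same pattern comes from DEV_DISCARD(init.text) instead, so the arch
 * script can route it to a discardable region. CPU_* and MEM_* follow the
 * same pattern for CPU and memory hotplug.
 */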
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()    VMLINUX_SYMBOL(__start_mcount_loc) = .; \
                        *(__mcount_loc) \
                        VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
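/*
 * The paired start/stop symbols emitted by these macros are consumed
 * from C as array bounds; a minimal sketch (symbol names as defined
 * here, record_mcount_callsite() is a hypothetical helper):
 *
 *      extern unsigned long __start_mcount_loc[];
 *      extern unsigned long __stop_mcount_loc[];
 *
 *      unsigned long *p;
 *      for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *              record_mcount_callsite(*p);
 */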
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
                         *(_ftrace_annotated_branch) \
                         VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
                         *(_ftrace_branch) \
                         VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif
#ifdef CONFIG_EVENT_TRACER
#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
                        *(_ftrace_events) \
                        VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
                        *(__trace_printk_fmt) /* trace_printk fmt pointers */ \
                        VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
                         *(__syscalls_metadata) \
                         VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif
/* .data section */
#define DATA_DATA \
        *(.data) \
        *(.data.init.refok) \
        *(.ref.data) \
        DEV_KEEP(init.data) \
        DEV_KEEP(exit.data) \
        CPU_KEEP(init.data) \
        CPU_KEEP(exit.data) \
        MEM_KEEP(init.data) \
        MEM_KEEP(exit.data) \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__start___markers) = .; \
        *(__markers) \
        VMLINUX_SYMBOL(__stop___markers) = .; \
        . = ALIGN(32); \
        VMLINUX_SYMBOL(__start___tracepoints) = .; \
        *(__tracepoints) \
        VMLINUX_SYMBOL(__stop___tracepoints) = .; \
        LIKELY_PROFILE() \
        BRANCH_PROFILE() \
        TRACE_PRINTKS() \
        FTRACE_EVENTS() \
        TRACE_SYSCALLS()
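/*
 * Usage sketch (illustrative; the exact layout is arch-specific): an
 * architecture's vmlinux.lds.S typically wraps DATA_DATA inside its own
 * .data output section, roughly:
 *
 *      .data : AT(ADDR(.data) - LOAD_OFFSET) {
 *              DATA_DATA
 *              CONSTRUCTORS
 *      }
 */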
#define RO_DATA(align) \
        . = ALIGN((align)); \
        .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_rodata) = .; \
                *(.rodata) *(.rodata.*) \
                *(__vermagic)           /* Kernel version magic */ \
                *(__markers_strings)    /* Markers: strings */ \
                *(__tracepoints_strings)/* Tracepoints: strings */ \
        } \
        \
        .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
                *(.rodata1) \
        } \
        \
        BUG_TABLE \
        \
        /* PCI quirks */ \
        .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
                *(.pci_fixup_early) \
                VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
                *(.pci_fixup_header) \
                VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
                *(.pci_fixup_final) \
                VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
                *(.pci_fixup_enable) \
                VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
                *(.pci_fixup_resume) \
                VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
                *(.pci_fixup_resume_early) \
                VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
                *(.pci_fixup_suspend) \
                VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
        } \
        \
        /* Built-in firmware blobs */ \
        .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_builtin_fw) = .; \
                *(.builtin_fw) \
                VMLINUX_SYMBOL(__end_builtin_fw) = .; \
        } \
        \
        /* RapidIO route ops */ \
        .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_rio_route_ops) = .; \
                *(.rio_route_ops) \
                VMLINUX_SYMBOL(__end_rio_route_ops) = .; \
        } \
        \
        TRACEDATA \
        \
        /* Kernel symbol table: Normal symbols */ \
        __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab) = .; \
                *(__ksymtab) \
                VMLINUX_SYMBOL(__stop___ksymtab) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only symbols */ \
        __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
                *(__ksymtab_gpl) \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
        } \
        \
        /* Kernel symbol table: Normal unused symbols */ \
        __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
                *(__ksymtab_unused) \
                VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only unused symbols */ \
        __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
                *(__ksymtab_unused_gpl) \
                VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
        } \
        \
        /* Kernel symbol table: GPL-future-only symbols */ \
        __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
                *(__ksymtab_gpl_future) \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
        } \
        \
        /* Kernel symbol table: Normal symbols */ \
        __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab) = .; \
                *(__kcrctab) \
                VMLINUX_SYMBOL(__stop___kcrctab) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only symbols */ \
        __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
                *(__kcrctab_gpl) \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
        } \
        \
        /* Kernel symbol table: Normal unused symbols */ \
        __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
                *(__kcrctab_unused) \
                VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only unused symbols */ \
        __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
                *(__kcrctab_unused_gpl) \
                VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
        } \
        \
        /* Kernel symbol table: GPL-future-only symbols */ \
        __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
                *(__kcrctab_gpl_future) \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
        } \
        \
        /* Kernel symbol table: strings */ \
        __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
                *(__ksymtab_strings) \
        } \
        \
        /* __*init sections */ \
        __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
                *(.ref.rodata) \
                MCOUNT_REC() \
                DEV_KEEP(init.rodata) \
                DEV_KEEP(exit.rodata) \
                CPU_KEEP(init.rodata) \
                CPU_KEEP(exit.rodata) \
                MEM_KEEP(init.rodata) \
                MEM_KEEP(exit.rodata) \
        } \
        \
        /* Built-in module parameters. */ \
        __param : AT(ADDR(__param) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___param) = .; \
                *(__param) \
                VMLINUX_SYMBOL(__stop___param) = .; \
                . = ALIGN((align)); \
                VMLINUX_SYMBOL(__end_rodata) = .; \
        } \
        \
        . = ALIGN((align));
/* RODATA is provided for backward compatibility.
 * All archs are supposed to use RO_DATA(). */
#define RODATA RO_DATA(4096)
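/*
 * Usage sketch (illustrative): arch linker scripts invoke this as e.g.
 *
 *      RO_DATA(PAGE_SIZE)
 *
 * so that read-only data begins and ends on a page boundary and can be
 * write-protected as a whole.
 */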
#define SECURITY_INIT \
        .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__security_initcall_start) = .; \
                *(.security_initcall.init) \
                VMLINUX_SYMBOL(__security_initcall_end) = .; \
        }
/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT \
        ALIGN_FUNCTION(); \
        *(.text.hot) \
        *(.text) \
        *(.ref.text) \
        *(.text.init.refok) \
        *(.exit.text.refok) \
        DEV_KEEP(init.text) \
        DEV_KEEP(exit.text) \
        CPU_KEEP(init.text) \
        CPU_KEEP(exit.text) \
        MEM_KEEP(init.text) \
        MEM_KEEP(exit.text) \
        *(.text.unlikely)
/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map. */
#define SCHED_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__sched_text_start) = .; \
        *(.sched.text) \
        VMLINUX_SYMBOL(__sched_text_end) = .;
/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map. */
#define LOCK_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__lock_text_start) = .; \
        *(.spinlock.text) \
        VMLINUX_SYMBOL(__lock_text_end) = .;
#define KPROBES_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__kprobes_text_start) = .; \
        *(.kprobes.text) \
        VMLINUX_SYMBOL(__kprobes_text_end) = .;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__irqentry_text_start) = .; \
        *(.irqentry.text) \
        VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif
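/*
 * Usage sketch (illustrative; ordering and contents vary by arch): these
 * helpers are composed inside the arch's .text output section, roughly:
 *
 *      .text : AT(ADDR(.text) - LOAD_OFFSET) {
 *              TEXT_TEXT
 *              SCHED_TEXT
 *              LOCK_TEXT
 *              KPROBES_TEXT
 *              IRQENTRY_TEXT
 *      }
 */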
/* Section used for early init (in .S files) */
#define HEAD_TEXT *(.head.text)
/* init and exit section handling */
#define INIT_DATA \
        *(.init.data) \
        DEV_DISCARD(init.data) \
        DEV_DISCARD(init.rodata) \
        CPU_DISCARD(init.data) \
        CPU_DISCARD(init.rodata) \
        MEM_DISCARD(init.data) \
        MEM_DISCARD(init.rodata) \
        /* implement dynamic printk debug */ \
        VMLINUX_SYMBOL(__start___verbose_strings) = .; \
        *(__verbose_strings) \
        VMLINUX_SYMBOL(__stop___verbose_strings) = .; \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__start___verbose) = .; \
        *(__verbose) \
        VMLINUX_SYMBOL(__stop___verbose) = .;
#define INIT_TEXT \
        *(.init.text) \
        DEV_DISCARD(init.text) \
        CPU_DISCARD(init.text) \
        MEM_DISCARD(init.text)
#define EXIT_DATA \
        *(.exit.data) \
        DEV_DISCARD(exit.data) \
        DEV_DISCARD(exit.rodata) \
        CPU_DISCARD(exit.data) \
        CPU_DISCARD(exit.rodata) \
        MEM_DISCARD(exit.data) \
        MEM_DISCARD(exit.rodata)
#define EXIT_TEXT \
        *(.exit.text) \
        DEV_DISCARD(exit.text) \
        CPU_DISCARD(exit.text) \
        MEM_DISCARD(exit.text)
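/*
 * For context, a simplified sketch of definitions that live elsewhere
 * (include/linux/init.h; the real macros carry extra attributes): code
 * lands in the sections gathered above via the __init/__exit family,
 * roughly:
 *
 *      #define __init  __attribute__((__section__(".init.text")))
 *      #define __exit  __attribute__((__section__(".exit.text")))
 */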
/* DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section, so we begin them at 0. */
#define DWARF_DEBUG \
        /* DWARF 1 */ \
        .debug          0 : { *(.debug) } \
        .line           0 : { *(.line) } \
        /* GNU DWARF 1 extensions */ \
        .debug_srcinfo  0 : { *(.debug_srcinfo) } \
        .debug_sfnames  0 : { *(.debug_sfnames) } \
        /* DWARF 1.1 and DWARF 2 */ \
        .debug_aranges  0 : { *(.debug_aranges) } \
        .debug_pubnames 0 : { *(.debug_pubnames) } \
        /* DWARF 2 */ \
        .debug_info     0 : { *(.debug_info \
                                .gnu.linkonce.wi.*) } \
        .debug_abbrev   0 : { *(.debug_abbrev) } \
        .debug_line     0 : { *(.debug_line) } \
        .debug_frame    0 : { *(.debug_frame) } \
        .debug_str      0 : { *(.debug_str) } \
        .debug_loc      0 : { *(.debug_loc) } \
        .debug_macinfo  0 : { *(.debug_macinfo) } \
        /* SGI/MIPS DWARF 2 extensions */ \
        .debug_weaknames 0 : { *(.debug_weaknames) } \
        .debug_funcnames 0 : { *(.debug_funcnames) } \
        .debug_typenames 0 : { *(.debug_typenames) } \
        .debug_varnames  0 : { *(.debug_varnames) }
/* Stabs debugging sections. */
#define STABS_DEBUG \
        .stab           0 : { *(.stab) } \
        .stabstr        0 : { *(.stabstr) } \
        .stab.excl      0 : { *(.stab.excl) } \
        .stab.exclstr   0 : { *(.stab.exclstr) } \
        .stab.index     0 : { *(.stab.index) } \
        .stab.indexstr  0 : { *(.stab.indexstr) } \
        .comment        0 : { *(.comment) }
#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
        . = ALIGN(8); \
        __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___bug_table) = .; \
                *(__bug_table) \
                VMLINUX_SYMBOL(__stop___bug_table) = .; \
        }
#else
#define BUG_TABLE
#endif
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
        . = ALIGN(4); \
        .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__tracedata_start) = .; \
                *(.tracedata) \
                VMLINUX_SYMBOL(__tracedata_end) = .; \
        }
#else
#define TRACEDATA
#endif
#define NOTES \
        .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_notes) = .; \
                *(.note.*) \
                VMLINUX_SYMBOL(__stop_notes) = .; \
        }
#define INITCALLS \
        *(.initcallearly.init) \
        VMLINUX_SYMBOL(__early_initcall_end) = .; \
        *(.initcall0.init) \
        *(.initcall0s.init) \
        *(.initcall1.init) \
        *(.initcall1s.init) \
        *(.initcall2.init) \
        *(.initcall2s.init) \
        *(.initcall3.init) \
        *(.initcall3s.init) \
        *(.initcall4.init) \
        *(.initcall4s.init) \
        *(.initcall5.init) \
        *(.initcall5s.init) \
        *(.initcallrootfs.init) \
        *(.initcall6.init) \
        *(.initcall6s.init) \
        *(.initcall7.init) \
        *(.initcall7s.init)
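/*
 * Usage sketch (illustrative): arch scripts emit the ordered initcall
 * levels between a start/end marker pair, roughly:
 *
 *      .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
 *              VMLINUX_SYMBOL(__initcall_start) = .;
 *              INITCALLS
 *              VMLINUX_SYMBOL(__initcall_end) = .;
 *      }
 *
 * At boot, do_initcalls() walks the function pointers from
 * __initcall_start to __initcall_end in level order.
 */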
/**
 * PERCPU_VADDR - define output section for percpu area
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area. If @vaddr
 * is not blank, it specifies explicit base address and all percpu
 * symbols will be offset from the given address. If blank, @vaddr
 * always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().
 */
#define PERCPU_VADDR(vaddr, phdr) \
        VMLINUX_SYMBOL(__per_cpu_load) = .; \
        .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
                                - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__per_cpu_start) = .; \
                *(.data.percpu.first) \
                *(.data.percpu.page_aligned) \
                *(.data.percpu) \
                *(.data.percpu.shared_aligned) \
                VMLINUX_SYMBOL(__per_cpu_end) = .; \
        } phdr \
        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
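/*
 * Usage sketch (illustrative, based on x86_64 SMP kernels of this era):
 * the percpu area can be placed at virtual address 0 in a dedicated
 * program header, roughly:
 *
 *      PERCPU_VADDR(0, :percpu)
 */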
/**
 * PERCPU - define output section for percpu area, simple version
 * @align: required alignment
 *
 * Aligns to @align and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 * that __per_cpu_load is defined as a relative symbol against
 * .data.percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU(align) \
        . = ALIGN(align); \
        .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__per_cpu_load) = .; \
                VMLINUX_SYMBOL(__per_cpu_start) = .; \
                *(.data.percpu.first) \
                *(.data.percpu.page_aligned) \
                *(.data.percpu) \
                *(.data.percpu.shared_aligned) \
                VMLINUX_SYMBOL(__per_cpu_end) = .; \
        }
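/*
 * Usage sketch (illustrative): most architectures use the simple form,
 * e.g.
 *
 *      PERCPU(PAGE_SIZE)
 *
 * which keeps __per_cpu_load relative to .data.percpu and needs no
 * explicit PHDR.
 */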