/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */
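/*
 * Illustrative sketch (not part of this header): C code normally reaches
 * these layout constants through extern array declarations, in the style
 * of include/asm-generic/sections.h. The helper name below is made up
 * for illustration:
 *
 *	extern char _stext[], _etext[];
 *
 *	static int addr_is_kernel_text_example(unsigned long addr)
 *	{
 *		return addr >= (unsigned long)_stext &&
 *		       addr <  (unsigned long)_etext;
 *	}
 */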
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)
/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
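/*
 * Expansion example (illustrative): with CONFIG_MEMORY_HOTPLUG=y,
 * MEM_KEEP(init.data) token-pastes to the input-section match
 *
 *	*(.meminit.data)
 *
 * while MEM_DISCARD(init.data) expands to nothing; with the option
 * disabled the two expansions swap, so the material can be discarded.
 */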
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8); \
			VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc) \
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
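/*
 * Sketch (assumption, mirroring kernel/trace/ftrace.c): a begin/end
 * symbol pair like the one above is consumed from C as two extern
 * arrays whose difference gives the number of recorded entries:
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	count = __stop_mcount_loc - __start_mcount_loc;
 */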
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch) \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .; \
				*(_ftrace_branch) \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8); \
			VMLINUX_SYMBOL(__start_ftrace_events) = .; \
			*(_ftrace_events) \
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			*(__trace_printk_fmt) /* trace_printk() format pointers */ \
			VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
			*(__tracepoint_str) /* tracepoint string pointers */ \
			VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
			 *(__syscalls_metadata) \
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif
#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(config_enabled(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name) \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__##name##_of_table) = .; \
	*(__##name##_of_table) \
	*(__##name##_of_table_end)

#define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define EARLYCON_OF_TABLES()	OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
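/*
 * Expansion example (illustrative): config_enabled(CONFIG_IRQCHIP)
 * evaluates to 1 or 0 at preprocessing time, so
 *
 *	IRQCHIP_OF_MATCH_TABLE()
 *
 * becomes _OF_TABLE_1(irqchip) when CONFIG_IRQCHIP is set, emitting the
 * aligned __irqchip_of_table input sections bracketed by the
 * __irqchip_of_table symbol; otherwise it expands to nothing via
 * _OF_TABLE_0(irqchip).
 */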
#define KERNEL_DTB() \
	STRUCT_ALIGN(); \
	VMLINUX_SYMBOL(__dtb_start) = .; \
	*(.dtb.init.rodata) \
	VMLINUX_SYMBOL(__dtb_end) = .;
/* .data section */
#define DATA_DATA \
	*(.data) \
	*(.ref.data) \
	*(.data..shared_aligned) /* percpu related */ \
	MEM_KEEP(init.data) \
	MEM_KEEP(exit.data) \
	*(.data.unlikely) \
	STRUCT_ALIGN(); \
	*(__tracepoints) \
	/* implement dynamic printk debug */ \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__start___jump_table) = .; \
	*(__jump_table) \
	VMLINUX_SYMBOL(__stop___jump_table) = .; \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__start___verbose) = .; \
	*(__verbose) \
	VMLINUX_SYMBOL(__stop___verbose) = .; \
	LIKELY_PROFILE() \
	BRANCH_PROFILE() \
	TRACE_PRINTKS() \
	TRACEPOINT_STR()
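/*
 * Sketch (assumption, mirroring lib/dynamic_debug.c): the __verbose
 * bounds above delimit an array of struct _ddebug descriptors that
 * dynamic debug walks at init time:
 *
 *	extern struct _ddebug __start___verbose[];
 *	extern struct _ddebug __stop___verbose[];
 *
 *	for (iter = __start___verbose; iter < __stop___verbose; iter++)
 *		...register the descriptor...
 */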
/*
 * Data section helpers
 */
#define NOSAVE_DATA \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__nosave_begin) = .; \
	*(.data..nosave) \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align) \
	. = ALIGN(page_align); \
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align) \
	. = ALIGN(align); \
	*(.data..read_mostly) \
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align) \
	. = ALIGN(align); \
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align) \
	. = ALIGN(align); \
	*(.data..init_task)
/*
 * Read only Data
 */
#define RO_DATA_SECTION(align) \
	. = ALIGN((align)); \
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rodata) = .; \
		*(.rodata) *(.rodata.*) \
		*(__vermagic)		/* Kernel version magic */ \
		. = ALIGN(8); \
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
		*(__tracepoints_ptrs)	/* Tracepoints: pointer array */ \
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
		*(__tracepoints_strings)/* Tracepoints: strings */ \
	} \
	\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
		*(.rodata1) \
	} \
	\
	BUG_TABLE \
	\
	/* PCI quirks */ \
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
		*(.pci_fixup_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
		*(.pci_fixup_header) \
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
		*(.pci_fixup_final) \
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
		*(.pci_fixup_enable) \
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
		*(.pci_fixup_resume) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		*(.pci_fixup_resume_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
		*(.pci_fixup_suspend) \
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
	} \
	\
	/* Built-in firmware blobs */ \
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_builtin_fw) = .; \
		*(.builtin_fw) \
		VMLINUX_SYMBOL(__end_builtin_fw) = .; \
	} \
	\
	TRACEDATA \
	\
	/* Kernel symbol table: Normal symbols */ \
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab) = .; \
		*(SORT(___ksymtab+*)) \
		VMLINUX_SYMBOL(__stop___ksymtab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
		*(SORT(___ksymtab_gpl+*)) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
		*(SORT(___ksymtab_unused+*)) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		*(SORT(___ksymtab_unused_gpl+*)) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		*(SORT(___ksymtab_gpl_future+*)) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: Normal symbols */ \
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab) = .; \
		*(SORT(___kcrctab+*)) \
		VMLINUX_SYMBOL(__stop___kcrctab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
		*(SORT(___kcrctab_gpl+*)) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
		*(SORT(___kcrctab_unused+*)) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		*(SORT(___kcrctab_unused_gpl+*)) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		*(SORT(___kcrctab_gpl_future+*)) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: strings */ \
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings) \
	} \
	\
	/* __*init sections */ \
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
		*(.ref.rodata) \
		MEM_KEEP(init.rodata) \
		MEM_KEEP(exit.rodata) \
	} \
	\
	/* Built-in module parameters. */ \
	__param : AT(ADDR(__param) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___param) = .; \
		*(__param) \
		VMLINUX_SYMBOL(__stop___param) = .; \
	} \
	\
	/* Built-in module versions. */ \
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___modver) = .; \
		*(__modver) \
		VMLINUX_SYMBOL(__stop___modver) = .; \
		. = ALIGN((align)); \
		VMLINUX_SYMBOL(__end_rodata) = .; \
	} \
	\
	. = ALIGN((align));
/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA		RO_DATA_SECTION(4096)
#define RO_DATA(align)	RO_DATA_SECTION(align)
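/*
 * Sketch (assumption, simplified from include/linux/export.h of this
 * era): each EXPORT_SYMBOL(foo) emits a struct kernel_symbol into an
 * input section named ___ksymtab+foo, roughly:
 *
 *	static const struct kernel_symbol __ksymtab_foo
 *	__attribute__((section("___ksymtab" "+" "foo"))) =
 *		{ (unsigned long)&foo, "foo" };
 *
 * The SORT(___ksymtab+*) pattern above therefore collects the entries
 * in name order, so the module loader can binary-search the table
 * between __start___ksymtab and __stop___ksymtab.
 */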
#define SECURITY_INIT \
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .; \
		*(.security_initcall.init) \
		VMLINUX_SYMBOL(__security_initcall_end) = .; \
	}
/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
#define TEXT_TEXT \
	ALIGN_FUNCTION(); \
	*(.text.hot) \
	*(.text) \
	*(.ref.text) \
	MEM_KEEP(init.text) \
	MEM_KEEP(exit.text) \
	*(.text.unlikely)
/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define SCHED_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__sched_text_start) = .; \
	*(.sched.text) \
	VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define LOCK_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__lock_text_start) = .; \
	*(.spinlock.text) \
	VMLINUX_SYMBOL(__lock_text_end) = .;
#define KPROBES_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__kprobes_text_start) = .; \
	*(.kprobes.text) \
	VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__entry_text_start) = .; \
	*(.entry.text) \
	VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__irqentry_text_start) = .; \
	*(.irqentry.text) \
	VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif
/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION \
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
		HEAD_TEXT \
	}
/*
 * Exception table
 */
#define EXCEPTION_TABLE(align) \
	. = ALIGN(align); \
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ex_table) = .; \
		*(__ex_table) \
		VMLINUX_SYMBOL(__stop___ex_table) = .; \
	}
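/*
 * Sketch (assumption; the entry layout is arch-specific, cf.
 * lib/extable.c): fault fixup code searches the sorted table between
 * the two bounds above for the faulting instruction address:
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 *	e = search_extable(__start___ex_table,
 *			   __stop___ex_table - 1, addr);
 */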
/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align) \
	. = ALIGN(align); \
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
		INIT_TASK_DATA(align) \
	}
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8); \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			*(.ctors) \
			*(.init_array) \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif
/* init and exit section handling */
#define INIT_DATA \
	*(.init.data) \
	MEM_DISCARD(init.data) \
	KERNEL_CTORS() \
	MCOUNT_REC() \
	*(.init.rodata) \
	FTRACE_EVENTS() \
	TRACE_SYSCALLS() \
	MEM_DISCARD(init.rodata) \
	CLK_OF_TABLES() \
	RESERVEDMEM_OF_TABLES() \
	CLKSRC_OF_TABLES() \
	CPU_METHOD_OF_TABLES() \
	KERNEL_DTB() \
	IRQCHIP_OF_MATCH_TABLE() \
	EARLYCON_OF_TABLES()

#define INIT_TEXT \
	*(.init.text) \
	MEM_DISCARD(init.text)

#define EXIT_DATA \
	*(.exit.data) \
	MEM_DISCARD(exit.data) \
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT \
	*(.exit.text) \
	MEM_DISCARD(exit.text)

#define EXIT_CALL \
	*(.exitcall.exit)
/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align) \
	. = ALIGN(sbss_align); \
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
		*(.sbss) \
		*(.scommon) \
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif
#define BSS(bss_align) \
	. = ALIGN(bss_align); \
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
		BSS_FIRST_SECTIONS \
		*(.bss..page_aligned) \
		*(.dynbss) \
		*(.bss) \
		*(COMMON) \
	}
/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG \
	/* DWARF 1 */ \
	.debug          0 : { *(.debug) } \
	.line           0 : { *(.line) } \
	/* GNU DWARF 1 extensions */ \
	.debug_srcinfo  0 : { *(.debug_srcinfo) } \
	.debug_sfnames  0 : { *(.debug_sfnames) } \
	/* DWARF 1.1 and DWARF 2 */ \
	.debug_aranges  0 : { *(.debug_aranges) } \
	.debug_pubnames 0 : { *(.debug_pubnames) } \
	/* DWARF 2 */ \
	.debug_info     0 : { *(.debug_info \
			.gnu.linkonce.wi.*) } \
	.debug_abbrev   0 : { *(.debug_abbrev) } \
	.debug_line     0 : { *(.debug_line) } \
	.debug_frame    0 : { *(.debug_frame) } \
	.debug_str      0 : { *(.debug_str) } \
	.debug_loc      0 : { *(.debug_loc) } \
	.debug_macinfo  0 : { *(.debug_macinfo) } \
	/* SGI/MIPS DWARF 2 extensions */ \
	.debug_weaknames 0 : { *(.debug_weaknames) } \
	.debug_funcnames 0 : { *(.debug_funcnames) } \
	.debug_typenames 0 : { *(.debug_typenames) } \
	.debug_varnames  0 : { *(.debug_varnames) }
/* Stabs debugging sections. */
#define STABS_DEBUG \
	.stab 0 : { *(.stab) } \
	.stabstr 0 : { *(.stabstr) } \
	.stab.excl 0 : { *(.stab.excl) } \
	.stab.exclstr 0 : { *(.stab.exclstr) } \
	.stab.index 0 : { *(.stab.index) } \
	.stab.indexstr 0 : { *(.stab.indexstr) } \
	.comment 0 : { *(.comment) }
#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
	. = ALIGN(8); \
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___bug_table) = .; \
		*(__bug_table) \
		VMLINUX_SYMBOL(__stop___bug_table) = .; \
	}
#else
#define BUG_TABLE
#endif
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
	. = ALIGN(4); \
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__tracedata_start) = .; \
		*(.tracedata) \
		VMLINUX_SYMBOL(__tracedata_end) = .; \
	}
#else
#define TRACEDATA
#endif
#define NOTES \
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_notes) = .; \
		*(.note.*) \
		VMLINUX_SYMBOL(__stop_notes) = .; \
	}
#define INIT_SETUP(initsetup_align) \
	. = ALIGN(initsetup_align); \
	VMLINUX_SYMBOL(__setup_start) = .; \
	*(.init.setup) \
	VMLINUX_SYMBOL(__setup_end) = .;
#define INIT_CALLS_LEVEL(level) \
	VMLINUX_SYMBOL(__initcall##level##_start) = .; \
	*(.initcall##level##.init) \
	*(.initcall##level##s.init)

#define INIT_CALLS \
	VMLINUX_SYMBOL(__initcall_start) = .; \
	*(.initcallearly.init) \
	INIT_CALLS_LEVEL(0) \
	INIT_CALLS_LEVEL(1) \
	INIT_CALLS_LEVEL(2) \
	INIT_CALLS_LEVEL(3) \
	INIT_CALLS_LEVEL(4) \
	INIT_CALLS_LEVEL(5) \
	INIT_CALLS_LEVEL(rootfs) \
	INIT_CALLS_LEVEL(6) \
	INIT_CALLS_LEVEL(7) \
	VMLINUX_SYMBOL(__initcall_end) = .;
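/*
 * Sketch (assumption, simplified from init/main.c, which actually walks
 * the per-level __initcall<level>_start bounds): during boot the
 * collected initcalls are invoked in section order:
 *
 *	extern initcall_t __initcall_start[], __initcall_end[];
 *
 *	initcall_t *fn;
 *	for (fn = __initcall_start; fn < __initcall_end; fn++)
 *		(*fn)();
 */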
#define CON_INITCALL \
	VMLINUX_SYMBOL(__con_initcall_start) = .; \
	*(.con_initcall.init) \
	VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL \
	VMLINUX_SYMBOL(__security_initcall_start) = .; \
	*(.security_initcall.init) \
	VMLINUX_SYMBOL(__security_initcall_end) = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
	. = ALIGN(4); \
	VMLINUX_SYMBOL(__initramfs_start) = .; \
	*(.init.ramfs) \
	. = ALIGN(8); \
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif
/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS \
	/DISCARD/ : { \
	EXIT_TEXT \
	EXIT_DATA \
	EXIT_CALL \
	*(.discard) \
	*(.discard.*) \
	}
/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline) \
	VMLINUX_SYMBOL(__per_cpu_start) = .; \
	*(.data..percpu..first) \
	. = ALIGN(PAGE_SIZE); \
	*(.data..percpu..page_aligned) \
	. = ALIGN(cacheline); \
	*(.data..percpu..readmostly) \
	. = ALIGN(cacheline); \
	*(.data..percpu) \
	*(.data..percpu..shared_aligned) \
	VMLINUX_SYMBOL(__per_cpu_end) = .;
/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
	VMLINUX_SYMBOL(__per_cpu_load) = .; \
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
				- LOAD_OFFSET) { \
		PERCPU_INPUT(cacheline) \
	} phdr \
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
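/*
 * Usage sketch (illustrative; resembles historical x86_64 usage, check
 * the arch linker script for the authoritative form):
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * places the percpu area at virtual address 0 in a dedicated PHDR, so
 * percpu symbols become small offsets from the per-cpu base register.
 */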
/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu
 * area. This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configurations.
 */
#define PERCPU_SECTION(cacheline) \
	. = ALIGN(PAGE_SIZE); \
	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__per_cpu_load) = .; \
		PERCPU_INPUT(cacheline) \
	}
/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */

/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically (if not always) smaller than a PAGE_SIZE, so
 * the sections with this (or a similar) restriction are located before
 * the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * use 0 as page_align if page_aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
	. = ALIGN(PAGE_SIZE); \
	.data : AT(ADDR(.data) - LOAD_OFFSET) { \
		INIT_TASK_DATA(inittask) \
		NOSAVE_DATA \
		PAGE_ALIGNED_DATA(pagealigned) \
		CACHELINE_ALIGNED_DATA(cacheline) \
		READ_MOSTLY_DATA(cacheline) \
		DATA_DATA \
		CONSTRUCTORS \
	}
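/*
 * Usage sketch (illustrative; the actual arguments vary per
 * architecture):
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. cacheline-aligned data padded to L1_CACHE_BYTES, page-aligned
 * data to PAGE_SIZE, and the init task data to THREAD_SIZE.
 */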
#define INIT_TEXT_SECTION(inittext_align) \
	. = ALIGN(inittext_align); \
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(_sinittext) = .; \
		INIT_TEXT \
		VMLINUX_SYMBOL(_einittext) = .; \
	}
#define INIT_DATA_SECTION(initsetup_align) \
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
		INIT_DATA \
		INIT_SETUP(initsetup_align) \
		INIT_CALLS \
		CON_INITCALL \
		SECURITY_INITCALL \
		INIT_RAM_FS \
	}
#define BSS_SECTION(sbss_align, bss_align, stop_align) \
	. = ALIGN(sbss_align); \
	VMLINUX_SYMBOL(__bss_start) = .; \
	SBSS(sbss_align) \
	BSS(bss_align) \
	. = ALIGN(stop_align); \
	VMLINUX_SYMBOL(__bss_stop) = .;
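/*
 * Sketch (assumption, a common early-boot pattern): the bracketing
 * symbols let arch setup code zero bss before C code relies on it:
 *
 *	extern char __bss_start[], __bss_stop[];
 *
 *	memset(__bss_start, 0, __bss_stop - __bss_start);
 */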