/* 32-bit ELF support for ARM
   Copyright (C) 1998-2015 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */
#include "sysdep.h"
#include "bfd.h"
#include "bfd_stdint.h"
#include "libiberty.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf-vxworks.h"
#include "elf/arm.h"
/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_entry.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)

/* Return size of a relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_entry.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))

/* Return function to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_entry.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)

/* Return function to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_entry.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
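
/* For example, for a REL-style target (use_rel set) RELOC_SECTION (htab,
   ".text") expands to ".rel.text" and RELOC_SIZE (htab) to
   sizeof (Elf32_External_Rel); a RELA-style target gives ".rela.text" and
   sizeof (Elf32_External_Rela) instead.  */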

#define elf_info_to_howto		0
#define elf_info_to_howto_rel		elf32_arm_info_to_howto

#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF.  */
#define Pa(X)				((X) & 0xfffffffc)
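
/* For example, Pa (0x8006) is 0x8004: the low two bits of the place are
   cleared, giving 32-bit (word) alignment.  */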

static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);

/* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */

static reloc_howto_type elf32_arm_howto_table_1[] =
{
80 HOWTO (R_ARM_NONE
, /* type */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
84 FALSE
, /* pc_relative */
86 complain_overflow_dont
,/* complain_on_overflow */
87 bfd_elf_generic_reloc
, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE
, /* partial_inplace */
92 FALSE
), /* pcrel_offset */
94 HOWTO (R_ARM_PC24
, /* type */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
98 TRUE
, /* pc_relative */
100 complain_overflow_signed
,/* complain_on_overflow */
101 bfd_elf_generic_reloc
, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE
, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE
), /* pcrel_offset */
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32
, /* type */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
113 FALSE
, /* pc_relative */
115 complain_overflow_bitfield
,/* complain_on_overflow */
116 bfd_elf_generic_reloc
, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE
, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE
), /* pcrel_offset */
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32
, /* type */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
128 TRUE
, /* pc_relative */
130 complain_overflow_bitfield
,/* complain_on_overflow */
131 bfd_elf_generic_reloc
, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE
, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE
), /* pcrel_offset */
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0
, /* type */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
143 TRUE
, /* pc_relative */
145 complain_overflow_dont
,/* complain_on_overflow */
146 bfd_elf_generic_reloc
, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE
, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE
), /* pcrel_offset */
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16
, /* type */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
158 FALSE
, /* pc_relative */
160 complain_overflow_bitfield
,/* complain_on_overflow */
161 bfd_elf_generic_reloc
, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE
, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE
), /* pcrel_offset */
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12
, /* type */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
173 FALSE
, /* pc_relative */
175 complain_overflow_bitfield
,/* complain_on_overflow */
176 bfd_elf_generic_reloc
, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE
, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE
), /* pcrel_offset */
183 HOWTO (R_ARM_THM_ABS5
, /* type */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
187 FALSE
, /* pc_relative */
189 complain_overflow_bitfield
,/* complain_on_overflow */
190 bfd_elf_generic_reloc
, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE
, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE
), /* pcrel_offset */
198 HOWTO (R_ARM_ABS8
, /* type */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
202 FALSE
, /* pc_relative */
204 complain_overflow_bitfield
,/* complain_on_overflow */
205 bfd_elf_generic_reloc
, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE
, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE
), /* pcrel_offset */
212 HOWTO (R_ARM_SBREL32
, /* type */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
216 FALSE
, /* pc_relative */
218 complain_overflow_dont
,/* complain_on_overflow */
219 bfd_elf_generic_reloc
, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE
, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE
), /* pcrel_offset */
226 HOWTO (R_ARM_THM_CALL
, /* type */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
230 TRUE
, /* pc_relative */
232 complain_overflow_signed
,/* complain_on_overflow */
233 bfd_elf_generic_reloc
, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE
, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE
), /* pcrel_offset */
240 HOWTO (R_ARM_THM_PC8
, /* type */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
244 TRUE
, /* pc_relative */
246 complain_overflow_signed
,/* complain_on_overflow */
247 bfd_elf_generic_reloc
, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE
, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE
), /* pcrel_offset */
254 HOWTO (R_ARM_BREL_ADJ
, /* type */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
258 FALSE
, /* pc_relative */
260 complain_overflow_signed
,/* complain_on_overflow */
261 bfd_elf_generic_reloc
, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE
, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE
), /* pcrel_offset */
268 HOWTO (R_ARM_TLS_DESC
, /* type */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
272 FALSE
, /* pc_relative */
274 complain_overflow_bitfield
,/* complain_on_overflow */
275 bfd_elf_generic_reloc
, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE
, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE
), /* pcrel_offset */
282 HOWTO (R_ARM_THM_SWI8
, /* type */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
286 FALSE
, /* pc_relative */
288 complain_overflow_signed
,/* complain_on_overflow */
289 bfd_elf_generic_reloc
, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE
, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE
), /* pcrel_offset */
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25
, /* type */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
301 TRUE
, /* pc_relative */
303 complain_overflow_signed
,/* complain_on_overflow */
304 bfd_elf_generic_reloc
, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE
, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE
), /* pcrel_offset */
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22
, /* type */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
316 TRUE
, /* pc_relative */
318 complain_overflow_signed
,/* complain_on_overflow */
319 bfd_elf_generic_reloc
, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE
, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE
), /* pcrel_offset */
326 /* Dynamic TLS relocations. */
328 HOWTO (R_ARM_TLS_DTPMOD32
, /* type */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
332 FALSE
, /* pc_relative */
334 complain_overflow_bitfield
,/* complain_on_overflow */
335 bfd_elf_generic_reloc
, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE
, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE
), /* pcrel_offset */
342 HOWTO (R_ARM_TLS_DTPOFF32
, /* type */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
346 FALSE
, /* pc_relative */
348 complain_overflow_bitfield
,/* complain_on_overflow */
349 bfd_elf_generic_reloc
, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE
, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE
), /* pcrel_offset */
356 HOWTO (R_ARM_TLS_TPOFF32
, /* type */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
360 FALSE
, /* pc_relative */
362 complain_overflow_bitfield
,/* complain_on_overflow */
363 bfd_elf_generic_reloc
, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE
, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE
), /* pcrel_offset */
370 /* Relocs used in ARM Linux */
372 HOWTO (R_ARM_COPY
, /* type */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
376 FALSE
, /* pc_relative */
378 complain_overflow_bitfield
,/* complain_on_overflow */
379 bfd_elf_generic_reloc
, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE
, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE
), /* pcrel_offset */
386 HOWTO (R_ARM_GLOB_DAT
, /* type */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
390 FALSE
, /* pc_relative */
392 complain_overflow_bitfield
,/* complain_on_overflow */
393 bfd_elf_generic_reloc
, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE
, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE
), /* pcrel_offset */
400 HOWTO (R_ARM_JUMP_SLOT
, /* type */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
404 FALSE
, /* pc_relative */
406 complain_overflow_bitfield
,/* complain_on_overflow */
407 bfd_elf_generic_reloc
, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE
, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE
), /* pcrel_offset */
414 HOWTO (R_ARM_RELATIVE
, /* type */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
418 FALSE
, /* pc_relative */
420 complain_overflow_bitfield
,/* complain_on_overflow */
421 bfd_elf_generic_reloc
, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE
, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE
), /* pcrel_offset */
428 HOWTO (R_ARM_GOTOFF32
, /* type */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
432 FALSE
, /* pc_relative */
434 complain_overflow_bitfield
,/* complain_on_overflow */
435 bfd_elf_generic_reloc
, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE
, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE
), /* pcrel_offset */
442 HOWTO (R_ARM_GOTPC
, /* type */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
446 TRUE
, /* pc_relative */
448 complain_overflow_bitfield
,/* complain_on_overflow */
449 bfd_elf_generic_reloc
, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE
, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE
), /* pcrel_offset */
456 HOWTO (R_ARM_GOT32
, /* type */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE
, /* pc_relative */
462 complain_overflow_bitfield
,/* complain_on_overflow */
463 bfd_elf_generic_reloc
, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE
, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE
), /* pcrel_offset */
470 HOWTO (R_ARM_PLT32
, /* type */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
474 TRUE
, /* pc_relative */
476 complain_overflow_bitfield
,/* complain_on_overflow */
477 bfd_elf_generic_reloc
, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE
, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE
), /* pcrel_offset */
484 HOWTO (R_ARM_CALL
, /* type */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
488 TRUE
, /* pc_relative */
490 complain_overflow_signed
,/* complain_on_overflow */
491 bfd_elf_generic_reloc
, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE
, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE
), /* pcrel_offset */
498 HOWTO (R_ARM_JUMP24
, /* type */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
502 TRUE
, /* pc_relative */
504 complain_overflow_signed
,/* complain_on_overflow */
505 bfd_elf_generic_reloc
, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE
, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE
), /* pcrel_offset */
512 HOWTO (R_ARM_THM_JUMP24
, /* type */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
516 TRUE
, /* pc_relative */
518 complain_overflow_signed
,/* complain_on_overflow */
519 bfd_elf_generic_reloc
, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE
, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE
), /* pcrel_offset */
526 HOWTO (R_ARM_BASE_ABS
, /* type */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
530 FALSE
, /* pc_relative */
532 complain_overflow_dont
,/* complain_on_overflow */
533 bfd_elf_generic_reloc
, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE
, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE
), /* pcrel_offset */
540 HOWTO (R_ARM_ALU_PCREL7_0
, /* type */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
544 TRUE
, /* pc_relative */
546 complain_overflow_dont
,/* complain_on_overflow */
547 bfd_elf_generic_reloc
, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE
, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE
), /* pcrel_offset */
554 HOWTO (R_ARM_ALU_PCREL15_8
, /* type */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
558 TRUE
, /* pc_relative */
560 complain_overflow_dont
,/* complain_on_overflow */
561 bfd_elf_generic_reloc
, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE
, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE
), /* pcrel_offset */
568 HOWTO (R_ARM_ALU_PCREL23_15
, /* type */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
572 TRUE
, /* pc_relative */
574 complain_overflow_dont
,/* complain_on_overflow */
575 bfd_elf_generic_reloc
, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE
, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE
), /* pcrel_offset */
582 HOWTO (R_ARM_LDR_SBREL_11_0
, /* type */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
586 FALSE
, /* pc_relative */
588 complain_overflow_dont
,/* complain_on_overflow */
589 bfd_elf_generic_reloc
, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE
, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE
), /* pcrel_offset */
596 HOWTO (R_ARM_ALU_SBREL_19_12
, /* type */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
600 FALSE
, /* pc_relative */
602 complain_overflow_dont
,/* complain_on_overflow */
603 bfd_elf_generic_reloc
, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE
, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE
), /* pcrel_offset */
610 HOWTO (R_ARM_ALU_SBREL_27_20
, /* type */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
614 FALSE
, /* pc_relative */
616 complain_overflow_dont
,/* complain_on_overflow */
617 bfd_elf_generic_reloc
, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE
, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE
), /* pcrel_offset */
624 HOWTO (R_ARM_TARGET1
, /* type */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
628 FALSE
, /* pc_relative */
630 complain_overflow_dont
,/* complain_on_overflow */
631 bfd_elf_generic_reloc
, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE
, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE
), /* pcrel_offset */
638 HOWTO (R_ARM_ROSEGREL32
, /* type */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
642 FALSE
, /* pc_relative */
644 complain_overflow_dont
,/* complain_on_overflow */
645 bfd_elf_generic_reloc
, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE
, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE
), /* pcrel_offset */
652 HOWTO (R_ARM_V4BX
, /* type */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
656 FALSE
, /* pc_relative */
658 complain_overflow_dont
,/* complain_on_overflow */
659 bfd_elf_generic_reloc
, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE
, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE
), /* pcrel_offset */
666 HOWTO (R_ARM_TARGET2
, /* type */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
670 FALSE
, /* pc_relative */
672 complain_overflow_signed
,/* complain_on_overflow */
673 bfd_elf_generic_reloc
, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE
, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE
), /* pcrel_offset */
680 HOWTO (R_ARM_PREL31
, /* type */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
684 TRUE
, /* pc_relative */
686 complain_overflow_signed
,/* complain_on_overflow */
687 bfd_elf_generic_reloc
, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE
, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE
), /* pcrel_offset */
694 HOWTO (R_ARM_MOVW_ABS_NC
, /* type */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
698 FALSE
, /* pc_relative */
700 complain_overflow_dont
,/* complain_on_overflow */
701 bfd_elf_generic_reloc
, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE
, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE
), /* pcrel_offset */
708 HOWTO (R_ARM_MOVT_ABS
, /* type */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
712 FALSE
, /* pc_relative */
714 complain_overflow_bitfield
,/* complain_on_overflow */
715 bfd_elf_generic_reloc
, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE
, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE
), /* pcrel_offset */
722 HOWTO (R_ARM_MOVW_PREL_NC
, /* type */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
726 TRUE
, /* pc_relative */
728 complain_overflow_dont
,/* complain_on_overflow */
729 bfd_elf_generic_reloc
, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE
, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE
), /* pcrel_offset */
736 HOWTO (R_ARM_MOVT_PREL
, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 TRUE
, /* pc_relative */
742 complain_overflow_bitfield
,/* complain_on_overflow */
743 bfd_elf_generic_reloc
, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE
, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE
), /* pcrel_offset */
750 HOWTO (R_ARM_THM_MOVW_ABS_NC
, /* type */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
754 FALSE
, /* pc_relative */
756 complain_overflow_dont
,/* complain_on_overflow */
757 bfd_elf_generic_reloc
, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE
, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE
), /* pcrel_offset */
764 HOWTO (R_ARM_THM_MOVT_ABS
, /* type */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
768 FALSE
, /* pc_relative */
770 complain_overflow_bitfield
,/* complain_on_overflow */
771 bfd_elf_generic_reloc
, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE
, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE
), /* pcrel_offset */
778 HOWTO (R_ARM_THM_MOVW_PREL_NC
,/* type */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
782 TRUE
, /* pc_relative */
784 complain_overflow_dont
,/* complain_on_overflow */
785 bfd_elf_generic_reloc
, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE
, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE
), /* pcrel_offset */
792 HOWTO (R_ARM_THM_MOVT_PREL
, /* type */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
796 TRUE
, /* pc_relative */
798 complain_overflow_bitfield
,/* complain_on_overflow */
799 bfd_elf_generic_reloc
, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE
, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE
), /* pcrel_offset */
806 HOWTO (R_ARM_THM_JUMP19
, /* type */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
810 TRUE
, /* pc_relative */
812 complain_overflow_signed
,/* complain_on_overflow */
813 bfd_elf_generic_reloc
, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE
, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE
), /* pcrel_offset */
820 HOWTO (R_ARM_THM_JUMP6
, /* type */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
824 TRUE
, /* pc_relative */
826 complain_overflow_unsigned
,/* complain_on_overflow */
827 bfd_elf_generic_reloc
, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE
, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE
), /* pcrel_offset */
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 HOWTO (R_ARM_THM_ALU_PREL_11_0
,/* type */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
841 TRUE
, /* pc_relative */
843 complain_overflow_dont
,/* complain_on_overflow */
844 bfd_elf_generic_reloc
, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE
, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE
), /* pcrel_offset */
851 HOWTO (R_ARM_THM_PC12
, /* type */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
855 TRUE
, /* pc_relative */
857 complain_overflow_dont
,/* complain_on_overflow */
858 bfd_elf_generic_reloc
, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE
, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE
), /* pcrel_offset */
865 HOWTO (R_ARM_ABS32_NOI
, /* type */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
869 FALSE
, /* pc_relative */
871 complain_overflow_dont
,/* complain_on_overflow */
872 bfd_elf_generic_reloc
, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE
, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE
), /* pcrel_offset */
879 HOWTO (R_ARM_REL32_NOI
, /* type */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
883 TRUE
, /* pc_relative */
885 complain_overflow_dont
,/* complain_on_overflow */
886 bfd_elf_generic_reloc
, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE
, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE
), /* pcrel_offset */
893 /* Group relocations. */
895 HOWTO (R_ARM_ALU_PC_G0_NC
, /* type */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
899 TRUE
, /* pc_relative */
901 complain_overflow_dont
,/* complain_on_overflow */
902 bfd_elf_generic_reloc
, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE
, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE
), /* pcrel_offset */
909 HOWTO (R_ARM_ALU_PC_G0
, /* type */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
913 TRUE
, /* pc_relative */
915 complain_overflow_dont
,/* complain_on_overflow */
916 bfd_elf_generic_reloc
, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE
, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE
), /* pcrel_offset */
923 HOWTO (R_ARM_ALU_PC_G1_NC
, /* type */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
927 TRUE
, /* pc_relative */
929 complain_overflow_dont
,/* complain_on_overflow */
930 bfd_elf_generic_reloc
, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE
, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE
), /* pcrel_offset */
937 HOWTO (R_ARM_ALU_PC_G1
, /* type */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
941 TRUE
, /* pc_relative */
943 complain_overflow_dont
,/* complain_on_overflow */
944 bfd_elf_generic_reloc
, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE
, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE
), /* pcrel_offset */
951 HOWTO (R_ARM_ALU_PC_G2
, /* type */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
955 TRUE
, /* pc_relative */
957 complain_overflow_dont
,/* complain_on_overflow */
958 bfd_elf_generic_reloc
, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE
, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE
), /* pcrel_offset */
965 HOWTO (R_ARM_LDR_PC_G1
, /* type */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
969 TRUE
, /* pc_relative */
971 complain_overflow_dont
,/* complain_on_overflow */
972 bfd_elf_generic_reloc
, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE
, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE
), /* pcrel_offset */
979 HOWTO (R_ARM_LDR_PC_G2
, /* type */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
983 TRUE
, /* pc_relative */
985 complain_overflow_dont
,/* complain_on_overflow */
986 bfd_elf_generic_reloc
, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE
, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE
), /* pcrel_offset */
993 HOWTO (R_ARM_LDRS_PC_G0
, /* type */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
997 TRUE
, /* pc_relative */
999 complain_overflow_dont
,/* complain_on_overflow */
1000 bfd_elf_generic_reloc
, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE
, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE
), /* pcrel_offset */
1007 HOWTO (R_ARM_LDRS_PC_G1
, /* type */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 TRUE
, /* pc_relative */
1013 complain_overflow_dont
,/* complain_on_overflow */
1014 bfd_elf_generic_reloc
, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE
, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE
), /* pcrel_offset */
1021 HOWTO (R_ARM_LDRS_PC_G2
, /* type */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 TRUE
, /* pc_relative */
1027 complain_overflow_dont
,/* complain_on_overflow */
1028 bfd_elf_generic_reloc
, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE
, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE
), /* pcrel_offset */
1035 HOWTO (R_ARM_LDC_PC_G0
, /* type */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 TRUE
, /* pc_relative */
1041 complain_overflow_dont
,/* complain_on_overflow */
1042 bfd_elf_generic_reloc
, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE
, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE
), /* pcrel_offset */
1049 HOWTO (R_ARM_LDC_PC_G1
, /* type */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 TRUE
, /* pc_relative */
1055 complain_overflow_dont
,/* complain_on_overflow */
1056 bfd_elf_generic_reloc
, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE
, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE
), /* pcrel_offset */
1063 HOWTO (R_ARM_LDC_PC_G2
, /* type */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 TRUE
, /* pc_relative */
1069 complain_overflow_dont
,/* complain_on_overflow */
1070 bfd_elf_generic_reloc
, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE
, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE
), /* pcrel_offset */
1077 HOWTO (R_ARM_ALU_SB_G0_NC
, /* type */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 TRUE
, /* pc_relative */
1083 complain_overflow_dont
,/* complain_on_overflow */
1084 bfd_elf_generic_reloc
, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE
, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE
), /* pcrel_offset */
1091 HOWTO (R_ARM_ALU_SB_G0
, /* type */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 TRUE
, /* pc_relative */
1097 complain_overflow_dont
,/* complain_on_overflow */
1098 bfd_elf_generic_reloc
, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE
, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE
), /* pcrel_offset */
1105 HOWTO (R_ARM_ALU_SB_G1_NC
, /* type */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 TRUE
, /* pc_relative */
1111 complain_overflow_dont
,/* complain_on_overflow */
1112 bfd_elf_generic_reloc
, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE
, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE
), /* pcrel_offset */
1119 HOWTO (R_ARM_ALU_SB_G1
, /* type */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 TRUE
, /* pc_relative */
1125 complain_overflow_dont
,/* complain_on_overflow */
1126 bfd_elf_generic_reloc
, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE
, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE
), /* pcrel_offset */
1133 HOWTO (R_ARM_ALU_SB_G2
, /* type */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 TRUE
, /* pc_relative */
1139 complain_overflow_dont
,/* complain_on_overflow */
1140 bfd_elf_generic_reloc
, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE
, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE
), /* pcrel_offset */
1147 HOWTO (R_ARM_LDR_SB_G0
, /* type */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 TRUE
, /* pc_relative */
1153 complain_overflow_dont
,/* complain_on_overflow */
1154 bfd_elf_generic_reloc
, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE
, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE
), /* pcrel_offset */
1161 HOWTO (R_ARM_LDR_SB_G1
, /* type */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 TRUE
, /* pc_relative */
1167 complain_overflow_dont
,/* complain_on_overflow */
1168 bfd_elf_generic_reloc
, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE
, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE
), /* pcrel_offset */
1175 HOWTO (R_ARM_LDR_SB_G2
, /* type */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 TRUE
, /* pc_relative */
1181 complain_overflow_dont
,/* complain_on_overflow */
1182 bfd_elf_generic_reloc
, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE
, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE
), /* pcrel_offset */
1189 HOWTO (R_ARM_LDRS_SB_G0
, /* type */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 TRUE
, /* pc_relative */
1195 complain_overflow_dont
,/* complain_on_overflow */
1196 bfd_elf_generic_reloc
, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE
, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE
), /* pcrel_offset */
1203 HOWTO (R_ARM_LDRS_SB_G1
, /* type */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 TRUE
, /* pc_relative */
1209 complain_overflow_dont
,/* complain_on_overflow */
1210 bfd_elf_generic_reloc
, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE
, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE
), /* pcrel_offset */
1217 HOWTO (R_ARM_LDRS_SB_G2
, /* type */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 TRUE
, /* pc_relative */
1223 complain_overflow_dont
,/* complain_on_overflow */
1224 bfd_elf_generic_reloc
, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE
, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE
), /* pcrel_offset */
1231 HOWTO (R_ARM_LDC_SB_G0
, /* type */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 TRUE
, /* pc_relative */
1237 complain_overflow_dont
,/* complain_on_overflow */
1238 bfd_elf_generic_reloc
, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE
, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE
), /* pcrel_offset */
1245 HOWTO (R_ARM_LDC_SB_G1
, /* type */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 TRUE
, /* pc_relative */
1251 complain_overflow_dont
,/* complain_on_overflow */
1252 bfd_elf_generic_reloc
, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE
, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE
), /* pcrel_offset */
1259 HOWTO (R_ARM_LDC_SB_G2
, /* type */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 TRUE
, /* pc_relative */
1265 complain_overflow_dont
,/* complain_on_overflow */
1266 bfd_elf_generic_reloc
, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE
, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE
), /* pcrel_offset */
1273 /* End of group relocations. */
1275 HOWTO (R_ARM_MOVW_BREL_NC
, /* type */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 FALSE
, /* pc_relative */
1281 complain_overflow_dont
,/* complain_on_overflow */
1282 bfd_elf_generic_reloc
, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE
, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE
), /* pcrel_offset */
1289 HOWTO (R_ARM_MOVT_BREL
, /* type */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 FALSE
, /* pc_relative */
1295 complain_overflow_bitfield
,/* complain_on_overflow */
1296 bfd_elf_generic_reloc
, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE
, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE
), /* pcrel_offset */
1303 HOWTO (R_ARM_MOVW_BREL
, /* type */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 FALSE
, /* pc_relative */
1309 complain_overflow_dont
,/* complain_on_overflow */
1310 bfd_elf_generic_reloc
, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE
, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE
), /* pcrel_offset */
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC
,/* type */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 FALSE
, /* pc_relative */
1323 complain_overflow_dont
,/* complain_on_overflow */
1324 bfd_elf_generic_reloc
, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE
, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE
), /* pcrel_offset */
1331 HOWTO (R_ARM_THM_MOVT_BREL
, /* type */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 FALSE
, /* pc_relative */
1337 complain_overflow_bitfield
,/* complain_on_overflow */
1338 bfd_elf_generic_reloc
, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE
, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE
), /* pcrel_offset */
1345 HOWTO (R_ARM_THM_MOVW_BREL
, /* type */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 FALSE
, /* pc_relative */
1351 complain_overflow_dont
,/* complain_on_overflow */
1352 bfd_elf_generic_reloc
, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE
, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE
), /* pcrel_offset */
1359 HOWTO (R_ARM_TLS_GOTDESC
, /* type */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 FALSE
, /* pc_relative */
1365 complain_overflow_bitfield
,/* complain_on_overflow */
1366 NULL
, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE
, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE
), /* pcrel_offset */
1373 HOWTO (R_ARM_TLS_CALL
, /* type */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 FALSE
, /* pc_relative */
1379 complain_overflow_dont
,/* complain_on_overflow */
1380 bfd_elf_generic_reloc
, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE
, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE
), /* pcrel_offset */
1387 HOWTO (R_ARM_TLS_DESCSEQ
, /* type */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 FALSE
, /* pc_relative */
1393 complain_overflow_bitfield
,/* complain_on_overflow */
1394 bfd_elf_generic_reloc
, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE
, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE
), /* pcrel_offset */
1401 HOWTO (R_ARM_THM_TLS_CALL
, /* type */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 FALSE
, /* pc_relative */
1407 complain_overflow_dont
,/* complain_on_overflow */
1408 bfd_elf_generic_reloc
, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE
, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE
), /* pcrel_offset */
1415 HOWTO (R_ARM_PLT32_ABS
, /* type */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 FALSE
, /* pc_relative */
1421 complain_overflow_dont
,/* complain_on_overflow */
1422 bfd_elf_generic_reloc
, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE
, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE
), /* pcrel_offset */
1429 HOWTO (R_ARM_GOT_ABS
, /* type */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 FALSE
, /* pc_relative */
1435 complain_overflow_dont
,/* complain_on_overflow */
1436 bfd_elf_generic_reloc
, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE
, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE
), /* pcrel_offset */
1443 HOWTO (R_ARM_GOT_PREL
, /* type */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 TRUE
, /* pc_relative */
1449 complain_overflow_dont
, /* complain_on_overflow */
1450 bfd_elf_generic_reloc
, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE
, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE
), /* pcrel_offset */
1457 HOWTO (R_ARM_GOT_BREL12
, /* type */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 FALSE
, /* pc_relative */
1463 complain_overflow_bitfield
,/* complain_on_overflow */
1464 bfd_elf_generic_reloc
, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE
, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE
), /* pcrel_offset */
1471 HOWTO (R_ARM_GOTOFF12
, /* type */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 FALSE
, /* pc_relative */
1477 complain_overflow_bitfield
,/* complain_on_overflow */
1478 bfd_elf_generic_reloc
, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE
, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE
), /* pcrel_offset */
1485 EMPTY_HOWTO (R_ARM_GOTRELAX
), /* reserved for future GOT-load optimizations */
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY
, /* type */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 FALSE
, /* pc_relative */
1494 complain_overflow_dont
, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn
, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE
, /* partial_inplace */
1500 FALSE
), /* pcrel_offset */
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT
, /* type */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 FALSE
, /* pc_relative */
1509 complain_overflow_dont
, /* complain_on_overflow */
1510 NULL
, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE
, /* partial_inplace */
1515 FALSE
), /* pcrel_offset */
1517 HOWTO (R_ARM_THM_JUMP11
, /* type */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 TRUE
, /* pc_relative */
1523 complain_overflow_signed
, /* complain_on_overflow */
1524 bfd_elf_generic_reloc
, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE
, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE
), /* pcrel_offset */
1531 HOWTO (R_ARM_THM_JUMP8
, /* type */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 TRUE
, /* pc_relative */
1537 complain_overflow_signed
, /* complain_on_overflow */
1538 bfd_elf_generic_reloc
, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE
, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE
), /* pcrel_offset */
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32
, /* type */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 FALSE
, /* pc_relative */
1552 complain_overflow_bitfield
,/* complain_on_overflow */
1553 NULL
, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE
, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE
), /* pcrel_offset */
1560 HOWTO (R_ARM_TLS_LDM32
, /* type */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 FALSE
, /* pc_relative */
1566 complain_overflow_bitfield
,/* complain_on_overflow */
1567 bfd_elf_generic_reloc
, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE
, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE
), /* pcrel_offset */
1574 HOWTO (R_ARM_TLS_LDO32
, /* type */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 FALSE
, /* pc_relative */
1580 complain_overflow_bitfield
,/* complain_on_overflow */
1581 bfd_elf_generic_reloc
, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE
, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE
), /* pcrel_offset */
1588 HOWTO (R_ARM_TLS_IE32
, /* type */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 FALSE
, /* pc_relative */
1594 complain_overflow_bitfield
,/* complain_on_overflow */
1595 NULL
, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE
, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE
), /* pcrel_offset */
1602 HOWTO (R_ARM_TLS_LE32
, /* type */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 FALSE
, /* pc_relative */
1608 complain_overflow_bitfield
,/* complain_on_overflow */
1609 NULL
, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE
, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE
), /* pcrel_offset */
1616 HOWTO (R_ARM_TLS_LDO12
, /* type */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 FALSE
, /* pc_relative */
1622 complain_overflow_bitfield
,/* complain_on_overflow */
1623 bfd_elf_generic_reloc
, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE
, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE
), /* pcrel_offset */
1630 HOWTO (R_ARM_TLS_LE12
, /* type */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 FALSE
, /* pc_relative */
1636 complain_overflow_bitfield
,/* complain_on_overflow */
1637 bfd_elf_generic_reloc
, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE
, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE
), /* pcrel_offset */
1644 HOWTO (R_ARM_TLS_IE12GP
, /* type */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 FALSE
, /* pc_relative */
1650 complain_overflow_bitfield
,/* complain_on_overflow */
1651 bfd_elf_generic_reloc
, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE
, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE
), /* pcrel_offset */
  /* 112-127 private relocations.  */

  /* R_ARM_ME_TOO, obsolete.  */

  HOWTO (R_ARM_THM_TLS_DESCSEQ,	/* type */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_THM_TLS_DESCSEQ",/* name */
	 FALSE,			/* partial_inplace */
	 0x00000000,		/* src_mask */
	 0x00000000,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
};

static reloc_howto_type elf32_arm_howto_table_2[1] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE)			/* pcrel_offset */
};

/* 249-255 extended, currently unused, relocations:  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 FALSE)			/* pcrel_offset */
};

static reloc_howto_type *
elf32_arm_howto_from_type (unsigned int r_type)
{
  if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
    return &elf32_arm_howto_table_1[r_type];

  if (r_type == R_ARM_IRELATIVE)
    return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];

  if (r_type >= R_ARM_RREL32
      && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
    return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];

  return NULL;
}

static void
elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
			 Elf_Internal_Rela * elf_reloc)
{
  unsigned int r_type;

  r_type = ELF32_R_TYPE (elf_reloc->r_info);
  bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
}

struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type  bfd_reloc_val;
    unsigned char             elf_reloc_val;
  };

/* All entries in this list must also be present in elf32_arm_howto_table.  */
static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
  {
    {BFD_RELOC_NONE, R_ARM_NONE},
    {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
    {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
    {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
    {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
    {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
    {BFD_RELOC_32, R_ARM_ABS32},
    {BFD_RELOC_32_PCREL, R_ARM_REL32},
    {BFD_RELOC_8, R_ARM_ABS8},
    {BFD_RELOC_16, R_ARM_ABS16},
    {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
    {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
    {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
    {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
    {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
    {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
    {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
    {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
    {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
    {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
    {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
    {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
    {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
    {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
    {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
    {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
    {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
    {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
    {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
    {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
    {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
    {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
    {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
    {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
    {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
    {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
    {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
    {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
    {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
    {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
    {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
    {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
    {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
    {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
    {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
    {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
    {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
    {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
    {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
    {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
    {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
    {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
    {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
    {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
    {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
    {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
    {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
    {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
    {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
    {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
    {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
    {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
    {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
    {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
    {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
    {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
    {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
    {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
    {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
    {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
    {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
    {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
    {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
    {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
    {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
    {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
    {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
    {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
    {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
    {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
    {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
    {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
    {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
    {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
    {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
    {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
  };

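/* Map a BFD reloc code to a howto entry by scanning elf32_arm_reloc_map and
   translating the matching ELF reloc number through
   elf32_arm_howto_from_type.  */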
static reloc_howto_type *
elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			     bfd_reloc_code_real_type code)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
    if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
      return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);

  return NULL;
}

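/* Look up a howto entry by its "R_ARM_..." name, searching all three howto
   tables case-insensitively.  */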
static reloc_howto_type *
elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			     const char *r_name)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
    if (elf32_arm_howto_table_1[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
      return &elf32_arm_howto_table_1[i];

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
    if (elf32_arm_howto_table_2[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
      return &elf32_arm_howto_table_2[i];

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
    if (elf32_arm_howto_table_3[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
      return &elf32_arm_howto_table_3[i];

  return NULL;
}

/* Support for core dump NOTE sections.  */

static bfd_boolean
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
      default:
	return FALSE;

      case 148:		/* Linux/ARM 32-bit.  */
	/* pr_cursig */
	elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

	/* pr_pid */
	elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

	/* pr_reg */
	offset = 72;
	size = 72;

	break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}

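/* Parse an NT_PRPSINFO note, recording the pid, program name and command
   line in the core tdata.  */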
static bfd_boolean
elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
{
  switch (note->descsz)
    {
      default:
	return FALSE;

      case 124:		/* Linux/ARM elf_prpsinfo.  */
	elf_tdata (abfd)->core->pid
	  = bfd_get_32 (abfd, note->descdata + 12);
	elf_tdata (abfd)->core->program
	  = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
	elf_tdata (abfd)->core->command
	  = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
    }

  /* Note that for some reason, a spurious space is tacked
     onto the end of the args in some (at least one anyway)
     implementations, so strip it off if it exists.  */
  {
    char *command = elf_tdata (abfd)->core->command;
    int n = strlen (command);

    if (0 < n && command[n - 1] == ' ')
      command[n - 1] = '\0';
  }

  return TRUE;
}

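/* Write a Linux/ARM style NT_PRPSINFO or NT_PRSTATUS core note, using the
   same field offsets that the grok functions above expect.  */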
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124];
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	strncpy (data + 28, va_arg (ap, const char *), 16);
	strncpy (data + 44, va_arg (ap, const char *), 80);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}

#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

typedef unsigned long int insn32;
typedef unsigned short int insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
   || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
   || ((abfd)->flags & BFD_LINKER_CREATED))
/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found.  The
   name can be changed.  The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"
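
/* For example (illustrative only): with the name mangling above, a
   Thumb-to-ARM glue entry for a function `foo' is named "__foo_from_thumb",
   an ARM-to-Thumb one "__foo_from_arm", and a generic long-branch veneer
   "__foo_veneer".  */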

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
			    + dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018, /* 4: .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe010,		/* ldr   lr, [pc, #16]  */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
};

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
  0xe28fc600,		/* add   ip, pc, #NN	*/
  0xe28cca00,		/* add	 ip, ip, #NN	*/
  0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
  0x00000000,		/* unused		*/
};

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe004,		/* ldr   lr, [pc, #4]   */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
  0x00000000,		/* &GOT[0] - .          */
};

/* By default subsequent entries in a procedure linkage table look like
   this.  Offsets that don't fit into 28 bits will cause a link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
{
  0xe28fc600,		/* add   ip, pc, #0xNN00000 */
  0xe28cca00,		/* add	 ip, ip, #0xNN000   */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
};

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
{
  0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
  0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
  0xe28cca00,		/* add	 ip, ip, #0xNN000    */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
};

static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;

#endif /* not FOUR_WORD_PLT */
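
/* Note on the "short" entry above: its three immediate fields hold an 8-bit
   chunk shifted left by 20 bits, an 8-bit chunk shifted left by 12 bits and a
   final 12-bit offset, i.e. 28 bits of displacement in total, which is why
   larger offsets need the four-instruction "long" form instead.  */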
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction may be encoded as one or two array elements.  */
  0xf8dfb500,		/* push	   {lr}		 */
  0x44fee008,		/* ldr.w   lr, [pc, #8]  */
  0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
  0x00000000,		/* &GOT[0] - .		 */
};

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction may be encoded as one or two array elements.  */
  0x0c00f240,		/* movw	   ip, #0xNNNN	  */
  0x0c00f2c0,		/* movt	   ip, #0xNNNN	  */
  0xf8dc44fc,		/* add	   ip, pc	  */
  0xbf00f000		/* ldr.w   pc, [ip]	  */
};
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry [] =
{
  0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
  0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
};

/* The format of subsequent entries in a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry [] =
{
  0xe59fc000,		/* ldr	ip,[pc]				*/
  0xe59cf000,		/* ldr	pc,[ip]				*/
  0x00000000,		/* .long @got				*/
  0xe59fc000,		/* ldr	ip,[pc]				*/
  0xea000000,		/* b	_PLT				*/
  0x00000000,		/* .long @pltindex*sizeof(Elf32_Rela)	*/
};

/* The format of entries in a VxWorks shared library.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry [] =
{
  0xe59fc000,		/* ldr	ip,[pc]				*/
  0xe79cf009,		/* ldr	pc,[ip,r9]			*/
  0x00000000,		/* .long @got				*/
  0xe59fc000,		/* ldr	ip,[pc]				*/
  0xe599f008,		/* ldr	pc,[r9,#8]			*/
  0x00000000,		/* .long @pltindex*sizeof(Elf32_Rela)	*/
};
/* An initial stub used if the PLT entry is referenced from Thumb code.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop   */
};

/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
{
  0xe51ff004,		/* ldr	pc, [pc, #-4]		*/
  0x00000000,		/* dcd	R_ARM_GLOB_DAT(X)	*/
};

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
{
  /* First bundle: */
  0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xe52dc008,		/* str	ip, [sp, #-8]!			*/
  /* Second bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
  /* Third bundle: */
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  /* .Lplt_tail: */
  0xe50dc004,		/* str	ip, [sp, #-4]			*/
  /* Fourth bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
};
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
{
  0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xea000000,		/* b	.Lplt_tail			*/
};

#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) - 2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) - 2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
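
/* For reference, with the PC-relative bias (+4 Thumb, +8 ARM) folded in,
   these limits correspond to roughly +/-32MB for ARM b/bl, +/-4MB for
   Thumb-1 bl, +/-16MB for Thumb-2 b.w/bl and +/-1MB for Thumb-2 conditional
   branches.  */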
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

typedef struct
{
  bfd_vma	      data;
  enum stub_insn_type type;
  unsigned int	      r_type;
  int		      reloc_addend;
}  insn_sequence;
/* Arm/Thumb -> Arm/Thumb long branch stub.  On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
{
  ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* V4T Arm -> Thumb long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
{
  ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub.  Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
{
  THUMB16_INSN (0xb401),	     /* push {r0} */
  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
  THUMB16_INSN (0x4760),	     /* bx   ip */
  THUMB16_INSN (0xbf00),	     /* nop */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> Thumb long branch stub.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	     /* bx   ip */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop   */
  ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM short branch stub.  Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop   */
  ARM_REL_INSN (0xea000000, -8),     /* b    (X-8) */
};

/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
{
  ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
  ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
};

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
};

/* V4T ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
};
/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop  */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X) */
};

/* Thumb -> Thumb long branch stub, PIC.  Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
{
  THUMB16_INSN (0xb401),	     /* push {r0} */
  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
  THUMB16_INSN (0x4484),	     /* add  ip, r0 */
  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
  THUMB16_INSN (0x4760),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X) */
};

/* V4T Thumb -> Thumb long branch stub, PIC.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add  ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
};

/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
{
  ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
  ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
};

/* V4T Thumb -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X) */
};

/* NaCl ARM -> ARM long branch stub.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx	ip */
  ARM_INSN (0xe320f000),		/* nop */
  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
  ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx	ip */
  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};
/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
{
  THUMB16_BCOND_INSN (0xd001),		/* b<cond>.n true.  */
  THUMB32_B_INSN (0xf000b800, -4),	/* b.w insn_after_original_branch.  */
  THUMB32_B_INSN (0xf000b800, -4)	/* true: b.w original_branch_dest.  */
};

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to
   the real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
{
  ARM_REL_INSN (0xea000000, -8)		/* b original_branch_dest.  */
};

/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in elf32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   section name.  */
#define STUB_SUFFIX ".__stub"
/* One entry per long/short branch stub defined above.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx)

#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS

  /* Note the first a8_veneer type.  */
  arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
};
#undef DEF_STUB

typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
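
/* Illustrative note: DEF_STUBS is an X-macro list, so each DEF_STUB
   redefinition above expands it differently -- first into the arm_stub_*
   enumerators, then into stub_definitions[] entries pairing each template
   with its length, e.g.
     { elf32_arm_stub_long_branch_any_any,
       ARRAY_SIZE (elf32_arm_stub_long_branch_any_any) },  */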
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Offset to apply to relocation referencing target_value.  */
  bfd_vma target_addend;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};

/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf32_elf_section_map
{
  bfd_vma vma;
  char type;
}
elf32_arm_section_map;
/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_vfp11_erratum_list *veneer;
      unsigned int vfp_insn;
    } b;
    struct
    {
      struct elf32_vfp11_erratum_list *branch;
      const char *id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;

/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */

typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_stm32l4xx_erratum_list *veneer;
      unsigned int insn;
    } b;
    struct
    {
      struct elf32_stm32l4xx_erratum_list *branch;
      const char *id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;

typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  unsigned int mapcount;
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  bfd *input_bfd;
  asection *section;
  bfd_vma offset;
  bfd_vma addend;
  unsigned long orig_insn;
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};

/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  bfd_vma from;
  bfd_vma destination;
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  bfd_boolean non_a8_stub;
};
/* The size of the thread control block.  */
#define TCB_SIZE	8

/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  */
  bfd_signed_vma got_offset;
};

/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};
struct elf_arm_obj_tdata
{
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};

#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)

static bfd_boolean
elf32_arm_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
				  ARM_ELF_DATA);
}
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

#define GOT_UNKNOWN	0
#define GOT_NORMAL	1
#define GOT_TLS_GD	2
#define GOT_TLS_IE	4
#define GOT_TLS_GDESC	8
#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;
};
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
   == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  int use_rel;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32 relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;
};

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int i;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	break;
      mask = (mask >> 1);
    }
  return i;
#endif
}

static inline int
popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int i, sum = 0;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	sum++;
      mask = (mask >> 1);
    }
  return sum;
#endif
}
/* Create an entry in an ARM ELF linker hash table.  */

static struct bfd_hash_entry *
elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
			     struct bfd_hash_table * table,
			     const char * string)
{
  struct elf32_arm_link_hash_entry * ret =
    (struct elf32_arm_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = (struct elf32_arm_link_hash_entry *)
	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elf32_arm_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      ret->dyn_relocs = NULL;
      ret->tls_type = GOT_UNKNOWN;
      ret->tlsdesc_got = (bfd_vma) -1;
      ret->plt.thumb_refcount = 0;
      ret->plt.maybe_thumb_refcount = 0;
      ret->plt.noncall_refcount = 0;
      ret->plt.got_offset = -1;
      ret->is_iplt = FALSE;
      ret->export_glue = NULL;

      ret->stub_cache = NULL;
    }

  return (struct bfd_hash_entry *) ret;
}
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  */

static bfd_boolean
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;
      bfd_size_type size;
      char *data;

      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
      size = num_syms * (sizeof (bfd_signed_vma)
			 + sizeof (struct arm_local_iplt_info *)
			 + sizeof (bfd_vma)
			 + sizeof (char));
      data = bfd_zalloc (abfd, size);
      if (data == NULL)
	return FALSE;

      elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
      data += num_syms * sizeof (bfd_signed_vma);

      elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
      data += num_syms * sizeof (struct arm_local_iplt_info *);

      elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
      data += num_syms * sizeof (bfd_vma);

      elf32_arm_local_got_tls_type (abfd) = data;
    }
  return TRUE;
}
/* Return the .iplt information for local symbol R_SYMNDX, which belongs
   to input bfd ABFD.  Create the information if it doesn't already exist.
   Return null if an allocation fails.  */

static struct arm_local_iplt_info *
elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
{
  struct arm_local_iplt_info **ptr;

  if (!elf32_arm_allocate_local_sym_info (abfd))
    return NULL;

  BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
  ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
  if (*ptr == NULL)
    *ptr = bfd_zalloc (abfd, sizeof (**ptr));
  return *ptr;
}
/* Try to obtain PLT information for the symbol with index R_SYMNDX
   in ABFD's symbol table.  If the symbol is global, H points to its
   hash table entry, otherwise H is null.

   Return true if the symbol does have PLT information.  When returning
   true, point *ROOT_PLT at the target-independent reference count/offset
   union and *ARM_PLT at the ARM-specific information.  */

static bfd_boolean
elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
			unsigned long r_symndx, union gotplt_union **root_plt,
			struct arm_plt_info **arm_plt)
{
  struct arm_local_iplt_info *local_iplt;

  if (h != NULL)
    {
      *root_plt = &h->root.plt;
      *arm_plt = &h->plt;
      return TRUE;
    }

  if (elf32_arm_local_iplt (abfd) == NULL)
    return FALSE;

  local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
  if (local_iplt == NULL)
    return FALSE;

  *root_plt = &local_iplt->root;
  *arm_plt = &local_iplt->arm;
  return TRUE;
}
/* Return true if the PLT described by ARM_PLT requires a Thumb stub
   before it.  */

static bfd_boolean
elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
				  struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  return (arm_plt->thumb_refcount != 0
	  || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
}
/* Return a pointer to the head of the dynamic reloc list that should
   be used for local symbol ISYM, which is symbol number R_SYMNDX in
   ABFD's symbol table.  Return null if an error occurs.  */

static struct elf_dyn_relocs **
elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
				   Elf_Internal_Sym *isym)
{
  if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
    {
      struct arm_local_iplt_info *local_iplt;

      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
      if (local_iplt == NULL)
	return NULL;
      return &local_iplt->dyn_relocs;
    }
  else
    {
      /* Track dynamic relocs needed for local syms too.
	 We really need local syms available to do this
	 easily.  Oh well.  */
      asection *s;
      void *vpp;

      s = bfd_section_from_elf_index (abfd, isym->st_shndx);
      if (s == NULL)
	abort ();

      vpp = &elf_section_data (s)->local_dynrel;
      return (struct elf_dyn_relocs **) vpp;
    }
}
/* Initialize an entry in the stub hash table.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table,
		   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = (struct bfd_hash_entry *)
	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct elf32_arm_stub_hash_entry *eh;

      /* Initialize the local fields.  */
      eh = (struct elf32_arm_stub_hash_entry *) entry;
      eh->stub_sec = NULL;
      eh->stub_offset = 0;
      eh->target_value = 0;
      eh->target_section = NULL;
      eh->target_addend = 0;
      eh->orig_insn = 0;
      eh->stub_type = arm_stub_none;
      eh->stub_size = 0;
      eh->stub_template = NULL;
      eh->stub_template_size = 0;
      eh->h = NULL;
      eh->id_sec = NULL;
      eh->output_name = NULL;
    }

  return entry;
}
/* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
   shortcuts to them in our hash table.  */

static bfd_boolean
create_got_section (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* BPABI objects never have a GOT, or associated sections.  */
  if (htab->symbian_p)
    return TRUE;

  if (! _bfd_elf_create_got_section (dynobj, info))
    return FALSE;

  return TRUE;
}
/* Create the .iplt, .rel(a).iplt and .igot.plt sections.  */

static bfd_boolean
create_ifunc_sections (struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;
  const struct elf_backend_data *bed;
  bfd *dynobj;
  asection *s;
  flagword flags;

  htab = elf32_arm_hash_table (info);
  dynobj = htab->root.dynobj;
  bed = get_elf_backend_data (dynobj);
  flags = bed->dynamic_sec_flags;

  if (htab->root.iplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
					      flags | SEC_READONLY | SEC_CODE);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
	return FALSE;
      htab->root.iplt = s;
    }

  if (htab->root.irelplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj,
					      RELOC_SECTION (htab, ".iplt"),
					      flags | SEC_READONLY);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.irelplt = s;
    }

  if (htab->root.igotplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.igotplt = s;
    }
  return TRUE;
}
/* Determine if we're dealing with a Thumb only architecture.  */

static bfd_boolean
using_thumb_only (struct elf32_arm_link_hash_table *globals)
{
  int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
				       Tag_CPU_arch);
  int profile;

  if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
    return TRUE;

  if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
    return FALSE;

  profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
				      Tag_CPU_arch_profile);

  return profile == 'M';
}

/* Determine if we're dealing with a Thumb-2 object.  */

static bfd_boolean
using_thumb2 (struct elf32_arm_link_hash_table *globals)
{
  int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
				       Tag_CPU_arch);
  return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
}
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
  if (!bfd_link_pic (info))
    htab->srelbss = bfd_get_linker_section (dynobj,
					    RELOC_SECTION (htab, ".bss"));

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}
    }
  else
    {
      /* Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size  = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->sdynbss
      || (!bfd_link_pic (info) && !htab->srelbss))
    abort ();

  return TRUE;
}
/* Copy the extra info we tack onto an elf_link_hash_entry.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
/* Destroy an ARM elf linker hash table.  */

static void
elf32_arm_link_hash_table_free (bfd *obfd)
{
  struct elf32_arm_link_hash_table *ret
    = (struct elf32_arm_link_hash_table *) obfd->link.hash;

  bfd_hash_table_free (&ret->stub_hash_table);
  _bfd_elf_link_hash_table_free (obfd);
}

/* Create an ARM elf linker hash table.  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);

  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  ret->use_rel = 1;
  ret->obfd = abfd;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}
/* Determine what kind of NOPs are available.  */

static bfd_boolean
arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
{
  const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
					     Tag_CPU_arch);
  return arch == TAG_CPU_ARCH_V6T2
	 || arch == TAG_CPU_ARCH_V6K
	 || arch == TAG_CPU_ARCH_V7
	 || arch == TAG_CPU_ARCH_V7E_M;
}

static bfd_boolean
arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
{
  const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
					     Tag_CPU_arch);
  return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
	  || arch == TAG_CPU_ARCH_V7E_M);
}

static bfd_boolean
arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_long_branch_thumb_only_pic:
      return TRUE;
    case arm_stub_none:
      BFD_FAIL ();
      return FALSE;
      break;
    default:
      return FALSE;
    }
}
/* Determine the type of stub needed, if any, for a call.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  int thumb2;
  int thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;

  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);

  thumb2 = using_thumb2 (globals);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
				 &root_plt, &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub.  We don't take this extra distance into account
	     here, because if a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry
	     because it avoids spreading offset corrections in several
	     places.  */

	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;
	  branch_type = ST_BRANCH_TO_ARM;
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl).  A stub is needed in this case,
	   but only if this call is not through a PLT entry.  Indeed,
	   PLT stubs handle mode switching already.  */
      if ((!thumb2
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stub.  */
		    ? arm_stub_long_branch_thumb_only_pic
		    /* non-PIC stub.  */
		    : arm_stub_long_branch_thumb_only;
		}
	    }
	  else
	    {
	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  (*_bfd_error_handler)
		    (_("%B(%s): warning: interworking not enabled.\n"
		       "  first occurrence: %B: Thumb call to ARM"),
		     sym_sec->owner, input_bfd, name);
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC and above.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      (*_bfd_error_handler)
		(_("%B(%s): warning: interworking not enabled.\n"
		   "  first occurrence: %B: ARM call to Thumb"),
		 sym_sec->owner, input_bfd, name);
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->nacl_p
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))
		/* non-PIC stubs.  */
		: (globals->nacl_p
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
/* Build a name for an entry in the stub hash table.  */

static char *
elf32_arm_stub_name (const asection *input_section,
		     const asection *sym_sec,
		     const struct elf32_arm_link_hash_entry *hash,
		     const Elf_Internal_Rela *rel,
		     enum elf32_arm_stub_type stub_type)
{
  char *stub_name;
  bfd_size_type len;

  if (hash)
    {
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%s+%x_%d",
		 input_section->id & 0xffffffff,
		 hash->root.root.root.string,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }
  else
    {
      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%x:%x+%x_%d",
		 input_section->id & 0xffffffff,
		 sym_sec->id & 0xffffffff,
		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }

  return stub_name;
}
/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_get_stub_entry (const asection *input_section,
			  const asection *sym_sec,
			  struct elf_link_hash_entry *hash,
			  const Elf_Internal_Rela *rel,
			  struct elf32_arm_link_hash_table *htab,
			  enum elf32_arm_stub_type stub_type)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
  const asection *id_sec;

  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  id_sec = htab->stub_group[input_section->id].link_sec;

  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h
      && h->stub_cache->id_sec == id_sec
      && h->stub_cache->stub_type == stub_type)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
      if (stub_name == NULL)
	return NULL;

      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
					 stub_name, FALSE, FALSE);
      if (h != NULL)
	h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}
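/* Note on the cache above: each global symbol carries a single-entry
   cache (h->stub_cache), so repeated relocations against the same
   symbol from the same stub group skip both the sprintf in
   elf32_arm_stub_name and the hash-table lookup.  */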
/* Find or create a stub section.  Returns a pointer to the stub section, and
   the section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab)
{
  asection *link_sec;
  asection *stub_sec;

  link_sec = htab->stub_group[section->id].link_sec;
  BFD_ASSERT (link_sec != NULL);
  stub_sec = htab->stub_group[section->id].stub_sec;

  if (stub_sec == NULL)
    {
      stub_sec = htab->stub_group[link_sec->id].stub_sec;
      if (stub_sec == NULL)
	{
	  size_t namelen;
	  bfd_size_type len;
	  char *s_name;

	  namelen = strlen (link_sec->name);
	  len = namelen + sizeof (STUB_SUFFIX);
	  s_name = (char *) bfd_alloc (htab->stub_bfd, len);
	  if (s_name == NULL)
	    return NULL;

	  memcpy (s_name, link_sec->name, namelen);
	  memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
	  stub_sec = (*htab->add_stub_section) (s_name, link_sec,
						htab->nacl_p ? 4 : 3);
	  if (stub_sec == NULL)
	    return NULL;
	  htab->stub_group[link_sec->id].stub_sec = stub_sec;
	}
      htab->stub_group[section->id].stub_sec = stub_sec;
    }

  if (link_sec_p)
    *link_sec_p = link_sec;

  return stub_sec;
}
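/* Illustrative example: if the group leader is an input section named
   ".text.hot", the stub section created above is named ".text.hot"
   STUB_SUFFIX, and the final argument to add_stub_section requests an
   alignment power of 3 (8 bytes), or 4 (16 bytes) when targeting NaCl
   so that instruction bundles stay aligned.  */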
/* Add a new stub entry to the stub hash.  Not all fields of the new
   stub entry are initialised.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_add_stub (const char *stub_name,
		    asection *section,
		    struct elf32_arm_link_hash_table *htab)
{
  asection *link_sec;
  asection *stub_sec;
  struct elf32_arm_stub_hash_entry *stub_entry;

  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
  if (stub_sec == NULL)
    return NULL;

  /* Enter this entry into the linker stub hash table.  */
  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
				     TRUE, FALSE);
  if (stub_entry == NULL)
    {
      (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
			     section->owner, stub_name);
      return NULL;
    }

  stub_entry->stub_sec = stub_sec;
  stub_entry->stub_offset = 0;
  stub_entry->id_sec = link_sec;

  return stub_entry;
}
/* Store an Arm insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_arm_insn (struct elf32_arm_link_hash_table * htab,
	      bfd * output_bfd, bfd_vma val, void * ptr)
{
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    bfd_putl32 (val, ptr);
  else
    bfd_putb32 (val, ptr);
}

/* Store a 16-bit Thumb insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_thumb_insn (struct elf32_arm_link_hash_table * htab,
		bfd * output_bfd, bfd_vma val, void * ptr)
{
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    bfd_putl16 (val, ptr);
  else
    bfd_putb16 (val, ptr);
}

/* Store a Thumb2 insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
		 bfd * output_bfd, bfd_vma val, void * ptr)
{
  /* T2 instructions are 16-bit streamed.  */
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    {
      bfd_putl16 ((val >> 16) & 0xffff, ptr);
      bfd_putl16 ((val & 0xffff), ptr + 2);
    }
  else
    {
      bfd_putb16 ((val >> 16) & 0xffff, ptr);
      bfd_putb16 ((val & 0xffff), ptr + 2);
    }
}
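/* Worked example (illustrative): a 32-bit Thumb-2 encoding such as
   0xf000f800 is emitted as its high halfword 0xf000 at PTR followed by
   its low halfword 0xf800 at PTR + 2.  The halfword order is the same
   in both branches above; only the byte order within each halfword
   changes with the endianness chosen for the output.  */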
/* If it's possible to change R_TYPE to a more efficient access
   model, return the new reloc type.  */

static int
elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
			  struct elf_link_hash_entry *h)
{
  int is_local = (h == NULL);

  if (bfd_link_pic (info)
      || (h && h->root.type == bfd_link_hash_undefweak))
    return r_type;

  /* We do not support relaxations for Old TLS models.  */
  switch (r_type)
    {
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
    }

  return r_type;
}
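/* Example of the transition above (executable links only, since PIC
   links return early): a TLS descriptor or TLS call sequence against a
   local symbol (h == NULL) relaxes to the Local Exec model
   (R_ARM_TLS_LE32); against a global symbol it relaxes to Initial Exec
   (R_ARM_TLS_IE32).  */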
static bfd_reloc_status_type elf32_arm_final_link_relocate
  (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
   Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
   const char *, unsigned char, enum arm_st_branch_type,
   struct elf_link_hash_entry *, bfd_boolean *, char **);

static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_a8_veneer_blx:
      return 4;

    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
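/* The alignment returned above (in bytes) is consumed by
   arm_build_one_stub below: Cortex-A8 veneers built from Thumb
   instructions only need 2-byte alignment, ordinary ARM-mode stubs
   need 4, and NaCl stubs keep their 16-byte bundle alignment.
   arm_build_one_stub uses the "== 2" test to defer the
   less-strictly-aligned Cortex-A8 fixes to a second traversal.  */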
4264 arm_build_one_stub (struct bfd_hash_entry
*gen_entry
,
4268 struct elf32_arm_stub_hash_entry
*stub_entry
;
4269 struct elf32_arm_link_hash_table
*globals
;
4270 struct bfd_link_info
*info
;
4277 const insn_sequence
*template_sequence
;
4279 int stub_reloc_idx
[MAXRELOCS
] = {-1, -1};
4280 int stub_reloc_offset
[MAXRELOCS
] = {0, 0};
4283 /* Massage our args to the form they really have. */
4284 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
4285 info
= (struct bfd_link_info
*) in_arg
;
4287 globals
= elf32_arm_hash_table (info
);
4288 if (globals
== NULL
)
4291 stub_sec
= stub_entry
->stub_sec
;
4293 if ((globals
->fix_cortex_a8
< 0)
4294 != (arm_stub_required_alignment (stub_entry
->stub_type
) == 2))
4295 /* We have to do less-strictly-aligned fixes last. */
4298 /* Make a note of the offset within the stubs for this entry. */
4299 stub_entry
->stub_offset
= stub_sec
->size
;
4300 loc
= stub_sec
->contents
+ stub_entry
->stub_offset
;
4302 stub_bfd
= stub_sec
->owner
;
4304 /* This is the address of the stub destination. */
4305 sym_value
= (stub_entry
->target_value
4306 + stub_entry
->target_section
->output_offset
4307 + stub_entry
->target_section
->output_section
->vma
);
4309 template_sequence
= stub_entry
->stub_template
;
4310 template_size
= stub_entry
->stub_template_size
;
4313 for (i
= 0; i
< template_size
; i
++)
4315 switch (template_sequence
[i
].type
)
4319 bfd_vma data
= (bfd_vma
) template_sequence
[i
].data
;
4320 if (template_sequence
[i
].reloc_addend
!= 0)
4322 /* We've borrowed the reloc_addend field to mean we should
4323 insert a condition code into this (Thumb-1 branch)
4324 instruction. See THUMB16_BCOND_INSN. */
4325 BFD_ASSERT ((data
& 0xff00) == 0xd000);
4326 data
|= ((stub_entry
->orig_insn
>> 22) & 0xf) << 8;
4328 bfd_put_16 (stub_bfd
, data
, loc
+ size
);
4334 bfd_put_16 (stub_bfd
,
4335 (template_sequence
[i
].data
>> 16) & 0xffff,
4337 bfd_put_16 (stub_bfd
, template_sequence
[i
].data
& 0xffff,
4339 if (template_sequence
[i
].r_type
!= R_ARM_NONE
)
4341 stub_reloc_idx
[nrelocs
] = i
;
4342 stub_reloc_offset
[nrelocs
++] = size
;
4348 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
,
4350 /* Handle cases where the target is encoded within the
4352 if (template_sequence
[i
].r_type
== R_ARM_JUMP24
)
4354 stub_reloc_idx
[nrelocs
] = i
;
4355 stub_reloc_offset
[nrelocs
++] = size
;
4361 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
, loc
+ size
);
4362 stub_reloc_idx
[nrelocs
] = i
;
4363 stub_reloc_offset
[nrelocs
++] = size
;
4373 stub_sec
->size
+= size
;
4375 /* Stub size has already been computed in arm_size_one_stub. Check
4377 BFD_ASSERT (size
== stub_entry
->stub_size
);
4379 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4380 if (stub_entry
->branch_type
== ST_BRANCH_TO_THUMB
)
4383 /* Assume there is at least one and at most MAXRELOCS entries to relocate
4385 BFD_ASSERT (nrelocs
!= 0 && nrelocs
<= MAXRELOCS
);
4387 for (i
= 0; i
< nrelocs
; i
++)
4388 if (template_sequence
[stub_reloc_idx
[i
]].r_type
== R_ARM_THM_JUMP24
4389 || template_sequence
[stub_reloc_idx
[i
]].r_type
== R_ARM_THM_JUMP19
4390 || template_sequence
[stub_reloc_idx
[i
]].r_type
== R_ARM_THM_CALL
4391 || template_sequence
[stub_reloc_idx
[i
]].r_type
== R_ARM_THM_XPC22
)
4393 Elf_Internal_Rela rel
;
4394 bfd_boolean unresolved_reloc
;
4395 char *error_message
;
4396 enum arm_st_branch_type branch_type
4397 = (template_sequence
[stub_reloc_idx
[i
]].r_type
!= R_ARM_THM_XPC22
4398 ? ST_BRANCH_TO_THUMB
: ST_BRANCH_TO_ARM
);
4399 bfd_vma points_to
= sym_value
+ stub_entry
->target_addend
;
4401 rel
.r_offset
= stub_entry
->stub_offset
+ stub_reloc_offset
[i
];
4402 rel
.r_info
= ELF32_R_INFO (0,
4403 template_sequence
[stub_reloc_idx
[i
]].r_type
);
4404 rel
.r_addend
= template_sequence
[stub_reloc_idx
[i
]].reloc_addend
;
4406 if (stub_entry
->stub_type
== arm_stub_a8_veneer_b_cond
&& i
== 0)
4407 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4408 template should refer back to the instruction after the original
4410 points_to
= sym_value
;
4412 /* There may be unintended consequences if this is not true. */
4413 BFD_ASSERT (stub_entry
->h
== NULL
);
4415 /* Note: _bfd_final_link_relocate doesn't handle these relocations
4416 properly. We should probably use this function unconditionally,
4417 rather than only for certain relocations listed in the enclosing
4418 conditional, for the sake of consistency. */
4419 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4420 (template_sequence
[stub_reloc_idx
[i
]].r_type
),
4421 stub_bfd
, info
->output_bfd
, stub_sec
, stub_sec
->contents
, &rel
,
4422 points_to
, info
, stub_entry
->target_section
, "", STT_FUNC
,
4423 branch_type
, (struct elf_link_hash_entry
*) stub_entry
->h
,
4424 &unresolved_reloc
, &error_message
);
4428 Elf_Internal_Rela rel
;
4429 bfd_boolean unresolved_reloc
;
4430 char *error_message
;
4431 bfd_vma points_to
= sym_value
+ stub_entry
->target_addend
4432 + template_sequence
[stub_reloc_idx
[i
]].reloc_addend
;
4434 rel
.r_offset
= stub_entry
->stub_offset
+ stub_reloc_offset
[i
];
4435 rel
.r_info
= ELF32_R_INFO (0,
4436 template_sequence
[stub_reloc_idx
[i
]].r_type
);
4439 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4440 (template_sequence
[stub_reloc_idx
[i
]].r_type
),
4441 stub_bfd
, info
->output_bfd
, stub_sec
, stub_sec
->contents
, &rel
,
4442 points_to
, info
, stub_entry
->target_section
, "", STT_FUNC
,
4443 stub_entry
->branch_type
,
4444 (struct elf_link_hash_entry
*) stub_entry
->h
, &unresolved_reloc
,
/* Calculate the template, template size and instruction size for a stub.
   Return value is the instruction size.  */

static unsigned int
find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
			     const insn_sequence **stub_template,
			     int *stub_template_size)
{
  const insn_sequence *template_sequence = NULL;
  int template_size = 0, i;
  unsigned int size;

  template_sequence = stub_definitions[stub_type].template_sequence;
  if (stub_template)
    *stub_template = template_sequence;

  template_size = stub_definitions[stub_type].template_size;
  if (stub_template_size)
    *stub_template_size = template_size;

  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  size += 2;
	  break;

	case ARM_TYPE:
	case THUMB32_TYPE:
	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  return size;
}
/* As above, but don't actually build the stub.  Just bump offset so
   we know stub section sizes.  */

static bfd_boolean
arm_size_one_stub (struct bfd_hash_entry *gen_entry,
		   void *in_arg ATTRIBUTE_UNUSED)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  const insn_sequence *template_sequence;
  int template_size, size;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;

  BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
	     && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));

  size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
				      &template_size);

  stub_entry->stub_size = size;
  stub_entry->stub_template = template_sequence;
  stub_entry->stub_template_size = template_size;

  size = (size + 7) & ~7;
  stub_entry->stub_sec->size += size;

  return TRUE;
}
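/* Example of the rounding above: a 12-byte interworking stub reserves
   16 bytes in its stub section ((12 + 7) & ~7 == 16), so every stub
   starts on an 8-byte boundary regardless of the previous stub's
   size.  */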
4526 /* External entry points for sizing and building linker stubs. */
4528 /* Set up various things so that we can make a list of input sections
4529 for each output section included in the link. Returns -1 on error,
4530 0 when no stubs will be needed, and 1 on success. */
4533 elf32_arm_setup_section_lists (bfd
*output_bfd
,
4534 struct bfd_link_info
*info
)
4537 unsigned int bfd_count
;
4538 unsigned int top_id
, top_index
;
4540 asection
**input_list
, **list
;
4542 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
4546 if (! is_elf_hash_table (htab
))
4549 /* Count the number of input BFDs and find the top input section id. */
4550 for (input_bfd
= info
->input_bfds
, bfd_count
= 0, top_id
= 0;
4552 input_bfd
= input_bfd
->link
.next
)
4555 for (section
= input_bfd
->sections
;
4557 section
= section
->next
)
4559 if (top_id
< section
->id
)
4560 top_id
= section
->id
;
4563 htab
->bfd_count
= bfd_count
;
4565 amt
= sizeof (struct map_stub
) * (top_id
+ 1);
4566 htab
->stub_group
= (struct map_stub
*) bfd_zmalloc (amt
);
4567 if (htab
->stub_group
== NULL
)
4569 htab
->top_id
= top_id
;
4571 /* We can't use output_bfd->section_count here to find the top output
4572 section index as some sections may have been removed, and
4573 _bfd_strip_section_from_output doesn't renumber the indices. */
4574 for (section
= output_bfd
->sections
, top_index
= 0;
4576 section
= section
->next
)
4578 if (top_index
< section
->index
)
4579 top_index
= section
->index
;
4582 htab
->top_index
= top_index
;
4583 amt
= sizeof (asection
*) * (top_index
+ 1);
4584 input_list
= (asection
**) bfd_malloc (amt
);
4585 htab
->input_list
= input_list
;
4586 if (input_list
== NULL
)
4589 /* For sections we aren't interested in, mark their entries with a
4590 value we can check later. */
4591 list
= input_list
+ top_index
;
4593 *list
= bfd_abs_section_ptr
;
4594 while (list
-- != input_list
);
4596 for (section
= output_bfd
->sections
;
4598 section
= section
->next
)
4600 if ((section
->flags
& SEC_CODE
) != 0)
4601 input_list
[section
->index
] = NULL
;
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
4637 /* See whether we can group stub sections together. Grouping stub
4638 sections may result in fewer stubs. More importantly, we need to
4639 put all .init* and .fini* stubs at the end of the .init or
4640 .fini output sections respectively, because glibc splits the
4641 _init and _fini functions into multiple parts. Putting a stub in
4642 the middle of a function is not a good idea. */
4645 group_sections (struct elf32_arm_link_hash_table
*htab
,
4646 bfd_size_type stub_group_size
,
4647 bfd_boolean stubs_always_after_branch
)
4649 asection
**list
= htab
->input_list
;
4653 asection
*tail
= *list
;
4656 if (tail
== bfd_abs_section_ptr
)
4659 /* Reverse the list: we must avoid placing stubs at the
4660 beginning of the section because the beginning of the text
4661 section may be required for an interrupt vector in bare metal
4663 #define NEXT_SEC PREV_SEC
4665 while (tail
!= NULL
)
4667 /* Pop from tail. */
4668 asection
*item
= tail
;
4669 tail
= PREV_SEC (item
);
4672 NEXT_SEC (item
) = head
;
4676 while (head
!= NULL
)
4680 bfd_vma stub_group_start
= head
->output_offset
;
4681 bfd_vma end_of_next
;
4684 while (NEXT_SEC (curr
) != NULL
)
4686 next
= NEXT_SEC (curr
);
4687 end_of_next
= next
->output_offset
+ next
->size
;
4688 if (end_of_next
- stub_group_start
>= stub_group_size
)
4689 /* End of NEXT is too far from start, so stop. */
4691 /* Add NEXT to the group. */
4695 /* OK, the size from the start to the start of CURR is less
4696 than stub_group_size and thus can be handled by one stub
4697 section. (Or the head section is itself larger than
4698 stub_group_size, in which case we may be toast.)
4699 We should really be keeping track of the total size of
4700 stubs added here, as stubs contribute to the final output
4704 next
= NEXT_SEC (head
);
4705 /* Set up this stub group. */
4706 htab
->stub_group
[head
->id
].link_sec
= curr
;
4708 while (head
!= curr
&& (head
= next
) != NULL
);
4710 /* But wait, there's more! Input sections up to stub_group_size
4711 bytes after the stub section can be handled by it too. */
4712 if (!stubs_always_after_branch
)
4714 stub_group_start
= curr
->output_offset
+ curr
->size
;
4716 while (next
!= NULL
)
4718 end_of_next
= next
->output_offset
+ next
->size
;
4719 if (end_of_next
- stub_group_start
>= stub_group_size
)
4720 /* End of NEXT is too far from stubs, so stop. */
4722 /* Add NEXT to the stub group. */
4724 next
= NEXT_SEC (head
);
4725 htab
->stub_group
[head
->id
].link_sec
= curr
;
4731 while (list
++ != htab
->input_list
+ htab
->top_index
);
4733 free (htab
->input_list
);
/* Comparison function for sorting/searching relocations relating to Cortex-A8
   erratum workarounds.  */

static int
a8_reloc_compare (const void *a, const void *b)
{
  const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
  const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;

  if (ra->from < rb->from)
    return -1;
  else if (ra->from > rb->from)
    return 1;
  else
    return 0;
}

static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
						    const char *, char **);
4758 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4759 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4760 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4764 cortex_a8_erratum_scan (bfd
*input_bfd
,
4765 struct bfd_link_info
*info
,
4766 struct a8_erratum_fix
**a8_fixes_p
,
4767 unsigned int *num_a8_fixes_p
,
4768 unsigned int *a8_fix_table_size_p
,
4769 struct a8_erratum_reloc
*a8_relocs
,
4770 unsigned int num_a8_relocs
,
4771 unsigned prev_num_a8_fixes
,
4772 bfd_boolean
*stub_changed_p
)
4775 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
4776 struct a8_erratum_fix
*a8_fixes
= *a8_fixes_p
;
4777 unsigned int num_a8_fixes
= *num_a8_fixes_p
;
4778 unsigned int a8_fix_table_size
= *a8_fix_table_size_p
;
4783 for (section
= input_bfd
->sections
;
4785 section
= section
->next
)
4787 bfd_byte
*contents
= NULL
;
4788 struct _arm_elf_section_data
*sec_data
;
4792 if (elf_section_type (section
) != SHT_PROGBITS
4793 || (elf_section_flags (section
) & SHF_EXECINSTR
) == 0
4794 || (section
->flags
& SEC_EXCLUDE
) != 0
4795 || (section
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
)
4796 || (section
->output_section
== bfd_abs_section_ptr
))
4799 base_vma
= section
->output_section
->vma
+ section
->output_offset
;
4801 if (elf_section_data (section
)->this_hdr
.contents
!= NULL
)
4802 contents
= elf_section_data (section
)->this_hdr
.contents
;
4803 else if (! bfd_malloc_and_get_section (input_bfd
, section
, &contents
))
4806 sec_data
= elf32_arm_section_data (section
);
4808 for (span
= 0; span
< sec_data
->mapcount
; span
++)
4810 unsigned int span_start
= sec_data
->map
[span
].vma
;
4811 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
4812 ? section
->size
: sec_data
->map
[span
+ 1].vma
;
4814 char span_type
= sec_data
->map
[span
].type
;
4815 bfd_boolean last_was_32bit
= FALSE
, last_was_branch
= FALSE
;
4817 if (span_type
!= 't')
4820 /* Span is entirely within a single 4KB region: skip scanning. */
4821 if (((base_vma
+ span_start
) & ~0xfff)
4822 == ((base_vma
+ span_end
) & ~0xfff))
4825 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4827 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4828 * The branch target is in the same 4KB region as the
4829 first half of the branch.
4830 * The instruction before the branch is a 32-bit
4831 length non-branch instruction. */
4832 for (i
= span_start
; i
< span_end
;)
4834 unsigned int insn
= bfd_getl16 (&contents
[i
]);
4835 bfd_boolean insn_32bit
= FALSE
, is_blx
= FALSE
, is_b
= FALSE
;
4836 bfd_boolean is_bl
= FALSE
, is_bcc
= FALSE
, is_32bit_branch
;
4838 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
4843 /* Load the rest of the insn (in manual-friendly order). */
4844 insn
= (insn
<< 16) | bfd_getl16 (&contents
[i
+ 2]);
4846 /* Encoding T4: B<c>.W. */
4847 is_b
= (insn
& 0xf800d000) == 0xf0009000;
4848 /* Encoding T1: BL<c>.W. */
4849 is_bl
= (insn
& 0xf800d000) == 0xf000d000;
4850 /* Encoding T2: BLX<c>.W. */
4851 is_blx
= (insn
& 0xf800d000) == 0xf000c000;
4852 /* Encoding T3: B<c>.W (not permitted in IT block). */
4853 is_bcc
= (insn
& 0xf800d000) == 0xf0008000
4854 && (insn
& 0x07f00000) != 0x03800000;
4857 is_32bit_branch
= is_b
|| is_bl
|| is_blx
|| is_bcc
;
4859 if (((base_vma
+ i
) & 0xfff) == 0xffe
4863 && ! last_was_branch
)
4865 bfd_signed_vma offset
= 0;
4866 bfd_boolean force_target_arm
= FALSE
;
4867 bfd_boolean force_target_thumb
= FALSE
;
4869 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
4870 struct a8_erratum_reloc key
, *found
;
4871 bfd_boolean use_plt
= FALSE
;
4873 key
.from
= base_vma
+ i
;
4874 found
= (struct a8_erratum_reloc
*)
4875 bsearch (&key
, a8_relocs
, num_a8_relocs
,
4876 sizeof (struct a8_erratum_reloc
),
4881 char *error_message
= NULL
;
4882 struct elf_link_hash_entry
*entry
;
4884 /* We don't care about the error returned from this
4885 function, only if there is glue or not. */
4886 entry
= find_thumb_glue (info
, found
->sym_name
,
4890 found
->non_a8_stub
= TRUE
;
4892 /* Keep a simpler condition, for the sake of clarity. */
4893 if (htab
->root
.splt
!= NULL
&& found
->hash
!= NULL
4894 && found
->hash
->root
.plt
.offset
!= (bfd_vma
) -1)
4897 if (found
->r_type
== R_ARM_THM_CALL
)
4899 if (found
->branch_type
== ST_BRANCH_TO_ARM
4901 force_target_arm
= TRUE
;
4903 force_target_thumb
= TRUE
;
4907 /* Check if we have an offending branch instruction. */
4909 if (found
&& found
->non_a8_stub
)
4910 /* We've already made a stub for this instruction, e.g.
4911 it's a long branch or a Thumb->ARM stub. Assume that
4912 stub will suffice to work around the A8 erratum (see
4913 setting of always_after_branch above). */
4917 offset
= (insn
& 0x7ff) << 1;
4918 offset
|= (insn
& 0x3f0000) >> 4;
4919 offset
|= (insn
& 0x2000) ? 0x40000 : 0;
4920 offset
|= (insn
& 0x800) ? 0x80000 : 0;
4921 offset
|= (insn
& 0x4000000) ? 0x100000 : 0;
4922 if (offset
& 0x100000)
4923 offset
|= ~ ((bfd_signed_vma
) 0xfffff);
4924 stub_type
= arm_stub_a8_veneer_b_cond
;
4926 else if (is_b
|| is_bl
|| is_blx
)
4928 int s
= (insn
& 0x4000000) != 0;
4929 int j1
= (insn
& 0x2000) != 0;
4930 int j2
= (insn
& 0x800) != 0;
4934 offset
= (insn
& 0x7ff) << 1;
4935 offset
|= (insn
& 0x3ff0000) >> 4;
4939 if (offset
& 0x1000000)
4940 offset
|= ~ ((bfd_signed_vma
) 0xffffff);
4943 offset
&= ~ ((bfd_signed_vma
) 3);
4945 stub_type
= is_blx
? arm_stub_a8_veneer_blx
:
4946 is_bl
? arm_stub_a8_veneer_bl
: arm_stub_a8_veneer_b
;
4949 if (stub_type
!= arm_stub_none
)
4951 bfd_vma pc_for_insn
= base_vma
+ i
+ 4;
4953 /* The original instruction is a BL, but the target is
4954 an ARM instruction. If we were not making a stub,
4955 the BL would have been converted to a BLX. Use the
4956 BLX stub instead in that case. */
4957 if (htab
->use_blx
&& force_target_arm
4958 && stub_type
== arm_stub_a8_veneer_bl
)
4960 stub_type
= arm_stub_a8_veneer_blx
;
4964 /* Conversely, if the original instruction was
4965 BLX but the target is Thumb mode, use the BL
4967 else if (force_target_thumb
4968 && stub_type
== arm_stub_a8_veneer_blx
)
4970 stub_type
= arm_stub_a8_veneer_bl
;
4976 pc_for_insn
&= ~ ((bfd_vma
) 3);
4978 /* If we found a relocation, use the proper destination,
4979 not the offset in the (unrelocated) instruction.
4980 Note this is always done if we switched the stub type
4984 (bfd_signed_vma
) (found
->destination
- pc_for_insn
);
4986 /* If the stub will use a Thumb-mode branch to a
4987 PLT target, redirect it to the preceding Thumb
4989 if (stub_type
!= arm_stub_a8_veneer_blx
&& use_plt
)
4990 offset
-= PLT_THUMB_STUB_SIZE
;
4992 target
= pc_for_insn
+ offset
;
4994 /* The BLX stub is ARM-mode code. Adjust the offset to
4995 take the different PC value (+8 instead of +4) into
4997 if (stub_type
== arm_stub_a8_veneer_blx
)
5000 if (((base_vma
+ i
) & ~0xfff) == (target
& ~0xfff))
5002 char *stub_name
= NULL
;
5004 if (num_a8_fixes
== a8_fix_table_size
)
5006 a8_fix_table_size
*= 2;
5007 a8_fixes
= (struct a8_erratum_fix
*)
5008 bfd_realloc (a8_fixes
,
5009 sizeof (struct a8_erratum_fix
)
5010 * a8_fix_table_size
);
5013 if (num_a8_fixes
< prev_num_a8_fixes
)
5015 /* If we're doing a subsequent scan,
5016 check if we've found the same fix as
5017 before, and try and reuse the stub
5019 stub_name
= a8_fixes
[num_a8_fixes
].stub_name
;
5020 if ((a8_fixes
[num_a8_fixes
].section
!= section
)
5021 || (a8_fixes
[num_a8_fixes
].offset
!= i
))
5025 *stub_changed_p
= TRUE
;
5031 stub_name
= (char *) bfd_malloc (8 + 1 + 8 + 1);
5032 if (stub_name
!= NULL
)
5033 sprintf (stub_name
, "%x:%x", section
->id
, i
);
5036 a8_fixes
[num_a8_fixes
].input_bfd
= input_bfd
;
5037 a8_fixes
[num_a8_fixes
].section
= section
;
5038 a8_fixes
[num_a8_fixes
].offset
= i
;
5039 a8_fixes
[num_a8_fixes
].addend
= offset
;
5040 a8_fixes
[num_a8_fixes
].orig_insn
= insn
;
5041 a8_fixes
[num_a8_fixes
].stub_name
= stub_name
;
5042 a8_fixes
[num_a8_fixes
].stub_type
= stub_type
;
5043 a8_fixes
[num_a8_fixes
].branch_type
=
5044 is_blx
? ST_BRANCH_TO_ARM
: ST_BRANCH_TO_THUMB
;
5051 i
+= insn_32bit
? 4 : 2;
5052 last_was_32bit
= insn_32bit
;
5053 last_was_branch
= is_32bit_branch
;
5057 if (elf_section_data (section
)->this_hdr
.contents
== NULL
)
5061 *a8_fixes_p
= a8_fixes
;
5062 *num_a8_fixes_p
= num_a8_fixes
;
5063 *a8_fix_table_size_p
= a8_fix_table_size
;
5068 /* Determine and set the size of the stub section for a final link.
5070 The basic idea here is to examine all the relocations looking for
5071 PC-relative calls to a target that is unreachable with a "bl"
5075 elf32_arm_size_stubs (bfd
*output_bfd
,
5077 struct bfd_link_info
*info
,
5078 bfd_signed_vma group_size
,
5079 asection
* (*add_stub_section
) (const char *, asection
*,
5081 void (*layout_sections_again
) (void))
5083 bfd_size_type stub_group_size
;
5084 bfd_boolean stubs_always_after_branch
;
5085 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5086 struct a8_erratum_fix
*a8_fixes
= NULL
;
5087 unsigned int num_a8_fixes
= 0, a8_fix_table_size
= 10;
5088 struct a8_erratum_reloc
*a8_relocs
= NULL
;
5089 unsigned int num_a8_relocs
= 0, a8_reloc_table_size
= 10, i
;
5094 if (htab
->fix_cortex_a8
)
5096 a8_fixes
= (struct a8_erratum_fix
*)
5097 bfd_zmalloc (sizeof (struct a8_erratum_fix
) * a8_fix_table_size
);
5098 a8_relocs
= (struct a8_erratum_reloc
*)
5099 bfd_zmalloc (sizeof (struct a8_erratum_reloc
) * a8_reloc_table_size
);
5102 /* Propagate mach to stub bfd, because it may not have been
5103 finalized when we created stub_bfd. */
5104 bfd_set_arch_mach (stub_bfd
, bfd_get_arch (output_bfd
),
5105 bfd_get_mach (output_bfd
));
5107 /* Stash our params away. */
5108 htab
->stub_bfd
= stub_bfd
;
5109 htab
->add_stub_section
= add_stub_section
;
5110 htab
->layout_sections_again
= layout_sections_again
;
5111 stubs_always_after_branch
= group_size
< 0;
5113 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5114 as the first half of a 32-bit branch straddling two 4K pages. This is a
5115 crude way of enforcing that. */
5116 if (htab
->fix_cortex_a8
)
5117 stubs_always_after_branch
= 1;
5120 stub_group_size
= -group_size
;
5122 stub_group_size
= group_size
;
5124 if (stub_group_size
== 1)
5126 /* Default values. */
5127 /* Thumb branch range is +-4MB has to be used as the default
5128 maximum size (a given section can contain both ARM and Thumb
5129 code, so the worst case has to be taken into account).
5131 This value is 24K less than that, which allows for 2025
5132 12-byte stubs. If we exceed that, then we will fail to link.
5133 The user will have to relink with an explicit group size
5135 stub_group_size
= 4170000;
5138 group_sections (htab
, stub_group_size
, stubs_always_after_branch
);
5140 /* If we're applying the cortex A8 fix, we need to determine the
5141 program header size now, because we cannot change it later --
5142 that could alter section placements. Notice the A8 erratum fix
5143 ends up requiring the section addresses to remain unchanged
5144 modulo the page size. That's something we cannot represent
5145 inside BFD, and we don't want to force the section alignment to
5146 be the page size. */
5147 if (htab
->fix_cortex_a8
)
5148 (*htab
->layout_sections_again
) ();
5153 unsigned int bfd_indx
;
5155 bfd_boolean stub_changed
= FALSE
;
5156 unsigned prev_num_a8_fixes
= num_a8_fixes
;
5159 for (input_bfd
= info
->input_bfds
, bfd_indx
= 0;
5161 input_bfd
= input_bfd
->link
.next
, bfd_indx
++)
5163 Elf_Internal_Shdr
*symtab_hdr
;
5165 Elf_Internal_Sym
*local_syms
= NULL
;
5167 if (!is_arm_elf (input_bfd
))
5172 /* We'll need the symbol table in a second. */
5173 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
5174 if (symtab_hdr
->sh_info
== 0)
5177 /* Walk over each section attached to the input bfd. */
5178 for (section
= input_bfd
->sections
;
5180 section
= section
->next
)
5182 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
5184 /* If there aren't any relocs, then there's nothing more
5186 if ((section
->flags
& SEC_RELOC
) == 0
5187 || section
->reloc_count
== 0
5188 || (section
->flags
& SEC_CODE
) == 0)
5191 /* If this section is a link-once section that will be
5192 discarded, then don't create any stubs. */
5193 if (section
->output_section
== NULL
5194 || section
->output_section
->owner
!= output_bfd
)
5197 /* Get the relocs. */
5199 = _bfd_elf_link_read_relocs (input_bfd
, section
, NULL
,
5200 NULL
, info
->keep_memory
);
5201 if (internal_relocs
== NULL
)
5202 goto error_ret_free_local
;
5204 /* Now examine each relocation. */
5205 irela
= internal_relocs
;
5206 irelaend
= irela
+ section
->reloc_count
;
5207 for (; irela
< irelaend
; irela
++)
5209 unsigned int r_type
, r_indx
;
5210 enum elf32_arm_stub_type stub_type
;
5211 struct elf32_arm_stub_hash_entry
*stub_entry
;
5214 bfd_vma destination
;
5215 struct elf32_arm_link_hash_entry
*hash
;
5216 const char *sym_name
;
5218 const asection
*id_sec
;
5219 unsigned char st_type
;
5220 enum arm_st_branch_type branch_type
;
5221 bfd_boolean created_stub
= FALSE
;
5223 r_type
= ELF32_R_TYPE (irela
->r_info
);
5224 r_indx
= ELF32_R_SYM (irela
->r_info
);
5226 if (r_type
>= (unsigned int) R_ARM_max
)
5228 bfd_set_error (bfd_error_bad_value
);
5229 error_ret_free_internal
:
5230 if (elf_section_data (section
)->relocs
== NULL
)
5231 free (internal_relocs
);
5232 goto error_ret_free_local
;
5236 if (r_indx
>= symtab_hdr
->sh_info
)
5237 hash
= elf32_arm_hash_entry
5238 (elf_sym_hashes (input_bfd
)
5239 [r_indx
- symtab_hdr
->sh_info
]);
5241 /* Only look for stubs on branch instructions, or
5242 non-relaxed TLSCALL */
5243 if ((r_type
!= (unsigned int) R_ARM_CALL
)
5244 && (r_type
!= (unsigned int) R_ARM_THM_CALL
)
5245 && (r_type
!= (unsigned int) R_ARM_JUMP24
)
5246 && (r_type
!= (unsigned int) R_ARM_THM_JUMP19
)
5247 && (r_type
!= (unsigned int) R_ARM_THM_XPC22
)
5248 && (r_type
!= (unsigned int) R_ARM_THM_JUMP24
)
5249 && (r_type
!= (unsigned int) R_ARM_PLT32
)
5250 && !((r_type
== (unsigned int) R_ARM_TLS_CALL
5251 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
5252 && r_type
== elf32_arm_tls_transition
5253 (info
, r_type
, &hash
->root
)
5254 && ((hash
? hash
->tls_type
5255 : (elf32_arm_local_got_tls_type
5256 (input_bfd
)[r_indx
]))
5257 & GOT_TLS_GDESC
) != 0))
5260 /* Now determine the call target, its name, value,
5267 if (r_type
== (unsigned int) R_ARM_TLS_CALL
5268 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
5270 /* A non-relaxed TLS call. The target is the
5271 plt-resident trampoline and nothing to do
5273 BFD_ASSERT (htab
->tls_trampoline
> 0);
5274 sym_sec
= htab
->root
.splt
;
5275 sym_value
= htab
->tls_trampoline
;
5278 branch_type
= ST_BRANCH_TO_ARM
;
5282 /* It's a local symbol. */
5283 Elf_Internal_Sym
*sym
;
5285 if (local_syms
== NULL
)
5288 = (Elf_Internal_Sym
*) symtab_hdr
->contents
;
5289 if (local_syms
== NULL
)
5291 = bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
5292 symtab_hdr
->sh_info
, 0,
5294 if (local_syms
== NULL
)
5295 goto error_ret_free_internal
;
5298 sym
= local_syms
+ r_indx
;
5299 if (sym
->st_shndx
== SHN_UNDEF
)
5300 sym_sec
= bfd_und_section_ptr
;
5301 else if (sym
->st_shndx
== SHN_ABS
)
5302 sym_sec
= bfd_abs_section_ptr
;
5303 else if (sym
->st_shndx
== SHN_COMMON
)
5304 sym_sec
= bfd_com_section_ptr
;
5307 bfd_section_from_elf_index (input_bfd
, sym
->st_shndx
);
5310 /* This is an undefined symbol. It can never
5314 if (ELF_ST_TYPE (sym
->st_info
) != STT_SECTION
)
5315 sym_value
= sym
->st_value
;
5316 destination
= (sym_value
+ irela
->r_addend
5317 + sym_sec
->output_offset
5318 + sym_sec
->output_section
->vma
);
5319 st_type
= ELF_ST_TYPE (sym
->st_info
);
5320 branch_type
= ARM_SYM_BRANCH_TYPE (sym
);
5322 = bfd_elf_string_from_elf_section (input_bfd
,
5323 symtab_hdr
->sh_link
,
5328 /* It's an external symbol. */
5329 while (hash
->root
.root
.type
== bfd_link_hash_indirect
5330 || hash
->root
.root
.type
== bfd_link_hash_warning
)
5331 hash
= ((struct elf32_arm_link_hash_entry
*)
5332 hash
->root
.root
.u
.i
.link
);
5334 if (hash
->root
.root
.type
== bfd_link_hash_defined
5335 || hash
->root
.root
.type
== bfd_link_hash_defweak
)
5337 sym_sec
= hash
->root
.root
.u
.def
.section
;
5338 sym_value
= hash
->root
.root
.u
.def
.value
;
5340 struct elf32_arm_link_hash_table
*globals
=
5341 elf32_arm_hash_table (info
);
5343 /* For a destination in a shared library,
5344 use the PLT stub as target address to
5345 decide whether a branch stub is
5348 && globals
->root
.splt
!= NULL
5350 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5352 sym_sec
= globals
->root
.splt
;
5353 sym_value
= hash
->root
.plt
.offset
;
5354 if (sym_sec
->output_section
!= NULL
)
5355 destination
= (sym_value
5356 + sym_sec
->output_offset
5357 + sym_sec
->output_section
->vma
);
5359 else if (sym_sec
->output_section
!= NULL
)
5360 destination
= (sym_value
+ irela
->r_addend
5361 + sym_sec
->output_offset
5362 + sym_sec
->output_section
->vma
);
5364 else if ((hash
->root
.root
.type
== bfd_link_hash_undefined
)
5365 || (hash
->root
.root
.type
== bfd_link_hash_undefweak
))
5367 /* For a shared library, use the PLT stub as
5368 target address to decide whether a long
5369 branch stub is needed.
5370 For absolute code, they cannot be handled. */
5371 struct elf32_arm_link_hash_table
*globals
=
5372 elf32_arm_hash_table (info
);
5375 && globals
->root
.splt
!= NULL
5377 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5379 sym_sec
= globals
->root
.splt
;
5380 sym_value
= hash
->root
.plt
.offset
;
5381 if (sym_sec
->output_section
!= NULL
)
5382 destination
= (sym_value
5383 + sym_sec
->output_offset
5384 + sym_sec
->output_section
->vma
);
5391 bfd_set_error (bfd_error_bad_value
);
5392 goto error_ret_free_internal
;
5394 st_type
= hash
->root
.type
;
5395 branch_type
= hash
->root
.target_internal
;
5396 sym_name
= hash
->root
.root
.root
.string
;
5401 /* Determine what (if any) linker stub is needed. */
5402 stub_type
= arm_type_of_stub (info
, section
, irela
,
5403 st_type
, &branch_type
,
5404 hash
, destination
, sym_sec
,
5405 input_bfd
, sym_name
);
5406 if (stub_type
== arm_stub_none
)
5409 /* Support for grouping stub sections. */
5410 id_sec
= htab
->stub_group
[section
->id
].link_sec
;
5412 /* Get the name of this stub. */
5413 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, hash
,
5416 goto error_ret_free_internal
;
5418 /* We've either created a stub for this reloc already,
5419 or we are about to. */
5420 created_stub
= TRUE
;
5422 stub_entry
= arm_stub_hash_lookup
5423 (&htab
->stub_hash_table
, stub_name
,
5425 if (stub_entry
!= NULL
)
5427 /* The proper stub has already been created. */
5429 stub_entry
->target_value
= sym_value
;
5433 stub_entry
= elf32_arm_add_stub (stub_name
, section
,
5435 if (stub_entry
== NULL
)
5438 goto error_ret_free_internal
;
5441 stub_entry
->target_value
= sym_value
;
5442 stub_entry
->target_section
= sym_sec
;
5443 stub_entry
->stub_type
= stub_type
;
5444 stub_entry
->h
= hash
;
5445 stub_entry
->branch_type
= branch_type
;
5447 if (sym_name
== NULL
)
5448 sym_name
= "unnamed";
5449 stub_entry
->output_name
= (char *)
5450 bfd_alloc (htab
->stub_bfd
,
5451 sizeof (THUMB2ARM_GLUE_ENTRY_NAME
)
5452 + strlen (sym_name
));
5453 if (stub_entry
->output_name
== NULL
)
5456 goto error_ret_free_internal
;
5459 /* For historical reasons, use the existing names for
5460 ARM-to-Thumb and Thumb-to-ARM stubs. */
5461 if ((r_type
== (unsigned int) R_ARM_THM_CALL
5462 || r_type
== (unsigned int) R_ARM_THM_JUMP24
5463 || r_type
== (unsigned int) R_ARM_THM_JUMP19
)
5464 && branch_type
== ST_BRANCH_TO_ARM
)
5465 sprintf (stub_entry
->output_name
,
5466 THUMB2ARM_GLUE_ENTRY_NAME
, sym_name
);
5467 else if ((r_type
== (unsigned int) R_ARM_CALL
5468 || r_type
== (unsigned int) R_ARM_JUMP24
)
5469 && branch_type
== ST_BRANCH_TO_THUMB
)
5470 sprintf (stub_entry
->output_name
,
5471 ARM2THUMB_GLUE_ENTRY_NAME
, sym_name
);
5473 sprintf (stub_entry
->output_name
, STUB_ENTRY_NAME
,
5476 stub_changed
= TRUE
;
5480 /* Look for relocations which might trigger Cortex-A8
5482 if (htab
->fix_cortex_a8
5483 && (r_type
== (unsigned int) R_ARM_THM_JUMP24
5484 || r_type
== (unsigned int) R_ARM_THM_JUMP19
5485 || r_type
== (unsigned int) R_ARM_THM_CALL
5486 || r_type
== (unsigned int) R_ARM_THM_XPC22
))
5488 bfd_vma from
= section
->output_section
->vma
5489 + section
->output_offset
5492 if ((from
& 0xfff) == 0xffe)
5494 /* Found a candidate. Note we haven't checked the
5495 destination is within 4K here: if we do so (and
5496 don't create an entry in a8_relocs) we can't tell
5497 that a branch should have been relocated when
5499 if (num_a8_relocs
== a8_reloc_table_size
)
5501 a8_reloc_table_size
*= 2;
5502 a8_relocs
= (struct a8_erratum_reloc
*)
5503 bfd_realloc (a8_relocs
,
5504 sizeof (struct a8_erratum_reloc
)
5505 * a8_reloc_table_size
);
5508 a8_relocs
[num_a8_relocs
].from
= from
;
5509 a8_relocs
[num_a8_relocs
].destination
= destination
;
5510 a8_relocs
[num_a8_relocs
].r_type
= r_type
;
5511 a8_relocs
[num_a8_relocs
].branch_type
= branch_type
;
5512 a8_relocs
[num_a8_relocs
].sym_name
= sym_name
;
5513 a8_relocs
[num_a8_relocs
].non_a8_stub
= created_stub
;
5514 a8_relocs
[num_a8_relocs
].hash
= hash
;
5521 /* We're done with the internal relocs, free them. */
5522 if (elf_section_data (section
)->relocs
== NULL
)
5523 free (internal_relocs
);
5526 if (htab
->fix_cortex_a8
)
5528 /* Sort relocs which might apply to Cortex-A8 erratum. */
5529 qsort (a8_relocs
, num_a8_relocs
,
5530 sizeof (struct a8_erratum_reloc
),
5533 /* Scan for branches which might trigger Cortex-A8 erratum. */
5534 if (cortex_a8_erratum_scan (input_bfd
, info
, &a8_fixes
,
5535 &num_a8_fixes
, &a8_fix_table_size
,
5536 a8_relocs
, num_a8_relocs
,
5537 prev_num_a8_fixes
, &stub_changed
)
5539 goto error_ret_free_local
;
5543 if (prev_num_a8_fixes
!= num_a8_fixes
)
5544 stub_changed
= TRUE
;
5549 /* OK, we've added some stubs. Find out the new size of the
5551 for (stub_sec
= htab
->stub_bfd
->sections
;
5553 stub_sec
= stub_sec
->next
)
5555 /* Ignore non-stub sections. */
5556 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
5562 bfd_hash_traverse (&htab
->stub_hash_table
, arm_size_one_stub
, htab
);
5564 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5565 if (htab
->fix_cortex_a8
)
5566 for (i
= 0; i
< num_a8_fixes
; i
++)
5568 stub_sec
= elf32_arm_create_or_find_stub_sec (NULL
,
5569 a8_fixes
[i
].section
, htab
);
5571 if (stub_sec
== NULL
)
5572 goto error_ret_free_local
;
5575 += find_stub_size_and_template (a8_fixes
[i
].stub_type
, NULL
,
5580 /* Ask the linker to do its stuff. */
5581 (*htab
->layout_sections_again
) ();
5584 /* Add stubs for Cortex-A8 erratum fixes now. */
5585 if (htab
->fix_cortex_a8
)
5587 for (i
= 0; i
< num_a8_fixes
; i
++)
5589 struct elf32_arm_stub_hash_entry
*stub_entry
;
5590 char *stub_name
= a8_fixes
[i
].stub_name
;
5591 asection
*section
= a8_fixes
[i
].section
;
5592 unsigned int section_id
= a8_fixes
[i
].section
->id
;
5593 asection
*link_sec
= htab
->stub_group
[section_id
].link_sec
;
5594 asection
*stub_sec
= htab
->stub_group
[section_id
].stub_sec
;
5595 const insn_sequence
*template_sequence
;
5596 int template_size
, size
= 0;
5598 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
5600 if (stub_entry
== NULL
)
5602 (*_bfd_error_handler
) (_("%s: cannot create stub entry %s"),
5608 stub_entry
->stub_sec
= stub_sec
;
5609 stub_entry
->stub_offset
= 0;
5610 stub_entry
->id_sec
= link_sec
;
5611 stub_entry
->stub_type
= a8_fixes
[i
].stub_type
;
5612 stub_entry
->target_section
= a8_fixes
[i
].section
;
5613 stub_entry
->target_value
= a8_fixes
[i
].offset
;
5614 stub_entry
->target_addend
= a8_fixes
[i
].addend
;
5615 stub_entry
->orig_insn
= a8_fixes
[i
].orig_insn
;
5616 stub_entry
->branch_type
= a8_fixes
[i
].branch_type
;
5618 size
= find_stub_size_and_template (a8_fixes
[i
].stub_type
,
5622 stub_entry
->stub_size
= size
;
5623 stub_entry
->stub_template
= template_sequence
;
5624 stub_entry
->stub_template_size
= template_size
;
5627 /* Stash the Cortex-A8 erratum fix array for use later in
5628 elf32_arm_write_section(). */
5629 htab
->a8_erratum_fixes
= a8_fixes
;
5630 htab
->num_a8_erratum_fixes
= num_a8_fixes
;
5634 htab
->a8_erratum_fixes
= NULL
;
5635 htab
->num_a8_erratum_fixes
= 0;
5639 error_ret_free_local
:
/* Build all the stubs associated with the current output file.  The
   stubs are kept in a hash table attached to the main linker hash
   table.  We also set up the .plt entries for statically linked PIC
   functions here.  This function is called via arm_elf_finish in the
   linker.  */

bfd_boolean
elf32_arm_build_stubs (struct bfd_link_info *info)
{
  asection *stub_sec;
  struct bfd_hash_table *table;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  for (stub_sec = htab->stub_bfd->sections;
       stub_sec != NULL;
       stub_sec = stub_sec->next)
    {
      bfd_size_type size;

      /* Ignore non-stub sections.  */
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

      /* Allocate memory to hold the linker stubs.  */
      size = stub_sec->size;
      stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
      if (stub_sec->contents == NULL && size != 0)
	return FALSE;
      stub_sec->size = 0;
    }

  /* Build the stubs as directed by the stub hash table.  */
  table = &htab->stub_hash_table;
  bfd_hash_traverse (table, arm_build_one_stub, info);
  if (htab->fix_cortex_a8)
    {
      /* Place the cortex a8 stubs last.  */
      htab->fix_cortex_a8 = -1;
      bfd_hash_traverse (table, arm_build_one_stub, info);
    }

  return TRUE;
}
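/* Note on the double traversal above: during the first pass
   arm_build_one_stub skips any entry whose required alignment does not
   match the current sign of fix_cortex_a8, so ordinary long-branch
   stubs are laid out first; flipping fix_cortex_a8 to -1 and walking
   the table again then appends the 2-byte-aligned Cortex-A8 veneers at
   the end of each stub section.  */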
/* Locate the Thumb encoded calling stub for NAME.  */

static struct elf_link_hash_entry *
find_thumb_glue (struct bfd_link_info *link_info,
		 const char *name,
		 char **error_message)
{
  char *tmp_name;
  struct elf_link_hash_entry *hash;
  struct elf32_arm_link_hash_table *hash_table;

  /* We need a pointer to the armelf specific hash table.  */
  hash_table = elf32_arm_hash_table (link_info);
  if (hash_table == NULL)
    return NULL;

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);

  hash = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);

  if (hash == NULL
      && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
		   tmp_name, name) == -1)
    *error_message = (char *) bfd_errmsg (bfd_error_system_call);

  free (tmp_name);

  return hash;
}
/* Locate the ARM encoded calling stub for NAME.  */

static struct elf_link_hash_entry *
find_arm_glue (struct bfd_link_info *link_info,
	       const char *name,
	       char **error_message)
{
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct elf32_arm_link_hash_table *hash_table;

  /* We need a pointer to the elfarm specific hash table.  */
  hash_table = elf32_arm_hash_table (link_info);
  if (hash_table == NULL)
    return NULL;

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh == NULL
      && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
		   tmp_name, name) == -1)
    *error_message = (char *) bfd_errmsg (bfd_error_system_call);

  free (tmp_name);

  return myh;
}
/* ARM->Thumb glue (static images):

     .arm
   __func_from_arm:
     ldr r12, __func_addr
     bx  r12
   __func_addr:
     .word func    @ behave as if you saw a ARM_32 reloc.

   (v5t static images)
     .arm
   __func_from_arm:
     ldr pc, __func_addr
   __func_addr:
     .word func    @ behave as if you saw a ARM_32 reloc.

   (relocatable images)
     .arm
   __func_from_arm:
     ldr r12, __func_offset
     add r12, r12, pc
     bx  r12
   __func_offset:
     .word func - .   */

#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
static const insn32 a2t3_func_addr_insn = 0x00000001;

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;

/* Thumb->ARM:				Thumb->(non-interworking aware) ARM

     .thumb				.thumb
     .align 2				.align 2
   __func_from_thumb:		      __func_from_thumb:
     bx pc				push {r6}
     nop				ldr  r6, __func_addr
     .arm				mov  lr, pc
   __func_change_to_arm:		bx   r6
     b func				.arm
					;; back_to_thumb:
					ldmia r13! {r6}
					bx    lr
				      __func_addr:
					.word	func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;
static const insn16 t2a2_noop_insn = 0x46c0;
static const insn32 t2a3_b_insn = 0xea000000;

#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;
static const insn32 armbx2_moveq_insn = 0x01a0f000;
static const insn32 armbx3_bx_insn = 0xe12fff10;
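/* Decoding example for the BX veneer template above (illustrative; the
   register number is merged into each instruction when the veneer is
   emitted): with r0 the three words assemble to

       tst   r0, #1        @ 0xe3100001 - test the Thumb bit
       moveq pc, r0        @ 0x01a0f000 - plain return for ARM targets
       bx    r0            @ 0xe12fff10 - interworking branch otherwise

   which is why ARM_BX_VENEER_SIZE is 12 bytes.  */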
#ifndef ELFARM_NABI_C_INCLUDED
static void
arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
{
  asection * s;
  bfd_byte * contents;

  if (size == 0)
    {
      /* Do not include empty glue sections in the output.  */
      if (abfd != NULL)
	{
	  s = bfd_get_linker_section (abfd, name);
	  if (s != NULL)
	    s->flags |= SEC_EXCLUDE;
	}
      return;
    }

  BFD_ASSERT (abfd != NULL);

  s = bfd_get_linker_section (abfd, name);
  BFD_ASSERT (s != NULL);

  contents = (bfd_byte *) bfd_alloc (abfd, size);

  BFD_ASSERT (s->size == size);
  s->contents = contents;
}
bfd_boolean
bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
{
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->arm_glue_size,
				   ARM2THUMB_GLUE_SECTION_NAME);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->thumb_glue_size,
				   THUMB2ARM_GLUE_SECTION_NAME);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->vfp11_erratum_glue_size,
				   VFP11_ERRATUM_VENEER_SECTION_NAME);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->stm32l4xx_erratum_glue_size,
				   STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->bx_glue_size,
				   ARM_BX_GLUE_SECTION_NAME);

  return TRUE;
}
5893 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5894 returns the symbol identifying the stub. */
5896 static struct elf_link_hash_entry
*
5897 record_arm_to_thumb_glue (struct bfd_link_info
* link_info
,
5898 struct elf_link_hash_entry
* h
)
5900 const char * name
= h
->root
.root
.string
;
5903 struct elf_link_hash_entry
* myh
;
5904 struct bfd_link_hash_entry
* bh
;
5905 struct elf32_arm_link_hash_table
* globals
;
5909 globals
= elf32_arm_hash_table (link_info
);
5910 BFD_ASSERT (globals
!= NULL
);
5911 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
5913 s
= bfd_get_linker_section
5914 (globals
->bfd_of_glue_owner
, ARM2THUMB_GLUE_SECTION_NAME
);
5916 BFD_ASSERT (s
!= NULL
);
5918 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
5919 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
5921 BFD_ASSERT (tmp_name
);
5923 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
5925 myh
= elf_link_hash_lookup
5926 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
5930 /* We've already seen this guy. */
5935 /* The only trick here is using hash_table->arm_glue_size as the value.
5936 Even though the section isn't allocated yet, this is where we will be
5937 putting it. The +1 on the value marks that the stub has not been
5938 output yet - not that it is a Thumb function. */
5940 val
= globals
->arm_glue_size
+ 1;
5941 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
5942 tmp_name
, BSF_GLOBAL
, s
, val
,
5943 NULL
, TRUE
, FALSE
, &bh
);
5945 myh
= (struct elf_link_hash_entry
*) bh
;
5946 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
5947 myh
->forced_local
= 1;
5951 if (bfd_link_pic (link_info
)
5952 || globals
->root
.is_relocatable_executable
5953 || globals
->pic_veneer
)
5954 size
= ARM2THUMB_PIC_GLUE_SIZE
;
5955 else if (globals
->use_blx
)
5956 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
5958 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
5961 globals
->arm_glue_size
+= size
;
5966 /* Allocate space for ARMv4 BX veneers. */
5969 record_arm_bx_glue (struct bfd_link_info
* link_info
, int reg
)
5972 struct elf32_arm_link_hash_table
*globals
;
5974 struct elf_link_hash_entry
*myh
;
5975 struct bfd_link_hash_entry
*bh
;
5978 /* BX PC does not need a veneer. */
5982 globals
= elf32_arm_hash_table (link_info
);
5983 BFD_ASSERT (globals
!= NULL
);
5984 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
5986 /* Check if this veneer has already been allocated. */
5987 if (globals
->bx_glue_offset
[reg
])
5990 s
= bfd_get_linker_section
5991 (globals
->bfd_of_glue_owner
, ARM_BX_GLUE_SECTION_NAME
);
5993 BFD_ASSERT (s
!= NULL
);
5995 /* Add symbol for veneer. */
5997 bfd_malloc ((bfd_size_type
) strlen (ARM_BX_GLUE_ENTRY_NAME
) + 1);
5999 BFD_ASSERT (tmp_name
);
6001 sprintf (tmp_name
, ARM_BX_GLUE_ENTRY_NAME
, reg
);
6003 myh
= elf_link_hash_lookup
6004 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6006 BFD_ASSERT (myh
== NULL
);
6009 val
= globals
->bx_glue_size
;
6010 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
6011 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
6012 NULL
, TRUE
, FALSE
, &bh
);
6014 myh
= (struct elf_link_hash_entry
*) bh
;
6015 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6016 myh
->forced_local
= 1;
6018 s
->size
+= ARM_BX_VENEER_SIZE
;
6019 globals
->bx_glue_offset
[reg
] = globals
->bx_glue_size
| 2;
6020 globals
->bx_glue_size
+= ARM_BX_VENEER_SIZE
;
/* Add an entry to the code/data map for section SEC.  */

static void
elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
{
  struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
  unsigned int newidx;

  if (sec_data->map == NULL)
    {
      sec_data->map = (elf32_arm_section_map *)
	bfd_malloc (sizeof (elf32_arm_section_map));
      sec_data->mapcount = 0;
      sec_data->mapsize = 1;
    }

  newidx = sec_data->mapcount++;

  if (sec_data->mapcount > sec_data->mapsize)
    {
      sec_data->mapsize *= 2;
      sec_data->map = (elf32_arm_section_map *)
	bfd_realloc_or_free (sec_data->map, sec_data->mapsize
			     * sizeof (elf32_arm_section_map));
    }

  if (sec_data->map)
    {
      sec_data->map[newidx].vma = vma;
      sec_data->map[newidx].type = type;
    }
}
6058 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
6059 veneers are handled for now. */
6062 record_vfp11_erratum_veneer (struct bfd_link_info
*link_info
,
6063 elf32_vfp11_erratum_list
*branch
,
6065 asection
*branch_sec
,
6066 unsigned int offset
)
6069 struct elf32_arm_link_hash_table
*hash_table
;
6071 struct elf_link_hash_entry
*myh
;
6072 struct bfd_link_hash_entry
*bh
;
6074 struct _arm_elf_section_data
*sec_data
;
6075 elf32_vfp11_erratum_list
*newerr
;
6077 hash_table
= elf32_arm_hash_table (link_info
);
6078 BFD_ASSERT (hash_table
!= NULL
);
6079 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
6081 s
= bfd_get_linker_section
6082 (hash_table
->bfd_of_glue_owner
, VFP11_ERRATUM_VENEER_SECTION_NAME
);
6084 sec_data
= elf32_arm_section_data (s
);
6086 BFD_ASSERT (s
!= NULL
);
6088 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
6089 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
6091 BFD_ASSERT (tmp_name
);
6093 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
6094 hash_table
->num_vfp11_fixes
);
6096 myh
= elf_link_hash_lookup
6097 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6099 BFD_ASSERT (myh
== NULL
);
6102 val
= hash_table
->vfp11_erratum_glue_size
;
6103 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
6104 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
6105 NULL
, TRUE
, FALSE
, &bh
);
6107 myh
= (struct elf_link_hash_entry
*) bh
;
6108 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6109 myh
->forced_local
= 1;
6111 /* Link veneer back to calling location. */
6112 sec_data
->erratumcount
+= 1;
6113 newerr
= (elf32_vfp11_erratum_list
*)
6114 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
6116 newerr
->type
= VFP11_ERRATUM_ARM_VENEER
;
6118 newerr
->u
.v
.branch
= branch
;
6119 newerr
->u
.v
.id
= hash_table
->num_vfp11_fixes
;
6120 branch
->u
.b
.veneer
= newerr
;
6122 newerr
->next
= sec_data
->erratumlist
;
6123 sec_data
->erratumlist
= newerr
;
6125 /* A symbol for the return from the veneer. */
6126 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
6127 hash_table
->num_vfp11_fixes
);
6129 myh
= elf_link_hash_lookup
6130 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6137 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
6138 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
6140 myh
= (struct elf_link_hash_entry
*) bh
;
6141 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6142 myh
->forced_local
= 1;
6146 /* Generate a mapping symbol for the veneer section, and explicitly add an
6147 entry for that symbol to the code/data map for the section. */
6148 if (hash_table
->vfp11_erratum_glue_size
== 0)
6151 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6152 ever requires this erratum fix. */
6153 _bfd_generic_link_add_one_symbol (link_info
,
6154 hash_table
->bfd_of_glue_owner
, "$a",
6155 BSF_LOCAL
, s
, 0, NULL
,
6158 myh
= (struct elf_link_hash_entry
*) bh
;
6159 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
6160 myh
->forced_local
= 1;
6162 /* The elf32_arm_init_maps function only cares about symbols from input
6163 BFDs. We must make a note of this generated mapping symbol
6164 ourselves so that code byteswapping works properly in
6165 elf32_arm_write_section. */
6166 elf32_arm_section_map_add (s
, 'a', 0);
6169 s
->size
+= VFP11_ERRATUM_VENEER_SIZE
;
6170 hash_table
->vfp11_erratum_glue_size
+= VFP11_ERRATUM_VENEER_SIZE
;
6171 hash_table
->num_vfp11_fixes
++;
6173 /* The offset of the veneer. */
/* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
   veneers need to be handled because they are only used on Cortex-M.  */

static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  */
  return val;
}
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)

/* Create a fake section for use by the ARM backend of the linker.  */

static bfd_boolean
arm_make_glue_section (bfd * abfd, const char * name)
{
  asection * sec;

  sec = bfd_get_linker_section (abfd, name);
  if (sec != NULL)
    /* Already made.  */
    return TRUE;

  sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);

  if (sec == NULL
      || !bfd_set_section_alignment (abfd, sec, 2))
    return FALSE;

  /* Set the gc mark to prevent the section from being removed by garbage
     collection, despite the fact that no relocs refer to this section.  */
  sec->gc_mark = 1;

  return TRUE;
}

/* Set size of .plt entries.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  */

void
bfd_elf32_arm_use_long_plt (void)
{
  elf32_arm_use_long_plt_entry = TRUE;
}
/* Add the glue sections to ABFD.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  */

bfd_boolean
bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
					struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
  bfd_boolean dostm32l4xx = globals
    && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
  bfd_boolean addglue;

  /* If we are only performing a partial
     link do not bother adding the glue.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
    && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
    && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
    && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);

  if (!dostm32l4xx)
    return addglue;

  return addglue
    && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
}
/* Select a BFD to be used to hold the sections used by the glue code.
   This function is called from the linker scripts in ld/emultempl/
   {armelf}.em.  */

bfd_boolean
bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link
     do not bother getting a bfd to hold the glue.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  /* Make sure we don't attach the glue sections to a dynamic object.  */
  BFD_ASSERT (!(abfd->flags & DYNAMIC));

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);

  if (globals->bfd_of_glue_owner != NULL)
    return TRUE;

  /* Save the bfd for later use.  */
  globals->bfd_of_glue_owner = abfd;

  return TRUE;
}
static void
check_use_blx (struct elf32_arm_link_hash_table *globals)
{
  int cpu_arch;

  cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
				       Tag_CPU_arch);

  if (globals->fix_arm1176)
    {
      if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
	globals->use_blx = 1;
    }
  else
    {
      if (cpu_arch > TAG_CPU_ARCH_V4T)
	globals->use_blx = 1;
    }
}
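/* Added note (restating the check above, plus one architectural fact): BLX
   was introduced in ARMv5T, which is why any Tag_CPU_arch value above
   TAG_CPU_ARCH_V4T enables use_blx.  When the ARM1176 workaround is
   requested, the test is narrowed to v6T2 or anything newer than v6K, so
   the affected v6 cores keep using the BX-based interworking sequences.  */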
bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (h->target_internal == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
/* Initialise maps of ARM/Thumb/data for input BFDs.  */

void
bfd_elf32_arm_init_maps (bfd *abfd)
{
  Elf_Internal_Sym *isymbuf;
  Elf_Internal_Shdr *hdr;
  unsigned int i, localsyms;

  /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
  if (! is_arm_elf (abfd))
    return;

  if ((abfd->flags & DYNAMIC) != 0)
    return;

  hdr = & elf_symtab_hdr (abfd);
  localsyms = hdr->sh_info;

  /* Obtain a buffer full of symbols for this BFD.  The hdr->sh_info field
     should contain the number of local symbols, which should come before any
     global symbols.  Mapping symbols are always local.  */
  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
				  NULL);

  /* No internal symbols read?  Skip this BFD.  */
  if (isymbuf == NULL)
    return;

  for (i = 0; i < localsyms; i++)
    {
      Elf_Internal_Sym *isym = &isymbuf[i];
      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
      const char *name;

      if (sec != NULL
	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
	{
	  name = bfd_elf_string_from_elf_section (abfd,
						  hdr->sh_link, isym->st_name);

	  if (bfd_is_arm_special_symbol_name (name,
					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
	}
    }
}
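/* Added note: the mapping symbols collected above are the AAELF "$a", "$t"
   and "$d" local symbols that mark the start of ARM code, Thumb code and
   data within a section; name[1] is therefore the single character 'a',
   't' or 'd' that gets recorded in the section map together with the
   symbol value.  */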
/* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
   say what they wanted.  */

void
bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);

  if (globals == NULL)
    return;

  if (globals->fix_cortex_a8 == -1)
    {
      /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
      if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
	  && (out_attr[Tag_CPU_arch_profile].i == 'A'
	      || out_attr[Tag_CPU_arch_profile].i == 0))
	globals->fix_cortex_a8 = 1;
      else
	globals->fix_cortex_a8 = 0;
    }
}
void
bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);

  if (globals == NULL)
    return;

  /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
  if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
    {
      switch (globals->vfp11_fix)
	{
	case BFD_ARM_VFP11_FIX_DEFAULT:
	case BFD_ARM_VFP11_FIX_NONE:
	  globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
	  break;

	default:
	  /* Give a warning, but do as the user requests anyway.  */
	  (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
	    "workaround is not necessary for target architecture"), obfd);
	}
    }
  else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
    /* For earlier architectures, we might need the workaround, but do not
       enable it by default.  If the user is running with broken hardware,
       they must enable the erratum fix explicitly.  */
    globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
}
void
bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);

  if (globals == NULL)
    return;

  /* We assume only Cortex-M4 may require the fix.  */
  if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
      || out_attr[Tag_CPU_arch_profile].i != 'M')
    {
      if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
	/* Give a warning, but do as the user requests anyway.  */
	(*_bfd_error_handler)
	  (_("%B: warning: selected STM32L4XX erratum "
	     "workaround is not necessary for target architecture"), obfd);
    }
}
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,
  VFP11_LS,
  VFP11_DS,
  VFP11_BAD
};

/* Return a VFP register number.  This is encoded as RX:X for single-precision
   registers, or X:RX for double-precision registers, where RX is the group of
   four bits in the instruction encoding and X is the single extension bit.
   RX and X fields are specified using their lowest (starting) bit.  The return
   value is:

     0...31: single-precision registers s0...s31
     32...63: double-precision registers d0...d31.

   Although X should be zero for VFP11 (encoding d0...d15 only), we might
   encounter VFP3 instructions, so we allow the full range for DP registers.  */

static unsigned int
bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
		     unsigned int x)
{
  if (is_double)
    return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
  else
    return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
}

/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    *wmask |= 1 << reg;
  else if (reg < 48)
    *wmask |= 3 << ((reg - 32) * 2);
}
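/* Worked example of the numbering above (values chosen for illustration):
   for a single-precision operand whose RX field is 0b0101 and whose X bit
   is 1, the computed number is (0b0101 << 1) | 1 == 11, i.e. s11; for a
   double-precision operand with RX = 0b0011 and X = 0 the result is
   3 + 32 == 35, i.e. d3.  In the write mask, sN sets bit N, while dN sets
   the two bits for s(2N) and s(2N+1), so d3 marks bits 6 and 7.  */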
/* Return TRUE if WMASK overwrites anything in REGS.  */

static bfd_boolean
bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
{
  int i;

  for (i = 0; i < numregs; i++)
    {
      unsigned int reg = regs[i];

      if (reg < 32 && (wmask & (1 << reg)) != 0)
	return TRUE;

      reg -= 32;

      if (reg >= 16)
	continue;

      if ((wmask & (3 << (reg * 2))) != 0)
	return TRUE;
    }

  return FALSE;
}
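/* Added example: if a preceding FMAC-pipeline instruction read s6 (so REGS
   contains 6) and a later VFP instruction writes d3, the write mask has
   bits 6 and 7 set and the (1 << 6) test above fires, i.e. the later
   instruction overwrites an input of the earlier one and a veneer is
   needed.  */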
6768 /* In this function, we're interested in two things: finding input registers
6769 for VFP data-processing instructions, and finding the set of registers which
6770 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
6771 hold the written set, so FLDM etc. are easy to deal with (we're only
6772 interested in 32 SP registers or 16 dp registers, due to the VFP version
6773 implemented by the chip in question). DP registers are marked by setting
6774 both SP registers in the write mask). */
6776 static enum bfd_arm_vfp11_pipe
6777 bfd_arm_vfp11_insn_decode (unsigned int insn
, unsigned int *destmask
, int *regs
,
6780 enum bfd_arm_vfp11_pipe vpipe
= VFP11_BAD
;
6781 bfd_boolean is_double
= ((insn
& 0xf00) == 0xb00) ? 1 : 0;
6783 if ((insn
& 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
6786 unsigned int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
6787 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
6789 pqrs
= ((insn
& 0x00800000) >> 20)
6790 | ((insn
& 0x00300000) >> 19)
6791 | ((insn
& 0x00000040) >> 6);
6795 case 0: /* fmac[sd]. */
6796 case 1: /* fnmac[sd]. */
6797 case 2: /* fmsc[sd]. */
6798 case 3: /* fnmsc[sd]. */
6800 bfd_arm_vfp11_write_mask (destmask
, fd
);
6802 regs
[1] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
6807 case 4: /* fmul[sd]. */
6808 case 5: /* fnmul[sd]. */
6809 case 6: /* fadd[sd]. */
6810 case 7: /* fsub[sd]. */
6814 case 8: /* fdiv[sd]. */
6817 bfd_arm_vfp11_write_mask (destmask
, fd
);
6818 regs
[0] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
6823 case 15: /* extended opcode. */
6825 unsigned int extn
= ((insn
>> 15) & 0x1e)
6826 | ((insn
>> 7) & 1);
6830 case 0: /* fcpy[sd]. */
6831 case 1: /* fabs[sd]. */
6832 case 2: /* fneg[sd]. */
6833 case 8: /* fcmp[sd]. */
6834 case 9: /* fcmpe[sd]. */
6835 case 10: /* fcmpz[sd]. */
6836 case 11: /* fcmpez[sd]. */
6837 case 16: /* fuito[sd]. */
6838 case 17: /* fsito[sd]. */
6839 case 24: /* ftoui[sd]. */
6840 case 25: /* ftouiz[sd]. */
6841 case 26: /* ftosi[sd]. */
6842 case 27: /* ftosiz[sd]. */
6843 /* These instructions will not bounce due to underflow. */
6848 case 3: /* fsqrt[sd]. */
6849 /* fsqrt cannot underflow, but it can (perhaps) overwrite
6850 registers to cause the erratum in previous instructions. */
6851 bfd_arm_vfp11_write_mask (destmask
, fd
);
6855 case 15: /* fcvt{ds,sd}. */
6859 bfd_arm_vfp11_write_mask (destmask
, fd
);
6861 /* Only FCVTSD can underflow. */
6862 if ((insn
& 0x100) != 0)
6881 /* Two-register transfer. */
6882 else if ((insn
& 0x0fe00ed0) == 0x0c400a10)
6884 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
6886 if ((insn
& 0x100000) == 0)
6889 bfd_arm_vfp11_write_mask (destmask
, fm
);
6892 bfd_arm_vfp11_write_mask (destmask
, fm
);
6893 bfd_arm_vfp11_write_mask (destmask
, fm
+ 1);
6899 else if ((insn
& 0x0e100e00) == 0x0c100a00) /* A load insn. */
6901 int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
6902 unsigned int puw
= ((insn
>> 21) & 0x1) | (((insn
>> 23) & 3) << 1);
6906 case 0: /* Two-reg transfer. We should catch these above. */
6909 case 2: /* fldm[sdx]. */
6913 unsigned int i
, offset
= insn
& 0xff;
6918 for (i
= fd
; i
< fd
+ offset
; i
++)
6919 bfd_arm_vfp11_write_mask (destmask
, i
);
6923 case 4: /* fld[sd]. */
6925 bfd_arm_vfp11_write_mask (destmask
, fd
);
6934 /* Single-register transfer. Note L==0. */
6935 else if ((insn
& 0x0f100e10) == 0x0e000a10)
6937 unsigned int opcode
= (insn
>> 21) & 7;
6938 unsigned int fn
= bfd_arm_vfp11_regno (insn
, is_double
, 16, 7);
6942 case 0: /* fmsr/fmdlr. */
6943 case 1: /* fmdhr. */
6944 /* Mark fmdhr and fmdlr as writing to the whole of the DP
6945 destination register. I don't know if this is exactly right,
6946 but it is the conservative choice. */
6947 bfd_arm_vfp11_write_mask (destmask
, fn
);
static int elf32_arm_compare_mapping (const void * a, const void * b);
6964 /* Look for potentially-troublesome code sequences which might trigger the
6965 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
6966 (available from ARM) for details of the erratum. A short version is
6967 described in ld.texinfo. */
6970 bfd_elf32_arm_vfp11_erratum_scan (bfd
*abfd
, struct bfd_link_info
*link_info
)
6973 bfd_byte
*contents
= NULL
;
6975 int regs
[3], numregs
= 0;
6976 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
6977 int use_vector
= (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_VECTOR
);
6979 if (globals
== NULL
)
6982 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6983 The states transition as follows:
6985 0 -> 1 (vector) or 0 -> 2 (scalar)
6986 A VFP FMAC-pipeline instruction has been seen. Fill
6987 regs[0]..regs[numregs-1] with its input operands. Remember this
6988 instruction in 'first_fmac'.
6991 Any instruction, except for a VFP instruction which overwrites
6996 A VFP instruction has been seen which overwrites any of regs[*].
6997 We must make a veneer! Reset state to 0 before examining next
7001 If we fail to match anything in state 2, reset to state 0 and reset
7002 the instruction pointer to the instruction after 'first_fmac'.
7004 If the VFP11 vector mode is in use, there must be at least two unrelated
7005 instructions between anti-dependent VFP11 instructions to properly avoid
7006 triggering the erratum, hence the use of the extra state 1. */
7008 /* If we are only performing a partial link do not bother
7009 to construct any glue. */
7010 if (bfd_link_relocatable (link_info
))
7013 /* Skip if this bfd does not correspond to an ELF image. */
7014 if (! is_arm_elf (abfd
))
7017 /* We should have chosen a fix type by the time we get here. */
7018 BFD_ASSERT (globals
->vfp11_fix
!= BFD_ARM_VFP11_FIX_DEFAULT
);
7020 if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_NONE
)
7023 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7024 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
7027 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7029 unsigned int i
, span
, first_fmac
= 0, veneer_of_insn
= 0;
7030 struct _arm_elf_section_data
*sec_data
;
7032 /* If we don't have executable progbits, we're not interested in this
7033 section. Also skip if section is to be excluded. */
7034 if (elf_section_type (sec
) != SHT_PROGBITS
7035 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
7036 || (sec
->flags
& SEC_EXCLUDE
) != 0
7037 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
7038 || sec
->output_section
== bfd_abs_section_ptr
7039 || strcmp (sec
->name
, VFP11_ERRATUM_VENEER_SECTION_NAME
) == 0)
7042 sec_data
= elf32_arm_section_data (sec
);
7044 if (sec_data
->mapcount
== 0)
7047 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7048 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7049 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7052 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
7053 elf32_arm_compare_mapping
);
7055 for (span
= 0; span
< sec_data
->mapcount
; span
++)
7057 unsigned int span_start
= sec_data
->map
[span
].vma
;
7058 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
7059 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
7060 char span_type
= sec_data
->map
[span
].type
;
7062 /* FIXME: Only ARM mode is supported at present. We may need to
7063 support Thumb-2 mode also at some point. */
7064 if (span_type
!= 'a')
7067 for (i
= span_start
; i
< span_end
;)
7069 unsigned int next_i
= i
+ 4;
7070 unsigned int insn
= bfd_big_endian (abfd
)
7071 ? (contents
[i
] << 24)
7072 | (contents
[i
+ 1] << 16)
7073 | (contents
[i
+ 2] << 8)
7075 : (contents
[i
+ 3] << 24)
7076 | (contents
[i
+ 2] << 16)
7077 | (contents
[i
+ 1] << 8)
7079 unsigned int writemask
= 0;
7080 enum bfd_arm_vfp11_pipe vpipe
;
7085 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
, regs
,
7087 /* I'm assuming the VFP11 erratum can trigger with denorm
7088 operands on either the FMAC or the DS pipeline. This might
7089 lead to slightly overenthusiastic veneer insertion. */
7090 if (vpipe
== VFP11_FMAC
|| vpipe
== VFP11_DS
)
7092 state
= use_vector
? 1 : 2;
7094 veneer_of_insn
= insn
;
7100 int other_regs
[3], other_numregs
;
7101 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
7104 if (vpipe
!= VFP11_BAD
7105 && bfd_arm_vfp11_antidependency (writemask
, regs
,
7115 int other_regs
[3], other_numregs
;
7116 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
7119 if (vpipe
!= VFP11_BAD
7120 && bfd_arm_vfp11_antidependency (writemask
, regs
,
7126 next_i
= first_fmac
+ 4;
7132 abort (); /* Should be unreachable. */
7137 elf32_vfp11_erratum_list
*newerr
=(elf32_vfp11_erratum_list
*)
7138 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
7140 elf32_arm_section_data (sec
)->erratumcount
+= 1;
7142 newerr
->u
.b
.vfp_insn
= veneer_of_insn
;
7147 newerr
->type
= VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
;
7154 record_vfp11_erratum_veneer (link_info
, newerr
, abfd
, sec
,
7159 newerr
->next
= sec_data
->erratumlist
;
7160 sec_data
->erratumlist
= newerr
;
7169 if (contents
!= NULL
7170 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7178 if (contents
!= NULL
7179 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7185 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7186 after sections have been laid out, using specially-named symbols. */
7189 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd
*abfd
,
7190 struct bfd_link_info
*link_info
)
7193 struct elf32_arm_link_hash_table
*globals
;
7196 if (bfd_link_relocatable (link_info
))
7199 /* Skip if this bfd does not correspond to an ELF image. */
7200 if (! is_arm_elf (abfd
))
7203 globals
= elf32_arm_hash_table (link_info
);
7204 if (globals
== NULL
)
7207 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7208 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7210 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7212 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7213 elf32_vfp11_erratum_list
*errnode
= sec_data
->erratumlist
;
7215 for (; errnode
!= NULL
; errnode
= errnode
->next
)
7217 struct elf_link_hash_entry
*myh
;
7220 switch (errnode
->type
)
7222 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
7223 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
:
7224 /* Find veneer symbol. */
7225 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
7226 errnode
->u
.b
.veneer
->u
.v
.id
);
7228 myh
= elf_link_hash_lookup
7229 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7232 (*_bfd_error_handler
) (_("%B: unable to find VFP11 veneer "
7233 "`%s'"), abfd
, tmp_name
);
7235 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7236 + myh
->root
.u
.def
.section
->output_offset
7237 + myh
->root
.u
.def
.value
;
7239 errnode
->u
.b
.veneer
->vma
= vma
;
7242 case VFP11_ERRATUM_ARM_VENEER
:
7243 case VFP11_ERRATUM_THUMB_VENEER
:
7244 /* Find return location. */
7245 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
7248 myh
= elf_link_hash_lookup
7249 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7252 (*_bfd_error_handler
) (_("%B: unable to find VFP11 veneer "
7253 "`%s'"), abfd
, tmp_name
);
7255 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7256 + myh
->root
.u
.def
.section
->output_offset
7257 + myh
->root
.u
.def
.value
;
7259 errnode
->u
.v
.branch
->vma
= vma
;
7271 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7272 return locations after sections have been laid out, using
7273 specially-named symbols. */
7276 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd
*abfd
,
7277 struct bfd_link_info
*link_info
)
7280 struct elf32_arm_link_hash_table
*globals
;
7283 if (bfd_link_relocatable (link_info
))
7286 /* Skip if this bfd does not correspond to an ELF image. */
7287 if (! is_arm_elf (abfd
))
7290 globals
= elf32_arm_hash_table (link_info
);
7291 if (globals
== NULL
)
7294 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7295 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7297 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7299 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7300 elf32_stm32l4xx_erratum_list
*errnode
= sec_data
->stm32l4xx_erratumlist
;
7302 for (; errnode
!= NULL
; errnode
= errnode
->next
)
7304 struct elf_link_hash_entry
*myh
;
7307 switch (errnode
->type
)
7309 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
7310 /* Find veneer symbol. */
7311 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
7312 errnode
->u
.b
.veneer
->u
.v
.id
);
7314 myh
= elf_link_hash_lookup
7315 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7318 (*_bfd_error_handler
) (_("%B: unable to find STM32L4XX veneer "
7319 "`%s'"), abfd
, tmp_name
);
7321 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7322 + myh
->root
.u
.def
.section
->output_offset
7323 + myh
->root
.u
.def
.value
;
7325 errnode
->u
.b
.veneer
->vma
= vma
;
7328 case STM32L4XX_ERRATUM_VENEER
:
7329 /* Find return location. */
7330 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
7333 myh
= elf_link_hash_lookup
7334 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7337 (*_bfd_error_handler
) (_("%B: unable to find STM32L4XX veneer "
7338 "`%s'"), abfd
, tmp_name
);
7340 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7341 + myh
->root
.u
.def
.section
->output_offset
7342 + myh
->root
.u
.def
.value
;
7344 errnode
->u
.v
.branch
->vma
= vma
;
static inline bfd_boolean
is_thumb2_ldmia (const insn32 insn)
{
  /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
     1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
  return (insn & 0xffd02000) == 0xe8900000;
}

static inline bfd_boolean
is_thumb2_ldmdb (const insn32 insn)
{
  /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
     1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
  return (insn & 0xffd02000) == 0xe9100000;
}

static inline bfd_boolean
is_thumb2_vldm (const insn32 insn)
{
  /* A6.5 Extension register load or store instruction
     We look only for the 32-bit registers case since the DP (64-bit
     registers) are not supported for STM32L4XX
     Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 32-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
     if P==0 && U==1 && W==1 && Rn=1101 VPOP
     if PUW=010 || PUW=011 || PUW=101 VLDM.  */
  return
    ((insn & 0xfe100f00) == 0xec100a00)
    && /* (IA without !).  */
    (((((insn << 7) >> 28) & 0xd) == 0x4)
     /* (IA with !), includes VPOP (when reg number is SP).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x5)
     /* (DB with !).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x9));
}

/* STM STM32L4XX erratum: This function assumes that it receives an LDM or
   VLDM instruction and:
   - computes the number and the mode of memory accesses
   - decides if the replacement should be done:
     . replaces only if > 8-word accesses
     . or (testing purposes only) replaces all accesses.  */

static bfd_boolean
stm32l4xx_need_create_replacing_stub (const insn32 insn,
				      bfd_arm_stm32l4xx_fix stm32l4xx_fix)
{
  int nb_regs = 0;

  /* The field encoding the register list is the same for both LDMIA
     and LDMDB encodings.  */
  if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
    nb_regs = popcount (insn & 0x0000ffff);
  else if (is_thumb2_vldm (insn))
    nb_regs = (insn & 0xff);

  /* DEFAULT mode accounts for the real bug condition situation,
     ALL mode inserts stubs for each LDM/VLDM instruction (testing).  */
  return
    (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_regs > 8 :
    (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
}
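/* Added example of the decision above: a Thumb-2 LDMIA whose low halfword
   has ten register bits set loads ten words, so in
   BFD_ARM_STM32L4XX_FIX_DEFAULT mode (threshold of more than eight words)
   a veneer is created, while a four-register LDM is left untouched.  For
   VLDM the 8-bit immediate field already counts the 32-bit registers
   transferred, so it is used directly as nb_regs.  */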
7421 /* Look for potentially-troublesome code sequences which might trigger
7422 the STM STM32L4XX erratum. */
7425 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd
*abfd
,
7426 struct bfd_link_info
*link_info
)
7429 bfd_byte
*contents
= NULL
;
7430 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
7432 if (globals
== NULL
)
7435 /* If we are only performing a partial link do not bother
7436 to construct any glue. */
7437 if (bfd_link_relocatable (link_info
))
7440 /* Skip if this bfd does not correspond to an ELF image. */
7441 if (! is_arm_elf (abfd
))
7444 if (globals
->stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_NONE
)
7447 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7448 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
7451 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7453 unsigned int i
, span
;
7454 struct _arm_elf_section_data
*sec_data
;
7456 /* If we don't have executable progbits, we're not interested in this
7457 section. Also skip if section is to be excluded. */
7458 if (elf_section_type (sec
) != SHT_PROGBITS
7459 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
7460 || (sec
->flags
& SEC_EXCLUDE
) != 0
7461 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
7462 || sec
->output_section
== bfd_abs_section_ptr
7463 || strcmp (sec
->name
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
) == 0)
7466 sec_data
= elf32_arm_section_data (sec
);
7468 if (sec_data
->mapcount
== 0)
7471 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7472 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7473 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7476 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
7477 elf32_arm_compare_mapping
);
7479 for (span
= 0; span
< sec_data
->mapcount
; span
++)
7481 unsigned int span_start
= sec_data
->map
[span
].vma
;
7482 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
7483 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
7484 char span_type
= sec_data
->map
[span
].type
;
7485 int itblock_current_pos
= 0;
7487 /* Only Thumb2 mode need be supported with this CM4 specific
7488 code, we should not encounter any arm mode eg span_type
7490 if (span_type
!= 't')
7493 for (i
= span_start
; i
< span_end
;)
7495 unsigned int insn
= bfd_get_16 (abfd
, &contents
[i
]);
7496 bfd_boolean insn_32bit
= FALSE
;
7497 bfd_boolean is_ldm
= FALSE
;
7498 bfd_boolean is_vldm
= FALSE
;
7499 bfd_boolean is_not_last_in_it_block
= FALSE
;
7501 /* The first 16-bits of all 32-bit thumb2 instructions start
7502 with opcode[15..13]=0b111 and the encoded op1 can be anything
7503 except opcode[12..11]!=0b00.
7504 See 32-bit Thumb instruction encoding. */
7505 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
7508 /* Compute the predicate that tells if the instruction
7509 is concerned by the IT block
7510 - Creates an error if there is a ldm that is not
7511 last in the IT block thus cannot be replaced
7512 - Otherwise we can create a branch at the end of the
7513 IT block, it will be controlled naturally by IT
7514 with the proper pseudo-predicate
7515 - So the only interesting predicate is the one that
7516 tells that we are not on the last item of an IT
7518 if (itblock_current_pos
!= 0)
7519 is_not_last_in_it_block
= !!--itblock_current_pos
;
7523 /* Load the rest of the insn (in manual-friendly order). */
7524 insn
= (insn
<< 16) | bfd_get_16 (abfd
, &contents
[i
+ 2]);
7525 is_ldm
= is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
);
7526 is_vldm
= is_thumb2_vldm (insn
);
7528 /* Veneers are created for (v)ldm depending on
7529 option flags and memory accesses conditions; but
7530 if the instruction is not the last instruction of
7531 an IT block, we cannot create a jump there, so we
7533 if ((is_ldm
|| is_vldm
) &&
7534 stm32l4xx_need_create_replacing_stub
7535 (insn
, globals
->stm32l4xx_fix
))
7537 if (is_not_last_in_it_block
)
7539 (*_bfd_error_handler
)
7540 /* Note - overlong line used here to allow for translation. */
7542 %B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
7543 "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
7544 abfd
, sec
, (long)i
);
7548 elf32_stm32l4xx_erratum_list
*newerr
=
7549 (elf32_stm32l4xx_erratum_list
*)
7551 (sizeof (elf32_stm32l4xx_erratum_list
));
7553 elf32_arm_section_data (sec
)
7554 ->stm32l4xx_erratumcount
+= 1;
7555 newerr
->u
.b
.insn
= insn
;
7556 /* We create only thumb branches. */
7558 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
;
7559 record_stm32l4xx_erratum_veneer
7560 (link_info
, newerr
, abfd
, sec
,
7563 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
:
7564 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
);
7566 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
7567 sec_data
->stm32l4xx_erratumlist
= newerr
;
7574 IT blocks are only encoded in T1
7575 Encoding T1: IT{x{y{z}}} <firstcond>
7576 1 0 1 1 - 1 1 1 1 - firstcond - mask
7577 if mask = '0000' then see 'related encodings'
7578 We don't deal with UNPREDICTABLE, just ignore these.
7579 There can be no nested IT blocks so an IT block
7580 is naturally a new one for which it is worth
7581 computing its size. */
7582 bfd_boolean is_newitblock
= ((insn
& 0xff00) == 0xbf00) &&
7583 ((insn
& 0x000f) != 0x0000);
7584 /* If we have a new IT block we compute its size. */
7587 /* Compute the number of instructions controlled
7588 by the IT block, it will be used to decide
7589 whether we are inside an IT block or not. */
7590 unsigned int mask
= insn
& 0x000f;
7591 itblock_current_pos
= 4 - ctz (mask
);
7595 i
+= insn_32bit
? 4 : 2;
7599 if (contents
!= NULL
7600 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7608 if (contents
!= NULL
7609 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
/* Set target relocation values needed during linking.  */

void
bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
				 struct bfd_link_info *link_info,
				 int target1_is_rel,
				 char * target2_type,
				 int fix_v4bx,
				 int use_blx,
				 bfd_arm_vfp11_fix vfp11_fix,
				 bfd_arm_stm32l4xx_fix stm32l4xx_fix,
				 int no_enum_warn, int no_wchar_warn,
				 int pic_veneer, int fix_cortex_a8,
				 int fix_arm1176)
{
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  globals->target1_is_rel = target1_is_rel;
  if (strcmp (target2_type, "rel") == 0)
    globals->target2_reloc = R_ARM_REL32;
  else if (strcmp (target2_type, "abs") == 0)
    globals->target2_reloc = R_ARM_ABS32;
  else if (strcmp (target2_type, "got-rel") == 0)
    globals->target2_reloc = R_ARM_GOT_PREL;
  else
    {
      _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
			  target2_type);
    }
  globals->fix_v4bx = fix_v4bx;
  globals->use_blx |= use_blx;
  globals->vfp11_fix = vfp11_fix;
  globals->stm32l4xx_fix = stm32l4xx_fix;
  globals->pic_veneer = pic_veneer;
  globals->fix_cortex_a8 = fix_cortex_a8;
  globals->fix_arm1176 = fix_arm1176;

  BFD_ASSERT (is_arm_elf (output_bfd));
  elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
  elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
}
/* Replace the target offset of a Thumb bl or b.w instruction.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  bfd_vma reloc_sign;

  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
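/* Worked example (illustrative): for offset == 0x1000 the fields become
   S = 0, imm10 = (0x1000 >> 12) & 0x3ff = 1, J1 = J2 = 1 (bits 23 and 22
   of the offset are clear and the sign is positive) and
   imm11 = (0x1000 >> 1) & 0x7ff = 0, which re-assembles to
   S:I1:I2:imm10:imm11:'0' = 0x1000 under the usual I1 = NOT(J1 EOR S),
   I2 = NOT(J2 EOR S) rule of the Thumb-2 BL/B.W encoding.  */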
7686 /* Thumb code calling an ARM function. */
7689 elf32_thumb_to_arm_stub (struct bfd_link_info
* info
,
7693 asection
* input_section
,
7694 bfd_byte
* hit_data
,
7697 bfd_signed_vma addend
,
7699 char **error_message
)
7703 long int ret_offset
;
7704 struct elf_link_hash_entry
* myh
;
7705 struct elf32_arm_link_hash_table
* globals
;
7707 myh
= find_thumb_glue (info
, name
, error_message
);
7711 globals
= elf32_arm_hash_table (info
);
7712 BFD_ASSERT (globals
!= NULL
);
7713 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7715 my_offset
= myh
->root
.u
.def
.value
;
7717 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
7718 THUMB2ARM_GLUE_SECTION_NAME
);
7720 BFD_ASSERT (s
!= NULL
);
7721 BFD_ASSERT (s
->contents
!= NULL
);
7722 BFD_ASSERT (s
->output_section
!= NULL
);
7724 if ((my_offset
& 0x01) == 0x01)
7727 && sym_sec
->owner
!= NULL
7728 && !INTERWORK_FLAG (sym_sec
->owner
))
7730 (*_bfd_error_handler
)
7731 (_("%B(%s): warning: interworking not enabled.\n"
7732 " first occurrence: %B: Thumb call to ARM"),
7733 sym_sec
->owner
, input_bfd
, name
);
7739 myh
->root
.u
.def
.value
= my_offset
;
7741 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a1_bx_pc_insn
,
7742 s
->contents
+ my_offset
);
7744 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a2_noop_insn
,
7745 s
->contents
+ my_offset
+ 2);
7748 /* Address of destination of the stub. */
7749 ((bfd_signed_vma
) val
)
7751 /* Offset from the start of the current section
7752 to the start of the stubs. */
7754 /* Offset of the start of this stub from the start of the stubs. */
7756 /* Address of the start of the current section. */
7757 + s
->output_section
->vma
)
7758 /* The branch instruction is 4 bytes into the stub. */
7760 /* ARM branches work from the pc of the instruction + 8. */
7763 put_arm_insn (globals
, output_bfd
,
7764 (bfd_vma
) t2a3_b_insn
| ((ret_offset
>> 2) & 0x00FFFFFF),
7765 s
->contents
+ my_offset
+ 4);
7768 BFD_ASSERT (my_offset
<= globals
->thumb_glue_size
);
7770 /* Now go back and fix up the original BL insn to point to here. */
7772 /* Address of where the stub is located. */
7773 (s
->output_section
->vma
+ s
->output_offset
+ my_offset
)
7774 /* Address of where the BL is located. */
7775 - (input_section
->output_section
->vma
+ input_section
->output_offset
7777 /* Addend in the relocation. */
7779 /* Biassing for PC-relative addressing. */
7782 insert_thumb_branch (input_bfd
, ret_offset
, hit_data
- input_section
->vma
);
7787 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
7789 static struct elf_link_hash_entry
*
7790 elf32_arm_create_thumb_stub (struct bfd_link_info
* info
,
7797 char ** error_message
)
7800 long int ret_offset
;
7801 struct elf_link_hash_entry
* myh
;
7802 struct elf32_arm_link_hash_table
* globals
;
7804 myh
= find_arm_glue (info
, name
, error_message
);
7808 globals
= elf32_arm_hash_table (info
);
7809 BFD_ASSERT (globals
!= NULL
);
7810 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7812 my_offset
= myh
->root
.u
.def
.value
;
7814 if ((my_offset
& 0x01) == 0x01)
7817 && sym_sec
->owner
!= NULL
7818 && !INTERWORK_FLAG (sym_sec
->owner
))
7820 (*_bfd_error_handler
)
7821 (_("%B(%s): warning: interworking not enabled.\n"
7822 " first occurrence: %B: arm call to thumb"),
7823 sym_sec
->owner
, input_bfd
, name
);
7827 myh
->root
.u
.def
.value
= my_offset
;
7829 if (bfd_link_pic (info
)
7830 || globals
->root
.is_relocatable_executable
7831 || globals
->pic_veneer
)
7833 /* For relocatable objects we can't use absolute addresses,
7834 so construct the address from a relative offset. */
7835 /* TODO: If the offset is small it's probably worth
7836 constructing the address with adds. */
7837 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1p_ldr_insn
,
7838 s
->contents
+ my_offset
);
7839 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2p_add_pc_insn
,
7840 s
->contents
+ my_offset
+ 4);
7841 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t3p_bx_r12_insn
,
7842 s
->contents
+ my_offset
+ 8);
7843 /* Adjust the offset by 4 for the position of the add,
7844 and 8 for the pipeline offset. */
7845 ret_offset
= (val
- (s
->output_offset
7846 + s
->output_section
->vma
7849 bfd_put_32 (output_bfd
, ret_offset
,
7850 s
->contents
+ my_offset
+ 12);
7852 else if (globals
->use_blx
)
7854 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1v5_ldr_insn
,
7855 s
->contents
+ my_offset
);
7857 /* It's a thumb address. Add the low order bit. */
7858 bfd_put_32 (output_bfd
, val
| a2t2v5_func_addr_insn
,
7859 s
->contents
+ my_offset
+ 4);
7863 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1_ldr_insn
,
7864 s
->contents
+ my_offset
);
7866 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2_bx_r12_insn
,
7867 s
->contents
+ my_offset
+ 4);
7869 /* It's a thumb address. Add the low order bit. */
7870 bfd_put_32 (output_bfd
, val
| a2t3_func_addr_insn
,
7871 s
->contents
+ my_offset
+ 8);
7877 BFD_ASSERT (my_offset
<= globals
->arm_glue_size
);
/* Arm code calling a Thumb function.  */

static bfd_boolean
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  tmp = bfd_get_32 (input_bfd, hit_data);
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
7939 /* Populate Arm stub for an exported Thumb function. */
7942 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry
*h
, void * inf
)
7944 struct bfd_link_info
* info
= (struct bfd_link_info
*) inf
;
7946 struct elf_link_hash_entry
* myh
;
7947 struct elf32_arm_link_hash_entry
*eh
;
7948 struct elf32_arm_link_hash_table
* globals
;
7951 char *error_message
;
7953 eh
= elf32_arm_hash_entry (h
);
7954 /* Allocate stubs for exported Thumb functions on v4t. */
7955 if (eh
->export_glue
== NULL
)
7958 globals
= elf32_arm_hash_table (info
);
7959 BFD_ASSERT (globals
!= NULL
);
7960 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7962 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
7963 ARM2THUMB_GLUE_SECTION_NAME
);
7964 BFD_ASSERT (s
!= NULL
);
7965 BFD_ASSERT (s
->contents
!= NULL
);
7966 BFD_ASSERT (s
->output_section
!= NULL
);
7968 sec
= eh
->export_glue
->root
.u
.def
.section
;
7970 BFD_ASSERT (sec
->output_section
!= NULL
);
7972 val
= eh
->export_glue
->root
.u
.def
.value
+ sec
->output_offset
7973 + sec
->output_section
->vma
;
7975 myh
= elf32_arm_create_thumb_stub (info
, h
->root
.root
.string
,
7976 h
->root
.u
.def
.section
->owner
,
7977 globals
->obfd
, sec
, val
, s
,
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */

static bfd_vma
elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
{
  asection * s;
  bfd_vma glue_addr;
  bfd_byte *p;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM_BX_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);

  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;

  if ((globals->bx_glue_offset[reg] & 1) == 0)
    {
      p = s->contents + glue_addr;
      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
      globals->bx_glue_offset[reg] |= 1;
    }

  return glue_addr + s->output_section->vma + s->output_offset;
}
/* Generate Arm stubs for exported Thumb symbols.  */

static void
elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
				  struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table * globals;

  if (link_info == NULL)
    /* Ignore this if we are not called by the ELF backend linker.  */
    return;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  /* If blx is available then exported Thumb symbols are OK and there is
     nothing to do.  */
  if (globals->use_blx)
    return;

  elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
			  link_info);
}
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */

static void
elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
			      bfd_size_type count)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  BFD_ASSERT (htab->root.dynamic_sections_created);

  sreloc->size += RELOC_SIZE (htab) * count;
}

/* Reserve space for COUNT R_ARM_IRELATIVE relocations.  If the link is
   dynamic, the relocations should go in SRELOC, otherwise they should
   go in the special .rel.iplt section.  */

static void
elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
			    bfd_size_type count)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (!htab->root.dynamic_sections_created)
    htab->root.irelplt->size += RELOC_SIZE (htab) * count;
  else
    {
      BFD_ASSERT (sreloc != NULL);
      sreloc->size += RELOC_SIZE (htab) * count;
    }
}
/* Add relocation REL to the end of relocation section SRELOC.  */

static void
elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
			asection *sreloc, Elf_Internal_Rela *rel)
{
  bfd_byte *loc;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (!htab->root.dynamic_sections_created
      && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
    sreloc = htab->root.irelplt;
  if (sreloc == NULL)
    abort ();

  loc = sreloc->contents;
  loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
  if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
    abort ();
  SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
}
8101 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
8102 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
8106 elf32_arm_allocate_plt_entry (struct bfd_link_info
*info
,
8107 bfd_boolean is_iplt_entry
,
8108 union gotplt_union
*root_plt
,
8109 struct arm_plt_info
*arm_plt
)
8111 struct elf32_arm_link_hash_table
*htab
;
8115 htab
= elf32_arm_hash_table (info
);
8119 splt
= htab
->root
.iplt
;
8120 sgotplt
= htab
->root
.igotplt
;
8122 /* NaCl uses a special first entry in .iplt too. */
8123 if (htab
->nacl_p
&& splt
->size
== 0)
8124 splt
->size
+= htab
->plt_header_size
;
8126 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
8127 elf32_arm_allocate_irelocs (info
, htab
->root
.irelplt
, 1);
8131 splt
= htab
->root
.splt
;
8132 sgotplt
= htab
->root
.sgotplt
;
8134 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
8135 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
8137 /* If this is the first .plt entry, make room for the special
8139 if (splt
->size
== 0)
8140 splt
->size
+= htab
->plt_header_size
;
8142 htab
->next_tls_desc_index
++;
8145 /* Allocate the PLT entry itself, including any leading Thumb stub. */
8146 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
8147 splt
->size
+= PLT_THUMB_STUB_SIZE
;
8148 root_plt
->offset
= splt
->size
;
8149 splt
->size
+= htab
->plt_entry_size
;
8151 if (!htab
->symbian_p
)
8153 /* We also need to make an entry in the .got.plt section, which
8154 will be placed in the .got section by the linker script. */
8156 arm_plt
->got_offset
= sgotplt
->size
;
8158 arm_plt
->got_offset
= sgotplt
->size
- 8 * htab
->num_tls_desc
;
static bfd_vma
arm_movw_immediate (bfd_vma value)
{
  return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
}

static bfd_vma
arm_movt_immediate (bfd_vma value)
{
  return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
}
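/* Worked example (illustrative): for value == 0x12345678,
   arm_movw_immediate yields 0x00050678 (imm4 = 0x5 in bits 16-19, imm12 =
   0x678 in bits 0-11, i.e. the low halfword 0x5678) and arm_movt_immediate
   yields 0x00010234 (imm4 = 0x1, imm12 = 0x234, i.e. the high halfword
   0x1234).  These bit patterns are OR-ed into the MOVW/MOVT opcode
   templates when the NaCl PLT entries are populated below.  */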
8175 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
8176 the entry lives in .iplt and resolves to (*SYM_VALUE)().
8177 Otherwise, DYNINDX is the index of the symbol in the dynamic
8178 symbol table and SYM_VALUE is undefined.
8180 ROOT_PLT points to the offset of the PLT entry from the start of its
8181 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
8182 bookkeeping information.
8184 Returns FALSE if there was a problem. */
8187 elf32_arm_populate_plt_entry (bfd
*output_bfd
, struct bfd_link_info
*info
,
8188 union gotplt_union
*root_plt
,
8189 struct arm_plt_info
*arm_plt
,
8190 int dynindx
, bfd_vma sym_value
)
8192 struct elf32_arm_link_hash_table
*htab
;
8198 Elf_Internal_Rela rel
;
8199 bfd_vma plt_header_size
;
8200 bfd_vma got_header_size
;
8202 htab
= elf32_arm_hash_table (info
);
8204 /* Pick the appropriate sections and sizes. */
8207 splt
= htab
->root
.iplt
;
8208 sgot
= htab
->root
.igotplt
;
8209 srel
= htab
->root
.irelplt
;
8211 /* There are no reserved entries in .igot.plt, and no special
8212 first entry in .iplt. */
8213 got_header_size
= 0;
8214 plt_header_size
= 0;
8218 splt
= htab
->root
.splt
;
8219 sgot
= htab
->root
.sgotplt
;
8220 srel
= htab
->root
.srelplt
;
8222 got_header_size
= get_elf_backend_data (output_bfd
)->got_header_size
;
8223 plt_header_size
= htab
->plt_header_size
;
8225 BFD_ASSERT (splt
!= NULL
&& srel
!= NULL
);
8227 /* Fill in the entry in the procedure linkage table. */
8228 if (htab
->symbian_p
)
8230 BFD_ASSERT (dynindx
>= 0);
8231 put_arm_insn (htab
, output_bfd
,
8232 elf32_arm_symbian_plt_entry
[0],
8233 splt
->contents
+ root_plt
->offset
);
8234 bfd_put_32 (output_bfd
,
8235 elf32_arm_symbian_plt_entry
[1],
8236 splt
->contents
+ root_plt
->offset
+ 4);
8238 /* Fill in the entry in the .rel.plt section. */
8239 rel
.r_offset
= (splt
->output_section
->vma
8240 + splt
->output_offset
8241 + root_plt
->offset
+ 4);
8242 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_GLOB_DAT
);
8244 /* Get the index in the procedure linkage table which
8245 corresponds to this symbol. This is the index of this symbol
8246 in all the symbols for which we are making plt entries. The
8247 first entry in the procedure linkage table is reserved. */
8248 plt_index
= ((root_plt
->offset
- plt_header_size
)
8249 / htab
->plt_entry_size
);
8253 bfd_vma got_offset
, got_address
, plt_address
;
8254 bfd_vma got_displacement
, initial_got_entry
;
8257 BFD_ASSERT (sgot
!= NULL
);
8259 /* Get the offset into the .(i)got.plt table of the entry that
8260 corresponds to this function. */
8261 got_offset
= (arm_plt
->got_offset
& -2);
8263 /* Get the index in the procedure linkage table which
8264 corresponds to this symbol. This is the index of this symbol
8265 in all the symbols for which we are making plt entries.
8266 After the reserved .got.plt entries, all symbols appear in
8267 the same order as in .plt. */
8268 plt_index
= (got_offset
- got_header_size
) / 4;
8270 /* Calculate the address of the GOT entry. */
8271 got_address
= (sgot
->output_section
->vma
8272 + sgot
->output_offset
8275 /* ...and the address of the PLT entry. */
8276 plt_address
= (splt
->output_section
->vma
8277 + splt
->output_offset
8278 + root_plt
->offset
);
8280 ptr
= splt
->contents
+ root_plt
->offset
;
8281 if (htab
->vxworks_p
&& bfd_link_pic (info
))
8286 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
8288 val
= elf32_arm_vxworks_shared_plt_entry
[i
];
8290 val
|= got_address
- sgot
->output_section
->vma
;
8292 val
|= plt_index
* RELOC_SIZE (htab
);
8293 if (i
== 2 || i
== 5)
8294 bfd_put_32 (output_bfd
, val
, ptr
);
8296 put_arm_insn (htab
, output_bfd
, val
, ptr
);
8299 else if (htab
->vxworks_p
)
8304 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
8306 val
= elf32_arm_vxworks_exec_plt_entry
[i
];
8310 val
|= 0xffffff & -((root_plt
->offset
+ i
* 4 + 8) >> 2);
8312 val
|= plt_index
* RELOC_SIZE (htab
);
8313 if (i
== 2 || i
== 5)
8314 bfd_put_32 (output_bfd
, val
, ptr
);
8316 put_arm_insn (htab
, output_bfd
, val
, ptr
);
8319 loc
= (htab
->srelplt2
->contents
8320 + (plt_index
* 2 + 1) * RELOC_SIZE (htab
));
8322 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
8323 referencing the GOT for this PLT entry. */
8324 rel
.r_offset
= plt_address
+ 8;
8325 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
8326 rel
.r_addend
= got_offset
;
8327 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
8328 loc
+= RELOC_SIZE (htab
);
8330 /* Create the R_ARM_ABS32 relocation referencing the
8331 beginning of the PLT for this GOT entry. */
8332 rel
.r_offset
= got_address
;
8333 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
8335 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);
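	  /* Reminder: in Thumb state the PC reads as the address of the
	     current instruction plus 4, and the add-to-pc sits 8 bytes
	     into the entry, hence the 12-byte bias above.  */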
	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000))
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);
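	  /* Reminder: in ARM state the PC reads as the address of the
	     current instruction plus 8, so the add-to-pc in the first
	     word of the entry sees plt_address + 8.  */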
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr + 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}
      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      loc = srel->contents + plt_index * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }

  return TRUE;
}
/* Some relocations map to different relocations depending on the
   target.  Return the real relocation.  */

static int
arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
		     int r_type)
{
  switch (r_type)
    {
    case R_ARM_TARGET1:
      if (globals->target1_is_rel)
	return R_ARM_REL32;
      else
	return R_ARM_ABS32;

    case R_ARM_TARGET2:
      return globals->target2_reloc;

    default:
      return r_type;
    }
}
/* Return the base VMA address which should be subtracted from real addresses
   when resolving @dtpoff relocation.
   This is PT_TLS segment p_vaddr.  */

static bfd_vma
dtpoff_base (struct bfd_link_info *info)
{
  /* If tls_sec is NULL, we should have signalled an error already.  */
  if (elf_hash_table (info)->tls_sec == NULL)
    return 0;
  return elf_hash_table (info)->tls_sec->vma;
}
/* Return the relocation value for @tpoff relocation
   if STT_TLS virtual address is ADDRESS.  */

static bfd_vma
tpoff (struct bfd_link_info *info, bfd_vma address)
{
  struct elf_link_hash_table *htab = elf_hash_table (info);
  bfd_vma base;

  /* If tls_sec is NULL, we should have signalled an error already.  */
  if (htab->tls_sec == NULL)
    return 0;

  base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
  return address - htab->tls_sec->vma + base;
}
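/* Note: ARM uses TLS variant 1, where the thread pointer addresses an
   8-byte TCB (TCB_SIZE); with a .tdata alignment of 4 or 8 the first
   TLS byte therefore gets a @tpoff of 8.  */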
/* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
   VALUE is the relocation value.  */

static bfd_reloc_status_type
elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
{
  if (value > 0xfff)
    return bfd_reloc_overflow;

  value |= bfd_get_32 (abfd, data) & 0xfffff000;
  bfd_put_32 (abfd, value, data);
  return bfd_reloc_ok;
}
/* Handle TLS relaxations.  Relaxing is possible for symbols that use
   R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
   R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.

   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.  */

static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	/* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)	/* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)	/* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]'.  */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0]  */
	insn = 0x44786800;
      else if (arch_has_thumb2_nop (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
/* For a given value of n, calculate the value of G_n as required to
   deal with group relocations.  We return it in the form of an
   encoded constant-and-rotation, together with the final residual.  If n is
   specified as less than zero, then final_residual is filled with the
   input value and no further action is performed.  */
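/* Worked example: for value 0x12345678, successive passes give
   G_0 = 0x12000000, G_1 = 0x344000 and G_2 = 0x1640, leaving a final
   residual of 0x38.  Each G_n is the most significant byte-sized chunk
   of the remaining residual, aligned to a 2-bit boundary as required by
   the ARM immediate constant-and-rotation scheme.  */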
static bfd_vma
calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
{
  int current_n;
  bfd_vma g_n;
  bfd_vma encoded_g_n = 0;
  bfd_vma residual = value; /* Also known as Y_n.  */

  for (current_n = 0; current_n <= n; current_n++)
    {
      int shift;

      /* Calculate which part of the value to mask.  */
      if (residual == 0)
	shift = 0;
      else
	{
	  int msb;

	  /* Determine the most significant bit in the residual and
	     align the resulting value to a 2-bit boundary.  */
	  for (msb = 30; msb >= 0; msb -= 2)
	    if (residual & (3 << msb))
	      break;

	  /* The desired shift is now (msb - 6), or zero, whichever
	     is the greater.  */
	  shift = msb - 6;
	  if (shift < 0)
	    shift = 0;
	}

      /* Calculate g_n in 32-bit as well as encoded constant+rotation form.  */
      g_n = residual & (0xff << shift);
      encoded_g_n = (g_n >> shift)
		    | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);

      /* Calculate the residual for the next time around.  */
      residual &= ~g_n;
    }

  *final_residual = residual;

  return encoded_g_n;
}
/* Given an ARM instruction, determine whether it is an ADD or a SUB.
   Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise.  */

static int
identify_add_or_sub (bfd_vma insn)
{
  int opcode = insn & 0x1e00000;

  if (opcode == 1 << 23) /* ADD */
    return 1;

  if (opcode == 1 << 22) /* SUB */
    return -1;

  return 0;
}
/* Perform a relocation as part of a final link.  */

static bfd_reloc_status_type
elf32_arm_final_link_relocate (reloc_howto_type * howto,
			       bfd * input_bfd,
			       bfd * output_bfd,
			       asection * input_section,
			       bfd_byte * contents,
			       Elf_Internal_Rela * rel,
			       bfd_vma value,
			       struct bfd_link_info * info,
			       asection * sym_sec,
			       const char * sym_name,
			       unsigned char st_type,
			       enum arm_st_branch_type branch_type,
			       struct elf_link_hash_entry * h,
			       bfd_boolean * unresolved_reloc_p,
			       char ** error_message)
{
  unsigned long r_type = howto->type;
  unsigned long r_symndx;
  bfd_byte * hit_data = contents + rel->r_offset;
  bfd_vma * local_got_offsets;
  bfd_vma * local_tlsdesc_gotents;
  asection * sgot;
  asection * splt;
  asection * sreloc = NULL;
  asection * srelgot;
  bfd_vma addend;
  bfd_signed_vma signed_addend;
  unsigned char dynreloc_st_type;
  bfd_vma dynreloc_value;
  struct elf32_arm_link_hash_table * globals;
  struct elf32_arm_link_hash_entry *eh;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  bfd_vma plt_offset;
  bfd_vma gotplt_offset;
  bfd_boolean has_iplt_entry;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return bfd_reloc_notsupported;

  BFD_ASSERT (is_arm_elf (input_bfd));

  /* Some relocation types map to different relocations depending on the
     target.  We pick the right one here.  */
  r_type = arm_real_reloc_type (globals, r_type);

  /* It is possible to have linker relaxations on some TLS access
     models.  Update our information here.  */
  r_type = elf32_arm_tls_transition (info, r_type, h);

  if (r_type != howto->type)
    howto = elf32_arm_howto_from_type (r_type);

  eh = (struct elf32_arm_link_hash_entry *) h;
  sgot = globals->root.sgot;
  local_got_offsets = elf_local_got_offsets (input_bfd);
  local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);

  if (globals->root.dynamic_sections_created)
    srelgot = globals->root.srelgot;
  else
    srelgot = NULL;

  r_symndx = ELF32_R_SYM (rel->r_info);

  if (globals->use_rel)
    {
      addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;

      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  signed_addend = -1;
	  signed_addend &= ~ howto->src_mask;
	  signed_addend |= addend;
	}
      else
	signed_addend = addend;
    }
  else
    addend = signed_addend = rel->r_addend;

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are resolving a function call relocation.  */
  if (using_thumb_only (globals)
      && (r_type == R_ARM_THM_CALL
	  || r_type == R_ARM_THM_JUMP24)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* Record the symbol information that should be used in dynamic
     relocations.  */
  dynreloc_st_type = st_type;
  dynreloc_value = value;
  if (branch_type == ST_BRANCH_TO_THUMB)
    dynreloc_value |= 1;
8887 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
8888 VALUE appropriately for relocations that we resolve at link time. */
8889 has_iplt_entry
= FALSE
;
8890 if (elf32_arm_get_plt_info (input_bfd
, eh
, r_symndx
, &root_plt
, &arm_plt
)
8891 && root_plt
->offset
!= (bfd_vma
) -1)
8893 plt_offset
= root_plt
->offset
;
8894 gotplt_offset
= arm_plt
->got_offset
;
8896 if (h
== NULL
|| eh
->is_iplt
)
8898 has_iplt_entry
= TRUE
;
8899 splt
= globals
->root
.iplt
;
8901 /* Populate .iplt entries here, because not all of them will
8902 be seen by finish_dynamic_symbol. The lower bit is set if
8903 we have already populated the entry. */
8908 if (elf32_arm_populate_plt_entry (output_bfd
, info
, root_plt
, arm_plt
,
8909 -1, dynreloc_value
))
8910 root_plt
->offset
|= 1;
8912 return bfd_reloc_notsupported
;
8915 /* Static relocations always resolve to the .iplt entry. */
8917 value
= (splt
->output_section
->vma
8918 + splt
->output_offset
8920 branch_type
= ST_BRANCH_TO_ARM
;
8922 /* If there are non-call relocations that resolve to the .iplt
8923 entry, then all dynamic ones must too. */
8924 if (arm_plt
->noncall_refcount
!= 0)
8926 dynreloc_st_type
= st_type
;
8927 dynreloc_value
= value
;
8931 /* We populate the .plt entry in finish_dynamic_symbol. */
8932 splt
= globals
->root
.splt
;
8937 plt_offset
= (bfd_vma
) -1;
8938 gotplt_offset
= (bfd_vma
) -1;
8944 /* We don't need to find a value for this symbol. It's just a
8946 *unresolved_reloc_p
= FALSE
;
8947 return bfd_reloc_ok
;
8950 if (!globals
->vxworks_p
)
8951 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
8955 case R_ARM_ABS32_NOI
:
8957 case R_ARM_REL32_NOI
:
8963 /* Handle relocations which should use the PLT entry. ABS32/REL32
8964 will use the symbol's value, which may point to a PLT entry, but we
8965 don't need to handle that here. If we created a PLT entry, all
8966 branches in this object should go to it, except if the PLT is too
8967 far away, in which case a long branch stub should be inserted. */
8968 if ((r_type
!= R_ARM_ABS32
&& r_type
!= R_ARM_REL32
8969 && r_type
!= R_ARM_ABS32_NOI
&& r_type
!= R_ARM_REL32_NOI
8970 && r_type
!= R_ARM_CALL
8971 && r_type
!= R_ARM_JUMP24
8972 && r_type
!= R_ARM_PLT32
)
8973 && plt_offset
!= (bfd_vma
) -1)
8975 /* If we've created a .plt section, and assigned a PLT entry
8976 to this function, it must either be a STT_GNU_IFUNC reference
8977 or not be known to bind locally. In other cases, we should
8978 have cleared the PLT entry by now. */
8979 BFD_ASSERT (has_iplt_entry
|| !SYMBOL_CALLS_LOCAL (info
, h
));
8981 value
= (splt
->output_section
->vma
8982 + splt
->output_offset
8984 *unresolved_reloc_p
= FALSE
;
8985 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
8986 contents
, rel
->r_offset
, value
,
8990 /* When generating a shared object or relocatable executable, these
8991 relocations are copied into the output file to be resolved at
8993 if ((bfd_link_pic (info
)
8994 || globals
->root
.is_relocatable_executable
)
8995 && (input_section
->flags
& SEC_ALLOC
)
8996 && !(globals
->vxworks_p
8997 && strcmp (input_section
->output_section
->name
,
8999 && ((r_type
!= R_ARM_REL32
&& r_type
!= R_ARM_REL32_NOI
)
9000 || !SYMBOL_CALLS_LOCAL (info
, h
))
9001 && !(input_bfd
== globals
->stub_bfd
9002 && strstr (input_section
->name
, STUB_SUFFIX
))
9004 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
9005 || h
->root
.type
!= bfd_link_hash_undefweak
)
9006 && r_type
!= R_ARM_PC24
9007 && r_type
!= R_ARM_CALL
9008 && r_type
!= R_ARM_JUMP24
9009 && r_type
!= R_ARM_PREL31
9010 && r_type
!= R_ARM_PLT32
)
9012 Elf_Internal_Rela outrel
;
9013 bfd_boolean skip
, relocate
;
9015 if ((r_type
== R_ARM_REL32
|| r_type
== R_ARM_REL32_NOI
)
9018 char *v
= _("shared object");
9020 if (bfd_link_executable (info
))
9021 v
= _("PIE executable");
9023 (*_bfd_error_handler
)
9024 (_("%B: relocation %s against external or undefined symbol `%s'"
9025 " can not be used when making a %s; recompile with -fPIC"), input_bfd
,
9026 elf32_arm_howto_table_1
[r_type
].name
, h
->root
.root
.string
, v
);
9027 return bfd_reloc_notsupported
;
9030 *unresolved_reloc_p
= FALSE
;
9032 if (sreloc
== NULL
&& globals
->root
.dynamic_sections_created
)
9034 sreloc
= _bfd_elf_get_dynamic_reloc_section (input_bfd
, input_section
,
9035 ! globals
->use_rel
);
9038 return bfd_reloc_notsupported
;
9044 outrel
.r_addend
= addend
;
9046 _bfd_elf_section_offset (output_bfd
, info
, input_section
,
9048 if (outrel
.r_offset
== (bfd_vma
) -1)
9050 else if (outrel
.r_offset
== (bfd_vma
) -2)
9051 skip
= TRUE
, relocate
= TRUE
;
9052 outrel
.r_offset
+= (input_section
->output_section
->vma
9053 + input_section
->output_offset
);
9056 memset (&outrel
, 0, sizeof outrel
);
9059 && (!bfd_link_pic (info
)
9060 || !SYMBOLIC_BIND (info
, h
)
9061 || !h
->def_regular
))
9062 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, r_type
);
9067 /* This symbol is local, or marked to become local. */
9068 BFD_ASSERT (r_type
== R_ARM_ABS32
|| r_type
== R_ARM_ABS32_NOI
);
9069 if (globals
->symbian_p
)
/* On Symbian OS, the data segment and text segment
9074 can be relocated independently. Therefore, we
9075 must indicate the segment to which this
9076 relocation is relative. The BPABI allows us to
9077 use any symbol in the right segment; we just use
9078 the section symbol as it is convenient. (We
9079 cannot use the symbol given by "h" directly as it
9080 will not appear in the dynamic symbol table.)
9082 Note that the dynamic linker ignores the section
9083 symbol value, so we don't subtract osec->vma
9084 from the emitted reloc addend. */
9086 osec
= sym_sec
->output_section
;
9088 osec
= input_section
->output_section
;
9089 symbol
= elf_section_data (osec
)->dynindx
;
9092 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
9094 if ((osec
->flags
& SEC_READONLY
) == 0
9095 && htab
->data_index_section
!= NULL
)
9096 osec
= htab
->data_index_section
;
9098 osec
= htab
->text_index_section
;
9099 symbol
= elf_section_data (osec
)->dynindx
;
9101 BFD_ASSERT (symbol
!= 0);
9104 /* On SVR4-ish systems, the dynamic loader cannot
9105 relocate the text and data segments independently,
9106 so the symbol does not matter. */
9108 if (dynreloc_st_type
== STT_GNU_IFUNC
)
9109 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
9110 to the .iplt entry. Instead, every non-call reference
9111 must use an R_ARM_IRELATIVE relocation to obtain the
9112 correct run-time address. */
9113 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_IRELATIVE
);
9115 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_RELATIVE
);
9116 if (globals
->use_rel
)
9119 outrel
.r_addend
+= dynreloc_value
;
9122 elf32_arm_add_dynreloc (output_bfd
, info
, sreloc
, &outrel
);
9124 /* If this reloc is against an external symbol, we do not want to
9125 fiddle with the addend. Otherwise, we need to include the symbol
9126 value so that it becomes an addend for the dynamic reloc. */
9128 return bfd_reloc_ok
;
9130 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
9131 contents
, rel
->r_offset
,
9132 dynreloc_value
, (bfd_vma
) 0);
9134 else switch (r_type
)
9137 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
9139 case R_ARM_XPC25
: /* Arm BLX instruction. */
9142 case R_ARM_PC24
: /* Arm B/BL instruction. */
9145 struct elf32_arm_stub_hash_entry
*stub_entry
= NULL
;
9147 if (r_type
== R_ARM_XPC25
)
9149 /* Check for Arm calling Arm function. */
9150 /* FIXME: Should we translate the instruction into a BL
9151 instruction instead ? */
9152 if (branch_type
!= ST_BRANCH_TO_THUMB
)
9153 (*_bfd_error_handler
)
9154 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9156 h
? h
->root
.root
.string
: "(local)");
9158 else if (r_type
== R_ARM_PC24
)
9160 /* Check for Arm calling Thumb function. */
9161 if (branch_type
== ST_BRANCH_TO_THUMB
)
9163 if (elf32_arm_to_thumb_stub (info
, sym_name
, input_bfd
,
9164 output_bfd
, input_section
,
9165 hit_data
, sym_sec
, rel
->r_offset
,
9166 signed_addend
, value
,
9168 return bfd_reloc_ok
;
9170 return bfd_reloc_dangerous
;
9174 /* Check if a stub has to be inserted because the
9175 destination is too far or we are changing mode. */
9176 if ( r_type
== R_ARM_CALL
9177 || r_type
== R_ARM_JUMP24
9178 || r_type
== R_ARM_PLT32
)
9180 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
9181 struct elf32_arm_link_hash_entry
*hash
;
9183 hash
= (struct elf32_arm_link_hash_entry
*) h
;
9184 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
9185 st_type
, &branch_type
,
9186 hash
, value
, sym_sec
,
9187 input_bfd
, sym_name
);
9189 if (stub_type
!= arm_stub_none
)
9191 /* The target is out of reach, so redirect the
9192 branch to the local stub for this function. */
9193 stub_entry
= elf32_arm_get_stub_entry (input_section
,
9198 if (stub_entry
!= NULL
)
9199 value
= (stub_entry
->stub_offset
9200 + stub_entry
->stub_sec
->output_offset
9201 + stub_entry
->stub_sec
->output_section
->vma
);
9203 if (plt_offset
!= (bfd_vma
) -1)
9204 *unresolved_reloc_p
= FALSE
;
9209 /* If the call goes through a PLT entry, make sure to
9210 check distance to the right destination address. */
9211 if (plt_offset
!= (bfd_vma
) -1)
9213 value
= (splt
->output_section
->vma
9214 + splt
->output_offset
9216 *unresolved_reloc_p
= FALSE
;
9217 /* The PLT entry is in ARM mode, regardless of the
9219 branch_type
= ST_BRANCH_TO_ARM
;
9224 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9226 S is the address of the symbol in the relocation.
9227 P is address of the instruction being relocated.
9228 A is the addend (extracted from the instruction) in bytes.
9230 S is held in 'value'.
9231 P is the base address of the section containing the
9232 instruction plus the offset of the reloc into that
9234 (input_section->output_section->vma +
9235 input_section->output_offset +
9237 A is the addend, converted into bytes, ie:
9240 Note: None of these operations have knowledge of the pipeline
9241 size of the processor, thus it is up to the assembler to
9242 encode this information into the addend. */
9243 value
-= (input_section
->output_section
->vma
9244 + input_section
->output_offset
);
9245 value
-= rel
->r_offset
;
9246 if (globals
->use_rel
)
9247 value
+= (signed_addend
<< howto
->size
);
9249 /* RELA addends do not have to be adjusted by howto->size. */
9250 value
+= signed_addend
;
9252 signed_addend
= value
;
9253 signed_addend
>>= howto
->rightshift
;
9255 /* A branch to an undefined weak symbol is turned into a jump to
9256 the next instruction unless a PLT entry will be created.
9257 Do the same for local undefined symbols (but not for STN_UNDEF).
9258 The jump to the next instruction is optimized as a NOP depending
9259 on the architecture. */
9260 if (h
? (h
->root
.type
== bfd_link_hash_undefweak
9261 && plt_offset
== (bfd_vma
) -1)
9262 : r_symndx
!= STN_UNDEF
&& bfd_is_und_section (sym_sec
))
9264 value
= (bfd_get_32 (input_bfd
, hit_data
) & 0xf0000000);
9266 if (arch_has_arm_nop (globals
))
9267 value
|= 0x0320f000;
9269 value
|= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
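	  /* For reference: with the condition bits preserved above, the
	     0x0320f000 form yields the ARMv6K+ NOP encoding (0xe320f000
	     when unconditional), while 0x01a00000 is MOV r0, r0.  */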
9273 /* Perform a signed range check. */
9274 if ( signed_addend
> ((bfd_signed_vma
) (howto
->dst_mask
>> 1))
9275 || signed_addend
< - ((bfd_signed_vma
) ((howto
->dst_mask
+ 1) >> 1)))
9276 return bfd_reloc_overflow
;
9278 addend
= (value
& 2);
9280 value
= (signed_addend
& howto
->dst_mask
)
9281 | (bfd_get_32 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
9283 if (r_type
== R_ARM_CALL
)
9285 /* Set the H bit in the BLX instruction. */
9286 if (branch_type
== ST_BRANCH_TO_THUMB
)
9291 value
&= ~(bfd_vma
)(1 << 24);
9294 /* Select the correct instruction (BL or BLX). */
9295 /* Only if we are not handling a BL to a stub. In this
9296 case, mode switching is performed by the stub. */
9297 if (branch_type
== ST_BRANCH_TO_THUMB
&& !stub_entry
)
9299 else if (stub_entry
|| branch_type
!= ST_BRANCH_UNKNOWN
)
9301 value
&= ~(bfd_vma
)(1 << 28);
9311 if (branch_type
== ST_BRANCH_TO_THUMB
)
9315 case R_ARM_ABS32_NOI
:
9321 if (branch_type
== ST_BRANCH_TO_THUMB
)
9323 value
-= (input_section
->output_section
->vma
9324 + input_section
->output_offset
+ rel
->r_offset
);
9327 case R_ARM_REL32_NOI
:
9329 value
-= (input_section
->output_section
->vma
9330 + input_section
->output_offset
+ rel
->r_offset
);
9334 value
-= (input_section
->output_section
->vma
9335 + input_section
->output_offset
+ rel
->r_offset
);
9336 value
+= signed_addend
;
9337 if (! h
|| h
->root
.type
!= bfd_link_hash_undefweak
)
9339 /* Check for overflow. */
9340 if ((value
^ (value
>> 1)) & (1 << 30))
9341 return bfd_reloc_overflow
;
9343 value
&= 0x7fffffff;
9344 value
|= (bfd_get_32 (input_bfd
, hit_data
) & 0x80000000);
9345 if (branch_type
== ST_BRANCH_TO_THUMB
)
9350 bfd_put_32 (input_bfd
, value
, hit_data
);
9351 return bfd_reloc_ok
;
/* PR 16202: Refetch the addend using the correct size. */
9355 if (globals
->use_rel
)
9356 addend
= bfd_get_8 (input_bfd
, hit_data
);
9359 /* There is no way to tell whether the user intended to use a signed or
9360 unsigned addend. When checking for overflow we accept either,
9361 as specified by the AAELF. */
9362 if ((long) value
> 0xff || (long) value
< -0x80)
9363 return bfd_reloc_overflow
;
9365 bfd_put_8 (input_bfd
, value
, hit_data
);
9366 return bfd_reloc_ok
;
/* PR 16202: Refetch the addend using the correct size. */
9370 if (globals
->use_rel
)
9371 addend
= bfd_get_16 (input_bfd
, hit_data
);
9374 /* See comment for R_ARM_ABS8. */
9375 if ((long) value
> 0xffff || (long) value
< -0x8000)
9376 return bfd_reloc_overflow
;
9378 bfd_put_16 (input_bfd
, value
, hit_data
);
9379 return bfd_reloc_ok
;
9381 case R_ARM_THM_ABS5
:
9382 /* Support ldr and str instructions for the thumb. */
9383 if (globals
->use_rel
)
9385 /* Need to refetch addend. */
9386 addend
= bfd_get_16 (input_bfd
, hit_data
) & howto
->src_mask
;
9387 /* ??? Need to determine shift amount from operand size. */
9388 addend
>>= howto
->rightshift
;
9392 /* ??? Isn't value unsigned? */
9393 if ((long) value
> 0x1f || (long) value
< -0x10)
9394 return bfd_reloc_overflow
;
9396 /* ??? Value needs to be properly shifted into place first. */
9397 value
|= bfd_get_16 (input_bfd
, hit_data
) & 0xf83f;
9398 bfd_put_16 (input_bfd
, value
, hit_data
);
9399 return bfd_reloc_ok
;
9401 case R_ARM_THM_ALU_PREL_11_0
:
9402 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
9405 bfd_signed_vma relocation
;
9407 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
9408 | bfd_get_16 (input_bfd
, hit_data
+ 2);
9410 if (globals
->use_rel
)
9412 signed_addend
= (insn
& 0xff) | ((insn
& 0x7000) >> 4)
9413 | ((insn
& (1 << 26)) >> 15);
9414 if (insn
& 0xf00000)
9415 signed_addend
= -signed_addend
;
9418 relocation
= value
+ signed_addend
;
9419 relocation
-= Pa (input_section
->output_section
->vma
9420 + input_section
->output_offset
9425 if (value
>= 0x1000)
9426 return bfd_reloc_overflow
;
9428 insn
= (insn
& 0xfb0f8f00) | (value
& 0xff)
9429 | ((value
& 0x700) << 4)
9430 | ((value
& 0x800) << 15);
9434 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
9435 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
9437 return bfd_reloc_ok
;
9441 /* PR 10073: This reloc is not generated by the GNU toolchain,
9442 but it is supported for compatibility with third party libraries
9443 generated by other compilers, specifically the ARM/IAR. */
9446 bfd_signed_vma relocation
;
9448 insn
= bfd_get_16 (input_bfd
, hit_data
);
9450 if (globals
->use_rel
)
9451 addend
= ((((insn
& 0x00ff) << 2) + 4) & 0x3ff) -4;
9453 relocation
= value
+ addend
;
9454 relocation
-= Pa (input_section
->output_section
->vma
9455 + input_section
->output_offset
9460 /* We do not check for overflow of this reloc. Although strictly
9461 speaking this is incorrect, it appears to be necessary in order
9462 to work with IAR generated relocs. Since GCC and GAS do not
9463 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9464 a problem for them. */
9467 insn
= (insn
& 0xff00) | (value
>> 2);
9469 bfd_put_16 (input_bfd
, insn
, hit_data
);
9471 return bfd_reloc_ok
;
9474 case R_ARM_THM_PC12
:
9475 /* Corresponds to: ldr.w reg, [pc, #offset]. */
9478 bfd_signed_vma relocation
;
9480 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
9481 | bfd_get_16 (input_bfd
, hit_data
+ 2);
9483 if (globals
->use_rel
)
9485 signed_addend
= insn
& 0xfff;
9486 if (!(insn
& (1 << 23)))
9487 signed_addend
= -signed_addend
;
9490 relocation
= value
+ signed_addend
;
9491 relocation
-= Pa (input_section
->output_section
->vma
9492 + input_section
->output_offset
9497 if (value
>= 0x1000)
9498 return bfd_reloc_overflow
;
9500 insn
= (insn
& 0xff7ff000) | value
;
9501 if (relocation
>= 0)
9504 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
9505 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
9507 return bfd_reloc_ok
;
9510 case R_ARM_THM_XPC22
:
9511 case R_ARM_THM_CALL
:
9512 case R_ARM_THM_JUMP24
:
9513 /* Thumb BL (branch long instruction). */
9517 bfd_boolean overflow
= FALSE
;
9518 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
9519 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
9520 bfd_signed_vma reloc_signed_max
;
9521 bfd_signed_vma reloc_signed_min
;
9523 bfd_signed_vma signed_check
;
9525 const int thumb2
= using_thumb2 (globals
);
9527 /* A branch to an undefined weak symbol is turned into a jump to
9528 the next instruction unless a PLT entry will be created.
9529 The jump to the next instruction is optimized as a NOP.W for
9530 Thumb-2 enabled architectures. */
9531 if (h
&& h
->root
.type
== bfd_link_hash_undefweak
9532 && plt_offset
== (bfd_vma
) -1)
9534 if (arch_has_thumb2_nop (globals
))
9536 bfd_put_16 (input_bfd
, 0xf3af, hit_data
);
9537 bfd_put_16 (input_bfd
, 0x8000, hit_data
+ 2);
9541 bfd_put_16 (input_bfd
, 0xe000, hit_data
);
9542 bfd_put_16 (input_bfd
, 0xbf00, hit_data
+ 2);
9544 return bfd_reloc_ok
;
9547 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
9548 with Thumb-1) involving the J1 and J2 bits. */
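	/* For reference: the encoded branch offset is
	   SignExtend (S:I1:I2:imm10:imm11:'0'), with I1 = NOT (J1 XOR S)
	   and I2 = NOT (J2 XOR S); the i1/i2 computations below implement
	   exactly that.  */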
9549 if (globals
->use_rel
)
9551 bfd_vma s
= (upper_insn
& (1 << 10)) >> 10;
9552 bfd_vma upper
= upper_insn
& 0x3ff;
9553 bfd_vma lower
= lower_insn
& 0x7ff;
9554 bfd_vma j1
= (lower_insn
& (1 << 13)) >> 13;
9555 bfd_vma j2
= (lower_insn
& (1 << 11)) >> 11;
9556 bfd_vma i1
= j1
^ s
? 0 : 1;
9557 bfd_vma i2
= j2
^ s
? 0 : 1;
9559 addend
= (i1
<< 23) | (i2
<< 22) | (upper
<< 12) | (lower
<< 1);
9561 addend
= (addend
| ((s
? 0 : 1) << 24)) - (1 << 24);
9563 signed_addend
= addend
;
9566 if (r_type
== R_ARM_THM_XPC22
)
9568 /* Check for Thumb to Thumb call. */
9569 /* FIXME: Should we translate the instruction into a BL
9570 instruction instead ? */
9571 if (branch_type
== ST_BRANCH_TO_THUMB
)
9572 (*_bfd_error_handler
)
9573 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9575 h
? h
->root
.root
.string
: "(local)");
9579 /* If it is not a call to Thumb, assume call to Arm.
9580 If it is a call relative to a section name, then it is not a
9581 function call at all, but rather a long jump. Calls through
9582 the PLT do not require stubs. */
9583 if (branch_type
== ST_BRANCH_TO_ARM
&& plt_offset
== (bfd_vma
) -1)
9585 if (globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
9587 /* Convert BL to BLX. */
9588 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
9590 else if (( r_type
!= R_ARM_THM_CALL
)
9591 && (r_type
!= R_ARM_THM_JUMP24
))
9593 if (elf32_thumb_to_arm_stub
9594 (info
, sym_name
, input_bfd
, output_bfd
, input_section
,
9595 hit_data
, sym_sec
, rel
->r_offset
, signed_addend
, value
,
9597 return bfd_reloc_ok
;
9599 return bfd_reloc_dangerous
;
9602 else if (branch_type
== ST_BRANCH_TO_THUMB
9604 && r_type
== R_ARM_THM_CALL
)
9606 /* Make sure this is a BL. */
9607 lower_insn
|= 0x1800;
9611 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
9612 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
)
9614 /* Check if a stub has to be inserted because the destination
9616 struct elf32_arm_stub_hash_entry
*stub_entry
;
9617 struct elf32_arm_link_hash_entry
*hash
;
9619 hash
= (struct elf32_arm_link_hash_entry
*) h
;
9621 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
9622 st_type
, &branch_type
,
9623 hash
, value
, sym_sec
,
9624 input_bfd
, sym_name
);
9626 if (stub_type
!= arm_stub_none
)
9628 /* The target is out of reach or we are changing modes, so
9629 redirect the branch to the local stub for this
9631 stub_entry
= elf32_arm_get_stub_entry (input_section
,
9635 if (stub_entry
!= NULL
)
9637 value
= (stub_entry
->stub_offset
9638 + stub_entry
->stub_sec
->output_offset
9639 + stub_entry
->stub_sec
->output_section
->vma
);
9641 if (plt_offset
!= (bfd_vma
) -1)
9642 *unresolved_reloc_p
= FALSE
;
9645 /* If this call becomes a call to Arm, force BLX. */
9646 if (globals
->use_blx
&& (r_type
== R_ARM_THM_CALL
))
9649 && !arm_stub_is_thumb (stub_entry
->stub_type
))
9650 || branch_type
!= ST_BRANCH_TO_THUMB
)
9651 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
9656 /* Handle calls via the PLT. */
9657 if (stub_type
== arm_stub_none
&& plt_offset
!= (bfd_vma
) -1)
9659 value
= (splt
->output_section
->vma
9660 + splt
->output_offset
9663 if (globals
->use_blx
9664 && r_type
== R_ARM_THM_CALL
9665 && ! using_thumb_only (globals
))
9667 /* If the Thumb BLX instruction is available, convert
9668 the BL to a BLX instruction to call the ARM-mode
9670 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
9671 branch_type
= ST_BRANCH_TO_ARM
;
9675 if (! using_thumb_only (globals
))
9676 /* Target the Thumb stub before the ARM PLT entry. */
9677 value
-= PLT_THUMB_STUB_SIZE
;
9678 branch_type
= ST_BRANCH_TO_THUMB
;
9680 *unresolved_reloc_p
= FALSE
;
9683 relocation
= value
+ signed_addend
;
9685 relocation
-= (input_section
->output_section
->vma
9686 + input_section
->output_offset
9689 check
= relocation
>> howto
->rightshift
;
9691 /* If this is a signed value, the rightshift just dropped
9692 leading 1 bits (assuming twos complement). */
9693 if ((bfd_signed_vma
) relocation
>= 0)
9694 signed_check
= check
;
9696 signed_check
= check
| ~((bfd_vma
) -1 >> howto
->rightshift
);
/* Calculate the permissible maximum and minimum values for
9699 this relocation according to whether we're relocating for
9701 bitsize
= howto
->bitsize
;
9704 reloc_signed_max
= (1 << (bitsize
- 1)) - 1;
9705 reloc_signed_min
= ~reloc_signed_max
;
9707 /* Assumes two's complement. */
9708 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
9711 if ((lower_insn
& 0x5000) == 0x4000)
9712 /* For a BLX instruction, make sure that the relocation is rounded up
9713 to a word boundary. This follows the semantics of the instruction
9714 which specifies that bit 1 of the target address will come from bit
9715 1 of the base address. */
9716 relocation
= (relocation
+ 2) & ~ 3;
9718 /* Put RELOCATION back into the insn. Assumes two's complement.
9719 We use the Thumb-2 encoding, which is safe even if dealing with
9720 a Thumb-1 instruction by virtue of our overflow check above. */
9721 reloc_sign
= (signed_check
< 0) ? 1 : 0;
9722 upper_insn
= (upper_insn
& ~(bfd_vma
) 0x7ff)
9723 | ((relocation
>> 12) & 0x3ff)
9724 | (reloc_sign
<< 10);
9725 lower_insn
= (lower_insn
& ~(bfd_vma
) 0x2fff)
9726 | (((!((relocation
>> 23) & 1)) ^ reloc_sign
) << 13)
9727 | (((!((relocation
>> 22) & 1)) ^ reloc_sign
) << 11)
9728 | ((relocation
>> 1) & 0x7ff);
9730 /* Put the relocated value back in the object file: */
9731 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
9732 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
9734 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
9738 case R_ARM_THM_JUMP19
:
9739 /* Thumb32 conditional branch instruction. */
9742 bfd_boolean overflow
= FALSE
;
9743 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
9744 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
9745 bfd_signed_vma reloc_signed_max
= 0xffffe;
9746 bfd_signed_vma reloc_signed_min
= -0x100000;
9747 bfd_signed_vma signed_check
;
9748 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
9749 struct elf32_arm_stub_hash_entry
*stub_entry
;
9750 struct elf32_arm_link_hash_entry
*hash
;
9752 /* Need to refetch the addend, reconstruct the top three bits,
9753 and squish the two 11 bit pieces together. */
9754 if (globals
->use_rel
)
9756 bfd_vma S
= (upper_insn
& 0x0400) >> 10;
9757 bfd_vma upper
= (upper_insn
& 0x003f);
9758 bfd_vma J1
= (lower_insn
& 0x2000) >> 13;
9759 bfd_vma J2
= (lower_insn
& 0x0800) >> 11;
9760 bfd_vma lower
= (lower_insn
& 0x07ff);
9765 upper
-= 0x0100; /* Sign extend. */
9767 addend
= (upper
<< 12) | (lower
<< 1);
9768 signed_addend
= addend
;
9771 /* Handle calls via the PLT. */
9772 if (plt_offset
!= (bfd_vma
) -1)
9774 value
= (splt
->output_section
->vma
9775 + splt
->output_offset
9777 /* Target the Thumb stub before the ARM PLT entry. */
9778 value
-= PLT_THUMB_STUB_SIZE
;
9779 *unresolved_reloc_p
= FALSE
;
9782 hash
= (struct elf32_arm_link_hash_entry
*)h
;
9784 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
9785 st_type
, &branch_type
,
9786 hash
, value
, sym_sec
,
9787 input_bfd
, sym_name
);
9788 if (stub_type
!= arm_stub_none
)
9790 stub_entry
= elf32_arm_get_stub_entry (input_section
,
9794 if (stub_entry
!= NULL
)
9796 value
= (stub_entry
->stub_offset
9797 + stub_entry
->stub_sec
->output_offset
9798 + stub_entry
->stub_sec
->output_section
->vma
);
9802 relocation
= value
+ signed_addend
;
9803 relocation
-= (input_section
->output_section
->vma
9804 + input_section
->output_offset
9806 signed_check
= (bfd_signed_vma
) relocation
;
9808 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
9811 /* Put RELOCATION back into the insn. */
9813 bfd_vma S
= (relocation
& 0x00100000) >> 20;
9814 bfd_vma J2
= (relocation
& 0x00080000) >> 19;
9815 bfd_vma J1
= (relocation
& 0x00040000) >> 18;
9816 bfd_vma hi
= (relocation
& 0x0003f000) >> 12;
9817 bfd_vma lo
= (relocation
& 0x00000ffe) >> 1;
9819 upper_insn
= (upper_insn
& 0xfbc0) | (S
<< 10) | hi
;
9820 lower_insn
= (lower_insn
& 0xd000) | (J1
<< 13) | (J2
<< 11) | lo
;
9823 /* Put the relocated value back in the object file: */
9824 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
9825 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
9827 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
9830 case R_ARM_THM_JUMP11
:
9831 case R_ARM_THM_JUMP8
:
9832 case R_ARM_THM_JUMP6
:
/* Thumb B (branch) instruction.  */
9835 bfd_signed_vma relocation
;
9836 bfd_signed_vma reloc_signed_max
= (1 << (howto
->bitsize
- 1)) - 1;
9837 bfd_signed_vma reloc_signed_min
= ~ reloc_signed_max
;
9838 bfd_signed_vma signed_check
;
9840 /* CZB cannot jump backward. */
9841 if (r_type
== R_ARM_THM_JUMP6
)
9842 reloc_signed_min
= 0;
9844 if (globals
->use_rel
)
9846 /* Need to refetch addend. */
9847 addend
= bfd_get_16 (input_bfd
, hit_data
) & howto
->src_mask
;
9848 if (addend
& ((howto
->src_mask
+ 1) >> 1))
9851 signed_addend
&= ~ howto
->src_mask
;
9852 signed_addend
|= addend
;
9855 signed_addend
= addend
;
9856 /* The value in the insn has been right shifted. We need to
9857 undo this, so that we can perform the address calculation
9858 in terms of bytes. */
9859 signed_addend
<<= howto
->rightshift
;
9861 relocation
= value
+ signed_addend
;
9863 relocation
-= (input_section
->output_section
->vma
9864 + input_section
->output_offset
9867 relocation
>>= howto
->rightshift
;
9868 signed_check
= relocation
;
9870 if (r_type
== R_ARM_THM_JUMP6
)
9871 relocation
= ((relocation
& 0x0020) << 4) | ((relocation
& 0x001f) << 3);
9873 relocation
&= howto
->dst_mask
;
9874 relocation
|= (bfd_get_16 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
9876 bfd_put_16 (input_bfd
, relocation
, hit_data
);
9878 /* Assumes two's complement. */
9879 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
9880 return bfd_reloc_overflow
;
9882 return bfd_reloc_ok
;
9885 case R_ARM_ALU_PCREL7_0
:
9886 case R_ARM_ALU_PCREL15_8
:
9887 case R_ARM_ALU_PCREL23_15
:
9892 insn
= bfd_get_32 (input_bfd
, hit_data
);
9893 if (globals
->use_rel
)
9895 /* Extract the addend. */
9896 addend
= (insn
& 0xff) << ((insn
& 0xf00) >> 7);
9897 signed_addend
= addend
;
9899 relocation
= value
+ signed_addend
;
9901 relocation
-= (input_section
->output_section
->vma
9902 + input_section
->output_offset
9904 insn
= (insn
& ~0xfff)
9905 | ((howto
->bitpos
<< 7) & 0xf00)
9906 | ((relocation
>> howto
->bitpos
) & 0xff);
9907 bfd_put_32 (input_bfd
, value
, hit_data
);
9909 return bfd_reloc_ok
;
9911 case R_ARM_GNU_VTINHERIT
:
9912 case R_ARM_GNU_VTENTRY
:
9913 return bfd_reloc_ok
;
9915 case R_ARM_GOTOFF32
:
9916 /* Relocation is relative to the start of the
9917 global offset table. */
9919 BFD_ASSERT (sgot
!= NULL
);
9921 return bfd_reloc_notsupported
;
9923 /* If we are addressing a Thumb function, we need to adjust the
9924 address by one, so that attempts to call the function pointer will
9925 correctly interpret it as Thumb code. */
9926 if (branch_type
== ST_BRANCH_TO_THUMB
)
9929 /* Note that sgot->output_offset is not involved in this
9930 calculation. We always want the start of .got. If we
9931 define _GLOBAL_OFFSET_TABLE in a different way, as is
9932 permitted by the ABI, we might have to change this
9934 value
-= sgot
->output_section
->vma
;
9935 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
9936 contents
, rel
->r_offset
, value
,
9940 /* Use global offset table as symbol value. */
9941 BFD_ASSERT (sgot
!= NULL
);
9944 return bfd_reloc_notsupported
;
9946 *unresolved_reloc_p
= FALSE
;
9947 value
= sgot
->output_section
->vma
;
9948 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
9949 contents
, rel
->r_offset
, value
,
9953 case R_ARM_GOT_PREL
:
9954 /* Relocation is to the entry for this symbol in the
9955 global offset table. */
9957 return bfd_reloc_notsupported
;
9959 if (dynreloc_st_type
== STT_GNU_IFUNC
9960 && plt_offset
!= (bfd_vma
) -1
9961 && (h
== NULL
|| SYMBOL_REFERENCES_LOCAL (info
, h
)))
9963 /* We have a relocation against a locally-binding STT_GNU_IFUNC
9964 symbol, and the relocation resolves directly to the runtime
9965 target rather than to the .iplt entry. This means that any
9966 .got entry would be the same value as the .igot.plt entry,
9967 so there's no point creating both. */
9968 sgot
= globals
->root
.igotplt
;
9969 value
= sgot
->output_offset
+ gotplt_offset
;
9975 off
= h
->got
.offset
;
9976 BFD_ASSERT (off
!= (bfd_vma
) -1);
/* We have already processed one GOT relocation against
9982 if (globals
->root
.dynamic_sections_created
9983 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
9984 *unresolved_reloc_p
= FALSE
;
9988 Elf_Internal_Rela outrel
;
9990 if (h
->dynindx
!= -1 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
9992 /* If the symbol doesn't resolve locally in a static
9993 object, we have an undefined reference. If the
9994 symbol doesn't resolve locally in a dynamic object,
9995 it should be resolved by the dynamic linker. */
9996 if (globals
->root
.dynamic_sections_created
)
9998 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_GLOB_DAT
);
9999 *unresolved_reloc_p
= FALSE
;
10003 outrel
.r_addend
= 0;
10007 if (dynreloc_st_type
== STT_GNU_IFUNC
)
10008 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
10009 else if (bfd_link_pic (info
) &&
10010 (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
10011 || h
->root
.type
!= bfd_link_hash_undefweak
))
10012 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
10015 outrel
.r_addend
= dynreloc_value
;
10018 /* The GOT entry is initialized to zero by default.
10019 See if we should install a different value. */
10020 if (outrel
.r_addend
!= 0
10021 && (outrel
.r_info
== 0 || globals
->use_rel
))
10023 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10024 sgot
->contents
+ off
);
10025 outrel
.r_addend
= 0;
10028 if (outrel
.r_info
!= 0)
10030 outrel
.r_offset
= (sgot
->output_section
->vma
10031 + sgot
->output_offset
10033 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10035 h
->got
.offset
|= 1;
10037 value
= sgot
->output_offset
+ off
;
10043 BFD_ASSERT (local_got_offsets
!= NULL
&&
10044 local_got_offsets
[r_symndx
] != (bfd_vma
) -1);
10046 off
= local_got_offsets
[r_symndx
];
10048 /* The offset must always be a multiple of 4. We use the
10049 least significant bit to record whether we have already
10050 generated the necessary reloc. */
10051 if ((off
& 1) != 0)
10055 if (globals
->use_rel
)
10056 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ off
);
10058 if (bfd_link_pic (info
) || dynreloc_st_type
== STT_GNU_IFUNC
)
10060 Elf_Internal_Rela outrel
;
10062 outrel
.r_addend
= addend
+ dynreloc_value
;
10063 outrel
.r_offset
= (sgot
->output_section
->vma
10064 + sgot
->output_offset
10066 if (dynreloc_st_type
== STT_GNU_IFUNC
)
10067 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
10069 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
10070 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10073 local_got_offsets
[r_symndx
] |= 1;
10076 value
= sgot
->output_offset
+ off
;
10078 if (r_type
!= R_ARM_GOT32
)
10079 value
+= sgot
->output_section
->vma
;
10081 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10082 contents
, rel
->r_offset
, value
,
10085 case R_ARM_TLS_LDO32
:
10086 value
= value
- dtpoff_base (info
);
10088 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10089 contents
, rel
->r_offset
, value
,
10092 case R_ARM_TLS_LDM32
:
10099 off
= globals
->tls_ldm_got
.offset
;
10101 if ((off
& 1) != 0)
10105 /* If we don't know the module number, create a relocation
10107 if (bfd_link_pic (info
))
10109 Elf_Internal_Rela outrel
;
10111 if (srelgot
== NULL
)
10114 outrel
.r_addend
= 0;
10115 outrel
.r_offset
= (sgot
->output_section
->vma
10116 + sgot
->output_offset
+ off
);
10117 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32
);
10119 if (globals
->use_rel
)
10120 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10121 sgot
->contents
+ off
);
10123 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10126 bfd_put_32 (output_bfd
, 1, sgot
->contents
+ off
);
10128 globals
->tls_ldm_got
.offset
|= 1;
10131 value
= sgot
->output_section
->vma
+ sgot
->output_offset
+ off
10132 - (input_section
->output_section
->vma
+ input_section
->output_offset
+ rel
->r_offset
);
10134 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10135 contents
, rel
->r_offset
, value
,
10139 case R_ARM_TLS_CALL
:
10140 case R_ARM_THM_TLS_CALL
:
10141 case R_ARM_TLS_GD32
:
10142 case R_ARM_TLS_IE32
:
10143 case R_ARM_TLS_GOTDESC
:
10144 case R_ARM_TLS_DESCSEQ
:
10145 case R_ARM_THM_TLS_DESCSEQ
:
10147 bfd_vma off
, offplt
;
10151 BFD_ASSERT (sgot
!= NULL
);
10156 dyn
= globals
->root
.dynamic_sections_created
;
10157 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
10158 bfd_link_pic (info
),
10160 && (!bfd_link_pic (info
)
10161 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
10163 *unresolved_reloc_p
= FALSE
;
10166 off
= h
->got
.offset
;
10167 offplt
= elf32_arm_hash_entry (h
)->tlsdesc_got
;
10168 tls_type
= ((struct elf32_arm_link_hash_entry
*) h
)->tls_type
;
10172 BFD_ASSERT (local_got_offsets
!= NULL
);
10173 off
= local_got_offsets
[r_symndx
];
10174 offplt
= local_tlsdesc_gotents
[r_symndx
];
10175 tls_type
= elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
];
/* Linker relaxations happen from one of the
10179 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
10180 if (ELF32_R_TYPE(rel
->r_info
) != r_type
)
10181 tls_type
= GOT_TLS_IE
;
10183 BFD_ASSERT (tls_type
!= GOT_UNKNOWN
);
10185 if ((off
& 1) != 0)
10189 bfd_boolean need_relocs
= FALSE
;
10190 Elf_Internal_Rela outrel
;
10193 /* The GOT entries have not been initialized yet. Do it
10194 now, and emit any relocations. If both an IE GOT and a
10195 GD GOT are necessary, we emit the GD first. */
10197 if ((bfd_link_pic (info
) || indx
!= 0)
10199 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
10200 || h
->root
.type
!= bfd_link_hash_undefweak
))
10202 need_relocs
= TRUE
;
10203 BFD_ASSERT (srelgot
!= NULL
);
10206 if (tls_type
& GOT_TLS_GDESC
)
10210 /* We should have relaxed, unless this is an undefined
10212 BFD_ASSERT ((h
&& (h
->root
.type
== bfd_link_hash_undefweak
))
10213 || bfd_link_pic (info
));
10214 BFD_ASSERT (globals
->sgotplt_jump_table_size
+ offplt
+ 8
10215 <= globals
->root
.sgotplt
->size
);
10217 outrel
.r_addend
= 0;
10218 outrel
.r_offset
= (globals
->root
.sgotplt
->output_section
->vma
10219 + globals
->root
.sgotplt
->output_offset
10221 + globals
->sgotplt_jump_table_size
);
10223 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DESC
);
10224 sreloc
= globals
->root
.srelplt
;
10225 loc
= sreloc
->contents
;
10226 loc
+= globals
->next_tls_desc_index
++ * RELOC_SIZE (globals
);
10227 BFD_ASSERT (loc
+ RELOC_SIZE (globals
)
10228 <= sreloc
->contents
+ sreloc
->size
);
10230 SWAP_RELOC_OUT (globals
) (output_bfd
, &outrel
, loc
);
10232 /* For globals, the first word in the relocation gets
10233 the relocation index and the top bit set, or zero,
10234 if we're binding now. For locals, it gets the
10235 symbol's offset in the tls section. */
10236 bfd_put_32 (output_bfd
,
10237 !h
? value
- elf_hash_table (info
)->tls_sec
->vma
10238 : info
->flags
& DF_BIND_NOW
? 0
10239 : 0x80000000 | ELF32_R_SYM (outrel
.r_info
),
10240 globals
->root
.sgotplt
->contents
+ offplt
10241 + globals
->sgotplt_jump_table_size
);
10243 /* Second word in the relocation is always zero. */
10244 bfd_put_32 (output_bfd
, 0,
10245 globals
->root
.sgotplt
->contents
+ offplt
10246 + globals
->sgotplt_jump_table_size
+ 4);
10248 if (tls_type
& GOT_TLS_GD
)
10252 outrel
.r_addend
= 0;
10253 outrel
.r_offset
= (sgot
->output_section
->vma
10254 + sgot
->output_offset
10256 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DTPMOD32
);
10258 if (globals
->use_rel
)
10259 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10260 sgot
->contents
+ cur_off
);
10262 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10265 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
10266 sgot
->contents
+ cur_off
+ 4);
10269 outrel
.r_addend
= 0;
10270 outrel
.r_info
= ELF32_R_INFO (indx
,
10271 R_ARM_TLS_DTPOFF32
);
10272 outrel
.r_offset
+= 4;
10274 if (globals
->use_rel
)
10275 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10276 sgot
->contents
+ cur_off
+ 4);
10278 elf32_arm_add_dynreloc (output_bfd
, info
,
10284 /* If we are not emitting relocations for a
10285 general dynamic reference, then we must be in a
10286 static link or an executable link with the
10287 symbol binding locally. Mark it as belonging
10288 to module 1, the executable. */
10289 bfd_put_32 (output_bfd
, 1,
10290 sgot
->contents
+ cur_off
);
10291 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
10292 sgot
->contents
+ cur_off
+ 4);
10298 if (tls_type
& GOT_TLS_IE
)
10303 outrel
.r_addend
= value
- dtpoff_base (info
);
10305 outrel
.r_addend
= 0;
10306 outrel
.r_offset
= (sgot
->output_section
->vma
10307 + sgot
->output_offset
10309 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_TPOFF32
);
10311 if (globals
->use_rel
)
10312 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10313 sgot
->contents
+ cur_off
);
10315 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10318 bfd_put_32 (output_bfd
, tpoff (info
, value
),
10319 sgot
->contents
+ cur_off
);
10324 h
->got
.offset
|= 1;
10326 local_got_offsets
[r_symndx
] |= 1;
10329 if ((tls_type
& GOT_TLS_GD
) && r_type
!= R_ARM_TLS_GD32
)
10331 else if (tls_type
& GOT_TLS_GDESC
)
10334 if (ELF32_R_TYPE(rel
->r_info
) == R_ARM_TLS_CALL
10335 || ELF32_R_TYPE(rel
->r_info
) == R_ARM_THM_TLS_CALL
)
10337 bfd_signed_vma offset
;
10338 /* TLS stubs are arm mode. The original symbol is a
10339 data object, so branch_type is bogus. */
10340 branch_type
= ST_BRANCH_TO_ARM
;
10341 enum elf32_arm_stub_type stub_type
10342 = arm_type_of_stub (info
, input_section
, rel
,
10343 st_type
, &branch_type
,
10344 (struct elf32_arm_link_hash_entry
*)h
,
10345 globals
->tls_trampoline
, globals
->root
.splt
,
10346 input_bfd
, sym_name
);
10348 if (stub_type
!= arm_stub_none
)
10350 struct elf32_arm_stub_hash_entry
*stub_entry
10351 = elf32_arm_get_stub_entry
10352 (input_section
, globals
->root
.splt
, 0, rel
,
10353 globals
, stub_type
);
10354 offset
= (stub_entry
->stub_offset
10355 + stub_entry
->stub_sec
->output_offset
10356 + stub_entry
->stub_sec
->output_section
->vma
);
10359 offset
= (globals
->root
.splt
->output_section
->vma
10360 + globals
->root
.splt
->output_offset
10361 + globals
->tls_trampoline
);
10363 if (ELF32_R_TYPE(rel
->r_info
) == R_ARM_TLS_CALL
)
10365 unsigned long inst
;
10367 offset
-= (input_section
->output_section
->vma
10368 + input_section
->output_offset
10369 + rel
->r_offset
+ 8);
10371 inst
= offset
>> 2;
10372 inst
&= 0x00ffffff;
10373 value
= inst
| (globals
->use_blx
? 0xfa000000 : 0xeb000000);
10377 /* Thumb blx encodes the offset in a complicated
10379 unsigned upper_insn
, lower_insn
;
10382 offset
-= (input_section
->output_section
->vma
10383 + input_section
->output_offset
10384 + rel
->r_offset
+ 4);
10386 if (stub_type
!= arm_stub_none
10387 && arm_stub_is_thumb (stub_type
))
10389 lower_insn
= 0xd000;
10393 lower_insn
= 0xc000;
10394 /* Round up the offset to a word boundary. */
10395 offset
= (offset
+ 2) & ~2;
10399 upper_insn
= (0xf000
10400 | ((offset
>> 12) & 0x3ff)
10402 lower_insn
|= (((!((offset
>> 23) & 1)) ^ neg
) << 13)
10403 | (((!((offset
>> 22) & 1)) ^ neg
) << 11)
10404 | ((offset
>> 1) & 0x7ff);
10405 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
10406 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
10407 return bfd_reloc_ok
;
/* These relocations need special care, as besides the fact
10411 they point somewhere in .gotplt, the addend must be
10412 adjusted accordingly depending on the type of instruction
10414 else if ((r_type
== R_ARM_TLS_GOTDESC
) && (tls_type
& GOT_TLS_GDESC
))
10416 unsigned long data
, insn
;
10419 data
= bfd_get_32 (input_bfd
, hit_data
);
10425 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
- data
);
10426 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
10427 insn
= (insn
<< 16)
10428 | bfd_get_16 (input_bfd
,
10429 contents
+ rel
->r_offset
- data
+ 2);
10430 if ((insn
& 0xf800c000) == 0xf000c000)
10433 else if ((insn
& 0xffffff00) == 0x4400)
10438 (*_bfd_error_handler
)
10439 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
10440 input_bfd
, input_section
,
10441 (unsigned long)rel
->r_offset
, insn
);
10442 return bfd_reloc_notsupported
;
10447 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
- data
);
10449 switch (insn
>> 24)
10451 case 0xeb: /* bl */
10452 case 0xfa: /* blx */
10456 case 0xe0: /* add */
10461 (*_bfd_error_handler
)
10462 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
10463 input_bfd
, input_section
,
10464 (unsigned long)rel
->r_offset
, insn
);
10465 return bfd_reloc_notsupported
;
10469 value
+= ((globals
->root
.sgotplt
->output_section
->vma
10470 + globals
->root
.sgotplt
->output_offset
+ off
)
10471 - (input_section
->output_section
->vma
10472 + input_section
->output_offset
10474 + globals
->sgotplt_jump_table_size
);
10477 value
= ((globals
->root
.sgot
->output_section
->vma
10478 + globals
->root
.sgot
->output_offset
+ off
)
10479 - (input_section
->output_section
->vma
10480 + input_section
->output_offset
+ rel
->r_offset
));
10482 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10483 contents
, rel
->r_offset
, value
,
10487 case R_ARM_TLS_LE32
:
10488 if (bfd_link_dll (info
))
10490 (*_bfd_error_handler
)
10491 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10492 input_bfd
, input_section
,
10493 (long) rel
->r_offset
, howto
->name
);
10494 return bfd_reloc_notsupported
;
10497 value
= tpoff (info
, value
);
10499 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10500 contents
, rel
->r_offset
, value
,
10504 if (globals
->fix_v4bx
)
10506 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10508 /* Ensure that we have a BX instruction. */
10509 BFD_ASSERT ((insn
& 0x0ffffff0) == 0x012fff10);
10511 if (globals
->fix_v4bx
== 2 && (insn
& 0xf) != 0xf)
10513 /* Branch to veneer. */
10515 glue_addr
= elf32_arm_bx_glue (info
, insn
& 0xf);
10516 glue_addr
-= input_section
->output_section
->vma
10517 + input_section
->output_offset
10518 + rel
->r_offset
+ 8;
10519 insn
= (insn
& 0xf0000000) | 0x0a000000
10520 | ((glue_addr
>> 2) & 0x00ffffff);
10524 /* Preserve Rm (lowest four bits) and the condition code
10525 (highest four bits). Other bits encode MOV PC,Rm. */
10526 insn
= (insn
& 0xf000000f) | 0x01a0f000;
10529 bfd_put_32 (input_bfd
, insn
, hit_data
);
10531 return bfd_reloc_ok
;
10533 case R_ARM_MOVW_ABS_NC
:
10534 case R_ARM_MOVT_ABS
:
10535 case R_ARM_MOVW_PREL_NC
:
10536 case R_ARM_MOVT_PREL
:
10537 /* Until we properly support segment-base-relative addressing then
10538 we assume the segment base to be zero, as for the group relocations.
10539 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
10540 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
10541 case R_ARM_MOVW_BREL_NC
:
10542 case R_ARM_MOVW_BREL
:
10543 case R_ARM_MOVT_BREL
:
10545 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10547 if (globals
->use_rel
)
10549 addend
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
10550 signed_addend
= (addend
^ 0x8000) - 0x8000;
10553 value
+= signed_addend
;
10555 if (r_type
== R_ARM_MOVW_PREL_NC
|| r_type
== R_ARM_MOVT_PREL
)
10556 value
-= (input_section
->output_section
->vma
10557 + input_section
->output_offset
+ rel
->r_offset
);
10559 if (r_type
== R_ARM_MOVW_BREL
&& value
>= 0x10000)
10560 return bfd_reloc_overflow
;
10562 if (branch_type
== ST_BRANCH_TO_THUMB
)
10565 if (r_type
== R_ARM_MOVT_ABS
|| r_type
== R_ARM_MOVT_PREL
10566 || r_type
== R_ARM_MOVT_BREL
)
10569 insn
&= 0xfff0f000;
10570 insn
|= value
& 0xfff;
10571 insn
|= (value
& 0xf000) << 4;
10572 bfd_put_32 (input_bfd
, insn
, hit_data
);
10574 return bfd_reloc_ok
;
10576 case R_ARM_THM_MOVW_ABS_NC
:
10577 case R_ARM_THM_MOVT_ABS
:
10578 case R_ARM_THM_MOVW_PREL_NC
:
10579 case R_ARM_THM_MOVT_PREL
:
10580 /* Until we properly support segment-base-relative addressing then
10581 we assume the segment base to be zero, as for the above relocations.
10582 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
10583 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
10584 as R_ARM_THM_MOVT_ABS. */
10585 case R_ARM_THM_MOVW_BREL_NC
:
10586 case R_ARM_THM_MOVW_BREL
:
10587 case R_ARM_THM_MOVT_BREL
:
10591 insn
= bfd_get_16 (input_bfd
, hit_data
) << 16;
10592 insn
|= bfd_get_16 (input_bfd
, hit_data
+ 2);
10594 if (globals
->use_rel
)
10596 addend
= ((insn
>> 4) & 0xf000)
10597 | ((insn
>> 15) & 0x0800)
10598 | ((insn
>> 4) & 0x0700)
10600 signed_addend
= (addend
^ 0x8000) - 0x8000;
10603 value
+= signed_addend
;
10605 if (r_type
== R_ARM_THM_MOVW_PREL_NC
|| r_type
== R_ARM_THM_MOVT_PREL
)
10606 value
-= (input_section
->output_section
->vma
10607 + input_section
->output_offset
+ rel
->r_offset
);
10609 if (r_type
== R_ARM_THM_MOVW_BREL
&& value
>= 0x10000)
10610 return bfd_reloc_overflow
;
10612 if (branch_type
== ST_BRANCH_TO_THUMB
)
10615 if (r_type
== R_ARM_THM_MOVT_ABS
|| r_type
== R_ARM_THM_MOVT_PREL
10616 || r_type
== R_ARM_THM_MOVT_BREL
)
10619 insn
&= 0xfbf08f00;
10620 insn
|= (value
& 0xf000) << 4;
10621 insn
|= (value
& 0x0800) << 15;
10622 insn
|= (value
& 0x0700) << 4;
10623 insn
|= (value
& 0x00ff);
10625 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
10626 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
10628 return bfd_reloc_ok
;
10630 case R_ARM_ALU_PC_G0_NC
:
10631 case R_ARM_ALU_PC_G1_NC
:
10632 case R_ARM_ALU_PC_G0
:
10633 case R_ARM_ALU_PC_G1
:
10634 case R_ARM_ALU_PC_G2
:
10635 case R_ARM_ALU_SB_G0_NC
:
10636 case R_ARM_ALU_SB_G1_NC
:
10637 case R_ARM_ALU_SB_G0
:
10638 case R_ARM_ALU_SB_G1
:
10639 case R_ARM_ALU_SB_G2
:
10641 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10642 bfd_vma pc
= input_section
->output_section
->vma
10643 + input_section
->output_offset
+ rel
->r_offset
;
10644 /* sb is the origin of the *segment* containing the symbol. */
10645 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
10648 bfd_signed_vma signed_value
;
10651 /* Determine which group of bits to select. */
10654 case R_ARM_ALU_PC_G0_NC
:
10655 case R_ARM_ALU_PC_G0
:
10656 case R_ARM_ALU_SB_G0_NC
:
10657 case R_ARM_ALU_SB_G0
:
10661 case R_ARM_ALU_PC_G1_NC
:
10662 case R_ARM_ALU_PC_G1
:
10663 case R_ARM_ALU_SB_G1_NC
:
10664 case R_ARM_ALU_SB_G1
:
10668 case R_ARM_ALU_PC_G2
:
10669 case R_ARM_ALU_SB_G2
:
10677 /* If REL, extract the addend from the insn. If RELA, it will
10678 have already been fetched for us. */
10679 if (globals
->use_rel
)
10682 bfd_vma constant
= insn
& 0xff;
10683 bfd_vma rotation
= (insn
& 0xf00) >> 8;
10686 signed_addend
= constant
;
10689 /* Compensate for the fact that in the instruction, the
10690 rotation is stored in multiples of 2 bits. */
10693 /* Rotate "constant" right by "rotation" bits. */
10694 signed_addend
= (constant
>> rotation
) |
10695 (constant
<< (8 * sizeof (bfd_vma
) - rotation
));
10698 /* Determine if the instruction is an ADD or a SUB.
10699 (For REL, this determines the sign of the addend.) */
10700 negative
= identify_add_or_sub (insn
);
10703 (*_bfd_error_handler
)
10704 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
10705 input_bfd
, input_section
,
10706 (long) rel
->r_offset
, howto
->name
);
10707 return bfd_reloc_overflow
;
10710 signed_addend
*= negative
;
10713 /* Compute the value (X) to go in the place. */
10714 if (r_type
== R_ARM_ALU_PC_G0_NC
10715 || r_type
== R_ARM_ALU_PC_G1_NC
10716 || r_type
== R_ARM_ALU_PC_G0
10717 || r_type
== R_ARM_ALU_PC_G1
10718 || r_type
== R_ARM_ALU_PC_G2
)
10720 signed_value
= value
- pc
+ signed_addend
;
10722 /* Section base relative. */
10723 signed_value
= value
- sb
+ signed_addend
;
10725 /* If the target symbol is a Thumb function, then set the
10726 Thumb bit in the address. */
10727 if (branch_type
== ST_BRANCH_TO_THUMB
)
10730 /* Calculate the value of the relevant G_n, in encoded
10731 constant-with-rotation format. */
10732 g_n
= calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
10735 /* Check for overflow if required. */
10736 if ((r_type
== R_ARM_ALU_PC_G0
10737 || r_type
== R_ARM_ALU_PC_G1
10738 || r_type
== R_ARM_ALU_PC_G2
10739 || r_type
== R_ARM_ALU_SB_G0
10740 || r_type
== R_ARM_ALU_SB_G1
10741 || r_type
== R_ARM_ALU_SB_G2
) && residual
!= 0)
10743 (*_bfd_error_handler
)
10744 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10745 input_bfd
, input_section
,
10746 (long) rel
->r_offset
, signed_value
< 0 ? - signed_value
: signed_value
,
10748 return bfd_reloc_overflow
;
10751 /* Mask out the value and the ADD/SUB part of the opcode; take care
10752 not to destroy the S bit. */
10753 insn
&= 0xff1ff000;
10755 /* Set the opcode according to whether the value to go in the
10756 place is negative. */
10757 if (signed_value
< 0)
10762 /* Encode the offset. */
10765 bfd_put_32 (input_bfd
, insn
, hit_data
);
10767 return bfd_reloc_ok
;
10769 case R_ARM_LDR_PC_G0
:
10770 case R_ARM_LDR_PC_G1
:
10771 case R_ARM_LDR_PC_G2
:
10772 case R_ARM_LDR_SB_G0
:
10773 case R_ARM_LDR_SB_G1
:
10774 case R_ARM_LDR_SB_G2
:
10776 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10777 bfd_vma pc
= input_section
->output_section
->vma
10778 + input_section
->output_offset
+ rel
->r_offset
;
10779 /* sb is the origin of the *segment* containing the symbol. */
10780 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
10782 bfd_signed_vma signed_value
;
10785 /* Determine which groups of bits to calculate. */
10788 case R_ARM_LDR_PC_G0
:
10789 case R_ARM_LDR_SB_G0
:
10793 case R_ARM_LDR_PC_G1
:
10794 case R_ARM_LDR_SB_G1
:
10798 case R_ARM_LDR_PC_G2
:
10799 case R_ARM_LDR_SB_G2
:
10807 /* If REL, extract the addend from the insn. If RELA, it will
10808 have already been fetched for us. */
10809 if (globals
->use_rel
)
10811 int negative
= (insn
& (1 << 23)) ? 1 : -1;
10812 signed_addend
= negative
* (insn
& 0xfff);
10815 /* Compute the value (X) to go in the place. */
10816 if (r_type
== R_ARM_LDR_PC_G0
10817 || r_type
== R_ARM_LDR_PC_G1
10818 || r_type
== R_ARM_LDR_PC_G2
)
10820 signed_value
= value
- pc
+ signed_addend
;
10822 /* Section base relative. */
10823 signed_value
= value
- sb
+ signed_addend
;
10825 /* Calculate the value of the relevant G_{n-1} to obtain
10826 the residual at that stage. */
10827 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
10828 group
- 1, &residual
);
10830 /* Check for overflow. */
10831 if (residual
>= 0x1000)
10833 (*_bfd_error_handler
)
10834 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10835 input_bfd
, input_section
,
10836 (long) rel
->r_offset
, labs (signed_value
), howto
->name
);
10837 return bfd_reloc_overflow
;
10840 /* Mask out the value and U bit. */
10841 insn
&= 0xff7ff000;
10843 /* Set the U bit if the value to go in the place is non-negative. */
10844 if (signed_value
>= 0)
10847 /* Encode the offset. */
10850 bfd_put_32 (input_bfd
, insn
, hit_data
);
10852 return bfd_reloc_ok
;
10854 case R_ARM_LDRS_PC_G0
:
10855 case R_ARM_LDRS_PC_G1
:
10856 case R_ARM_LDRS_PC_G2
:
10857 case R_ARM_LDRS_SB_G0
:
10858 case R_ARM_LDRS_SB_G1
:
10859 case R_ARM_LDRS_SB_G2
:
10861 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10862 bfd_vma pc
= input_section
->output_section
->vma
10863 + input_section
->output_offset
+ rel
->r_offset
;
10864 /* sb is the origin of the *segment* containing the symbol. */
10865 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
10867 bfd_signed_vma signed_value
;
10870 /* Determine which groups of bits to calculate. */
10873 case R_ARM_LDRS_PC_G0
:
10874 case R_ARM_LDRS_SB_G0
:
10878 case R_ARM_LDRS_PC_G1
:
10879 case R_ARM_LDRS_SB_G1
:
10883 case R_ARM_LDRS_PC_G2
:
10884 case R_ARM_LDRS_SB_G2
:
10892 /* If REL, extract the addend from the insn. If RELA, it will
10893 have already been fetched for us. */
10894 if (globals
->use_rel
)
10896 int negative
= (insn
& (1 << 23)) ? 1 : -1;
10897 signed_addend
= negative
* (((insn
& 0xf00) >> 4) + (insn
& 0xf));
10900 /* Compute the value (X) to go in the place. */
10901 if (r_type
== R_ARM_LDRS_PC_G0
10902 || r_type
== R_ARM_LDRS_PC_G1
10903 || r_type
== R_ARM_LDRS_PC_G2
)
10905 signed_value
= value
- pc
+ signed_addend
;
10907 /* Section base relative. */
10908 signed_value
= value
- sb
+ signed_addend
;
10910 /* Calculate the value of the relevant G_{n-1} to obtain
10911 the residual at that stage. */
10912 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
10913 group
- 1, &residual
);
10915 /* Check for overflow. */
10916 if (residual
>= 0x100)
10918 (*_bfd_error_handler
)
10919 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10920 input_bfd
, input_section
,
10921 (long) rel
->r_offset
, labs (signed_value
), howto
->name
);
10922 return bfd_reloc_overflow
;
10925 /* Mask out the value and U bit. */
10926 insn
&= 0xff7ff0f0;
10928 /* Set the U bit if the value to go in the place is non-negative. */
10929 if (signed_value
>= 0)
10932 /* Encode the offset. */
10933 insn
|= ((residual
& 0xf0) << 4) | (residual
& 0xf);
10935 bfd_put_32 (input_bfd
, insn
, hit_data
);
10937 return bfd_reloc_ok
;
10939 case R_ARM_LDC_PC_G0
:
10940 case R_ARM_LDC_PC_G1
:
10941 case R_ARM_LDC_PC_G2
:
10942 case R_ARM_LDC_SB_G0
:
10943 case R_ARM_LDC_SB_G1
:
10944 case R_ARM_LDC_SB_G2
:
10946 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10947 bfd_vma pc
= input_section
->output_section
->vma
10948 + input_section
->output_offset
+ rel
->r_offset
;
10949 /* sb is the origin of the *segment* containing the symbol. */
10950 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
10952 bfd_signed_vma signed_value
;
10955 /* Determine which groups of bits to calculate. */
10958 case R_ARM_LDC_PC_G0
:
10959 case R_ARM_LDC_SB_G0
:
10963 case R_ARM_LDC_PC_G1
:
10964 case R_ARM_LDC_SB_G1
:
10968 case R_ARM_LDC_PC_G2
:
10969 case R_ARM_LDC_SB_G2
:
10977 /* If REL, extract the addend from the insn. If RELA, it will
10978 have already been fetched for us. */
10979 if (globals
->use_rel
)
10981 int negative
= (insn
& (1 << 23)) ? 1 : -1;
10982 signed_addend
= negative
* ((insn
& 0xff) << 2);
10985 /* Compute the value (X) to go in the place. */
10986 if (r_type
== R_ARM_LDC_PC_G0
10987 || r_type
== R_ARM_LDC_PC_G1
10988 || r_type
== R_ARM_LDC_PC_G2
)
10990 signed_value
= value
- pc
+ signed_addend
;
10992 /* Section base relative. */
10993 signed_value
= value
- sb
+ signed_addend
;
10995 /* Calculate the value of the relevant G_{n-1} to obtain
10996 the residual at that stage. */
10997 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
10998 group
- 1, &residual
);
11000 /* Check for overflow. (The absolute value to go in the place must be
11001 divisible by four and, after having been divided by four, must
11002 fit in eight bits.) */
11003 if ((residual
& 0x3) != 0 || residual
>= 0x400)
11005 (*_bfd_error_handler
)
11006 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11007 input_bfd
, input_section
,
11008 (long) rel
->r_offset
, labs (signed_value
), howto
->name
);
11009 return bfd_reloc_overflow
;
11012 /* Mask out the value and U bit. */
11013 insn
&= 0xff7fff00;
11015 /* Set the U bit if the value to go in the place is non-negative. */
11016 if (signed_value
>= 0)
11019 /* Encode the offset. */
11020 insn
|= residual
>> 2;
11022 bfd_put_32 (input_bfd
, insn
, hit_data
);
11024 return bfd_reloc_ok
;
11027 return bfd_reloc_notsupported
;
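/* Illustrative sketch (not part of the linker): the idea behind the
   "constant with rotation" splitting that the group relocations above rely
   on via calculate_group_reloc_mask.  This is a standalone approximation
   written for explanation only; the helper name and the exact rounding are
   assumptions, not the algorithm used by this file.  */
#if 0
#include <stdint.h>

/* Peel off the most significant chunk of VALUE that fits an ARM modified
   immediate (an 8-bit value rotated right by an even amount).  Returns the
   encoded rot:imm8 field and leaves the remainder in *RESIDUAL.  */
static uint32_t
example_group_chunk (uint32_t value, uint32_t *residual)
{
  int msb, shift;
  uint32_t chunk;

  if (value == 0)
    {
      *residual = 0;
      return 0;
    }

  /* Locate the most significant set bit.  */
  for (msb = 31; (value & (1u << msb)) == 0; msb--)
    ;

  /* Take the 8 bits ending at MSB, placed at an even bit position so the
     chunk can be expressed with the 4-bit rotation field.  */
  shift = msb < 7 ? 0 : ((msb - 7 + 1) & ~1);
  chunk = value & (0xffu << shift);
  *residual = value & ~(0xffu << shift);

  /* rot:imm8 encoding: value == imm8 rotated right by 2*rot.  */
  return shift == 0
	 ? chunk
	 : ((uint32_t) ((32 - shift) / 2) << 8) | (chunk >> shift);
}
#endif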
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  */

static void
arm_add_to_rel (bfd *              abfd,
		bfd_byte *         address,
		reloc_howto_type * howto,
		bfd_signed_vma     increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      int upper_insn, lower_insn;
      int upper, lower;

      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
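/* Illustrative sketch (not part of the linker): the sign-extension trick
   arm_add_to_rel uses when pulling an addend out through a contiguous
   HOWTO src_mask.  For a mask of N low bits, (mask + 1) >> 1 isolates the
   sign bit of the field; the subtraction below is equivalent to the
   OR-with-complement done in the function.  Assumed names, standalone, for
   explanation only.  */
#if 0
#include <stdint.h>

static int32_t
example_extract_signed_field (uint32_t insn, uint32_t src_mask)
{
  uint32_t field = insn & src_mask;
  uint32_t sign_bit = (src_mask + 1) >> 1;	/* e.g. 0x00800000 for 0x00ffffff.  */

  if (field & sign_bit)
    field -= sign_bit << 1;			/* Propagate the sign.  */
  return (int32_t) field;
}

/* example_extract_signed_field (0x00ffffff, 0x00ffffff) yields -1, matching
   the convention for 24-bit branch offsets.  */
#endif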
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
/* Relocate an ARM ELF section.  */

static bfd_boolean
elf32_arm_relocate_section (bfd *                  output_bfd,
			    struct bfd_link_info * info,
			    bfd *                  input_bfd,
			    asection *             input_section,
			    bfd_byte *             contents,
			    Elf_Internal_Rela *    relocs,
			    Elf_Internal_Sym *     local_syms,
			    asection **            local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int                          r_type;
      reloc_howto_type *           howto;
      unsigned long                r_symndx;
      Elf_Internal_Sym *           sym;
      asection *                   sec;
      struct elf_link_hash_entry * h;
      bfd_vma                      relocation;
      bfd_reloc_status_type        r;
      arelent                      bfd_reloc;
      char                         sym_type;
      bfd_boolean                  unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type   = ELF32_R_TYPE (rel->r_info);
      r_type   = arm_real_reloc_type (globals, r_type);

      if (   r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
      howto = bfd_reloc.howto;

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    {
	      if (!info->callbacks->undefined_symbol
		  (info, bfd_elf_string_from_elf_section
		   (input_bfd, symtab_hdr->sh_link, sym->st_name),
		   input_bfd, input_section,
		   rel->r_offset, TRUE))
		return FALSE;
	    }

	  if (globals->use_rel)
	    {
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  (*_bfd_error_handler)
			    (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (long) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  (*_bfd_error_handler)
	    ((sym_type == STT_TLS
	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned) r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					   input_section, contents, rel,
					   relocation, info, sec, name, sym_type,
					   (h ? h->target_internal
					    : ARM_SYM_BRANCH_TYPE (sym)), h,
					   &unresolved_reloc, &error_message);

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if ((! h ||
		   h->root.type != bfd_link_hash_undefined)
		  && (!((*info->callbacks->reloc_overflow)
			(info, (h ? &h->root : NULL), name, howto->name,
			 (bfd_vma) 0, input_bfd, input_section,
			 rel->r_offset))))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      if (!((*info->callbacks->reloc_dangerous)
		    (info, error_message, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  return TRUE;
}
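/* Illustrative sketch (not part of the linker): the MOVW/MOVT immediate
   packing used above when REL addends are adjusted for SEC_MERGE sections.
   The 16-bit immediate is split between imm4 (instruction bits 19:16) and
   imm12 (bits 11:0).  Standalone example with assumed names, for
   explanation only.  */
#if 0
#include <stdint.h>

static uint32_t
example_movw_get_imm16 (uint32_t insn)
{
  return ((insn & 0xf0000) >> 4) | (insn & 0xfff);
}

static uint32_t
example_movw_set_imm16 (uint32_t insn, uint32_t imm16)
{
  return (insn & 0xfff0f000)
	 | ((imm16 & 0xf000) << 4)
	 | (imm16 & 0x0fff);
}
#endif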
/* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is
   zero, adds the edit to the start of the list.  (The list must be built in
   order of ascending TINDEX: the function's callers are primarily
   responsible for maintaining that condition.)  */

static void
add_unwind_table_edit (arm_unwind_table_edit **head,
		       arm_unwind_table_edit **tail,
		       arm_unwind_edit_type type,
		       asection *linked_section,
		       unsigned int tindex)
{
  arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
      xmalloc (sizeof (arm_unwind_table_edit));

  new_edit->type = type;
  new_edit->linked_section = linked_section;
  new_edit->index = tindex;

  if (tindex > 0)
    {
      new_edit->next = NULL;

      if (*tail)
	(*tail)->next = new_edit;

      (*tail) = new_edit;

      if (!*head)
	(*head) = new_edit;
    }
  else
    {
      new_edit->next = *head;

      if (!*tail)
	*tail = new_edit;

      *head = new_edit;
    }
}

static _arm_elf_section_data *get_arm_elf_section_data (asection *);
/* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */

static void
adjust_exidx_size (asection *exidx_sec, int adjust)
{
  asection *out_sec;

  if (!exidx_sec->rawsize)
    exidx_sec->rawsize = exidx_sec->size;

  bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
  out_sec = exidx_sec->output_section;
  /* Adjust size of output section.  */
  bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
}
/* Insert an EXIDX_CANTUNWIND marker at the end of a section.  */

static void
insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
{
  struct _arm_elf_section_data *exidx_arm_data;

  exidx_arm_data = get_arm_elf_section_data (exidx_sec);
  add_unwind_table_edit (
    &exidx_arm_data->u.exidx.unwind_edit_list,
    &exidx_arm_data->u.exidx.unwind_edit_tail,
    INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);

  adjust_exidx_size (exidx_sec, 8);
}
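/* Illustrative sketch (not part of the linker): the shape of the 8-byte
   .ARM.exidx entry that insert_cantunwind_after reserves space for above.
   The first word is a prel31 offset to the function start; a second word of
   exactly 1 marks EXIDX_CANTUNWIND, while bit 31 set means inlined unwind
   opcodes.  The struct and constant names are assumptions, for explanation
   only.  */
#if 0
#include <stdint.h>

#define EXAMPLE_EXIDX_CANTUNWIND 1u

struct example_exidx_entry
{
  uint32_t fn_prel31;	/* prel31 offset from this word to the function.  */
  uint32_t content;	/* 1 = cannot unwind, bit 31 set = inline opcodes,
			   otherwise a prel31 pointer into .ARM.extab.  */
};

static int
example_entry_is_cantunwind (const struct example_exidx_entry *e)
{
  return e->content == EXAMPLE_EXIDX_CANTUNWIND;
}
#endif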
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

     1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
     2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
	codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	continue;

      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide)
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (last_exidx_sec && last_unwind_type != 0)
    insert_cantunwind_after (last_text_sec, last_exidx_sec);

  return TRUE;
}
static bfd_boolean
elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
			       bfd *ibfd, const char *name)
{
  asection *sec, *osec;

  sec = bfd_get_linker_section (ibfd, name);
  if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
    return TRUE;

  osec = sec->output_section;
  if (elf32_arm_write_section (obfd, info, sec, sec->contents))
    return TRUE;

  if (! bfd_set_section_contents (obfd, osec, sec->contents,
				  sec->output_offset, sec->size))
    return FALSE;

  return TRUE;
}
static bfd_boolean
elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
  asection *sec, *osec;

  if (globals == NULL)
    return FALSE;

  /* Invoke the regular ELF backend linker to do all the work.  */
  if (!bfd_elf_final_link (abfd, info))
    return FALSE;

  /* Process stub sections (eg BE8 encoding, ...).  */
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  unsigned int i;
  for (i = 0; i < htab->top_id; i++)
    {
      sec = htab->stub_group[i].stub_sec;
      /* Only process it once, in its link_sec slot.  */
      if (sec && i == htab->stub_group[i].link_sec->id)
	{
	  osec = sec->output_section;
	  elf32_arm_write_section (abfd, info, sec, sec->contents);
	  if (! bfd_set_section_contents (abfd, osec, sec->contents,
					  sec->output_offset, sec->size))
	    return FALSE;
	}
    }

  /* Write out any glue sections now that we have created all the
     stubs.  */
  if (globals->bfd_of_glue_owner != NULL)
    {
      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   ARM2THUMB_GLUE_SECTION_NAME))
	return FALSE;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   THUMB2ARM_GLUE_SECTION_NAME))
	return FALSE;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   VFP11_ERRATUM_VENEER_SECTION_NAME))
	return FALSE;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
	return FALSE;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   ARM_BX_GLUE_SECTION_NAME))
	return FALSE;
    }

  return TRUE;
}
/* Return a best guess for the machine number based on the attributes.  */

static unsigned int
bfd_arm_get_mach_from_attributes (bfd * abfd)
{
  int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  switch (arch)
    {
    case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
    case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
    case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;

    case TAG_CPU_ARCH_V5TE:
      {
	const char * name;

	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;

	if (name)
	  {
	    if (strcmp (name, "IWMMXT2") == 0)
	      return bfd_mach_arm_iWMMXt2;

	    if (strcmp (name, "IWMMXT") == 0)
	      return bfd_mach_arm_iWMMXt;

	    if (strcmp (name, "XSCALE") == 0)
	      {
		int wmmx;

		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
		switch (wmmx)
		  {
		  case 1: return bfd_mach_arm_iWMMXt;
		  case 2: return bfd_mach_arm_iWMMXt2;
		  default: return bfd_mach_arm_XScale;
		  }
	      }
	  }

	return bfd_mach_arm_5TE;
      }

    default:
      return bfd_mach_arm_unknown;
    }
}
/* Set the right machine number.  */

static bfd_boolean
elf32_arm_object_p (bfd *abfd)
{
  unsigned int mach;

  mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);

  if (mach == bfd_mach_arm_unknown)
    {
      if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
	mach = bfd_mach_arm_ep9312;
      else
	mach = bfd_arm_get_mach_from_attributes (abfd);
    }

  bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
  return TRUE;
}
/* Function to keep ARM specific flags in the ELF header.  */

static bfd_boolean
elf32_arm_set_private_flags (bfd *abfd, flagword flags)
{
  if (elf_flags_init (abfd)
      && elf_elfheader (abfd)->e_flags != flags)
    {
      if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
	{
	  if (flags & EF_ARM_INTERWORK)
	    (*_bfd_error_handler)
	      (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
	       abfd);
	  else
	    _bfd_error_handler
	      (_("Warning: Clearing the interworking flag of %B due to outside request"),
	       abfd);
	}
    }
  else
    {
      elf_elfheader (abfd)->e_flags = flags;
      elf_flags_init (abfd) = TRUE;
    }

  return TRUE;
}
/* Copy backend specific data from one object module to another.  */

static bfd_boolean
elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
{
  flagword in_flags;
  flagword out_flags;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  if (elf_flags_init (obfd)
      && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
      && in_flags != out_flags)
    {
      /* Cannot mix APCS26 and APCS32 code.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	return FALSE;

      /* Cannot mix float APCS and non-float APCS code.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	return FALSE;

      /* If the src and dest have different interworking flags
	 then turn off the interworking bit.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (out_flags & EF_ARM_INTERWORK)
	    _bfd_error_handler
	      (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
	       obfd, ibfd);

	  in_flags &= ~EF_ARM_INTERWORK;
	}

      /* Likewise for PIC, though don't warn for this case.  */
      if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
	in_flags &= ~EF_ARM_PIC;
    }

  elf_elfheader (obfd)->e_flags = in_flags;
  elf_flags_init (obfd) = TRUE;

  return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
}
/* Values for Tag_ABI_PCS_R9_use.  */
enum
{
  AEABI_R9_V6,
  AEABI_R9_SB,
  AEABI_R9_TLS,
  AEABI_R9_unused
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,
  AEABI_PCS_RW_data_PCrel,
  AEABI_PCS_RW_data_SBrel,
  AEABI_PCS_RW_data_unused
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,
  AEABI_enum_short,
  AEABI_enum_wide,
  AEABI_enum_forced_wide
};
11977 elf32_arm_obj_attrs_arg_type (int tag
)
11979 if (tag
== Tag_compatibility
)
11980 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_STR_VAL
;
11981 else if (tag
== Tag_nodefaults
)
11982 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_NO_DEFAULT
;
11983 else if (tag
== Tag_CPU_raw_name
|| tag
== Tag_CPU_name
)
11984 return ATTR_TYPE_FLAG_STR_VAL
;
11986 return ATTR_TYPE_FLAG_INT_VAL
;
11988 return (tag
& 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL
: ATTR_TYPE_FLAG_INT_VAL
;
/* The ABI defines that Tag_conformance should be emitted first, and that
   Tag_nodefaults should be second (if either is defined).  This sets those
   two positions, and bumps up the position of all the remaining tags to
   compensate.  */
static int
elf32_arm_obj_attrs_order (int num)
{
  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
    return Tag_conformance;
  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
    return Tag_nodefaults;
  if ((num - 2) < Tag_nodefaults)
    return num - 2;
  if ((num - 1) < Tag_conformance)
    return num - 1;
  return num;
}
/* Attribute numbers >=64 (mod 128) can be safely ignored.  */

static bfd_boolean
elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
{
  if ((tag & 127) < 64)
    {
      _bfd_error_handler
	(_("%B: Unknown mandatory EABI object attribute %d"),
	 abfd, tag);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }
  else
    {
      _bfd_error_handler
	(_("Warning: %B: Unknown EABI object attribute %d"),
	 abfd, tag);
      return TRUE;
    }
}
/* Read the architecture from the Tag_also_compatible_with attribute, if any.
   Returns -1 if no architecture could be read.  */

static int
get_secondary_compatible_arch (bfd *abfd)
{
  obj_attribute *attr =
    &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];

  /* Note: the tag and its argument below are uleb128 values, though
     currently-defined values fit in one byte for each.  */
  if (attr->s
      && attr->s[0] == Tag_CPU_arch
      && (attr->s[1] & 128) != 128
      && attr->s[2] == 0)
    return attr->s[1];

  /* This tag is "safely ignorable", so don't complain if it looks funny.  */
  return -1;
}
/* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
   The tag is removed if ARCH is -1.  */

static void
set_secondary_compatible_arch (bfd *abfd, int arch)
{
  obj_attribute *attr =
    &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];

  if (arch == -1)
    {
      attr->s = NULL;
      return;
    }

  /* Note: the tag and its argument below are uleb128 values, though
     currently-defined values fit in one byte for each.  */
  attr->s = (char *) bfd_alloc (abfd, 3);
  attr->s[0] = Tag_CPU_arch;
  attr->s[1] = arch;
  attr->s[2] = '\0';
}
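/* Illustrative sketch (not part of the linker): the 3-byte string stored in
   Tag_also_compatible_with by set_secondary_compatible_arch above.  Both the
   tag and its value are ULEB128-encoded, but every currently defined value
   fits in a single byte, so the encoding degenerates to the raw byte.  The
   buffer handling below is an assumption, for explanation only.  */
#if 0
static void
example_encode_also_compatible_with (unsigned char buf[3], int arch)
{
  buf[0] = Tag_CPU_arch;		/* ULEB128 tag, one byte when small.  */
  buf[1] = (unsigned char) arch;	/* ULEB128 value, likewise one byte.  */
  buf[2] = 0;				/* NUL terminator of the string.  */
}
#endif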
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.  */
static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  const int v6t2[] =
    {
      T(V6T2),		/* PRE_V4.  */
      T(V6T2),		/* V4T.  */
      T(V6T2),		/* V5T.  */
      T(V6T2),		/* V5TE.  */
      T(V6T2),		/* V5TEJ.  */
      T(V6T2)		/* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),		/* PRE_V4.  */
      T(V6K),		/* V5TE.  */
      T(V6K),		/* V5TEJ.  */
      T(V6KZ),		/* V6KZ.  */
    };
  const int v7[] =
    {
      T(V7),		/* PRE_V4.  */
      T(V7),		/* V5TEJ.  */
    };
  const int v6_m[] =
    {
      T(V6K),		/* V5TE.  */
      T(V6K),		/* V5TEJ.  */
      T(V6KZ),		/* V6KZ.  */
      T(V6_M)		/* V6_M.  */
    };
  const int v6s_m[] =
    {
      T(V6K),		/* V5TE.  */
      T(V6K),		/* V5TEJ.  */
      T(V6KZ),		/* V6KZ.  */
      T(V6S_M),		/* V6_M.  */
      T(V6S_M)		/* V6S_M.  */
    };
  const int v7e_m[] =
    {
      T(V7E_M),		/* V4T.  */
      T(V7E_M),		/* V5T.  */
      T(V7E_M),		/* V5TE.  */
      T(V7E_M),		/* V5TEJ.  */
      T(V7E_M),		/* V6.  */
      T(V7E_M),		/* V6KZ.  */
      T(V7E_M),		/* V6T2.  */
      T(V7E_M),		/* V6K.  */
      T(V7E_M),		/* V7.  */
      T(V7E_M),		/* V6_M.  */
      T(V7E_M),		/* V6S_M.  */
      T(V7E_M)		/* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),		/* PRE_V4.  */
      T(V8),		/* V5TEJ.  */
      T(V8),		/* V6S_M.  */
      T(V8),		/* V7E_M.  */
    };
  const int v4t_plus_v6_m[] =
    {
      T(V5TE),		/* V5TE.  */
      T(V5TEJ),		/* V5TEJ.  */
      T(V6KZ),		/* V6KZ.  */
      T(V6T2),		/* V6T2.  */
      T(V6_M),		/* V6_M.  */
      T(V6S_M),		/* V6S_M.  */
      T(V7E_M),		/* V7E_M.  */
      T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
    };
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  result = comb[tagh - T(V6T2)][tagl];

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
/* Query attributes object to see if integer divide instructions may be
   present in an object.  */
static bfd_boolean
elf32_arm_attributes_accept_div (const obj_attribute *attr)
{
  int arch = attr[Tag_CPU_arch].i;
  int profile = attr[Tag_CPU_arch_profile].i;

  switch (attr[Tag_DIV_use].i)
    {
    case 0:
      /* Integer divide allowed if instruction contained in architecture.  */
      if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
	return TRUE;
      else if (arch >= TAG_CPU_ARCH_V7E_M)
	return TRUE;
      else
	return FALSE;

    case 1:
      /* Integer divide explicitly prohibited.  */
      return FALSE;

    default:
      /* Unrecognised case - treat as allowing divide everywhere.  */
    case 2:
      /* Integer divide allowed in ARM state.  */
      return TRUE;
    }
}

/* Query attributes object to see if integer divide instructions are
   forbidden to be in the object.  This is not the inverse of
   elf32_arm_attributes_accept_div.  */
static bfd_boolean
elf32_arm_attributes_forbid_div (const obj_attribute *attr)
{
  return attr[Tag_DIV_use].i == 1;
}
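/* Illustrative sketch (not part of the linker): how the two Tag_DIV_use
   predicates above combine when attributes are merged.  A "forbid" input
   only sticks when the other side does not already accept the divide
   instructions; an explicit "allow everywhere" (value 2) propagates.  The
   helper below is an assumption written for explanation only; the real
   merge happens in elf32_arm_merge_eabi_attributes.  */
#if 0
static int
example_merge_div_use (int in_div, int out_div,
		       int in_accepts, int out_accepts,
		       int in_forbids, int out_forbids)
{
  if (in_div == out_div)
    return out_div;			/* Nothing to do.  */
  if (in_forbids && !out_accepts)
    return 1;				/* Divide stays prohibited.  */
  if (out_forbids && in_accepts)
    return in_div;			/* Input relaxes the restriction.  */
  if (in_div == 2)
    return 2;				/* Explicit ARM and Thumb divide.  */
  return out_div;
}
#endif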
12312 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
12313 are conflicting attributes. */
12316 elf32_arm_merge_eabi_attributes (bfd
*ibfd
, bfd
*obfd
)
12318 obj_attribute
*in_attr
;
12319 obj_attribute
*out_attr
;
12320 /* Some tags have 0 = don't care, 1 = strong requirement,
12321 2 = weak requirement. */
12322 static const int order_021
[3] = {0, 2, 1};
12324 bfd_boolean result
= TRUE
;
12325 const char *sec_name
= get_elf_backend_data (ibfd
)->obj_attrs_section
;
12327 /* Skip the linker stubs file. This preserves previous behavior
12328 of accepting unknown attributes in the first input file - but
12330 if (ibfd
->flags
& BFD_LINKER_CREATED
)
12333 /* Skip any input that hasn't attribute section.
12334 This enables to link object files without attribute section with
12336 if (bfd_get_section_by_name (ibfd
, sec_name
) == NULL
)
12339 if (!elf_known_obj_attributes_proc (obfd
)[0].i
)
12341 /* This is the first object. Copy the attributes. */
12342 _bfd_elf_copy_obj_attributes (ibfd
, obfd
);
12344 out_attr
= elf_known_obj_attributes_proc (obfd
);
12346 /* Use the Tag_null value to indicate the attributes have been
12350 /* We do not output objects with Tag_MPextension_use_legacy - we move
12351 the attribute's value to Tag_MPextension_use. */
12352 if (out_attr
[Tag_MPextension_use_legacy
].i
!= 0)
12354 if (out_attr
[Tag_MPextension_use
].i
!= 0
12355 && out_attr
[Tag_MPextension_use_legacy
].i
12356 != out_attr
[Tag_MPextension_use
].i
)
12359 (_("Error: %B has both the current and legacy "
12360 "Tag_MPextension_use attributes"), ibfd
);
12364 out_attr
[Tag_MPextension_use
] =
12365 out_attr
[Tag_MPextension_use_legacy
];
12366 out_attr
[Tag_MPextension_use_legacy
].type
= 0;
12367 out_attr
[Tag_MPextension_use_legacy
].i
= 0;
12373 in_attr
= elf_known_obj_attributes_proc (ibfd
);
12374 out_attr
= elf_known_obj_attributes_proc (obfd
);
12375 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
12376 if (in_attr
[Tag_ABI_VFP_args
].i
!= out_attr
[Tag_ABI_VFP_args
].i
)
12378 /* Ignore mismatches if the object doesn't use floating point or is
12379 floating point ABI independent. */
12380 if (out_attr
[Tag_ABI_FP_number_model
].i
== AEABI_FP_number_model_none
12381 || (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
12382 && out_attr
[Tag_ABI_VFP_args
].i
== AEABI_VFP_args_compatible
))
12383 out_attr
[Tag_ABI_VFP_args
].i
= in_attr
[Tag_ABI_VFP_args
].i
;
12384 else if (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
12385 && in_attr
[Tag_ABI_VFP_args
].i
!= AEABI_VFP_args_compatible
)
12388 (_("error: %B uses VFP register arguments, %B does not"),
12389 in_attr
[Tag_ABI_VFP_args
].i
? ibfd
: obfd
,
12390 in_attr
[Tag_ABI_VFP_args
].i
? obfd
: ibfd
);
12395 for (i
= LEAST_KNOWN_OBJ_ATTRIBUTE
; i
< NUM_KNOWN_OBJ_ATTRIBUTES
; i
++)
12397 /* Merge this attribute with existing attributes. */
12400 case Tag_CPU_raw_name
:
12402 /* These are merged after Tag_CPU_arch. */
12405 case Tag_ABI_optimization_goals
:
12406 case Tag_ABI_FP_optimization_goals
:
12407 /* Use the first value seen. */
12412 int secondary_compat
= -1, secondary_compat_out
= -1;
12413 unsigned int saved_out_attr
= out_attr
[i
].i
;
12415 static const char *name_table
[] =
12417 /* These aren't real CPU names, but we can't guess
12418 that from the architecture version alone. */
12435 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
12436 secondary_compat
= get_secondary_compatible_arch (ibfd
);
12437 secondary_compat_out
= get_secondary_compatible_arch (obfd
);
12438 arch_attr
= tag_cpu_arch_combine (ibfd
, out_attr
[i
].i
,
12439 &secondary_compat_out
,
12443 /* Return with error if failed to merge. */
12444 if (arch_attr
== -1)
12447 out_attr
[i
].i
= arch_attr
;
12449 set_secondary_compatible_arch (obfd
, secondary_compat_out
);
12451 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
12452 if (out_attr
[i
].i
== saved_out_attr
)
12453 ; /* Leave the names alone. */
12454 else if (out_attr
[i
].i
== in_attr
[i
].i
)
12456 /* The output architecture has been changed to match the
12457 input architecture. Use the input names. */
12458 out_attr
[Tag_CPU_name
].s
= in_attr
[Tag_CPU_name
].s
12459 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_name
].s
)
12461 out_attr
[Tag_CPU_raw_name
].s
= in_attr
[Tag_CPU_raw_name
].s
12462 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_raw_name
].s
)
12467 out_attr
[Tag_CPU_name
].s
= NULL
;
12468 out_attr
[Tag_CPU_raw_name
].s
= NULL
;
12471 /* If we still don't have a value for Tag_CPU_name,
12472 make one up now. Tag_CPU_raw_name remains blank. */
12473 if (out_attr
[Tag_CPU_name
].s
== NULL
12474 && out_attr
[i
].i
< ARRAY_SIZE (name_table
))
12475 out_attr
[Tag_CPU_name
].s
=
12476 _bfd_elf_attr_strdup (obfd
, name_table
[out_attr
[i
].i
]);
12480 case Tag_ARM_ISA_use
:
12481 case Tag_THUMB_ISA_use
:
12482 case Tag_WMMX_arch
:
12483 case Tag_Advanced_SIMD_arch
:
12484 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
12485 case Tag_ABI_FP_rounding
:
12486 case Tag_ABI_FP_exceptions
:
12487 case Tag_ABI_FP_user_exceptions
:
12488 case Tag_ABI_FP_number_model
:
12489 case Tag_FP_HP_extension
:
12490 case Tag_CPU_unaligned_access
:
12492 case Tag_MPextension_use
:
12493 /* Use the largest value specified. */
12494 if (in_attr
[i
].i
> out_attr
[i
].i
)
12495 out_attr
[i
].i
= in_attr
[i
].i
;
12498 case Tag_ABI_align_preserved
:
12499 case Tag_ABI_PCS_RO_data
:
12500 /* Use the smallest value specified. */
12501 if (in_attr
[i
].i
< out_attr
[i
].i
)
12502 out_attr
[i
].i
= in_attr
[i
].i
;
12505 case Tag_ABI_align_needed
:
12506 if ((in_attr
[i
].i
> 0 || out_attr
[i
].i
> 0)
12507 && (in_attr
[Tag_ABI_align_preserved
].i
== 0
12508 || out_attr
[Tag_ABI_align_preserved
].i
== 0))
12510 /* This error message should be enabled once all non-conformant
12511 binaries in the toolchain have had the attributes set
12514 (_("error: %B: 8-byte data alignment conflicts with %B"),
12518 /* Fall through. */
12519 case Tag_ABI_FP_denormal
:
12520 case Tag_ABI_PCS_GOT_use
:
12521 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
12522 value if greater than 2 (for future-proofing). */
12523 if ((in_attr
[i
].i
> 2 && in_attr
[i
].i
> out_attr
[i
].i
)
12524 || (in_attr
[i
].i
<= 2 && out_attr
[i
].i
<= 2
12525 && order_021
[in_attr
[i
].i
] > order_021
[out_attr
[i
].i
]))
12526 out_attr
[i
].i
= in_attr
[i
].i
;
12529 case Tag_Virtualization_use
:
12530 /* The virtualization tag effectively stores two bits of
12531 information: the intended use of TrustZone (in bit 0), and the
12532 intended use of Virtualization (in bit 1). */
12533 if (out_attr
[i
].i
== 0)
12534 out_attr
[i
].i
= in_attr
[i
].i
;
12535 else if (in_attr
[i
].i
!= 0
12536 && in_attr
[i
].i
!= out_attr
[i
].i
)
12538 if (in_attr
[i
].i
<= 3 && out_attr
[i
].i
<= 3)
12543 (_("error: %B: unable to merge virtualization attributes "
12551 case Tag_CPU_arch_profile
:
12552 if (out_attr
[i
].i
!= in_attr
[i
].i
)
12554 /* 0 will merge with anything.
12555 'A' and 'S' merge to 'A'.
12556 'R' and 'S' merge to 'R'.
12557 'M' and 'A|R|S' is an error. */
12558 if (out_attr
[i
].i
== 0
12559 || (out_attr
[i
].i
== 'S'
12560 && (in_attr
[i
].i
== 'A' || in_attr
[i
].i
== 'R')))
12561 out_attr
[i
].i
= in_attr
[i
].i
;
12562 else if (in_attr
[i
].i
== 0
12563 || (in_attr
[i
].i
== 'S'
12564 && (out_attr
[i
].i
== 'A' || out_attr
[i
].i
== 'R')))
12565 ; /* Do nothing. */
12569 (_("error: %B: Conflicting architecture profiles %c/%c"),
12571 in_attr
[i
].i
? in_attr
[i
].i
: '0',
12572 out_attr
[i
].i
? out_attr
[i
].i
: '0');
12579 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
12580 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
12581 when it's 0. It might mean absence of FP hardware if
12582 Tag_FP_arch is zero. */
12584 #define VFP_VERSION_COUNT 9
12585 static const struct
12589 } vfp_versions
[VFP_VERSION_COUNT
] =
12605 /* If the output has no requirement about FP hardware,
12606 follow the requirement of the input. */
12607 if (out_attr
[i
].i
== 0)
12609 BFD_ASSERT (out_attr
[Tag_ABI_HardFP_use
].i
== 0);
12610 out_attr
[i
].i
= in_attr
[i
].i
;
12611 out_attr
[Tag_ABI_HardFP_use
].i
12612 = in_attr
[Tag_ABI_HardFP_use
].i
;
12615 /* If the input has no requirement about FP hardware, do
12617 else if (in_attr
[i
].i
== 0)
12619 BFD_ASSERT (in_attr
[Tag_ABI_HardFP_use
].i
== 0);
12623 /* Both the input and the output have nonzero Tag_FP_arch.
12624 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
12626 /* If both the input and the output have zero Tag_ABI_HardFP_use,
12628 if (in_attr
[Tag_ABI_HardFP_use
].i
== 0
12629 && out_attr
[Tag_ABI_HardFP_use
].i
== 0)
12631 /* If the input and the output have different Tag_ABI_HardFP_use,
12632 the combination of them is 0 (implied by Tag_FP_arch). */
12633 else if (in_attr
[Tag_ABI_HardFP_use
].i
12634 != out_attr
[Tag_ABI_HardFP_use
].i
)
12635 out_attr
[Tag_ABI_HardFP_use
].i
= 0;
12637 /* Now we can handle Tag_FP_arch. */
12639 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
12640 pick the biggest. */
12641 if (in_attr
[i
].i
>= VFP_VERSION_COUNT
12642 && in_attr
[i
].i
> out_attr
[i
].i
)
12644 out_attr
[i
] = in_attr
[i
];
12647 /* The output uses the superset of input features
12648 (ISA version) and registers. */
12649 ver
= vfp_versions
[in_attr
[i
].i
].ver
;
12650 if (ver
< vfp_versions
[out_attr
[i
].i
].ver
)
12651 ver
= vfp_versions
[out_attr
[i
].i
].ver
;
12652 regs
= vfp_versions
[in_attr
[i
].i
].regs
;
12653 if (regs
< vfp_versions
[out_attr
[i
].i
].regs
)
12654 regs
= vfp_versions
[out_attr
[i
].i
].regs
;
12655 /* This assumes all possible supersets are also a valid
12657 for (newval
= VFP_VERSION_COUNT
- 1; newval
> 0; newval
--)
12659 if (regs
== vfp_versions
[newval
].regs
12660 && ver
== vfp_versions
[newval
].ver
)
12663 out_attr
[i
].i
= newval
;
12666 case Tag_PCS_config
:
12667 if (out_attr
[i
].i
== 0)
12668 out_attr
[i
].i
= in_attr
[i
].i
;
12669 else if (in_attr
[i
].i
!= 0 && out_attr
[i
].i
!= in_attr
[i
].i
)
12671 /* It's sometimes ok to mix different configs, so this is only
12674 (_("Warning: %B: Conflicting platform configuration"), ibfd
);
12677 case Tag_ABI_PCS_R9_use
:
12678 if (in_attr
[i
].i
!= out_attr
[i
].i
12679 && out_attr
[i
].i
!= AEABI_R9_unused
12680 && in_attr
[i
].i
!= AEABI_R9_unused
)
12683 (_("error: %B: Conflicting use of R9"), ibfd
);
12686 if (out_attr
[i
].i
== AEABI_R9_unused
)
12687 out_attr
[i
].i
= in_attr
[i
].i
;
12689 case Tag_ABI_PCS_RW_data
:
12690 if (in_attr
[i
].i
== AEABI_PCS_RW_data_SBrel
12691 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_SB
12692 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_unused
)
12695 (_("error: %B: SB relative addressing conflicts with use of R9"),
12699 /* Use the smallest value specified. */
12700 if (in_attr
[i
].i
< out_attr
[i
].i
)
12701 out_attr
[i
].i
= in_attr
[i
].i
;
12703 case Tag_ABI_PCS_wchar_t
:
12704 if (out_attr
[i
].i
&& in_attr
[i
].i
&& out_attr
[i
].i
!= in_attr
[i
].i
12705 && !elf_arm_tdata (obfd
)->no_wchar_size_warning
)
12708 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
12709 ibfd
, in_attr
[i
].i
, out_attr
[i
].i
);
12711 else if (in_attr
[i
].i
&& !out_attr
[i
].i
)
12712 out_attr
[i
].i
= in_attr
[i
].i
;
	case Tag_ABI_enum_size:
	  if (in_attr[i].i != AEABI_enum_unused)
	    {
	      if (out_attr[i].i == AEABI_enum_unused
		  || out_attr[i].i == AEABI_enum_forced_wide)
		{
		  /* The existing object is compatible with anything.
		     Use whatever requirements the new object has.  */
		  out_attr[i].i = in_attr[i].i;
		}
	      else if (in_attr[i].i != AEABI_enum_forced_wide
		       && out_attr[i].i != in_attr[i].i
		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
		{
		  static const char *aeabi_enum_names[] =
		    { "", "variable-size", "32-bit", "" };
		  const char *in_name =
		    in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
		    ? aeabi_enum_names[in_attr[i].i]
		    : "<unknown>";
		  const char *out_name =
		    out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
		    ? aeabi_enum_names[out_attr[i].i]
		    : "<unknown>";
		  _bfd_error_handler
		    (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
		     ibfd, in_name, out_name);
		}
	    }
	  break;
	case Tag_ABI_VFP_args:
	  /* Already done.  */
	  break;

	case Tag_ABI_WMMX_args:
	  if (in_attr[i].i != out_attr[i].i)
	    {
	      _bfd_error_handler
		(_("error: %B uses iWMMXt register arguments, %B does not"),
		 ibfd, obfd);
	      result = FALSE;
	    }
	  break;
	case Tag_compatibility:
	  /* Merged in target-independent code.  */
	  break;

	case Tag_ABI_HardFP_use:
	  /* This is handled along with Tag_FP_arch.  */
	  break;

	case Tag_ABI_FP_16bit_format:
	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
	    {
	      if (in_attr[i].i != out_attr[i].i)
		{
		  _bfd_error_handler
		    (_("error: fp16 format mismatch between %B and %B"),
		     ibfd, obfd);
		  result = FALSE;
		}
	    }
	  else if (in_attr[i].i != 0)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_DIV_use:
	  /* A value of zero on input means that the divide instruction may
	     be used if available in the base architecture as specified via
	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
	     the user did not want divide instructions.  A value of 2
	     explicitly means that divide instructions were allowed in ARM
	     and Thumb state.  */
	  if (in_attr[i].i == out_attr[i].i)
	    /* Do nothing.  */ ;
	  else if (elf32_arm_attributes_forbid_div (in_attr)
		   && !elf32_arm_attributes_accept_div (out_attr))
	    out_attr[i].i = 1;
	  else if (elf32_arm_attributes_forbid_div (out_attr)
		   && elf32_arm_attributes_accept_div (in_attr))
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i == 2)
	    out_attr[i].i = in_attr[i].i;
	  break;
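	  /* Illustrative sketch (not part of BFD, guarded out of the build):
	     the divide-use merge above reduced to a pure function.  The
	     forbids/accepts arguments are hypothetical stand-ins for the
	     results of elf32_arm_attributes_forbid_div and
	     elf32_arm_attributes_accept_div, which also consult the
	     architecture tags.  */
#if 0
#include <assert.h>

static int
merge_div_use (int in_div, int out_div,
	       int in_forbids, int in_accepts,
	       int out_forbids, int out_accepts)
{
  (void) in_accepts;
  if (in_div == out_div)
    return out_div;			/* Nothing to merge.  */
  if (in_forbids && !out_accepts)
    return 1;				/* Divide not wanted.  */
  if (out_forbids && in_accepts)
    return in_div;			/* Input side wins.  */
  if (in_div == 2)
    return in_div;			/* Explicit "allowed" propagates.  */
  return out_div;
}

int
main (void)
{
  /* Input forbids divide, output does not explicitly accept it.  */
  assert (merge_div_use (1, 0, 1, 0, 0, 0) == 1);
  /* Input explicitly allows divide in ARM and Thumb state.  */
  assert (merge_div_use (2, 0, 0, 1, 0, 1) == 2);
  return 0;
}
#endif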
	case Tag_MPextension_use_legacy:
	  /* We don't output objects with Tag_MPextension_use_legacy - we
	     move the value to Tag_MPextension_use.  */
	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
	    {
	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
		{
		  _bfd_error_handler
		    (_("%B has both the current and legacy "
		       "Tag_MPextension_use attributes"),
		     ibfd);
		  result = FALSE;
		}
	    }

	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
	    out_attr[Tag_MPextension_use] = in_attr[i];

	  break;
	case Tag_nodefaults:
	  /* This tag is set if it exists, but the value is unused (and is
	     typically zero).  We don't actually need to do anything here -
	     the merge happens automatically when the type flags are merged
	     below.  */
	  break;

	case Tag_also_compatible_with:
	  /* Already done in Tag_CPU_arch.  */
	  break;

	case Tag_conformance:
	  /* Keep the attribute if it matches.  Throw it away otherwise.
	     No attribute means no claim to conform.  */
	  if (!in_attr[i].s || !out_attr[i].s
	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
	    out_attr[i].s = NULL;
	  break;
	default:
	  result
	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
	  break;
	}

      /* If out_attr was copied from in_attr then it won't have a type yet.  */
      if (in_attr[i].type && !out_attr[i].type)
	out_attr[i].type = in_attr[i].type;
    }

  /* Merge Tag_compatibility attributes and any common GNU ones.  */
  if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
    return FALSE;

  /* Check for any attributes not known on ARM.  */
  result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);

  return result;
}
/* Return TRUE if the two EABI versions are compatible.  */

static bfd_boolean
elf32_arm_versions_compatible (unsigned iver, unsigned over)
{
  /* v4 and v5 are the same spec before and after it was released,
     so allow mixing them.  */
  if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
      || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
    return TRUE;

  return (iver == over);
}
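/* Illustrative sketch (not part of BFD, guarded out of the build): the
   compatibility rule above is symmetric, so checking both orderings is
   the same as asking whether the unordered pair is {VER4, VER5} or the
   two versions are equal.  The constants below are the version numbers
   written unshifted for readability; the real code compares the masked
   e_flags values.  */
#if 0
#include <assert.h>

static int
compatible (unsigned a, unsigned b)
{
  const unsigned VER4 = 4, VER5 = 5;	/* hypothetical unshifted values */

  if ((a == VER4 && b == VER5) || (a == VER5 && b == VER4))
    return 1;
  return a == b;
}

int
main (void)
{
  assert (compatible (4, 5) && compatible (5, 4));
  assert (compatible (5, 5) && !compatible (2, 5));
  return 0;
}
#endif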
/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
/* Display the flags field.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  /* xgettext:c-format */
  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  flags &= ~EF_ARM_RELEXEC;

  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
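/* Illustrative sketch (not part of BFD, guarded out of the build): the
   decoder above first dispatches on the EABI version held in the top
   byte of e_flags, then strips each flag bit it has printed so that
   anything left over can be reported as unrecognised.  The program
   below models that "print and clear" pattern on a raw flags word; the
   DEMO_* constants are local names whose values are chosen to match
   EF_ARM_EABIMASK and EF_ARM_BE8.  */
#if 0
#include <stdio.h>

#define DEMO_EABIMASK	0xff000000u
#define DEMO_FLAG_BE8	0x00800000u

int
main (void)
{
  unsigned long flags = 0x05800002u;	/* hypothetical e_flags value */

  printf ("EABI version %lu\n", (flags & DEMO_EABIMASK) >> 24);
  if (flags & DEMO_FLAG_BE8)
    printf (" [BE8]\n");
  flags &= ~(DEMO_EABIMASK | DEMO_FLAG_BE8);
  if (flags)
    printf ("<Unrecognised flag bits set: %#lx>\n", flags);
  return 0;
}
#endif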
static int
elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
{
  switch (ELF_ST_TYPE (elf_sym->st_info))
    {
    case STT_ARM_TFUNC:
      return ELF_ST_TYPE (elf_sym->st_info);

    case STT_ARM_16BIT:
      /* If the symbol is not an object, return the STT_ARM_16BIT flag.
	 This allows us to distinguish between data used by Thumb instructions
	 and non-data (which is probably code) inside Thumb regions of an
	 executable.  */
      if (type != STT_OBJECT && type != STT_TLS)
	return ELF_ST_TYPE (elf_sym->st_info);
      break;

    default:
      break;
    }

  return type;
}
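/* Illustrative sketch (not part of BFD, guarded out of the build):
   st_info packs the symbol binding in the high nibble and the symbol
   type in the low nibble, which is what the ELF_ST_TYPE calls above
   extract.  The DEMO_* macros mirror the standard ELF32_ST_* macros.  */
#if 0
#include <assert.h>

#define DEMO_ST_BIND(i)		((i) >> 4)
#define DEMO_ST_TYPE(i)		((i) & 0xf)
#define DEMO_ST_INFO(b, t)	(((b) << 4) + ((t) & 0xf))

int
main (void)
{
  unsigned char info = DEMO_ST_INFO (1 /* STB_GLOBAL */, 2 /* STT_FUNC */);

  assert (DEMO_ST_BIND (info) == 1 && DEMO_ST_TYPE (info) == 2);
  return 0;
}
#endif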
static asection *
elf32_arm_gc_mark_hook (asection *sec,
			struct bfd_link_info *info,
			Elf_Internal_Rela *rel,
			struct elf_link_hash_entry *h,
			Elf_Internal_Sym *sym)
{
  if (h != NULL)
    switch (ELF32_R_TYPE (rel->r_info))
      {
      case R_ARM_GNU_VTINHERIT:
      case R_ARM_GNU_VTENTRY:
	return NULL;
      }

  return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
}
13056 /* Update the got entry reference counts for the section being removed. */
13059 elf32_arm_gc_sweep_hook (bfd
* abfd
,
13060 struct bfd_link_info
* info
,
13062 const Elf_Internal_Rela
* relocs
)
13064 Elf_Internal_Shdr
*symtab_hdr
;
13065 struct elf_link_hash_entry
**sym_hashes
;
13066 bfd_signed_vma
*local_got_refcounts
;
13067 const Elf_Internal_Rela
*rel
, *relend
;
13068 struct elf32_arm_link_hash_table
* globals
;
13070 if (bfd_link_relocatable (info
))
13073 globals
= elf32_arm_hash_table (info
);
13074 if (globals
== NULL
)
13077 elf_section_data (sec
)->local_dynrel
= NULL
;
13079 symtab_hdr
= & elf_symtab_hdr (abfd
);
13080 sym_hashes
= elf_sym_hashes (abfd
);
13081 local_got_refcounts
= elf_local_got_refcounts (abfd
);
13083 check_use_blx (globals
);
13085 relend
= relocs
+ sec
->reloc_count
;
13086 for (rel
= relocs
; rel
< relend
; rel
++)
13088 unsigned long r_symndx
;
13089 struct elf_link_hash_entry
*h
= NULL
;
13090 struct elf32_arm_link_hash_entry
*eh
;
13092 bfd_boolean call_reloc_p
;
13093 bfd_boolean may_become_dynamic_p
;
13094 bfd_boolean may_need_local_target_p
;
13095 union gotplt_union
*root_plt
;
13096 struct arm_plt_info
*arm_plt
;
13098 r_symndx
= ELF32_R_SYM (rel
->r_info
);
13099 if (r_symndx
>= symtab_hdr
->sh_info
)
13101 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
13102 while (h
->root
.type
== bfd_link_hash_indirect
13103 || h
->root
.type
== bfd_link_hash_warning
)
13104 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
13106 eh
= (struct elf32_arm_link_hash_entry
*) h
;
13108 call_reloc_p
= FALSE
;
13109 may_become_dynamic_p
= FALSE
;
13110 may_need_local_target_p
= FALSE
;
13112 r_type
= ELF32_R_TYPE (rel
->r_info
);
13113 r_type
= arm_real_reloc_type (globals
, r_type
);
13117 case R_ARM_GOT_PREL
:
13118 case R_ARM_TLS_GD32
:
13119 case R_ARM_TLS_IE32
:
13122 if (h
->got
.refcount
> 0)
13123 h
->got
.refcount
-= 1;
13125 else if (local_got_refcounts
!= NULL
)
13127 if (local_got_refcounts
[r_symndx
] > 0)
13128 local_got_refcounts
[r_symndx
] -= 1;
13132 case R_ARM_TLS_LDM32
:
13133 globals
->tls_ldm_got
.refcount
-= 1;
13141 case R_ARM_THM_CALL
:
13142 case R_ARM_THM_JUMP24
:
13143 case R_ARM_THM_JUMP19
:
13144 call_reloc_p
= TRUE
;
13145 may_need_local_target_p
= TRUE
;
13149 if (!globals
->vxworks_p
)
13151 may_need_local_target_p
= TRUE
;
13154 /* Fall through. */
13156 case R_ARM_ABS32_NOI
:
13158 case R_ARM_REL32_NOI
:
13159 case R_ARM_MOVW_ABS_NC
:
13160 case R_ARM_MOVT_ABS
:
13161 case R_ARM_MOVW_PREL_NC
:
13162 case R_ARM_MOVT_PREL
:
13163 case R_ARM_THM_MOVW_ABS_NC
:
13164 case R_ARM_THM_MOVT_ABS
:
13165 case R_ARM_THM_MOVW_PREL_NC
:
13166 case R_ARM_THM_MOVT_PREL
:
13167 /* Should the interworking branches be here also? */
13168 if ((bfd_link_pic (info
) || globals
->root
.is_relocatable_executable
)
13169 && (sec
->flags
& SEC_ALLOC
) != 0)
13172 && elf32_arm_howto_from_type (r_type
)->pc_relative
)
13174 call_reloc_p
= TRUE
;
13175 may_need_local_target_p
= TRUE
;
13178 may_become_dynamic_p
= TRUE
;
13181 may_need_local_target_p
= TRUE
;
13188 if (may_need_local_target_p
13189 && elf32_arm_get_plt_info (abfd
, eh
, r_symndx
, &root_plt
, &arm_plt
))
13191 /* If PLT refcount book-keeping is wrong and too low, we'll
13192 see a zero value (going to -1) for the root PLT reference
13194 if (root_plt
->refcount
>= 0)
13196 BFD_ASSERT (root_plt
->refcount
!= 0);
13197 root_plt
->refcount
-= 1;
13200 /* A value of -1 means the symbol has become local, forced
13201 or seeing a hidden definition. Any other negative value
13203 BFD_ASSERT (root_plt
->refcount
== -1);
13206 arm_plt
->noncall_refcount
--;
13208 if (r_type
== R_ARM_THM_CALL
)
13209 arm_plt
->maybe_thumb_refcount
--;
13211 if (r_type
== R_ARM_THM_JUMP24
13212 || r_type
== R_ARM_THM_JUMP19
)
13213 arm_plt
->thumb_refcount
--;
13216 if (may_become_dynamic_p
)
13218 struct elf_dyn_relocs
**pp
;
13219 struct elf_dyn_relocs
*p
;
13222 pp
= &(eh
->dyn_relocs
);
13225 Elf_Internal_Sym
*isym
;
13227 isym
= bfd_sym_from_r_symndx (&globals
->sym_cache
,
13231 pp
= elf32_arm_get_local_dynreloc_list (abfd
, r_symndx
, isym
);
13235 for (; (p
= *pp
) != NULL
; pp
= &p
->next
)
13238 /* Everything must go for SEC. */
13248 /* Look through the relocs for a section during the first phase. */
13251 elf32_arm_check_relocs (bfd
*abfd
, struct bfd_link_info
*info
,
13252 asection
*sec
, const Elf_Internal_Rela
*relocs
)
13254 Elf_Internal_Shdr
*symtab_hdr
;
13255 struct elf_link_hash_entry
**sym_hashes
;
13256 const Elf_Internal_Rela
*rel
;
13257 const Elf_Internal_Rela
*rel_end
;
13260 struct elf32_arm_link_hash_table
*htab
;
13261 bfd_boolean call_reloc_p
;
13262 bfd_boolean may_become_dynamic_p
;
13263 bfd_boolean may_need_local_target_p
;
13264 unsigned long nsyms
;
13266 if (bfd_link_relocatable (info
))
13269 BFD_ASSERT (is_arm_elf (abfd
));
13271 htab
= elf32_arm_hash_table (info
);
13277 /* Create dynamic sections for relocatable executables so that we can
13278 copy relocations. */
13279 if (htab
->root
.is_relocatable_executable
13280 && ! htab
->root
.dynamic_sections_created
)
13282 if (! _bfd_elf_link_create_dynamic_sections (abfd
, info
))
13286 if (htab
->root
.dynobj
== NULL
)
13287 htab
->root
.dynobj
= abfd
;
13288 if (!create_ifunc_sections (info
))
13291 dynobj
= htab
->root
.dynobj
;
13293 symtab_hdr
= & elf_symtab_hdr (abfd
);
13294 sym_hashes
= elf_sym_hashes (abfd
);
13295 nsyms
= NUM_SHDR_ENTRIES (symtab_hdr
);
13297 rel_end
= relocs
+ sec
->reloc_count
;
13298 for (rel
= relocs
; rel
< rel_end
; rel
++)
13300 Elf_Internal_Sym
*isym
;
13301 struct elf_link_hash_entry
*h
;
13302 struct elf32_arm_link_hash_entry
*eh
;
13303 unsigned long r_symndx
;
13306 r_symndx
= ELF32_R_SYM (rel
->r_info
);
13307 r_type
= ELF32_R_TYPE (rel
->r_info
);
13308 r_type
= arm_real_reloc_type (htab
, r_type
);
13310 if (r_symndx
>= nsyms
13311 /* PR 9934: It is possible to have relocations that do not
13312 refer to symbols, thus it is also possible to have an
13313 object file containing relocations but no symbol table. */
13314 && (r_symndx
> STN_UNDEF
|| nsyms
> 0))
13316 (*_bfd_error_handler
) (_("%B: bad symbol index: %d"), abfd
,
13325 if (r_symndx
< symtab_hdr
->sh_info
)
13327 /* A local symbol. */
13328 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
,
13335 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
13336 while (h
->root
.type
== bfd_link_hash_indirect
13337 || h
->root
.type
== bfd_link_hash_warning
)
13338 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
13340 /* PR15323, ref flags aren't set for references in the
13342 h
->root
.non_ir_ref
= 1;
13346 eh
= (struct elf32_arm_link_hash_entry
*) h
;
13348 call_reloc_p
= FALSE
;
13349 may_become_dynamic_p
= FALSE
;
13350 may_need_local_target_p
= FALSE
;
13352 /* Could be done earlier, if h were already available. */
13353 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
13357 case R_ARM_GOT_PREL
:
13358 case R_ARM_TLS_GD32
:
13359 case R_ARM_TLS_IE32
:
13360 case R_ARM_TLS_GOTDESC
:
13361 case R_ARM_TLS_DESCSEQ
:
13362 case R_ARM_THM_TLS_DESCSEQ
:
13363 case R_ARM_TLS_CALL
:
13364 case R_ARM_THM_TLS_CALL
:
13365 /* This symbol requires a global offset table entry. */
13367 int tls_type
, old_tls_type
;
13371 case R_ARM_TLS_GD32
: tls_type
= GOT_TLS_GD
; break;
13373 case R_ARM_TLS_IE32
: tls_type
= GOT_TLS_IE
; break;
13375 case R_ARM_TLS_GOTDESC
:
13376 case R_ARM_TLS_CALL
: case R_ARM_THM_TLS_CALL
:
13377 case R_ARM_TLS_DESCSEQ
: case R_ARM_THM_TLS_DESCSEQ
:
13378 tls_type
= GOT_TLS_GDESC
; break;
13380 default: tls_type
= GOT_NORMAL
; break;
13383 if (!bfd_link_executable (info
) && (tls_type
& GOT_TLS_IE
))
13384 info
->flags
|= DF_STATIC_TLS
;
13389 old_tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
13393 /* This is a global offset table entry for a local symbol. */
13394 if (!elf32_arm_allocate_local_sym_info (abfd
))
13396 elf_local_got_refcounts (abfd
)[r_symndx
] += 1;
13397 old_tls_type
= elf32_arm_local_got_tls_type (abfd
) [r_symndx
];
13400 /* If a variable is accessed with both tls methods, two
13401 slots may be created. */
13402 if (GOT_TLS_GD_ANY_P (old_tls_type
)
13403 && GOT_TLS_GD_ANY_P (tls_type
))
13404 tls_type
|= old_tls_type
;
13406 /* We will already have issued an error message if there
13407 is a TLS/non-TLS mismatch, based on the symbol
13408 type. So just combine any TLS types needed. */
13409 if (old_tls_type
!= GOT_UNKNOWN
&& old_tls_type
!= GOT_NORMAL
13410 && tls_type
!= GOT_NORMAL
)
13411 tls_type
|= old_tls_type
;
13413 /* If the symbol is accessed in both IE and GDESC
13414 method, we're able to relax. Turn off the GDESC flag,
13415 without messing up with any other kind of tls types
13416 that may be involved. */
13417 if ((tls_type
& GOT_TLS_IE
) && (tls_type
& GOT_TLS_GDESC
))
13418 tls_type
&= ~GOT_TLS_GDESC
;
13420 if (old_tls_type
!= tls_type
)
13423 elf32_arm_hash_entry (h
)->tls_type
= tls_type
;
13425 elf32_arm_local_got_tls_type (abfd
) [r_symndx
] = tls_type
;
13428 /* Fall through. */
13430 case R_ARM_TLS_LDM32
:
13431 if (r_type
== R_ARM_TLS_LDM32
)
13432 htab
->tls_ldm_got
.refcount
++;
13433 /* Fall through. */
13435 case R_ARM_GOTOFF32
:
13437 if (htab
->root
.sgot
== NULL
13438 && !create_got_section (htab
->root
.dynobj
, info
))
13447 case R_ARM_THM_CALL
:
13448 case R_ARM_THM_JUMP24
:
13449 case R_ARM_THM_JUMP19
:
13450 call_reloc_p
= TRUE
;
13451 may_need_local_target_p
= TRUE
;
13455 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
13456 ldr __GOTT_INDEX__ offsets. */
13457 if (!htab
->vxworks_p
)
13459 may_need_local_target_p
= TRUE
;
13462 /* Fall through. */
13464 case R_ARM_MOVW_ABS_NC
:
13465 case R_ARM_MOVT_ABS
:
13466 case R_ARM_THM_MOVW_ABS_NC
:
13467 case R_ARM_THM_MOVT_ABS
:
13468 if (bfd_link_pic (info
))
13470 (*_bfd_error_handler
)
13471 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
13472 abfd
, elf32_arm_howto_table_1
[r_type
].name
,
13473 (h
) ? h
->root
.root
.string
: "a local symbol");
13474 bfd_set_error (bfd_error_bad_value
);
13478 /* Fall through. */
13480 case R_ARM_ABS32_NOI
:
13481 if (h
!= NULL
&& bfd_link_executable (info
))
13483 h
->pointer_equality_needed
= 1;
13485 /* Fall through. */
13487 case R_ARM_REL32_NOI
:
13488 case R_ARM_MOVW_PREL_NC
:
13489 case R_ARM_MOVT_PREL
:
13490 case R_ARM_THM_MOVW_PREL_NC
:
13491 case R_ARM_THM_MOVT_PREL
:
13493 /* Should the interworking branches be listed here? */
13494 if ((bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
)
13495 && (sec
->flags
& SEC_ALLOC
) != 0)
13498 && elf32_arm_howto_from_type (r_type
)->pc_relative
)
13500 /* In shared libraries and relocatable executables,
13501 we treat local relative references as calls;
13502 see the related SYMBOL_CALLS_LOCAL code in
13503 allocate_dynrelocs. */
13504 call_reloc_p
= TRUE
;
13505 may_need_local_target_p
= TRUE
;
13508 /* We are creating a shared library or relocatable
13509 executable, and this is a reloc against a global symbol,
13510 or a non-PC-relative reloc against a local symbol.
13511 We may need to copy the reloc into the output. */
13512 may_become_dynamic_p
= TRUE
;
13515 may_need_local_target_p
= TRUE
;
13518 /* This relocation describes the C++ object vtable hierarchy.
13519 Reconstruct it for later use during GC. */
13520 case R_ARM_GNU_VTINHERIT
:
13521 if (!bfd_elf_gc_record_vtinherit (abfd
, sec
, h
, rel
->r_offset
))
13525 /* This relocation describes which C++ vtable entries are actually
13526 used. Record for later use during GC. */
13527 case R_ARM_GNU_VTENTRY
:
13528 BFD_ASSERT (h
!= NULL
);
13530 && !bfd_elf_gc_record_vtentry (abfd
, sec
, h
, rel
->r_offset
))
13538 /* We may need a .plt entry if the function this reloc
13539 refers to is in a different object, regardless of the
13540 symbol's type. We can't tell for sure yet, because
13541 something later might force the symbol local. */
13543 else if (may_need_local_target_p
)
13544 /* If this reloc is in a read-only section, we might
13545 need a copy reloc. We can't check reliably at this
13546 stage whether the section is read-only, as input
13547 sections have not yet been mapped to output sections.
13548 Tentatively set the flag for now, and correct in
13549 adjust_dynamic_symbol. */
13550 h
->non_got_ref
= 1;
13553 if (may_need_local_target_p
13554 && (h
!= NULL
|| ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
))
13556 union gotplt_union
*root_plt
;
13557 struct arm_plt_info
*arm_plt
;
13558 struct arm_local_iplt_info
*local_iplt
;
13562 root_plt
= &h
->plt
;
13563 arm_plt
= &eh
->plt
;
13567 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
13568 if (local_iplt
== NULL
)
13570 root_plt
= &local_iplt
->root
;
13571 arm_plt
= &local_iplt
->arm
;
13574 /* If the symbol is a function that doesn't bind locally,
13575 this relocation will need a PLT entry. */
13576 if (root_plt
->refcount
!= -1)
13577 root_plt
->refcount
+= 1;
13580 arm_plt
->noncall_refcount
++;
13582 /* It's too early to use htab->use_blx here, so we have to
13583 record possible blx references separately from
13584 relocs that definitely need a thumb stub. */
13586 if (r_type
== R_ARM_THM_CALL
)
13587 arm_plt
->maybe_thumb_refcount
+= 1;
13589 if (r_type
== R_ARM_THM_JUMP24
13590 || r_type
== R_ARM_THM_JUMP19
)
13591 arm_plt
->thumb_refcount
+= 1;
13594 if (may_become_dynamic_p
)
13596 struct elf_dyn_relocs
*p
, **head
;
13598 /* Create a reloc section in dynobj. */
13599 if (sreloc
== NULL
)
13601 sreloc
= _bfd_elf_make_dynamic_reloc_section
13602 (sec
, dynobj
, 2, abfd
, ! htab
->use_rel
);
13604 if (sreloc
== NULL
)
13607 /* BPABI objects never have dynamic relocations mapped. */
13608 if (htab
->symbian_p
)
13612 flags
= bfd_get_section_flags (dynobj
, sreloc
);
13613 flags
&= ~(SEC_LOAD
| SEC_ALLOC
);
13614 bfd_set_section_flags (dynobj
, sreloc
, flags
);
13618 /* If this is a global symbol, count the number of
13619 relocations we need for this symbol. */
13621 head
= &((struct elf32_arm_link_hash_entry
*) h
)->dyn_relocs
;
13624 head
= elf32_arm_get_local_dynreloc_list (abfd
, r_symndx
, isym
);
13630 if (p
== NULL
|| p
->sec
!= sec
)
13632 bfd_size_type amt
= sizeof *p
;
13634 p
= (struct elf_dyn_relocs
*) bfd_alloc (htab
->root
.dynobj
, amt
);
13644 if (elf32_arm_howto_from_type (r_type
)->pc_relative
)
/* Unwinding tables are not referenced directly.  This pass marks them as
   required if the corresponding code section is marked.  */

static bfd_boolean
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  bfd_boolean again;

  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = TRUE;
  while (again)
    {
      again = FALSE;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (! is_arm_elf (sub))
	    continue;

	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      hdr = &elf_section_data (o)->this_hdr;
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  again = TRUE;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return FALSE;
		}
	    }
	}
    }

  return TRUE;
}
/* Treat mapping symbols as special target symbols.  */

static bfd_boolean
elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
{
  return bfd_is_arm_special_symbol_name (sym->name,
					 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
}
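/* Illustrative sketch (not part of BFD, guarded out of the build): ARM
   mapping symbols are named "$a", "$t" or "$d", optionally followed by
   a dot and further characters.  This is only a rough model of the
   mapping-symbol part of the name test; the real
   bfd_is_arm_special_symbol_name also recognises other classes of
   special symbols.  */
#if 0
#include <assert.h>

static int
is_arm_mapping_symbol (const char *name)
{
  if (name == NULL || name[0] != '$')
    return 0;
  if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
    return 0;
  return name[2] == 0 || name[2] == '.';
}

int
main (void)
{
  assert (is_arm_mapping_symbol ("$a") && is_arm_mapping_symbol ("$d.1"));
  assert (!is_arm_mapping_symbol ("$x") && !is_arm_mapping_symbol ("foo"));
  return 0;
}
#endif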
/* This is a copy of elf_find_function() from elf.c except that
   ARM mapping symbols are ignored when looking for function names
   and STT_ARM_TFUNC is considered to be a function type.  */

static bfd_boolean
arm_elf_find_function (bfd *	     abfd ATTRIBUTE_UNUSED,
		       asymbol **    symbols,
		       asection *    section,
		       bfd_vma	     offset,
		       const char ** filename_ptr,
		       const char ** functionname_ptr)
{
  const char * filename = NULL;
  asymbol * func = NULL;
  bfd_vma low_func = 0;
  asymbol ** p;

  for (p = symbols; *p != NULL; p++)
    {
      elf_symbol_type *q;

      q = (elf_symbol_type *) *p;

      switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
	{
	default:
	  break;
	case STT_FILE:
	  filename = bfd_asymbol_name (&q->symbol);
	  break;
	case STT_FUNC:
	case STT_ARM_TFUNC:
	case STT_NOTYPE:
	  /* Skip mapping symbols.  */
	  if ((q->symbol.flags & BSF_LOCAL)
	      && bfd_is_arm_special_symbol_name (q->symbol.name,
						 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    continue;
	  /* Fall through.  */
	  if (bfd_get_section (&q->symbol) == section
	      && q->symbol.value >= low_func
	      && q->symbol.value <= offset)
	    {
	      func = (asymbol *) q;
	      low_func = q->symbol.value;
	    }
	  break;
	}
    }

  if (func == NULL)
    return FALSE;

  if (filename_ptr)
    *filename_ptr = filename;
  if (functionname_ptr)
    *functionname_ptr = bfd_asymbol_name (func);

  return TRUE;
}
/* Find the nearest line to a particular section and offset, for error
   reporting.  This code is a duplicate of the code in elf.c, except
   that it uses arm_elf_find_function.  */

static bfd_boolean
elf32_arm_find_nearest_line (bfd *	    abfd,
			     asymbol **	    symbols,
			     asection *	    section,
			     bfd_vma	    offset,
			     const char **  filename_ptr,
			     const char **  functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  if (symbols == NULL)
    return FALSE;

  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
static bfd_boolean
elf32_arm_find_inliner_info (bfd *	    abfd,
			     const char **  filename_ptr,
			     const char **  functionname_ptr,
			     unsigned int * line_ptr)
{
  bfd_boolean found;

  found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
					 functionname_ptr, line_ptr,
					 & elf_tdata (abfd)->dwarf2_find_line_info);
  return found;
}
13839 /* Adjust a symbol defined by a dynamic object and referenced by a
13840 regular object. The current definition is in some section of the
13841 dynamic object, but we're not including those sections. We have to
13842 change the definition to something the rest of the link can
13846 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info
* info
,
13847 struct elf_link_hash_entry
* h
)
13851 struct elf32_arm_link_hash_entry
* eh
;
13852 struct elf32_arm_link_hash_table
*globals
;
13854 globals
= elf32_arm_hash_table (info
);
13855 if (globals
== NULL
)
13858 dynobj
= elf_hash_table (info
)->dynobj
;
13860 /* Make sure we know what is going on here. */
13861 BFD_ASSERT (dynobj
!= NULL
13863 || h
->type
== STT_GNU_IFUNC
13864 || h
->u
.weakdef
!= NULL
13867 && !h
->def_regular
)));
13869 eh
= (struct elf32_arm_link_hash_entry
*) h
;
13871 /* If this is a function, put it in the procedure linkage table. We
13872 will fill in the contents of the procedure linkage table later,
13873 when we know the address of the .got section. */
13874 if (h
->type
== STT_FUNC
|| h
->type
== STT_GNU_IFUNC
|| h
->needs_plt
)
13876 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
13877 symbol binds locally. */
13878 if (h
->plt
.refcount
<= 0
13879 || (h
->type
!= STT_GNU_IFUNC
13880 && (SYMBOL_CALLS_LOCAL (info
, h
)
13881 || (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
13882 && h
->root
.type
== bfd_link_hash_undefweak
))))
13884 /* This case can occur if we saw a PLT32 reloc in an input
13885 file, but the symbol was never referred to by a dynamic
13886 object, or if all references were garbage collected. In
13887 such a case, we don't actually need to build a procedure
13888 linkage table, and we can just do a PC24 reloc instead. */
13889 h
->plt
.offset
= (bfd_vma
) -1;
13890 eh
->plt
.thumb_refcount
= 0;
13891 eh
->plt
.maybe_thumb_refcount
= 0;
13892 eh
->plt
.noncall_refcount
= 0;
13900 /* It's possible that we incorrectly decided a .plt reloc was
13901 needed for an R_ARM_PC24 or similar reloc to a non-function sym
13902 in check_relocs. We can't decide accurately between function
13903 and non-function syms in check-relocs; Objects loaded later in
13904 the link may change h->type. So fix it now. */
13905 h
->plt
.offset
= (bfd_vma
) -1;
13906 eh
->plt
.thumb_refcount
= 0;
13907 eh
->plt
.maybe_thumb_refcount
= 0;
13908 eh
->plt
.noncall_refcount
= 0;
13911 /* If this is a weak symbol, and there is a real definition, the
13912 processor independent code will have arranged for us to see the
13913 real definition first, and we can just use the same value. */
13914 if (h
->u
.weakdef
!= NULL
)
13916 BFD_ASSERT (h
->u
.weakdef
->root
.type
== bfd_link_hash_defined
13917 || h
->u
.weakdef
->root
.type
== bfd_link_hash_defweak
);
13918 h
->root
.u
.def
.section
= h
->u
.weakdef
->root
.u
.def
.section
;
13919 h
->root
.u
.def
.value
= h
->u
.weakdef
->root
.u
.def
.value
;
13923 /* If there are no non-GOT references, we do not need a copy
13925 if (!h
->non_got_ref
)
13928 /* This is a reference to a symbol defined by a dynamic object which
13929 is not a function. */
13931 /* If we are creating a shared library, we must presume that the
13932 only references to the symbol are via the global offset table.
13933 For such cases we need not do anything here; the relocations will
13934 be handled correctly by relocate_section. Relocatable executables
13935 can reference data in shared objects directly, so we don't need to
13936 do anything here. */
13937 if (bfd_link_pic (info
) || globals
->root
.is_relocatable_executable
)
13940 /* We must allocate the symbol in our .dynbss section, which will
13941 become part of the .bss section of the executable. There will be
13942 an entry for this symbol in the .dynsym section. The dynamic
13943 object will contain position independent code, so all references
13944 from the dynamic object to this symbol will go through the global
13945 offset table. The dynamic linker will use the .dynsym entry to
13946 determine the address it must put in the global offset table, so
13947 both the dynamic object and the regular object will refer to the
13948 same memory location for the variable. */
13949 s
= bfd_get_linker_section (dynobj
, ".dynbss");
13950 BFD_ASSERT (s
!= NULL
);
13952 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
13953 copy the initial value out of the dynamic object and into the
13954 runtime process image. We need to remember the offset into the
13955 .rel(a).bss section we are going to use. */
13956 if ((h
->root
.u
.def
.section
->flags
& SEC_ALLOC
) != 0 && h
->size
!= 0)
13960 srel
= bfd_get_linker_section (dynobj
, RELOC_SECTION (globals
, ".bss"));
13961 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
13965 return _bfd_elf_adjust_dynamic_copy (info
, h
, s
);
13968 /* Allocate space in .plt, .got and associated reloc sections for
13972 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry
*h
, void * inf
)
13974 struct bfd_link_info
*info
;
13975 struct elf32_arm_link_hash_table
*htab
;
13976 struct elf32_arm_link_hash_entry
*eh
;
13977 struct elf_dyn_relocs
*p
;
13979 if (h
->root
.type
== bfd_link_hash_indirect
)
13982 eh
= (struct elf32_arm_link_hash_entry
*) h
;
13984 info
= (struct bfd_link_info
*) inf
;
13985 htab
= elf32_arm_hash_table (info
);
13989 if ((htab
->root
.dynamic_sections_created
|| h
->type
== STT_GNU_IFUNC
)
13990 && h
->plt
.refcount
> 0)
13992 /* Make sure this symbol is output as a dynamic symbol.
13993 Undefined weak syms won't yet be marked as dynamic. */
13994 if (h
->dynindx
== -1
13995 && !h
->forced_local
)
13997 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14001 /* If the call in the PLT entry binds locally, the associated
14002 GOT entry should use an R_ARM_IRELATIVE relocation instead of
14003 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
14004 than the .plt section. */
14005 if (h
->type
== STT_GNU_IFUNC
&& SYMBOL_CALLS_LOCAL (info
, h
))
14008 if (eh
->plt
.noncall_refcount
== 0
14009 && SYMBOL_REFERENCES_LOCAL (info
, h
))
14010 /* All non-call references can be resolved directly.
14011 This means that they can (and in some cases, must)
14012 resolve directly to the run-time target, rather than
14013 to the PLT. That in turns means that any .got entry
14014 would be equal to the .igot.plt entry, so there's
14015 no point having both. */
14016 h
->got
.refcount
= 0;
14019 if (bfd_link_pic (info
)
14021 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h
))
14023 elf32_arm_allocate_plt_entry (info
, eh
->is_iplt
, &h
->plt
, &eh
->plt
);
14025 /* If this symbol is not defined in a regular file, and we are
14026 not generating a shared library, then set the symbol to this
14027 location in the .plt. This is required to make function
14028 pointers compare as equal between the normal executable and
14029 the shared library. */
14030 if (! bfd_link_pic (info
)
14031 && !h
->def_regular
)
14033 h
->root
.u
.def
.section
= htab
->root
.splt
;
14034 h
->root
.u
.def
.value
= h
->plt
.offset
;
14036 /* Make sure the function is not marked as Thumb, in case
14037 it is the target of an ABS32 relocation, which will
14038 point to the PLT entry. */
14039 h
->target_internal
= ST_BRANCH_TO_ARM
;
14042 /* VxWorks executables have a second set of relocations for
14043 each PLT entry. They go in a separate relocation section,
14044 which is processed by the kernel loader. */
14045 if (htab
->vxworks_p
&& !bfd_link_pic (info
))
14047 /* There is a relocation for the initial PLT entry:
14048 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
14049 if (h
->plt
.offset
== htab
->plt_header_size
)
14050 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 1);
14052 /* There are two extra relocations for each subsequent
14053 PLT entry: an R_ARM_32 relocation for the GOT entry,
14054 and an R_ARM_32 relocation for the PLT entry. */
14055 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 2);
14060 h
->plt
.offset
= (bfd_vma
) -1;
14066 h
->plt
.offset
= (bfd_vma
) -1;
14070 eh
= (struct elf32_arm_link_hash_entry
*) h
;
14071 eh
->tlsdesc_got
= (bfd_vma
) -1;
14073 if (h
->got
.refcount
> 0)
14077 int tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
14080 /* Make sure this symbol is output as a dynamic symbol.
14081 Undefined weak syms won't yet be marked as dynamic. */
14082 if (h
->dynindx
== -1
14083 && !h
->forced_local
)
14085 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14089 if (!htab
->symbian_p
)
14091 s
= htab
->root
.sgot
;
14092 h
->got
.offset
= s
->size
;
14094 if (tls_type
== GOT_UNKNOWN
)
14097 if (tls_type
== GOT_NORMAL
)
14098 /* Non-TLS symbols need one GOT slot. */
14102 if (tls_type
& GOT_TLS_GDESC
)
14104 /* R_ARM_TLS_DESC needs 2 GOT slots. */
14106 = (htab
->root
.sgotplt
->size
14107 - elf32_arm_compute_jump_table_size (htab
));
14108 htab
->root
.sgotplt
->size
+= 8;
14109 h
->got
.offset
= (bfd_vma
) -2;
14110 /* plt.got_offset needs to know there's a TLS_DESC
14111 reloc in the middle of .got.plt. */
14112 htab
->num_tls_desc
++;
14115 if (tls_type
& GOT_TLS_GD
)
14117 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
14118 the symbol is both GD and GDESC, got.offset may
14119 have been overwritten. */
14120 h
->got
.offset
= s
->size
;
14124 if (tls_type
& GOT_TLS_IE
)
14125 /* R_ARM_TLS_IE32 needs one GOT slot. */
14129 dyn
= htab
->root
.dynamic_sections_created
;
14132 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
14133 bfd_link_pic (info
),
14135 && (!bfd_link_pic (info
)
14136 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
14139 if (tls_type
!= GOT_NORMAL
14140 && (bfd_link_pic (info
) || indx
!= 0)
14141 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
14142 || h
->root
.type
!= bfd_link_hash_undefweak
))
14144 if (tls_type
& GOT_TLS_IE
)
14145 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14147 if (tls_type
& GOT_TLS_GD
)
14148 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14150 if (tls_type
& GOT_TLS_GDESC
)
14152 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
14153 /* GDESC needs a trampoline to jump to. */
14154 htab
->tls_trampoline
= -1;
14157 /* Only GD needs it. GDESC just emits one relocation per
14159 if ((tls_type
& GOT_TLS_GD
) && indx
!= 0)
14160 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14162 else if (indx
!= -1 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
14164 if (htab
->root
.dynamic_sections_created
)
14165 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
14166 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14168 else if (h
->type
== STT_GNU_IFUNC
14169 && eh
->plt
.noncall_refcount
== 0)
14170 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
14171 they all resolve dynamically instead. Reserve room for the
14172 GOT entry's R_ARM_IRELATIVE relocation. */
14173 elf32_arm_allocate_irelocs (info
, htab
->root
.srelgot
, 1);
14174 else if (bfd_link_pic (info
)
14175 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
14176 || h
->root
.type
!= bfd_link_hash_undefweak
))
14177 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
14178 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14182 h
->got
.offset
= (bfd_vma
) -1;
14184 /* Allocate stubs for exported Thumb functions on v4t. */
14185 if (!htab
->use_blx
&& h
->dynindx
!= -1
14187 && h
->target_internal
== ST_BRANCH_TO_THUMB
14188 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
14190 struct elf_link_hash_entry
* th
;
14191 struct bfd_link_hash_entry
* bh
;
14192 struct elf_link_hash_entry
* myh
;
14196 /* Create a new symbol to regist the real location of the function. */
14197 s
= h
->root
.u
.def
.section
;
14198 sprintf (name
, "__real_%s", h
->root
.root
.string
);
14199 _bfd_generic_link_add_one_symbol (info
, s
->owner
,
14200 name
, BSF_GLOBAL
, s
,
14201 h
->root
.u
.def
.value
,
14202 NULL
, TRUE
, FALSE
, &bh
);
14204 myh
= (struct elf_link_hash_entry
*) bh
;
14205 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
14206 myh
->forced_local
= 1;
14207 myh
->target_internal
= ST_BRANCH_TO_THUMB
;
14208 eh
->export_glue
= myh
;
14209 th
= record_arm_to_thumb_glue (info
, h
);
14210 /* Point the symbol at the stub. */
14211 h
->type
= ELF_ST_INFO (ELF_ST_BIND (h
->type
), STT_FUNC
);
14212 h
->target_internal
= ST_BRANCH_TO_ARM
;
14213 h
->root
.u
.def
.section
= th
->root
.u
.def
.section
;
14214 h
->root
.u
.def
.value
= th
->root
.u
.def
.value
& ~1;
14217 if (eh
->dyn_relocs
== NULL
)
14220 /* In the shared -Bsymbolic case, discard space allocated for
14221 dynamic pc-relative relocs against symbols which turn out to be
14222 defined in regular objects. For the normal shared case, discard
14223 space for pc-relative relocs that have become local due to symbol
14224 visibility changes. */
14226 if (bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
)
14228 /* Relocs that use pc_count are PC-relative forms, which will appear
14229 on something like ".long foo - ." or "movw REG, foo - .". We want
14230 calls to protected symbols to resolve directly to the function
14231 rather than going via the plt. If people want function pointer
14232 comparisons to work as expected then they should avoid writing
14233 assembly like ".long foo - .". */
14234 if (SYMBOL_CALLS_LOCAL (info
, h
))
14236 struct elf_dyn_relocs
**pp
;
14238 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
; )
14240 p
->count
-= p
->pc_count
;
14249 if (htab
->vxworks_p
)
14251 struct elf_dyn_relocs
**pp
;
14253 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
; )
14255 if (strcmp (p
->sec
->output_section
->name
, ".tls_vars") == 0)
14262 /* Also discard relocs on undefined weak syms with non-default
14264 if (eh
->dyn_relocs
!= NULL
14265 && h
->root
.type
== bfd_link_hash_undefweak
)
14267 if (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
)
14268 eh
->dyn_relocs
= NULL
;
14270 /* Make sure undefined weak symbols are output as a dynamic
14272 else if (h
->dynindx
== -1
14273 && !h
->forced_local
)
14275 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14280 else if (htab
->root
.is_relocatable_executable
&& h
->dynindx
== -1
14281 && h
->root
.type
== bfd_link_hash_new
)
14283 /* Output absolute symbols so that we can create relocations
14284 against them. For normal symbols we output a relocation
14285 against the section that contains them. */
14286 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14293 /* For the non-shared case, discard space for relocs against
14294 symbols which turn out to need copy relocs or are not
14297 if (!h
->non_got_ref
14298 && ((h
->def_dynamic
14299 && !h
->def_regular
)
14300 || (htab
->root
.dynamic_sections_created
14301 && (h
->root
.type
== bfd_link_hash_undefweak
14302 || h
->root
.type
== bfd_link_hash_undefined
))))
14304 /* Make sure this symbol is output as a dynamic symbol.
14305 Undefined weak syms won't yet be marked as dynamic. */
14306 if (h
->dynindx
== -1
14307 && !h
->forced_local
)
14309 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14313 /* If that succeeded, we know we'll be keeping all the
14315 if (h
->dynindx
!= -1)
14319 eh
->dyn_relocs
= NULL
;
14324 /* Finally, allocate space. */
14325 for (p
= eh
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
14327 asection
*sreloc
= elf_section_data (p
->sec
)->sreloc
;
14328 if (h
->type
== STT_GNU_IFUNC
14329 && eh
->plt
.noncall_refcount
== 0
14330 && SYMBOL_REFERENCES_LOCAL (info
, h
))
14331 elf32_arm_allocate_irelocs (info
, sreloc
, p
->count
);
14333 elf32_arm_allocate_dynrelocs (info
, sreloc
, p
->count
);
/* Find any dynamic relocs that apply to read-only sections.  */

static bfd_boolean
elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
{
  struct elf32_arm_link_hash_entry * eh;
  struct elf_dyn_relocs * p;

  eh = (struct elf32_arm_link_hash_entry *) h;
  for (p = eh->dyn_relocs; p != NULL; p = p->next)
    {
      asection *s = p->sec;

      if (s != NULL && (s->flags & SEC_READONLY) != 0)
	{
	  struct bfd_link_info *info = (struct bfd_link_info *) inf;

	  info->flags |= DF_TEXTREL;

	  /* Not an error, just cut short the traversal.  */
	  return FALSE;
	}
    }
  return TRUE;
}
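/* Illustrative sketch (not part of BFD, guarded out of the build): the
   callback above returns FALSE as soon as one read-only section with
   dynamic relocs is found, because setting DF_TEXTREL once is enough
   and visiting the remaining symbols would be wasted work.  The
   miniature traversal below uses the same convention: the walk stops
   when the callback returns 0.  All names here are hypothetical.  */
#if 0
#include <stdio.h>

struct demo_sec { int readonly; int dynrelocs; };

static int
note_textrel (const struct demo_sec *s, int *flags)
{
  if (s->readonly && s->dynrelocs)
    {
      *flags |= 1;	/* stand-in for DF_TEXTREL */
      return 0;		/* cut the traversal short */
    }
  return 1;		/* keep going */
}

int
main (void)
{
  struct demo_sec secs[] = { {0, 1}, {1, 1}, {1, 0} };
  int flags = 0;
  unsigned i;

  for (i = 0; i < sizeof secs / sizeof secs[0]; i++)
    if (!note_textrel (&secs[i], &flags))
      break;

  printf ("flags = %d, stopped at section %u\n", flags, i);
  return 0;
}
#endif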
void
bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
				 int byteswap_code)
{
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return;

  globals->byteswap_code = byteswap_code;
}
14378 /* Set the sizes of the dynamic sections. */
14381 elf32_arm_size_dynamic_sections (bfd
* output_bfd ATTRIBUTE_UNUSED
,
14382 struct bfd_link_info
* info
)
14387 bfd_boolean relocs
;
14389 struct elf32_arm_link_hash_table
*htab
;
14391 htab
= elf32_arm_hash_table (info
);
14395 dynobj
= elf_hash_table (info
)->dynobj
;
14396 BFD_ASSERT (dynobj
!= NULL
);
14397 check_use_blx (htab
);
14399 if (elf_hash_table (info
)->dynamic_sections_created
)
14401 /* Set the contents of the .interp section to the interpreter. */
14402 if (bfd_link_executable (info
) && !info
->nointerp
)
14404 s
= bfd_get_linker_section (dynobj
, ".interp");
14405 BFD_ASSERT (s
!= NULL
);
14406 s
->size
= sizeof ELF_DYNAMIC_INTERPRETER
;
14407 s
->contents
= (unsigned char *) ELF_DYNAMIC_INTERPRETER
;
14411 /* Set up .got offsets for local syms, and space for local dynamic
14413 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
14415 bfd_signed_vma
*local_got
;
14416 bfd_signed_vma
*end_local_got
;
14417 struct arm_local_iplt_info
**local_iplt_ptr
, *local_iplt
;
14418 char *local_tls_type
;
14419 bfd_vma
*local_tlsdesc_gotent
;
14420 bfd_size_type locsymcount
;
14421 Elf_Internal_Shdr
*symtab_hdr
;
14423 bfd_boolean is_vxworks
= htab
->vxworks_p
;
14424 unsigned int symndx
;
14426 if (! is_arm_elf (ibfd
))
14429 for (s
= ibfd
->sections
; s
!= NULL
; s
= s
->next
)
14431 struct elf_dyn_relocs
*p
;
14433 for (p
= (struct elf_dyn_relocs
*)
14434 elf_section_data (s
)->local_dynrel
; p
!= NULL
; p
= p
->next
)
14436 if (!bfd_is_abs_section (p
->sec
)
14437 && bfd_is_abs_section (p
->sec
->output_section
))
14439 /* Input section has been discarded, either because
14440 it is a copy of a linkonce section or due to
14441 linker script /DISCARD/, so we'll be discarding
14444 else if (is_vxworks
14445 && strcmp (p
->sec
->output_section
->name
,
14448 /* Relocations in vxworks .tls_vars sections are
14449 handled specially by the loader. */
14451 else if (p
->count
!= 0)
14453 srel
= elf_section_data (p
->sec
)->sreloc
;
14454 elf32_arm_allocate_dynrelocs (info
, srel
, p
->count
);
14455 if ((p
->sec
->output_section
->flags
& SEC_READONLY
) != 0)
14456 info
->flags
|= DF_TEXTREL
;
14461 local_got
= elf_local_got_refcounts (ibfd
);
14465 symtab_hdr
= & elf_symtab_hdr (ibfd
);
14466 locsymcount
= symtab_hdr
->sh_info
;
14467 end_local_got
= local_got
+ locsymcount
;
14468 local_iplt_ptr
= elf32_arm_local_iplt (ibfd
);
14469 local_tls_type
= elf32_arm_local_got_tls_type (ibfd
);
14470 local_tlsdesc_gotent
= elf32_arm_local_tlsdesc_gotent (ibfd
);
14472 s
= htab
->root
.sgot
;
14473 srel
= htab
->root
.srelgot
;
14474 for (; local_got
< end_local_got
;
14475 ++local_got
, ++local_iplt_ptr
, ++local_tls_type
,
14476 ++local_tlsdesc_gotent
, ++symndx
)
14478 *local_tlsdesc_gotent
= (bfd_vma
) -1;
14479 local_iplt
= *local_iplt_ptr
;
14480 if (local_iplt
!= NULL
)
14482 struct elf_dyn_relocs
*p
;
14484 if (local_iplt
->root
.refcount
> 0)
14486 elf32_arm_allocate_plt_entry (info
, TRUE
,
14489 if (local_iplt
->arm
.noncall_refcount
== 0)
14490 /* All references to the PLT are calls, so all
14491 non-call references can resolve directly to the
14492 run-time target. This means that the .got entry
14493 would be the same as the .igot.plt entry, so there's
14494 no point creating both. */
14499 BFD_ASSERT (local_iplt
->arm
.noncall_refcount
== 0);
14500 local_iplt
->root
.offset
= (bfd_vma
) -1;
14503 for (p
= local_iplt
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
14507 psrel
= elf_section_data (p
->sec
)->sreloc
;
14508 if (local_iplt
->arm
.noncall_refcount
== 0)
14509 elf32_arm_allocate_irelocs (info
, psrel
, p
->count
);
14511 elf32_arm_allocate_dynrelocs (info
, psrel
, p
->count
);
14514 if (*local_got
> 0)
14516 Elf_Internal_Sym
*isym
;
14518 *local_got
= s
->size
;
14519 if (*local_tls_type
& GOT_TLS_GD
)
14520 /* TLS_GD relocs need an 8-byte structure in the GOT. */
14522 if (*local_tls_type
& GOT_TLS_GDESC
)
14524 *local_tlsdesc_gotent
= htab
->root
.sgotplt
->size
14525 - elf32_arm_compute_jump_table_size (htab
);
14526 htab
->root
.sgotplt
->size
+= 8;
14527 *local_got
= (bfd_vma
) -2;
14528 /* plt.got_offset needs to know there's a TLS_DESC
14529 reloc in the middle of .got.plt. */
14530 htab
->num_tls_desc
++;
14532 if (*local_tls_type
& GOT_TLS_IE
)
14535 if (*local_tls_type
& GOT_NORMAL
)
14537 /* If the symbol is both GD and GDESC, *local_got
14538 may have been overwritten. */
14539 *local_got
= s
->size
;
14543 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
, ibfd
, symndx
);
14547 /* If all references to an STT_GNU_IFUNC PLT are calls,
14548 then all non-call references, including this GOT entry,
14549 resolve directly to the run-time target. */
14550 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
14551 && (local_iplt
== NULL
14552 || local_iplt
->arm
.noncall_refcount
== 0))
14553 elf32_arm_allocate_irelocs (info
, srel
, 1);
14554 else if (bfd_link_pic (info
) || output_bfd
->flags
& DYNAMIC
)
14556 if ((bfd_link_pic (info
) && !(*local_tls_type
& GOT_TLS_GDESC
))
14557 || *local_tls_type
& GOT_TLS_GD
)
14558 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
14560 if (bfd_link_pic (info
) && *local_tls_type
& GOT_TLS_GDESC
)
14562 elf32_arm_allocate_dynrelocs (info
,
14563 htab
->root
.srelplt
, 1);
14564 htab
->tls_trampoline
= -1;
14569 *local_got
= (bfd_vma
) -1;
14573 if (htab
->tls_ldm_got
.refcount
> 0)
14575 /* Allocate two GOT entries and one dynamic relocation (if necessary)
14576 for R_ARM_TLS_LDM32 relocations. */
14577 htab
->tls_ldm_got
.offset
= htab
->root
.sgot
->size
;
14578 htab
->root
.sgot
->size
+= 8;
14579 if (bfd_link_pic (info
))
14580 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14583 htab
->tls_ldm_got
.offset
= -1;
14585 /* Allocate global sym .plt and .got entries, and space for global
14586 sym dynamic relocs. */
14587 elf_link_hash_traverse (& htab
->root
, allocate_dynrelocs_for_symbol
, info
);
14589 /* Here we rummage through the found bfds to collect glue information. */
14590 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
14592 if (! is_arm_elf (ibfd
))
14595 /* Initialise mapping tables for code/data. */
14596 bfd_elf32_arm_init_maps (ibfd
);
14598 if (!bfd_elf32_arm_process_before_allocation (ibfd
, info
)
14599 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd
, info
)
14600 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd
, info
))
14601 /* xgettext:c-format */
14602 _bfd_error_handler (_("Errors encountered processing file %s"),
14606 /* Allocate space for the glue sections now that we've sized them. */
14607 bfd_elf32_arm_allocate_interworking_sections (info
);
14609 /* For every jump slot reserved in the sgotplt, reloc_count is
14610 incremented. However, when we reserve space for TLS descriptors,
14611 it's not incremented, so in order to compute the space reserved
14612 for them, it suffices to multiply the reloc count by the jump
14614 if (htab
->root
.srelplt
)
14615 htab
->sgotplt_jump_table_size
= elf32_arm_compute_jump_table_size(htab
);
14617 if (htab
->tls_trampoline
)
14619 if (htab
->root
.splt
->size
== 0)
14620 htab
->root
.splt
->size
+= htab
->plt_header_size
;
14622 htab
->tls_trampoline
= htab
->root
.splt
->size
;
14623 htab
->root
.splt
->size
+= htab
->plt_entry_size
;
14625 /* If we're not using lazy TLS relocations, don't generate the
14626 PLT and GOT entries they require. */
14627 if (!(info
->flags
& DF_BIND_NOW
))
14629 htab
->dt_tlsdesc_got
= htab
->root
.sgot
->size
;
14630 htab
->root
.sgot
->size
+= 4;
14632 htab
->dt_tlsdesc_plt
= htab
->root
.splt
->size
;
14633 htab
->root
.splt
->size
+= 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline
);
14637 /* The check_relocs and adjust_dynamic_symbol entry points have
14638 determined the sizes of the various dynamic sections. Allocate
14639 memory for them. */
14642 for (s
= dynobj
->sections
; s
!= NULL
; s
= s
->next
)
14646 if ((s
->flags
& SEC_LINKER_CREATED
) == 0)
14649 /* It's OK to base decisions on the section name, because none
14650 of the dynobj section names depend upon the input files. */
14651 name
= bfd_get_section_name (dynobj
, s
);
14653 if (s
== htab
->root
.splt
)
14655 /* Remember whether there is a PLT. */
14656 plt
= s
->size
!= 0;
14658 else if (CONST_STRNEQ (name
, ".rel"))
14662 /* Remember whether there are any reloc sections other
14663 than .rel(a).plt and .rela.plt.unloaded. */
14664 if (s
!= htab
->root
.srelplt
&& s
!= htab
->srelplt2
)
14667 /* We use the reloc_count field as a counter if we need
14668 to copy relocs into the output file. */
14669 s
->reloc_count
= 0;
14672 else if (s
!= htab
->root
.sgot
14673 && s
!= htab
->root
.sgotplt
14674 && s
!= htab
->root
.iplt
14675 && s
!= htab
->root
.igotplt
14676 && s
!= htab
->sdynbss
)
14678 /* It's not one of our sections, so don't allocate space. */
14684 /* If we don't need this section, strip it from the
14685 output file. This is mostly to handle .rel(a).bss and
14686 .rel(a).plt. We must create both sections in
14687 create_dynamic_sections, because they must be created
14688 before the linker maps input sections to output
14689 sections. The linker does that before
14690 adjust_dynamic_symbol is called, and it is that
14691 function which decides whether anything needs to go
14692 into these sections. */
14693 s
->flags
|= SEC_EXCLUDE
;
14697 if ((s
->flags
& SEC_HAS_CONTENTS
) == 0)
14700 /* Allocate memory for the section contents. */
14701 s
->contents
= (unsigned char *) bfd_zalloc (dynobj
, s
->size
);
14702 if (s
->contents
== NULL
)
14706 if (elf_hash_table (info
)->dynamic_sections_created
)
14708 /* Add some entries to the .dynamic section. We fill in the
14709 values later, in elf32_arm_finish_dynamic_sections, but we
14710 must add the entries now so that we get the correct size for
14711 the .dynamic section. The DT_DEBUG entry is filled in by the
14712 dynamic linker and used by the debugger. */
14713 #define add_dynamic_entry(TAG, VAL) \
14714 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
14716 if (bfd_link_executable (info
))
14718 if (!add_dynamic_entry (DT_DEBUG
, 0))
14724 if ( !add_dynamic_entry (DT_PLTGOT
, 0)
14725 || !add_dynamic_entry (DT_PLTRELSZ
, 0)
14726 || !add_dynamic_entry (DT_PLTREL
,
14727 htab
->use_rel
? DT_REL
: DT_RELA
)
14728 || !add_dynamic_entry (DT_JMPREL
, 0))
14731 if (htab
->dt_tlsdesc_plt
&&
14732 (!add_dynamic_entry (DT_TLSDESC_PLT
,0)
14733 || !add_dynamic_entry (DT_TLSDESC_GOT
,0)))
14741 if (!add_dynamic_entry (DT_REL
, 0)
14742 || !add_dynamic_entry (DT_RELSZ
, 0)
14743 || !add_dynamic_entry (DT_RELENT
, RELOC_SIZE (htab
)))
14748 if (!add_dynamic_entry (DT_RELA
, 0)
14749 || !add_dynamic_entry (DT_RELASZ
, 0)
14750 || !add_dynamic_entry (DT_RELAENT
, RELOC_SIZE (htab
)))
14755 /* If any dynamic relocs apply to a read-only section,
14756 then we need a DT_TEXTREL entry. */
14757 if ((info
->flags
& DF_TEXTREL
) == 0)
14758 elf_link_hash_traverse (& htab
->root
, elf32_arm_readonly_dynrelocs
,
14761 if ((info
->flags
& DF_TEXTREL
) != 0)
14763 if (!add_dynamic_entry (DT_TEXTREL
, 0))
14766 if (htab
->vxworks_p
14767 && !elf_vxworks_add_dynamic_entries (output_bfd
, info
))
14770 #undef add_dynamic_entry
14775 /* Size sections even though they're not dynamic. We use it to setup
14776 _TLS_MODULE_BASE_, if needed. */
14779 elf32_arm_always_size_sections (bfd
*output_bfd
,
14780 struct bfd_link_info
*info
)
14784 if (bfd_link_relocatable (info
))
14787 tls_sec
= elf_hash_table (info
)->tls_sec
;
14791 struct elf_link_hash_entry
*tlsbase
;
14793 tlsbase
= elf_link_hash_lookup
14794 (elf_hash_table (info
), "_TLS_MODULE_BASE_", TRUE
, TRUE
, FALSE
);
14798 struct bfd_link_hash_entry
*bh
= NULL
;
14799 const struct elf_backend_data
*bed
14800 = get_elf_backend_data (output_bfd
);
14802 if (!(_bfd_generic_link_add_one_symbol
14803 (info
, output_bfd
, "_TLS_MODULE_BASE_", BSF_LOCAL
,
14804 tls_sec
, 0, NULL
, FALSE
,
14805 bed
->collect
, &bh
)))
14808 tlsbase
->type
= STT_TLS
;
14809 tlsbase
= (struct elf_link_hash_entry
*)bh
;
14810 tlsbase
->def_regular
= 1;
14811 tlsbase
->other
= STV_HIDDEN
;
14812 (*bed
->elf_backend_hide_symbol
) (info
, tlsbase
, TRUE
);
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  sym->st_target_internal = ST_BRANCH_TO_ARM;
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection *s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      s = htab->srelbss;
      BFD_ASSERT (s != NULL);

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
     to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
static void
arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
		    bfd_byte *contents,
		    const unsigned long *template, unsigned count)
{
  unsigned ix;

  for (ix = 0; ix != count; ix++)
    {
      unsigned long insn = template[ix];

      /* Emit mov pc,rx if bx is not permitted.  */
      if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
	insn = (insn & 0xf000000f) | 0x01a0f000;
      put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
    }
}
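
/* Worked example (illustrative): with fix_v4bx == 1, a template word
   0xe12fff11 ("bx r1") matches (insn & 0x0ffffff0) == 0x012fff10 and is
   rewritten to (0xe12fff11 & 0xf000000f) | 0x01a0f000 == 0xe1a0f001,
   i.e. "mov pc, r1", which is safe on ARMv4 cores that lack BX.  */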
/* Install the special first PLT entry for elf32-arm-nacl.  Unlike
   other variants, NaCl needs this entry in a static executable's
   .iplt too.  When we're handling that case, GOT_DISPLACEMENT is
   zero.  For .iplt really only the last bundle is useful, and .iplt
   could have a shorter first entry, with each individual PLT entry's
   relative branch calculated differently so it targets the last
   bundle instead of the instruction before it (labelled .Lplt_tail
   above).  But it's simpler to keep the size and layout of PLT0
   consistent with the dynamic case, at the cost of some dead code at
   the start of .iplt and the one dead store to the stack at the start
   of .Lplt_tail.  */

static void
arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
		   asection *plt, bfd_vma got_displacement)
{
  unsigned int i;

  put_arm_insn (htab, output_bfd,
		elf32_arm_nacl_plt0_entry[0]
		| arm_movw_immediate (got_displacement),
		plt->contents + 0);
  put_arm_insn (htab, output_bfd,
		elf32_arm_nacl_plt0_entry[1]
		| arm_movt_immediate (got_displacement),
		plt->contents + 4);

  for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
    put_arm_insn (htab, output_bfd,
		  elf32_arm_nacl_plt0_entry[i],
		  plt->contents + (i * 4));
}
/* Finish up the dynamic sections.  */

static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection *sgot;
  asection *sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_section_by_name (output_bfd, name);
	      if (s == NULL)
		{
		  /* PR ld/14397: Issue an error message if a required section is missing.  */
		  (*_bfd_error_handler)
		    (_("error: required section '%s' not found in the linker script"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->vma;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->filepos;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	      if (!htab->symbian_p)
		{
		  /* My reading of the SVR4 ABI indicates that the
		     procedure linkage table relocs (DT_JMPREL) should be
		     included in the overall relocs (DT_REL).  This is
		     what Solaris does.  However, UnixWare can not handle
		     that case.  Therefore, we override the DT_RELSZ entry
		     here to make it not include the JMPREL relocs.  Since
		     the linker script arranges for .rel(a).plt to follow all
		     other relocation sections, we don't have to worry
		     about changing the DT_REL entry.  */
		  s = htab->root.srelplt;
		  if (s != NULL)
		    dyn.d_un.d_val -= s->size;
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		  break;
		}
	      /* Fall through.  */

	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relocation section, since relocation sections are
		 never allocated under the BPABI.  The comments above
		 about Unixware notwithstanding, we include all of the
		 relocations here.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;

		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  dyn.d_un.d_val = 0;
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	      /* Set the bottom bit of DT_INIT/FINI if the
		 corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->dt_tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  return TRUE;
}
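
/* Worked example (illustrative figures): if the generic linker code set
   DT_RELSZ to 0x68 bytes while .rel.plt (htab->root.srelplt) holds 0x18
   bytes, the DT_RELSZ override above rewrites the tag to
   0x68 - 0x18 == 0x50, so the JMPREL relocations are counted only by
   DT_PLTRELSZ, as UnixWare expects.  */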
static void
elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
{
  Elf_Internal_Ehdr * i_ehdrp;	/* ELF file header, internal form.  */
  struct elf32_arm_link_hash_table *globals;

  i_ehdrp = elf_elfheader (abfd);

  if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
    i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
  else
    _bfd_elf_post_process_headers (abfd, link_info);
  i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;

  if (link_info)
    {
      globals = elf32_arm_hash_table (link_info);
      if (globals != NULL && globals->byteswap_code)
	i_ehdrp->e_flags |= EF_ARM_BE8;
    }

  if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
      && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
    {
      int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
      if (abi == AEABI_VFP_args_vfp)
	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
      else
	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
    }
}
static enum elf_reloc_type_class
elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
			    const asection *rel_sec ATTRIBUTE_UNUSED,
			    const Elf_Internal_Rela *rela)
{
  switch ((int) ELF32_R_TYPE (rela->r_info))
    {
    case R_ARM_RELATIVE:
      return reloc_class_relative;
    case R_ARM_JUMP_SLOT:
      return reloc_class_plt;
    case R_ARM_COPY:
      return reloc_class_copy;
    default:
      return reloc_class_normal;
    }
}
static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
/* Return TRUE if this is an unwinding table entry.  */

static bfd_boolean
is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
{
  return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
	  || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
}
/* Set the type and flags for an ARM section.  We do this by
   the section name, which is a hack, but ought to work.  */

static bfd_boolean
elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
{
  const char * name;

  name = bfd_get_section_name (abfd, sec);

  if (is_arm_elf_unwind_section_name (abfd, name))
    {
      hdr->sh_type = SHT_ARM_EXIDX;
      hdr->sh_flags |= SHF_LINK_ORDER;
    }
  return TRUE;
}
/* Handle an ARM specific section when reading an object file.  This is
   called when bfd_section_from_shdr finds a section with an unknown
   type.  */

static bfd_boolean
elf32_arm_section_from_shdr (bfd *abfd,
			     Elf_Internal_Shdr * hdr,
			     const char *name,
			     int shindex)
{
  /* There ought to be a place to keep ELF backend specific flags, but
     at the moment there isn't one.  We just keep track of the
     sections by their name, instead.  Fortunately, the ABI gives
     names for all the ARM specific sections, so we will probably get
     away with this.  */
  switch (hdr->sh_type)
    {
    case SHT_ARM_EXIDX:
    case SHT_ARM_PREEMPTMAP:
    case SHT_ARM_ATTRIBUTES:
      break;

    default:
      return FALSE;
    }

  if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
    return FALSE;

  return TRUE;
}
static _arm_elf_section_data *
get_arm_elf_section_data (asection * sec)
{
  if (sec && sec->owner && is_arm_elf (sec->owner))
    return elf32_arm_section_data (sec);
  else
    return NULL;
}
typedef struct
{
  void *flaginfo;
  struct bfd_link_info *info;
  asection *sec;
  int sec_shndx;
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;

enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};

/* Output a single mapping symbol.  */

static bfd_boolean
elf32_arm_output_map_sym (output_arch_syminfo *osi,
			  enum map_symbol_type type,
			  bfd_vma offset)
{
  static const char *names[3] = {"$a", "$t", "$d"};
  Elf_Internal_Sym sym;

  sym.st_value = osi->sec->output_section->vma
		 + osi->sec->output_offset
		 + offset;
  sym.st_size = 0;
  sym.st_other = 0;
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
  sym.st_shndx = osi->sec_shndx;
  sym.st_target_internal = 0;
  elf32_arm_section_map_add (osi->sec, names[type][1], offset);
  return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
}
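
/* Worked example (illustrative): for type == ARM_MAP_THUMB, names[type]
   is "$t" and names[type][1] is 't', the character recorded in the
   section map; the emitted local symbol gets
   st_value == section VMA + output offset + OFFSET, marking the start
   of a run of Thumb instructions.  */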
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  addr = root_plt->offset & -2;
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
/* Output mapping symbols for PLT entries associated with H.  */

static bfd_boolean
elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
{
  output_arch_syminfo *osi = (output_arch_syminfo *) inf;
  struct elf32_arm_link_hash_entry *eh;

  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  if (h->root.type == bfd_link_hash_warning)
    /* When warning symbols are created, they **replace** the "real"
       entry in the hash table, thus we never get to see the real
       symbol in a hash traversal.  So look at it now.  */
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  eh = (struct elf32_arm_link_hash_entry *) h;
  return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
				     &h->plt, &eh->plt);
}
/* Output a single local symbol for a generated stub.  */

static bfd_boolean
elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
			   bfd_vma offset, bfd_vma size)
{
  Elf_Internal_Sym sym;

  sym.st_value = osi->sec->output_section->vma
		 + osi->sec->output_offset
		 + offset;
  sym.st_size = size;
  sym.st_other = 0;
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  sym.st_shndx = osi->sec_shndx;
  sym.st_target_internal = 0;
  return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
}
static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  stub_name = stub_entry->output_name;

  template_sequence = stub_entry->stub_template;
  switch (template_sequence[0].type)
    {
    case ARM_TYPE:
      if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
	return FALSE;
      break;
    case THUMB16_TYPE:
    case THUMB32_TYPE:
      if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
				      stub_entry->stub_size))
	return FALSE;
      break;
    default:
      BFD_FAIL ();
      return FALSE;
    }

  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	case DATA_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab))
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
/* Allocate target specific section data.  */

static bfd_boolean
elf32_arm_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      _arm_elf_section_data *sdata;
      bfd_size_type amt = sizeof (*sdata);

      sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
      if (sdata == NULL)
	return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}
/* Used to order a list of mapping symbols by address.  */

static int
elf32_arm_compare_mapping (const void * a, const void * b)
{
  const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
  const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;

  if (amap->vma > bmap->vma)
    return 1;
  else if (amap->vma < bmap->vma)
    return -1;
  else if (amap->type > bmap->type)
    /* Ensure results do not depend on the host qsort for objects with
       multiple mapping symbols at the same address by sorting on type
       after vma.  */
    return 1;
  else if (amap->type < bmap->type)
    return -1;
  else
    return 0;
}
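
/* Usage sketch (illustrative): a caller with MAPCOUNT entries in MAP can
   obtain a stable address/type order with the standard qsort:

     qsort (map, mapcount, sizeof (*map), elf32_arm_compare_mapping);

   Sorting on type after vma keeps the result independent of the host
   qsort implementation when several mapping symbols share an address.  */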
/* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified.  */

static unsigned long
offset_prel31 (unsigned long addr, bfd_vma offset)
{
  return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
}
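
/* Worked example (illustrative): offset_prel31 (0x80000010, 0x20)
   keeps the top bit (0x80000000) and adds the offset into the low 31
   bits: (0x80000010 + 0x20) & 0x7fffffff == 0x30, giving 0x80000030.  */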
/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
   relocations.  */

static void
copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
{
  unsigned long first_word = bfd_get_32 (output_bfd, from);
  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);

  /* High bit of first word is supposed to be zero.  */
  if ((first_word & 0x80000000ul) == 0)
    first_word = offset_prel31 (first_word, offset);

  /* If the high bit of the second word is clear, and the bit pattern is not 0x1
     (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
    second_word = offset_prel31 (second_word, offset);

  bfd_put_32 (output_bfd, first_word, to);
  bfd_put_32 (output_bfd, second_word, to + 4);
}
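
/* Worked example (illustrative): an entry whose second word is the
   literal 0x1 (EXIDX_CANTUNWIND) is copied unchanged, whereas a second
   word holding an applied prel31 offset to .ARM.extab (high bit clear)
   is rebased by OFFSET through offset_prel31, just like the first word.  */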
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  asection *writing_section;
  bfd_byte *contents;
};


/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int target;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->target_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  target = stub_entry->target_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
			       "allocated in unsafe location"), abfd);
      return FALSE;
    }

  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
				     "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);

  return TRUE;
}
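
/* Worked example (illustrative): for branch_offset == +256 the fields
   above become imm11 == 0x080, imm10 == 0, i1 == i2 == s == 0, hence
   j1 == j2 == 1, and branch_insn == 0xf000d000 | 0x80 | (1 << 11)
   | (1 << 13) == 0xf000f880, i.e. the two halfwords 0xf000 0xf880 of a
   Thumb-2 "bl" reaching 256 bytes forward.  */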
/* Beginning of stm32l4xx work-around.  */

/* Functions encoding instructions necessary for the emission of the
   fix-stm32l4xx-629360.
   Encoding is extracted from the
   ARM (C) Architecture Reference Manual
   ARMv7-A and ARMv7-R edition
   ARM DDI 0406C.b (ID072512).  */

static inline bfd_vma
create_instruction_branch_absolute (int branch_offset)
{
  /* A8.8.18 B (A8-334)
     B target_address (Encoding T4).  */
  /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
  /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
  /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */

  int s = ((branch_offset & 0x1000000) >> 24);
  int j1 = s ^ !((branch_offset & 0x800000) >> 23);
  int j2 = s ^ !((branch_offset & 0x400000) >> 22);

  if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
    BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");

  bfd_vma patched_inst = 0xf0009000
    | s << 26 /* S.  */
    | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
    | j1 << 13 /* J1.  */
    | j2 << 11 /* J2.  */
    | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11.  */

  return patched_inst;
}

static inline bfd_vma
create_instruction_ldmia (int base_reg, int wback, int reg_mask)
{
  /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
     LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2).  */
  bfd_vma patched_inst = 0xe8900000
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (reg_mask & 0x0000ffff);

  return patched_inst;
}

static inline bfd_vma
create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
{
  /* A8.8.60 LDMDB/LDMEA (A8-402)
     LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1).  */
  bfd_vma patched_inst = 0xe9100000
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (reg_mask & 0x0000ffff);

  return patched_inst;
}

static inline bfd_vma
create_instruction_mov (int target_reg, int source_reg)
{
  /* A8.8.103 MOV (register) (A8-486)
     MOV Rd, Rm (Encoding T1).  */
  bfd_vma patched_inst = 0x4600
    | (target_reg & 0x7)
    | ((target_reg & 0x8) >> 3) << 7
    | (source_reg << 3);

  return patched_inst;
}

static inline bfd_vma
create_instruction_sub (int target_reg, int source_reg, int value)
{
  /* A8.8.221 SUB (immediate) (A8-708)
     SUB Rd, Rn, #value (Encoding T3).  */
  bfd_vma patched_inst = 0xf1a00000
    | (target_reg << 8)
    | (source_reg << 16)
    | (/*S=*/0 << 20)
    | ((value & 0x800) >> 11) << 26
    | ((value & 0x700) >> 8) << 12
    | (value & 0x0ff);

  return patched_inst;
}

static inline bfd_vma
create_instruction_vldmia (int base_reg, int wback, int num_regs,
			   int first_reg)
{
  /* A8.8.332 VLDM (A8-922)
     VLMD{MODE} Rn{!}, {list} (Encoding T2).  */
  bfd_vma patched_inst = 0xec900a00
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (num_regs & 0x000000ff)
    | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
    | (first_reg & 0x00000001) << 22;

  return patched_inst;
}

static inline bfd_vma
create_instruction_vldmdb (int base_reg, int num_regs, int first_reg)
{
  /* A8.8.332 VLDM (A8-922)
     VLMD{MODE} Rn!, {} (Encoding T2).  */
  bfd_vma patched_inst = 0xed300a00
    | (base_reg << 16)
    | (num_regs & 0x000000ff)
    | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
    | (first_reg & 0x00000001) << 22;

  return patched_inst;
}

static inline bfd_vma
create_instruction_udf_w (int value)
{
  /* A8.8.247 UDF (A8-758)
     Undefined (Encoding T2).  */
  bfd_vma patched_inst = 0xf7f0a000
    | (value & 0x00000fff)
    | (value & 0x000f0000) << 16;

  return patched_inst;
}

static inline bfd_vma
create_instruction_udf (int value)
{
  /* A8.8.247 UDF (A8-758)
     Undefined (Encoding T1).  */
  bfd_vma patched_inst = 0xde00
    | (value & 0xff);

  return patched_inst;
}
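
/* Worked example (illustrative):
     create_instruction_ldmia (1, /<b></b>*wback=*<b></b>/1, 0x007f)
       == 0xe8900000 | (1 << 21) | (1 << 16) | 0x007f
       == 0xe8b1007f,
   the Thumb-2 (Encoding T2) form of "ldmia r1!, {r0-r6}".  */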
/* Functions writing an instruction in memory, returning the next
   memory position to write to.  */

static inline bfd_byte *
push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb2_insn (htab, output_bfd, insn, pt);
  return pt + 4;
}

static inline bfd_byte *
push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb_insn (htab, output_bfd, insn, pt);
  return pt + 2;
}

/* Function filling up a region in memory with T1 and T2 UDFs taking
   care of alignment.  */

static bfd_byte *
stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
			 bfd * output_bfd,
			 const bfd_byte * const base_stub_contents,
			 bfd_byte * const from_stub_contents,
			 const bfd_byte * const end_stub_contents)
{
  bfd_byte *current_stub_contents = from_stub_contents;

  /* Fill the remaining of the stub with deterministic contents : UDF
     instructions.
     Check if realignment is needed on modulo 4 frontier using T1, to
     use T2.  */
  if ((current_stub_contents < end_stub_contents)
      && !((current_stub_contents - base_stub_contents) % 2)
      && ((current_stub_contents - base_stub_contents) % 4))
    current_stub_contents =
      push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			  create_instruction_udf (0));

  for (; current_stub_contents < end_stub_contents;)
    current_stub_contents =
      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			  create_instruction_udf_w (0));

  return current_stub_contents;
}
/* Functions writing the stream of instructions equivalent to the
   derived sequence for ldmia, ldmdb, vldm respectively.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
	  This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
	  This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
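
/* Worked example (illustrative): for "ldmia r1!, {r0-r12, pc}" the
   register mask is 0x9fff (14 registers, so the split path is taken);
   insn_low_registers  == 0x9fff & 0x007f == 0x007f (r0-r6) and
   insn_high_registers == 0x9fff & 0xdf80 == 0x9f80 (r7-r12, pc), each
   loaded by its own LDMIA in the veneer above.  */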
static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
	  This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
	  This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers)  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  int num_regs = ((unsigned int)initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (num_regs <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int)initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int)initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-register chunks needed to split.  */
      int chunks = (num_regs % 8) ? (num_regs / 8 + 1) : (num_regs / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ((is_ia_nobang ^ is_ia_bang ^ is_db_bang) &&
		  !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the register in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmdb rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  if (is_ia_nobang || is_ia_bang)
	    {
	      current_stub_contents =
		push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				    create_instruction_vldmia
				    (base_reg,
				     /*wback=*/1,
				     chunks - (chunk + 1) ?
				     8 : num_regs - chunk * 8,
				     first_reg + chunk * 8));
	    }
	  else if (is_db_bang)
	    {
	      current_stub_contents =
		push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				    create_instruction_vldmdb
				    (base_reg,
				     chunks - (chunk + 1) ?
				     8 : num_regs - chunk * 8,
				     first_reg + chunk * 8));
	    }
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_regs));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
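
/* Worked example (illustrative): a VLDM transferring num_regs == 12
   words gives chunks == 2; the loop above emits one VLDM of 8
   registers (chunks - (chunk + 1) is non-zero) followed by one of
   num_regs - 8 == 4 registers, starting at first_reg + 8.  */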
static void
stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
				 bfd * output_bfd,
				 const insn32 wrong_insn,
				 const bfd_byte *const wrong_insn_addr,
				 bfd_byte *const stub_contents)
{
  if (is_thumb2_ldmia (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_ldmdb (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_vldm (wrong_insn))
    stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
					  wrong_insn, wrong_insn_addr,
					  stub_contents);
}

/* End of stm32l4xx work-around.  */

/* Do code byteswapping.  Return FALSE afterwards so that the section is
   written out as normal.  */

static bfd_boolean
elf32_arm_write_section (bfd *output_bfd,
                         struct bfd_link_info *link_info,
                         asection *sec,
                         bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return FALSE;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return FALSE;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  if (errcount != 0)
    {
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
           errnode = errnode->next)
        {
          bfd_vma target = errnode->vma - offset;

          switch (errnode->type)
            {
            case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
              {
                bfd_vma branch_to_veneer;
                /* Original condition code of instruction, plus bit mask for
                   ARM B instruction.  */
                unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
                                    | 0x0a000000;

                /* The instruction is before the label.  */
                target -= 4;

                /* Above offset included in -4 below.  */
                branch_to_veneer = errnode->u.b.veneer->vma
                                   - errnode->vma - 4;

                if ((signed) branch_to_veneer < -(1 << 25)
                    || (signed) branch_to_veneer >= (1 << 25))
                  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
                                           "range"), output_bfd);

                insn |= (branch_to_veneer >> 2) & 0xffffff;
                contents[endianflip ^ target] = insn & 0xff;
                contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
                contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
                contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
              }
              break;

            case VFP11_ERRATUM_ARM_VENEER:
              {
                bfd_vma branch_from_veneer;
                unsigned int insn;

                /* Take size of veneer into account.  */
                branch_from_veneer = errnode->u.v.branch->vma
                                     - errnode->vma - 12;

                if ((signed) branch_from_veneer < -(1 << 25)
                    || (signed) branch_from_veneer >= (1 << 25))
                  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
                                           "range"), output_bfd);

                /* Original instruction.  */
                insn = errnode->u.v.branch->u.b.vfp_insn;
                contents[endianflip ^ target] = insn & 0xff;
                contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
                contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
                contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

                /* Branch back to insn after original insn.  */
                insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
                contents[endianflip ^ (target + 4)] = insn & 0xff;
                contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
                contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
                contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
              }
              break;

            default:
              abort ();
            }
        }
    }

  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
           stm32l4xx_errnode != 0;
           stm32l4xx_errnode = stm32l4xx_errnode->next)
        {
          bfd_vma target = stm32l4xx_errnode->vma - offset;

          switch (stm32l4xx_errnode->type)
            {
            case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
              {
                unsigned int insn;
                bfd_vma branch_to_veneer =
                  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

                if ((signed) branch_to_veneer < -(1 << 24)
                    || (signed) branch_to_veneer >= (1 << 24))
                  {
                    bfd_vma out_of_range =
                      ((signed) branch_to_veneer < -(1 << 24)) ?
                      - branch_to_veneer - (1 << 24) :
                      ((signed) branch_to_veneer >= (1 << 24)) ?
                      branch_to_veneer - (1 << 24) : 0;

                    (*_bfd_error_handler)
                      (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
                         "Jump out of range by %ld bytes. "
                         "Cannot encode branch instruction. "),
                       output_bfd,
                       (long) (stm32l4xx_errnode->vma - 4),
                       out_of_range);
                    continue;
                  }

                insn = create_instruction_branch_absolute
                  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

                /* The instruction is before the label.  */
                target -= 4;

                put_thumb2_insn (globals, output_bfd,
                                 (bfd_vma) insn, contents + target);
              }
              break;

            case STM32L4XX_ERRATUM_VENEER:
              {
                bfd_byte * veneer;
                bfd_byte * veneer_r;
                unsigned int insn;

                veneer = contents + target;
                veneer_r = veneer
                  + stm32l4xx_errnode->u.b.veneer->vma
                  - stm32l4xx_errnode->vma - 4;

                if ((signed) (veneer_r - veneer -
                              STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
                              STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
                              STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
                              STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
                    || (signed) (veneer_r - veneer) >= (1 << 24))
                  {
                    (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
                                             "veneer."), output_bfd);
                    continue;
                  }

                /* Original instruction.  */
                insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

                stm32l4xx_create_replacing_stub
                  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
              }
              break;

            default:
              abort ();
            }
        }
    }

  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
        = arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
         size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
         markers) was sec->rawsize.  (This isn't the case if we perform no
         edits, then rawsize will be zero and we should use size).  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
        {
          if (edit_node)
            {
              unsigned int edit_index = edit_node->index;

              if (in_index < edit_index && in_index * 8 < input_size)
                {
                  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
                                    contents + in_index * 8, add_to_offsets);
                  out_index++;
                  in_index++;
                }
              else if (in_index == edit_index
                       || (in_index * 8 >= input_size
                           && edit_index == UINT_MAX))
                {
                  switch (edit_node->type)
                    {
                    case DELETE_EXIDX_ENTRY:
                      in_index++;
                      add_to_offsets += 8;
                      break;

                    case INSERT_EXIDX_CANTUNWIND_AT_END:
                      {
                        asection *text_sec = edit_node->linked_section;
                        bfd_vma text_offset = text_sec->output_section->vma
                                              + text_sec->output_offset
                                              + text_sec->size;
                        bfd_vma exidx_offset = offset + out_index * 8;
                        unsigned long prel31_offset;

                        /* Note: this is meant to be equivalent to an
                           R_ARM_PREL31 relocation.  These synthetic
                           EXIDX_CANTUNWIND markers are not relocated by the
                           usual BFD method.  */
                        prel31_offset = (text_offset - exidx_offset)
                                        & 0x7ffffffful;

                        /* First address we can't unwind.  */
                        bfd_put_32 (output_bfd, prel31_offset,
                                    &edited_contents[out_index * 8]);

                        /* Code for EXIDX_CANTUNWIND.  */
                        bfd_put_32 (output_bfd, 0x1,
                                    &edited_contents[out_index * 8 + 4]);

                        out_index++;
                        add_to_offsets -= 8;
                      }
                      break;
                    }

                  edit_node = edit_node->next;
                }
            }
          else
            {
              /* No more edits, copy remaining entries verbatim.  */
              copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
                                contents + in_index * 8, add_to_offsets);
              out_index++;
              in_index++;
            }
        }

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
        bfd_set_section_contents (output_bfd, sec->output_section,
                                  edited_contents,
                                  (file_ptr) sec->output_offset, sec->size);

      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
                         & data);
    }

  if (mapcount == 0)
    return FALSE;

  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
        {
          if (i == mapcount - 1)
            end = sec->size;
          else
            end = map[i + 1].vma;

          switch (map[i].type)
            {
            case 'a':
              /* Byte swap code words.  */
              while (ptr + 3 < end)
                {
                  tmp = contents[ptr];
                  contents[ptr] = contents[ptr + 3];
                  contents[ptr + 3] = tmp;
                  tmp = contents[ptr + 1];
                  contents[ptr + 1] = contents[ptr + 2];
                  contents[ptr + 2] = tmp;
                  ptr += 4;
                }
              break;

            case 't':
              /* Byte swap code halfwords.  */
              while (ptr + 1 < end)
                {
                  tmp = contents[ptr];
                  contents[ptr] = contents[ptr + 1];
                  contents[ptr + 1] = tmp;
                  ptr += 2;
                }
              break;

            case 'd':
              /* Leave data alone.  */
              break;
            }
          ptr = end;
        }
    }

  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}
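
/* The byteswap loop above is driven by the mapping symbols recorded in
   arm_data->map: ranges introduced by an ARM mapping symbol are swapped
   as 32-bit words, ranges introduced by a Thumb mapping symbol as
   16-bit halfwords, and data ranges are left untouched, matching the
   BE8 layout in which code is stored little-endian while data stays
   big-endian.  */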

/* Mangle thumb function symbols as we read them in.  */

static bfd_boolean
elf32_arm_swap_symbol_in (bfd * abfd,
                          const void *psrc,
                          const void *pshn,
                          Elf_Internal_Sym *dst)
{
  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
    return FALSE;

  /* New EABI objects mark thumb function symbols by setting the low bit of
     the address.  */
  if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
      || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
    {
      if (dst->st_value & 1)
        {
          dst->st_value &= ~(bfd_vma) 1;
          dst->st_target_internal = ST_BRANCH_TO_THUMB;
        }
      else
        dst->st_target_internal = ST_BRANCH_TO_ARM;
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
    {
      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
      dst->st_target_internal = ST_BRANCH_TO_THUMB;
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
    dst->st_target_internal = ST_BRANCH_LONG;
  else
    dst->st_target_internal = ST_BRANCH_UNKNOWN;

  return TRUE;
}
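
/* For example, an EABI object describing a Thumb function "foo" at
   address 0x8000 stores it as an STT_FUNC symbol with st_value 0x8001;
   after the conversion above the internal symbol has st_value 0x8000
   and st_target_internal set to ST_BRANCH_TO_THUMB, so the rest of the
   backend never needs to look at the low address bit.  */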

/* Mangle thumb function symbols as we write them out.  */

static void
elf32_arm_swap_symbol_out (bfd *abfd,
                           const Elf_Internal_Sym *src,
                           void *cdst,
                           int shndx)
{
  Elf_Internal_Sym newsym;

  /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
     of the address set, as per the new EABI.  We do this unconditionally
     because objcopy does not set the elf header flags until after
     it writes out the symbol table.  */
  if (src->st_target_internal == ST_BRANCH_TO_THUMB)
    {
      newsym = *src;
      if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
        newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
      if (newsym.st_shndx != SHN_UNDEF)
        {
          /* Do this only for defined symbols.  At link time, the static
             linker will simulate the work of the dynamic linker in resolving
             symbols and will carry over the thumbness of found symbols to
             the output symbol table.  It's not clear how it happens, but
             the thumbness of undefined symbols can well be different at
             runtime, and writing '1' for them will be confusing for users
             and possibly for the dynamic linker itself.  */
          newsym.st_value |= 1;
        }

      src = &newsym;
    }
  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}
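
/* This is the inverse of elf32_arm_swap_symbol_in above: a defined
   symbol carrying ST_BRANCH_TO_THUMB gets the low bit of its value set
   again before the generic swap routine writes it out, so the Thumb
   annotation recorded when the symbol was read in is not lost, and
   old-style STT_ARM_TFUNC symbols come out in the new low-bit
   encoding.  */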

/* Add the PT_ARM_EXIDX program header.  */

static bfd_boolean
elf32_arm_modify_segment_map (bfd *abfd,
                              struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_segment_map *m;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    {
      /* If there is already a PT_ARM_EXIDX header, then we do not
         want to add another one.  This situation arises when running
         "strip"; the input binary already has the header.  */
      m = elf_seg_map (abfd);
      while (m && m->p_type != PT_ARM_EXIDX)
        m = m->next;
      if (!m)
        {
          m = (struct elf_segment_map *)
              bfd_zalloc (abfd, sizeof (struct elf_segment_map));
          if (m == NULL)
            return FALSE;
          m->p_type = PT_ARM_EXIDX;
          m->count = 1;
          m->sections[0] = sec;

          m->next = elf_seg_map (abfd);
          elf_seg_map (abfd) = m;
        }
    }

  return TRUE;
}

/* We may add a PT_ARM_EXIDX program header.  */

static int
elf32_arm_additional_program_headers (bfd *abfd,
                                      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    return 1;
  else
    return 0;
}

/* Hook called by the linker routine which adds symbols from an object
   file.  */

static bfd_boolean
elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
                           Elf_Internal_Sym *sym, const char **namep,
                           flagword *flagsp, asection **secp, bfd_vma *valp)
{
  if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
       || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
      && (abfd->flags & DYNAMIC) == 0
      && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
    elf_tdata (info->output_bfd)->has_gnu_symbols = elf_gnu_symbol_any;

  if (elf32_arm_hash_table (info) == NULL)
    return FALSE;

  if (elf32_arm_hash_table (info)->vxworks_p
      && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
                                       flagsp, secp, valp))
    return FALSE;

  return TRUE;
}

/* We use this to override swap_symbol_in and swap_symbol_out.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,
  1,
  32, 2,
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,
  elf32_arm_swap_symbol_out,
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};

static bfd_vma
read_code32 (const bfd *abfd, const bfd_byte *addr)
{
  /* V7 BE8 code is always little endian.  */
  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
    return bfd_getl32 (addr);

  return bfd_get_32 (abfd, addr);
}

static bfd_vma
read_code16 (const bfd *abfd, const bfd_byte *addr)
{
  /* V7 BE8 code is always little endian.  */
  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
    return bfd_getl16 (addr);

  return bfd_get_16 (abfd, addr);
}

/* Return size of plt0 entry starting at ADDR
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
{
  bfd_vma first_word;
  bfd_vma plt0_size;

  first_word = read_code32 (abfd, addr);

  if (first_word == elf32_arm_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
  else if (first_word == elf32_thumb2_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt0_size;
}

/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
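
/* The size computed above is a byte count: the optional Thumb lead-in
   contributes 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub) bytes, and the
   ARM entry adds 4 bytes per word of whichever template matched.
   Callers such as elf32_arm_get_synthetic_symtab below use the result
   to step from one PLT entry to the next, so an unrecognised entry
   format ((bfd_vma) -1) stops the whole walk.  */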

/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
                                long symcount ATTRIBUTE_UNUSED,
                                asymbol **syms ATTRIBUTE_UNUSED,
                                long dynsymcount,
                                asymbol **dynsyms,
                                asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents (abfd, (asection *) plt, &data)
          || data == NULL)
        return -1;
      bfd_cache_section_contents ((asection *) plt, data);
    }

  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
        size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
        break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
         we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
        s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
        {
          char buf[30], *a;

          memcpy (names, "+0x", sizeof ("+0x") - 1);
          names += sizeof ("+0x") - 1;
          bfd_sprintf_vma (abfd, buf, p->addend);
          for (a = buf; *a == '0'; ++a)
            ;
          len = strlen (a);
          memcpy (names, a, len);
          names += len;
        }
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
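
/* For example, a dynamic relocation against "memcpy" with a zero addend
   yields a synthetic symbol named "memcpy@plt" whose value is the byte
   offset of the matching PLT entry within .plt; a non-zero addend is
   appended in hex, e.g. "memcpy@plt+0x8".  */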

#define ELF_ARCH bfd_arch_arm
#define ELF_TARGET_ID ARM_ELF_DATA
#define ELF_MACHINE_CODE EM_ARM
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE 0x1000
#else
#define ELF_MAXPAGESIZE 0x10000
#endif
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000

#define bfd_elf32_mkobject elf32_arm_mkobject

#define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
#define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
#define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
#define bfd_elf32_bfd_final_link elf32_arm_final_link
#define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab

#define elf_backend_get_symbol_type elf32_arm_get_symbol_type
#define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
#define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
#define elf_backend_check_relocs elf32_arm_check_relocs
#define elf_backend_relocate_section elf32_arm_relocate_section
#define elf_backend_write_section elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
#define elf_backend_always_size_sections elf32_arm_always_size_sections
#define elf_backend_init_index_section _bfd_elf_init_2_index_sections
#define elf_backend_post_process_headers elf32_arm_post_process_headers
#define elf_backend_reloc_type_class elf32_arm_reloc_type_class
#define elf_backend_object_p elf32_arm_object_p
#define elf_backend_fake_sections elf32_arm_fake_sections
#define elf_backend_section_from_shdr elf32_arm_section_from_shdr
#define elf_backend_final_write_processing elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
#define elf_backend_size_info elf32_arm_size_info
#define elf_backend_modify_segment_map elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
#define elf_backend_begin_write_processing elf32_arm_begin_write_processing
#define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook

#define elf_backend_can_refcount 1
#define elf_backend_can_gc_sections 1
#define elf_backend_plt_readonly 1
#define elf_backend_want_got_plt 1
#define elf_backend_want_plt_sym 0
#define elf_backend_may_use_rel_p 1
#define elf_backend_may_use_rela_p 0
#define elf_backend_default_use_rela_p 0

#define elf_backend_got_header_size 12
#define elf_backend_extern_protected_data 1

#undef elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor "aeabi"
#undef elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section ".ARM.attributes"
#undef elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
#undef elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown

#include "elf32-target.h"

/* Native Client targets.  */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM arm_elf32_nacl_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-nacl"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for NaCl.  */

static struct bfd_link_hash_table *
elf32_arm_nacl_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
        = (struct elf32_arm_link_hash_table *) ret;

      htab->nacl_p = 1;

      htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
    }
  return ret;
}

/* Since NaCl doesn't use the ARM-specific unwind format, we don't
   really need to use elf32_arm_modify_segment_map.  But we do it
   anyway just to reduce gratuitous differences with the stock ARM backend.  */

static bfd_boolean
elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  return (elf32_arm_modify_segment_map (abfd, info)
          && nacl_modify_segment_map (abfd, info));
}

static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}

static bfd_vma
elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
                            const arelent *rel ATTRIBUTE_UNUSED)
{
  return plt->vma
    + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
           i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
}
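
/* With a single fixed entry format, the Ith NaCl PLT stub therefore
   lives at plt->vma + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry)
   + i * ARRAY_SIZE (elf32_arm_nacl_plt_entry)), i.e. the PLT0 header
   followed by i equally sized entries.  */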

#define elf32_bed elf32_arm_nacl_bed
#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create \
  elf32_arm_nacl_link_hash_table_create
#undef elf_backend_plt_alignment
#define elf_backend_plt_alignment 4
#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
#undef elf_backend_modify_program_headers
#define elf_backend_modify_program_headers nacl_modify_program_headers
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
#undef bfd_elf32_get_synthetic_symtab
#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val

#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE

#include "elf32-target.h"

/* Reset to defaults.  */
#undef elf_backend_plt_alignment
#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_modify_segment_map
#undef elf_backend_modify_program_headers
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_final_write_processing
#undef ELF_MINPAGESIZE
#define ELF_MINPAGESIZE 0x1000
#undef ELF_COMMONPAGESIZE
#define ELF_COMMONPAGESIZE 0x1000

/* VxWorks Targets.  */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-vxworks"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for VxWorks.  */

static struct bfd_link_hash_table *
elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
        = (struct elf32_arm_link_hash_table *) ret;
      htab->use_rel = 0;
      htab->vxworks_p = 1;
    }
  return ret;
}

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}

#define elf32_bed elf32_arm_vxworks_bed

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
#undef elf_backend_emit_relocs
#define elf_backend_emit_relocs elf_vxworks_emit_relocs

#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p 0
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p 1
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p 1
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 1
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x1000

#include "elf32-target.h"

/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %B is already in final BE8 format"),
                          ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
         flags then do not bother setting the flags for the output
         architecture, instead allow future merges to do this.  If no
         future merges ever set these flags then they will retain their
         uninitialised values, which surprise surprise, correspond
         to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
          && elf_elfheader (ibfd)->e_flags == 0)
        return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
          && bfd_get_arch_info (obfd)->the_default)
        return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
        {
          /* Ignore synthetic glue sections.  */
          if (strcmp (sec->name, ".glue_7")
              && strcmp (sec->name, ".glue_7t"))
            {
              if ((bfd_get_section_flags (ibfd, sec)
                   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
                  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
                only_data_sections = FALSE;

              null_input_bfd = FALSE;
              break;
            }
        }

      if (null_input_bfd || only_data_sections)
        return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
                                      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
        (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
         ibfd, obfd,
         (in_flags & EF_ARM_EABIMASK) >> 24,
         (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
        {
          _bfd_error_handler
            (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
             ibfd, obfd,
             in_flags & EF_ARM_APCS_26 ? 26 : 32,
             out_flags & EF_ARM_APCS_26 ? 26 : 32);
          flags_compatible = FALSE;
        }

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
        {
          if (in_flags & EF_ARM_APCS_FLOAT)
            _bfd_error_handler
              (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
               ibfd, obfd);
          else
            _bfd_error_handler
              (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
               ibfd, obfd);

          flags_compatible = FALSE;
        }

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
        {
          if (in_flags & EF_ARM_VFP_FLOAT)
            _bfd_error_handler
              (_("error: %B uses VFP instructions, whereas %B does not"),
               ibfd, obfd);
          else
            _bfd_error_handler
              (_("error: %B uses FPA instructions, whereas %B does not"),
               ibfd, obfd);

          flags_compatible = FALSE;
        }

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
        {
          if (in_flags & EF_ARM_MAVERICK_FLOAT)
            _bfd_error_handler
              (_("error: %B uses Maverick instructions, whereas %B does not"),
               ibfd, obfd);
          else
            _bfd_error_handler
              (_("error: %B does not use Maverick instructions, whereas %B does"),
               ibfd, obfd);

          flags_compatible = FALSE;
        }

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
        {
          /* We can allow interworking between code that is VFP format
             layout, and uses either soft float or integer regs for
             passing floating point arguments and results.  We already
             know that the APCS_FLOAT flags match; similarly for VFP
             flags.  */
          if ((in_flags & EF_ARM_APCS_FLOAT) != 0
              || (in_flags & EF_ARM_VFP_FLOAT) == 0)
            {
              if (in_flags & EF_ARM_SOFT_FLOAT)
                _bfd_error_handler
                  (_("error: %B uses software FP, whereas %B uses hardware FP"),
                   ibfd, obfd);
              else
                _bfd_error_handler
                  (_("error: %B uses hardware FP, whereas %B uses software FP"),
                   ibfd, obfd);

              flags_compatible = FALSE;
            }
        }
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
        {
          if (in_flags & EF_ARM_INTERWORK)
            {
              _bfd_error_handler
                (_("Warning: %B supports interworking, whereas %B does not"),
                 ibfd, obfd);
            }
          else
            {
              _bfd_error_handler
                (_("Warning: %B does not support interworking, whereas %B does"),
                 ibfd, obfd);
            }
        }
    }

  return flags_compatible;
}
18072 #undef TARGET_LITTLE_SYM
18073 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
18074 #undef TARGET_LITTLE_NAME
18075 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
18076 #undef TARGET_BIG_SYM
18077 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
18078 #undef TARGET_BIG_NAME
18079 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
18081 /* Like elf32_arm_link_hash_table_create -- but overrides
18082 appropriately for Symbian OS. */

static struct bfd_link_hash_table *
elf32_arm_symbian_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
        = (struct elf32_arm_link_hash_table *)ret;
      /* There is no PLT header for Symbian OS.  */
      htab->plt_header_size = 0;
      /* The PLT entries are each one instruction and one word.  */
      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
      htab->symbian_p = 1;
      /* Symbian uses armv5t or above, so use_blx is always true.  */
      htab->use_blx = 1;
      htab->root.is_relocatable_executable = 1;
    }
  return ret;
}

static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
  { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
  { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     applied to them.  */
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL,                                0, 0, 0,              0 }
};

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
                                          struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}

static bfd_boolean
elf32_arm_symbian_modify_segment_map (bfd *abfd,
                                      struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  asection *dynsec;

  /* BPABI shared libraries and executables should have a PT_DYNAMIC
     segment.  However, because the .dynamic section is not marked
     with SEC_LOAD, the generic ELF code will not create such a
     segment.  */
  dynsec = bfd_get_section_by_name (abfd, ".dynamic");
  if (dynsec)
    {
      for (m = elf_seg_map (abfd); m != NULL; m = m->next)
        if (m->p_type == PT_DYNAMIC)
          break;

      if (m == NULL)
        {
          m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
          m->next = elf_seg_map (abfd);
          elf_seg_map (abfd) = m;
        }
    }

  /* Also call the generic arm routine.  */
  return elf32_arm_modify_segment_map (abfd, info);
}

/* Return address for Ith PLT stub in section PLT, for relocation REL
   or (bfd_vma) -1 if it should not be included.  */

static bfd_vma
elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
                               const arelent *rel ATTRIBUTE_UNUSED)
{
  return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
}
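
/* Symbian OS has no PLT0 header (plt_header_size is 0 above), so the
   Ith stub simply starts at
   plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i.  */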

#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

#undef elf_backend_emit_relocs

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
#undef elf_backend_special_sections
#define elf_backend_special_sections elf32_arm_symbian_special_sections
#undef elf_backend_begin_write_processing
#define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_final_write_processing

#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef elf_backend_got_header_size
#define elf_backend_got_header_size 0

/* Similarly, there is no .got.plt section.  */
#undef elf_backend_want_got_plt
#define elf_backend_want_got_plt 0

#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val

#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p 1
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p 0
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p 0
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 0
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x8000

#include "elf32-target.h"