[gdb/testsuite] Fix gdb.base/list-no-debug.exp on debian
[binutils-gdb.git] / bfd / elf32-arm.c
blob779c7081c53698fc9e703e18377eb6619f92e97c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2024 Free Software Foundation, Inc.
4 This file is part of BFD, the Binary File Descriptor library.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
21 #include "sysdep.h"
22 #include <limits.h>
24 #include "bfd.h"
25 #include "libiberty.h"
26 #include "libbfd.h"
27 #include "elf-bfd.h"
28 #include "elf-nacl.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31 #include "elf32-arm.h"
32 #include "cpu-arm.h"
34 /* Return the relocation section associated with NAME. HTAB is the
35 bfd's elf32_arm_link_hash_entry. */
36 #define RELOC_SECTION(HTAB, NAME) \
37 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
39 /* Return size of a relocation entry. HTAB is the bfd's
40 elf32_arm_link_hash_entry. */
41 #define RELOC_SIZE(HTAB) \
42 ((HTAB)->use_rel \
43 ? sizeof (Elf32_External_Rel) \
44 : sizeof (Elf32_External_Rela))
46 /* Return function to swap relocations in. HTAB is the bfd's
47 elf32_arm_link_hash_entry. */
48 #define SWAP_RELOC_IN(HTAB) \
49 ((HTAB)->use_rel \
50 ? bfd_elf32_swap_reloc_in \
51 : bfd_elf32_swap_reloca_in)
53 /* Return function to swap relocations out. HTAB is the bfd's
54 elf32_arm_link_hash_entry. */
55 #define SWAP_RELOC_OUT(HTAB) \
56 ((HTAB)->use_rel \
57 ? bfd_elf32_swap_reloc_out \
58 : bfd_elf32_swap_reloca_out)
60 #define elf_info_to_howto NULL
61 #define elf_info_to_howto_rel elf32_arm_info_to_howto
63 #define ARM_ELF_ABI_VERSION 0
64 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
66 /* The Adjusted Place, as defined by AAELF. */
67 #define Pa(X) ((X) & 0xfffffffc)
69 static bool elf32_arm_write_section (bfd *output_bfd,
70 struct bfd_link_info *link_info,
71 asection *sec,
72 bfd_byte *contents);
74 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
75 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
76 in that slot. */
78 static reloc_howto_type elf32_arm_howto_table_1[] =
80 /* No relocation. */
81 HOWTO (R_ARM_NONE, /* type */
82 0, /* rightshift */
83 0, /* size */
84 0, /* bitsize */
85 false, /* pc_relative */
86 0, /* bitpos */
87 complain_overflow_dont,/* complain_on_overflow */
88 bfd_elf_generic_reloc, /* special_function */
89 "R_ARM_NONE", /* name */
90 false, /* partial_inplace */
91 0, /* src_mask */
92 0, /* dst_mask */
93 false), /* pcrel_offset */
95 HOWTO (R_ARM_PC24, /* type */
96 2, /* rightshift */
97 4, /* size */
98 24, /* bitsize */
99 true, /* pc_relative */
100 0, /* bitpos */
101 complain_overflow_signed,/* complain_on_overflow */
102 bfd_elf_generic_reloc, /* special_function */
103 "R_ARM_PC24", /* name */
104 false, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 true), /* pcrel_offset */
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32, /* type */
111 0, /* rightshift */
112 4, /* size */
113 32, /* bitsize */
114 false, /* pc_relative */
115 0, /* bitpos */
116 complain_overflow_bitfield,/* complain_on_overflow */
117 bfd_elf_generic_reloc, /* special_function */
118 "R_ARM_ABS32", /* name */
119 false, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 false), /* pcrel_offset */
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32, /* type */
126 0, /* rightshift */
127 4, /* size */
128 32, /* bitsize */
129 true, /* pc_relative */
130 0, /* bitpos */
131 complain_overflow_bitfield,/* complain_on_overflow */
132 bfd_elf_generic_reloc, /* special_function */
133 "R_ARM_REL32", /* name */
134 false, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 true), /* pcrel_offset */
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0, /* type */
141 0, /* rightshift */
142 1, /* size */
143 32, /* bitsize */
144 true, /* pc_relative */
145 0, /* bitpos */
146 complain_overflow_dont,/* complain_on_overflow */
147 bfd_elf_generic_reloc, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 false, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 true), /* pcrel_offset */
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16, /* type */
156 0, /* rightshift */
157 2, /* size */
158 16, /* bitsize */
159 false, /* pc_relative */
160 0, /* bitpos */
161 complain_overflow_bitfield,/* complain_on_overflow */
162 bfd_elf_generic_reloc, /* special_function */
163 "R_ARM_ABS16", /* name */
164 false, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 false), /* pcrel_offset */
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12, /* type */
171 0, /* rightshift */
172 4, /* size */
173 12, /* bitsize */
174 false, /* pc_relative */
175 0, /* bitpos */
176 complain_overflow_bitfield,/* complain_on_overflow */
177 bfd_elf_generic_reloc, /* special_function */
178 "R_ARM_ABS12", /* name */
179 false, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 false), /* pcrel_offset */
184 HOWTO (R_ARM_THM_ABS5, /* type */
185 6, /* rightshift */
186 2, /* size */
187 5, /* bitsize */
188 false, /* pc_relative */
189 0, /* bitpos */
190 complain_overflow_bitfield,/* complain_on_overflow */
191 bfd_elf_generic_reloc, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 false, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 false), /* pcrel_offset */
198 /* 8 bit absolute */
199 HOWTO (R_ARM_ABS8, /* type */
200 0, /* rightshift */
201 1, /* size */
202 8, /* bitsize */
203 false, /* pc_relative */
204 0, /* bitpos */
205 complain_overflow_bitfield,/* complain_on_overflow */
206 bfd_elf_generic_reloc, /* special_function */
207 "R_ARM_ABS8", /* name */
208 false, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 false), /* pcrel_offset */
213 HOWTO (R_ARM_SBREL32, /* type */
214 0, /* rightshift */
215 4, /* size */
216 32, /* bitsize */
217 false, /* pc_relative */
218 0, /* bitpos */
219 complain_overflow_dont,/* complain_on_overflow */
220 bfd_elf_generic_reloc, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 false, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 false), /* pcrel_offset */
227 HOWTO (R_ARM_THM_CALL, /* type */
228 1, /* rightshift */
229 4, /* size */
230 24, /* bitsize */
231 true, /* pc_relative */
232 0, /* bitpos */
233 complain_overflow_signed,/* complain_on_overflow */
234 bfd_elf_generic_reloc, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 false, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 true), /* pcrel_offset */
241 HOWTO (R_ARM_THM_PC8, /* type */
242 1, /* rightshift */
243 2, /* size */
244 8, /* bitsize */
245 true, /* pc_relative */
246 0, /* bitpos */
247 complain_overflow_signed,/* complain_on_overflow */
248 bfd_elf_generic_reloc, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 false, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 true), /* pcrel_offset */
255 HOWTO (R_ARM_BREL_ADJ, /* type */
256 1, /* rightshift */
257 2, /* size */
258 32, /* bitsize */
259 false, /* pc_relative */
260 0, /* bitpos */
261 complain_overflow_signed,/* complain_on_overflow */
262 bfd_elf_generic_reloc, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 false, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 false), /* pcrel_offset */
269 HOWTO (R_ARM_TLS_DESC, /* type */
270 0, /* rightshift */
271 4, /* size */
272 32, /* bitsize */
273 false, /* pc_relative */
274 0, /* bitpos */
275 complain_overflow_bitfield,/* complain_on_overflow */
276 bfd_elf_generic_reloc, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 false, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 false), /* pcrel_offset */
283 HOWTO (R_ARM_THM_SWI8, /* type */
284 0, /* rightshift */
285 0, /* size */
286 0, /* bitsize */
287 false, /* pc_relative */
288 0, /* bitpos */
289 complain_overflow_signed,/* complain_on_overflow */
290 bfd_elf_generic_reloc, /* special_function */
291 "R_ARM_SWI8", /* name */
292 false, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 false), /* pcrel_offset */
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25, /* type */
299 2, /* rightshift */
300 4, /* size */
301 24, /* bitsize */
302 true, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_signed,/* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_ARM_XPC25", /* name */
307 false, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 true), /* pcrel_offset */
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22, /* type */
314 2, /* rightshift */
315 4, /* size */
316 24, /* bitsize */
317 true, /* pc_relative */
318 0, /* bitpos */
319 complain_overflow_signed,/* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 false, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 true), /* pcrel_offset */
327 /* Dynamic TLS relocations. */
329 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
330 0, /* rightshift */
331 4, /* size */
332 32, /* bitsize */
333 false, /* pc_relative */
334 0, /* bitpos */
335 complain_overflow_bitfield,/* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 true, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 false), /* pcrel_offset */
343 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
344 0, /* rightshift */
345 4, /* size */
346 32, /* bitsize */
347 false, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_bitfield,/* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 true, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 false), /* pcrel_offset */
357 HOWTO (R_ARM_TLS_TPOFF32, /* type */
358 0, /* rightshift */
359 4, /* size */
360 32, /* bitsize */
361 false, /* pc_relative */
362 0, /* bitpos */
363 complain_overflow_bitfield,/* complain_on_overflow */
364 bfd_elf_generic_reloc, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 true, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 false), /* pcrel_offset */
371 /* Relocs used in ARM Linux */
373 HOWTO (R_ARM_COPY, /* type */
374 0, /* rightshift */
375 4, /* size */
376 32, /* bitsize */
377 false, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_bitfield,/* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_ARM_COPY", /* name */
382 true, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 false), /* pcrel_offset */
387 HOWTO (R_ARM_GLOB_DAT, /* type */
388 0, /* rightshift */
389 4, /* size */
390 32, /* bitsize */
391 false, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_bitfield,/* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 true, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 false), /* pcrel_offset */
401 HOWTO (R_ARM_JUMP_SLOT, /* type */
402 0, /* rightshift */
403 4, /* size */
404 32, /* bitsize */
405 false, /* pc_relative */
406 0, /* bitpos */
407 complain_overflow_bitfield,/* complain_on_overflow */
408 bfd_elf_generic_reloc, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 true, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 false), /* pcrel_offset */
415 HOWTO (R_ARM_RELATIVE, /* type */
416 0, /* rightshift */
417 4, /* size */
418 32, /* bitsize */
419 false, /* pc_relative */
420 0, /* bitpos */
421 complain_overflow_bitfield,/* complain_on_overflow */
422 bfd_elf_generic_reloc, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 true, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 false), /* pcrel_offset */
429 HOWTO (R_ARM_GOTOFF32, /* type */
430 0, /* rightshift */
431 4, /* size */
432 32, /* bitsize */
433 false, /* pc_relative */
434 0, /* bitpos */
435 complain_overflow_bitfield,/* complain_on_overflow */
436 bfd_elf_generic_reloc, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 true, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 false), /* pcrel_offset */
443 HOWTO (R_ARM_GOTPC, /* type */
444 0, /* rightshift */
445 4, /* size */
446 32, /* bitsize */
447 true, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_bitfield,/* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 true, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 true), /* pcrel_offset */
457 HOWTO (R_ARM_GOT32, /* type */
458 0, /* rightshift */
459 4, /* size */
460 32, /* bitsize */
461 false, /* pc_relative */
462 0, /* bitpos */
463 complain_overflow_bitfield,/* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 "R_ARM_GOT32", /* name */
466 true, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 false), /* pcrel_offset */
471 HOWTO (R_ARM_PLT32, /* type */
472 2, /* rightshift */
473 4, /* size */
474 24, /* bitsize */
475 true, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_bitfield,/* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_ARM_PLT32", /* name */
480 false, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 true), /* pcrel_offset */
485 HOWTO (R_ARM_CALL, /* type */
486 2, /* rightshift */
487 4, /* size */
488 24, /* bitsize */
489 true, /* pc_relative */
490 0, /* bitpos */
491 complain_overflow_signed,/* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 "R_ARM_CALL", /* name */
494 false, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 true), /* pcrel_offset */
499 HOWTO (R_ARM_JUMP24, /* type */
500 2, /* rightshift */
501 4, /* size */
502 24, /* bitsize */
503 true, /* pc_relative */
504 0, /* bitpos */
505 complain_overflow_signed,/* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 false, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 true), /* pcrel_offset */
513 HOWTO (R_ARM_THM_JUMP24, /* type */
514 1, /* rightshift */
515 4, /* size */
516 24, /* bitsize */
517 true, /* pc_relative */
518 0, /* bitpos */
519 complain_overflow_signed,/* complain_on_overflow */
520 bfd_elf_generic_reloc, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 false, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 true), /* pcrel_offset */
527 HOWTO (R_ARM_BASE_ABS, /* type */
528 0, /* rightshift */
529 4, /* size */
530 32, /* bitsize */
531 false, /* pc_relative */
532 0, /* bitpos */
533 complain_overflow_dont,/* complain_on_overflow */
534 bfd_elf_generic_reloc, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 false, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 false), /* pcrel_offset */
541 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
542 0, /* rightshift */
543 4, /* size */
544 12, /* bitsize */
545 true, /* pc_relative */
546 0, /* bitpos */
547 complain_overflow_dont,/* complain_on_overflow */
548 bfd_elf_generic_reloc, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 false, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 true), /* pcrel_offset */
555 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
556 0, /* rightshift */
557 4, /* size */
558 12, /* bitsize */
559 true, /* pc_relative */
560 8, /* bitpos */
561 complain_overflow_dont,/* complain_on_overflow */
562 bfd_elf_generic_reloc, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 false, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 true), /* pcrel_offset */
569 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
570 0, /* rightshift */
571 4, /* size */
572 12, /* bitsize */
573 true, /* pc_relative */
574 16, /* bitpos */
575 complain_overflow_dont,/* complain_on_overflow */
576 bfd_elf_generic_reloc, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 false, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 true), /* pcrel_offset */
583 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
584 0, /* rightshift */
585 4, /* size */
586 12, /* bitsize */
587 false, /* pc_relative */
588 0, /* bitpos */
589 complain_overflow_dont,/* complain_on_overflow */
590 bfd_elf_generic_reloc, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 false, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 false), /* pcrel_offset */
597 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
598 0, /* rightshift */
599 4, /* size */
600 8, /* bitsize */
601 false, /* pc_relative */
602 12, /* bitpos */
603 complain_overflow_dont,/* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 false, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 false), /* pcrel_offset */
611 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
612 0, /* rightshift */
613 4, /* size */
614 8, /* bitsize */
615 false, /* pc_relative */
616 20, /* bitpos */
617 complain_overflow_dont,/* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 false, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 false), /* pcrel_offset */
625 HOWTO (R_ARM_TARGET1, /* type */
626 0, /* rightshift */
627 4, /* size */
628 32, /* bitsize */
629 false, /* pc_relative */
630 0, /* bitpos */
631 complain_overflow_dont,/* complain_on_overflow */
632 bfd_elf_generic_reloc, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 false, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 false), /* pcrel_offset */
639 HOWTO (R_ARM_ROSEGREL32, /* type */
640 0, /* rightshift */
641 4, /* size */
642 32, /* bitsize */
643 false, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont,/* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 false, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 false), /* pcrel_offset */
653 HOWTO (R_ARM_V4BX, /* type */
654 0, /* rightshift */
655 4, /* size */
656 32, /* bitsize */
657 false, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont,/* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 "R_ARM_V4BX", /* name */
662 false, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 false), /* pcrel_offset */
667 HOWTO (R_ARM_TARGET2, /* type */
668 0, /* rightshift */
669 4, /* size */
670 32, /* bitsize */
671 false, /* pc_relative */
672 0, /* bitpos */
673 complain_overflow_signed,/* complain_on_overflow */
674 bfd_elf_generic_reloc, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 false, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 true), /* pcrel_offset */
681 HOWTO (R_ARM_PREL31, /* type */
682 0, /* rightshift */
683 4, /* size */
684 31, /* bitsize */
685 true, /* pc_relative */
686 0, /* bitpos */
687 complain_overflow_signed,/* complain_on_overflow */
688 bfd_elf_generic_reloc, /* special_function */
689 "R_ARM_PREL31", /* name */
690 false, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 true), /* pcrel_offset */
695 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
696 0, /* rightshift */
697 4, /* size */
698 16, /* bitsize */
699 false, /* pc_relative */
700 0, /* bitpos */
701 complain_overflow_dont,/* complain_on_overflow */
702 bfd_elf_generic_reloc, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 false, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 false), /* pcrel_offset */
709 HOWTO (R_ARM_MOVT_ABS, /* type */
710 0, /* rightshift */
711 4, /* size */
712 16, /* bitsize */
713 false, /* pc_relative */
714 0, /* bitpos */
715 complain_overflow_bitfield,/* complain_on_overflow */
716 bfd_elf_generic_reloc, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 false, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 false), /* pcrel_offset */
723 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
724 0, /* rightshift */
725 4, /* size */
726 16, /* bitsize */
727 true, /* pc_relative */
728 0, /* bitpos */
729 complain_overflow_dont,/* complain_on_overflow */
730 bfd_elf_generic_reloc, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 false, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 true), /* pcrel_offset */
737 HOWTO (R_ARM_MOVT_PREL, /* type */
738 0, /* rightshift */
739 4, /* size */
740 16, /* bitsize */
741 true, /* pc_relative */
742 0, /* bitpos */
743 complain_overflow_bitfield,/* complain_on_overflow */
744 bfd_elf_generic_reloc, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 false, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 true), /* pcrel_offset */
751 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
752 0, /* rightshift */
753 4, /* size */
754 16, /* bitsize */
755 false, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont,/* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 false, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 false), /* pcrel_offset */
765 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
766 0, /* rightshift */
767 4, /* size */
768 16, /* bitsize */
769 false, /* pc_relative */
770 0, /* bitpos */
771 complain_overflow_bitfield,/* complain_on_overflow */
772 bfd_elf_generic_reloc, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 false, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 false), /* pcrel_offset */
779 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 0, /* rightshift */
781 4, /* size */
782 16, /* bitsize */
783 true, /* pc_relative */
784 0, /* bitpos */
785 complain_overflow_dont,/* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 false, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 true), /* pcrel_offset */
793 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
794 0, /* rightshift */
795 4, /* size */
796 16, /* bitsize */
797 true, /* pc_relative */
798 0, /* bitpos */
799 complain_overflow_bitfield,/* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 false, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 true), /* pcrel_offset */
807 HOWTO (R_ARM_THM_JUMP19, /* type */
808 1, /* rightshift */
809 4, /* size */
810 19, /* bitsize */
811 true, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_signed,/* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 false, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 true), /* pcrel_offset */
821 HOWTO (R_ARM_THM_JUMP6, /* type */
822 1, /* rightshift */
823 2, /* size */
824 6, /* bitsize */
825 true, /* pc_relative */
826 0, /* bitpos */
827 complain_overflow_unsigned,/* complain_on_overflow */
828 bfd_elf_generic_reloc, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 false, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 true), /* pcrel_offset */
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 versa. */
838 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 0, /* rightshift */
840 4, /* size */
841 13, /* bitsize */
842 true, /* pc_relative */
843 0, /* bitpos */
844 complain_overflow_dont,/* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 false, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 true), /* pcrel_offset */
852 HOWTO (R_ARM_THM_PC12, /* type */
853 0, /* rightshift */
854 4, /* size */
855 13, /* bitsize */
856 true, /* pc_relative */
857 0, /* bitpos */
858 complain_overflow_dont,/* complain_on_overflow */
859 bfd_elf_generic_reloc, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 false, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 true), /* pcrel_offset */
866 HOWTO (R_ARM_ABS32_NOI, /* type */
867 0, /* rightshift */
868 4, /* size */
869 32, /* bitsize */
870 false, /* pc_relative */
871 0, /* bitpos */
872 complain_overflow_dont,/* complain_on_overflow */
873 bfd_elf_generic_reloc, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 false, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 false), /* pcrel_offset */
880 HOWTO (R_ARM_REL32_NOI, /* type */
881 0, /* rightshift */
882 4, /* size */
883 32, /* bitsize */
884 true, /* pc_relative */
885 0, /* bitpos */
886 complain_overflow_dont,/* complain_on_overflow */
887 bfd_elf_generic_reloc, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 false, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 false), /* pcrel_offset */
894 /* Group relocations. */
896 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
897 0, /* rightshift */
898 4, /* size */
899 32, /* bitsize */
900 true, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont,/* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 false, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 true), /* pcrel_offset */
910 HOWTO (R_ARM_ALU_PC_G0, /* type */
911 0, /* rightshift */
912 4, /* size */
913 32, /* bitsize */
914 true, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont,/* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 false, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 true), /* pcrel_offset */
924 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
925 0, /* rightshift */
926 4, /* size */
927 32, /* bitsize */
928 true, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont,/* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 false, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 true), /* pcrel_offset */
938 HOWTO (R_ARM_ALU_PC_G1, /* type */
939 0, /* rightshift */
940 4, /* size */
941 32, /* bitsize */
942 true, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont,/* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 false, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 true), /* pcrel_offset */
952 HOWTO (R_ARM_ALU_PC_G2, /* type */
953 0, /* rightshift */
954 4, /* size */
955 32, /* bitsize */
956 true, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont,/* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 false, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 true), /* pcrel_offset */
966 HOWTO (R_ARM_LDR_PC_G1, /* type */
967 0, /* rightshift */
968 4, /* size */
969 32, /* bitsize */
970 true, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont,/* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 false, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 true), /* pcrel_offset */
980 HOWTO (R_ARM_LDR_PC_G2, /* type */
981 0, /* rightshift */
982 4, /* size */
983 32, /* bitsize */
984 true, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_dont,/* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 false, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 true), /* pcrel_offset */
994 HOWTO (R_ARM_LDRS_PC_G0, /* type */
995 0, /* rightshift */
996 4, /* size */
997 32, /* bitsize */
998 true, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont,/* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 false, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 true), /* pcrel_offset */
1008 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1009 0, /* rightshift */
1010 4, /* size */
1011 32, /* bitsize */
1012 true, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont,/* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 false, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 true), /* pcrel_offset */
1022 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1023 0, /* rightshift */
1024 4, /* size */
1025 32, /* bitsize */
1026 true, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont,/* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 false, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 true), /* pcrel_offset */
1036 HOWTO (R_ARM_LDC_PC_G0, /* type */
1037 0, /* rightshift */
1038 4, /* size */
1039 32, /* bitsize */
1040 true, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont,/* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 false, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 true), /* pcrel_offset */
1050 HOWTO (R_ARM_LDC_PC_G1, /* type */
1051 0, /* rightshift */
1052 4, /* size */
1053 32, /* bitsize */
1054 true, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont,/* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 false, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 true), /* pcrel_offset */
1064 HOWTO (R_ARM_LDC_PC_G2, /* type */
1065 0, /* rightshift */
1066 4, /* size */
1067 32, /* bitsize */
1068 true, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont,/* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 false, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 true), /* pcrel_offset */
1078 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1079 0, /* rightshift */
1080 4, /* size */
1081 32, /* bitsize */
1082 true, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont,/* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 false, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 true), /* pcrel_offset */
1092 HOWTO (R_ARM_ALU_SB_G0, /* type */
1093 0, /* rightshift */
1094 4, /* size */
1095 32, /* bitsize */
1096 true, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont,/* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 false, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 true), /* pcrel_offset */
1106 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1107 0, /* rightshift */
1108 4, /* size */
1109 32, /* bitsize */
1110 true, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont,/* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 false, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 true), /* pcrel_offset */
1120 HOWTO (R_ARM_ALU_SB_G1, /* type */
1121 0, /* rightshift */
1122 4, /* size */
1123 32, /* bitsize */
1124 true, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont,/* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 false, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 true), /* pcrel_offset */
1134 HOWTO (R_ARM_ALU_SB_G2, /* type */
1135 0, /* rightshift */
1136 4, /* size */
1137 32, /* bitsize */
1138 true, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont,/* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 false, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 true), /* pcrel_offset */
1148 HOWTO (R_ARM_LDR_SB_G0, /* type */
1149 0, /* rightshift */
1150 4, /* size */
1151 32, /* bitsize */
1152 true, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont,/* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 false, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 true), /* pcrel_offset */
1162 HOWTO (R_ARM_LDR_SB_G1, /* type */
1163 0, /* rightshift */
1164 4, /* size */
1165 32, /* bitsize */
1166 true, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont,/* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 false, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 true), /* pcrel_offset */
1176 HOWTO (R_ARM_LDR_SB_G2, /* type */
1177 0, /* rightshift */
1178 4, /* size */
1179 32, /* bitsize */
1180 true, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont,/* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 false, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 true), /* pcrel_offset */
1190 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1191 0, /* rightshift */
1192 4, /* size */
1193 32, /* bitsize */
1194 true, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont,/* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 false, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 true), /* pcrel_offset */
1204 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1205 0, /* rightshift */
1206 4, /* size */
1207 32, /* bitsize */
1208 true, /* pc_relative */
1209 0, /* bitpos */
1210 complain_overflow_dont,/* complain_on_overflow */
1211 bfd_elf_generic_reloc, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 false, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 true), /* pcrel_offset */
1218 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1219 0, /* rightshift */
1220 4, /* size */
1221 32, /* bitsize */
1222 true, /* pc_relative */
1223 0, /* bitpos */
1224 complain_overflow_dont,/* complain_on_overflow */
1225 bfd_elf_generic_reloc, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 false, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 true), /* pcrel_offset */
1232 HOWTO (R_ARM_LDC_SB_G0, /* type */
1233 0, /* rightshift */
1234 4, /* size */
1235 32, /* bitsize */
1236 true, /* pc_relative */
1237 0, /* bitpos */
1238 complain_overflow_dont,/* complain_on_overflow */
1239 bfd_elf_generic_reloc, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 false, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 true), /* pcrel_offset */
1246 HOWTO (R_ARM_LDC_SB_G1, /* type */
1247 0, /* rightshift */
1248 4, /* size */
1249 32, /* bitsize */
1250 true, /* pc_relative */
1251 0, /* bitpos */
1252 complain_overflow_dont,/* complain_on_overflow */
1253 bfd_elf_generic_reloc, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 false, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 true), /* pcrel_offset */
1260 HOWTO (R_ARM_LDC_SB_G2, /* type */
1261 0, /* rightshift */
1262 4, /* size */
1263 32, /* bitsize */
1264 true, /* pc_relative */
1265 0, /* bitpos */
1266 complain_overflow_dont,/* complain_on_overflow */
1267 bfd_elf_generic_reloc, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 false, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 true), /* pcrel_offset */
1274 /* End of group relocations. */
1276 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1277 0, /* rightshift */
1278 4, /* size */
1279 16, /* bitsize */
1280 false, /* pc_relative */
1281 0, /* bitpos */
1282 complain_overflow_dont,/* complain_on_overflow */
1283 bfd_elf_generic_reloc, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 false, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 false), /* pcrel_offset */
1290 HOWTO (R_ARM_MOVT_BREL, /* type */
1291 0, /* rightshift */
1292 4, /* size */
1293 16, /* bitsize */
1294 false, /* pc_relative */
1295 0, /* bitpos */
1296 complain_overflow_bitfield,/* complain_on_overflow */
1297 bfd_elf_generic_reloc, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 false, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 false), /* pcrel_offset */
1304 HOWTO (R_ARM_MOVW_BREL, /* type */
1305 0, /* rightshift */
1306 4, /* size */
1307 16, /* bitsize */
1308 false, /* pc_relative */
1309 0, /* bitpos */
1310 complain_overflow_dont,/* complain_on_overflow */
1311 bfd_elf_generic_reloc, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 false, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 false), /* pcrel_offset */
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 0, /* rightshift */
1320 4, /* size */
1321 16, /* bitsize */
1322 false, /* pc_relative */
1323 0, /* bitpos */
1324 complain_overflow_dont,/* complain_on_overflow */
1325 bfd_elf_generic_reloc, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 false, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 false), /* pcrel_offset */
1332 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1333 0, /* rightshift */
1334 4, /* size */
1335 16, /* bitsize */
1336 false, /* pc_relative */
1337 0, /* bitpos */
1338 complain_overflow_bitfield,/* complain_on_overflow */
1339 bfd_elf_generic_reloc, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 false, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 false), /* pcrel_offset */
1346 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1347 0, /* rightshift */
1348 4, /* size */
1349 16, /* bitsize */
1350 false, /* pc_relative */
1351 0, /* bitpos */
1352 complain_overflow_dont,/* complain_on_overflow */
1353 bfd_elf_generic_reloc, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 false, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 false), /* pcrel_offset */
1360 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1361 0, /* rightshift */
1362 4, /* size */
1363 32, /* bitsize */
1364 false, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_bitfield,/* complain_on_overflow */
1367 NULL, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 true, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 false), /* pcrel_offset */
1374 HOWTO (R_ARM_TLS_CALL, /* type */
1375 0, /* rightshift */
1376 4, /* size */
1377 24, /* bitsize */
1378 false, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 false, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 false), /* pcrel_offset */
1388 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1389 0, /* rightshift */
1390 4, /* size */
1391 0, /* bitsize */
1392 false, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont,/* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 false, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 false), /* pcrel_offset */
1402 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1403 0, /* rightshift */
1404 4, /* size */
1405 24, /* bitsize */
1406 false, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_dont,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 false, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 false), /* pcrel_offset */
1416 HOWTO (R_ARM_PLT32_ABS, /* type */
1417 0, /* rightshift */
1418 4, /* size */
1419 32, /* bitsize */
1420 false, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_dont,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 false, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 false), /* pcrel_offset */
1430 HOWTO (R_ARM_GOT_ABS, /* type */
1431 0, /* rightshift */
1432 4, /* size */
1433 32, /* bitsize */
1434 false, /* pc_relative */
1435 0, /* bitpos */
1436 complain_overflow_dont,/* complain_on_overflow */
1437 bfd_elf_generic_reloc, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 false, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 false), /* pcrel_offset */
1444 HOWTO (R_ARM_GOT_PREL, /* type */
1445 0, /* rightshift */
1446 4, /* size */
1447 32, /* bitsize */
1448 true, /* pc_relative */
1449 0, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 false, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 true), /* pcrel_offset */
1458 HOWTO (R_ARM_GOT_BREL12, /* type */
1459 0, /* rightshift */
1460 4, /* size */
1461 12, /* bitsize */
1462 false, /* pc_relative */
1463 0, /* bitpos */
1464 complain_overflow_bitfield,/* complain_on_overflow */
1465 bfd_elf_generic_reloc, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 false, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 false), /* pcrel_offset */
1472 HOWTO (R_ARM_GOTOFF12, /* type */
1473 0, /* rightshift */
1474 4, /* size */
1475 12, /* bitsize */
1476 false, /* pc_relative */
1477 0, /* bitpos */
1478 complain_overflow_bitfield,/* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 false, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 false), /* pcrel_offset */
1486 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1490 0, /* rightshift */
1491 4, /* size */
1492 0, /* bitsize */
1493 false, /* pc_relative */
1494 0, /* bitpos */
1495 complain_overflow_dont, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 false, /* partial_inplace */
1499 0, /* src_mask */
1500 0, /* dst_mask */
1501 false), /* pcrel_offset */
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 0, /* rightshift */
1506 4, /* size */
1507 0, /* bitsize */
1508 false, /* pc_relative */
1509 0, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 NULL, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 false, /* partial_inplace */
1514 0, /* src_mask */
1515 0, /* dst_mask */
1516 false), /* pcrel_offset */
1518 HOWTO (R_ARM_THM_JUMP11, /* type */
1519 1, /* rightshift */
1520 2, /* size */
1521 11, /* bitsize */
1522 true, /* pc_relative */
1523 0, /* bitpos */
1524 complain_overflow_signed, /* complain_on_overflow */
1525 bfd_elf_generic_reloc, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 false, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 true), /* pcrel_offset */
1532 HOWTO (R_ARM_THM_JUMP8, /* type */
1533 1, /* rightshift */
1534 2, /* size */
1535 8, /* bitsize */
1536 true, /* pc_relative */
1537 0, /* bitpos */
1538 complain_overflow_signed, /* complain_on_overflow */
1539 bfd_elf_generic_reloc, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 false, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 true), /* pcrel_offset */
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32, /* type */
1548 0, /* rightshift */
1549 4, /* size */
1550 32, /* bitsize */
1551 false, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 NULL, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 true, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 false), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDM32, /* type */
1562 0, /* rightshift */
1563 4, /* size */
1564 32, /* bitsize */
1565 false, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 true, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 false), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LDO32, /* type */
1576 0, /* rightshift */
1577 4, /* size */
1578 32, /* bitsize */
1579 false, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 true, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 false), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE32, /* type */
1590 0, /* rightshift */
1591 4, /* size */
1592 32, /* bitsize */
1593 false, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 NULL, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 true, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 false), /* pcrel_offset */
1603 HOWTO (R_ARM_TLS_LE32, /* type */
1604 0, /* rightshift */
1605 4, /* size */
1606 32, /* bitsize */
1607 false, /* pc_relative */
1608 0, /* bitpos */
1609 complain_overflow_bitfield,/* complain_on_overflow */
1610 NULL, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 true, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 false), /* pcrel_offset */
1617 HOWTO (R_ARM_TLS_LDO12, /* type */
1618 0, /* rightshift */
1619 4, /* size */
1620 12, /* bitsize */
1621 false, /* pc_relative */
1622 0, /* bitpos */
1623 complain_overflow_bitfield,/* complain_on_overflow */
1624 bfd_elf_generic_reloc, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 false, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 false), /* pcrel_offset */
1631 HOWTO (R_ARM_TLS_LE12, /* type */
1632 0, /* rightshift */
1633 4, /* size */
1634 12, /* bitsize */
1635 false, /* pc_relative */
1636 0, /* bitpos */
1637 complain_overflow_bitfield,/* complain_on_overflow */
1638 bfd_elf_generic_reloc, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 false, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 false), /* pcrel_offset */
1645 HOWTO (R_ARM_TLS_IE12GP, /* type */
1646 0, /* rightshift */
1647 4, /* size */
1648 12, /* bitsize */
1649 false, /* pc_relative */
1650 0, /* bitpos */
1651 complain_overflow_bitfield,/* complain_on_overflow */
1652 bfd_elf_generic_reloc, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 false, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 false), /* pcrel_offset */
1659 /* 112-127 private relocations. */
1660 EMPTY_HOWTO (112),
1661 EMPTY_HOWTO (113),
1662 EMPTY_HOWTO (114),
1663 EMPTY_HOWTO (115),
1664 EMPTY_HOWTO (116),
1665 EMPTY_HOWTO (117),
1666 EMPTY_HOWTO (118),
1667 EMPTY_HOWTO (119),
1668 EMPTY_HOWTO (120),
1669 EMPTY_HOWTO (121),
1670 EMPTY_HOWTO (122),
1671 EMPTY_HOWTO (123),
1672 EMPTY_HOWTO (124),
1673 EMPTY_HOWTO (125),
1674 EMPTY_HOWTO (126),
1675 EMPTY_HOWTO (127),
1677 /* R_ARM_ME_TOO, obsolete. */
1678 EMPTY_HOWTO (128),
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1681 0, /* rightshift */
1682 2, /* size */
1683 0, /* bitsize */
1684 false, /* pc_relative */
1685 0, /* bitpos */
1686 complain_overflow_dont,/* complain_on_overflow */
1687 bfd_elf_generic_reloc, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 false, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 false), /* pcrel_offset */
1693 EMPTY_HOWTO (130),
1694 EMPTY_HOWTO (131),
1695 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1696 0, /* rightshift. */
1697 2, /* size. */
1698 16, /* bitsize. */
1699 false, /* pc_relative. */
1700 0, /* bitpos. */
1701 complain_overflow_bitfield,/* complain_on_overflow. */
1702 bfd_elf_generic_reloc, /* special_function. */
1703 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1704 false, /* partial_inplace. */
1705 0x00000000, /* src_mask. */
1706 0x00000000, /* dst_mask. */
1707 false), /* pcrel_offset. */
1708 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1709 0, /* rightshift. */
1710 2, /* size. */
1711 16, /* bitsize. */
1712 false, /* pc_relative. */
1713 0, /* bitpos. */
1714 complain_overflow_bitfield,/* complain_on_overflow. */
1715 bfd_elf_generic_reloc, /* special_function. */
1716 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1717 false, /* partial_inplace. */
1718 0x00000000, /* src_mask. */
1719 0x00000000, /* dst_mask. */
1720 false), /* pcrel_offset. */
1721 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1722 0, /* rightshift. */
1723 2, /* size. */
1724 16, /* bitsize. */
1725 false, /* pc_relative. */
1726 0, /* bitpos. */
1727 complain_overflow_bitfield,/* complain_on_overflow. */
1728 bfd_elf_generic_reloc, /* special_function. */
1729 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1730 false, /* partial_inplace. */
1731 0x00000000, /* src_mask. */
1732 0x00000000, /* dst_mask. */
1733 false), /* pcrel_offset. */
1734 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1735 0, /* rightshift. */
1736 2, /* size. */
1737 16, /* bitsize. */
1738 false, /* pc_relative. */
1739 0, /* bitpos. */
1740 complain_overflow_bitfield,/* complain_on_overflow. */
1741 bfd_elf_generic_reloc, /* special_function. */
1742 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1743 false, /* partial_inplace. */
1744 0x00000000, /* src_mask. */
1745 0x00000000, /* dst_mask. */
1746 false), /* pcrel_offset. */
1747 /* Relocations for Armv8.1-M Mainline. */
1748 HOWTO (R_ARM_THM_BF16, /* type. */
1749 0, /* rightshift. */
1750 2, /* size. */
1751 16, /* bitsize. */
1752 true, /* pc_relative. */
1753 0, /* bitpos. */
1754 complain_overflow_dont,/* do not complain_on_overflow. */
1755 bfd_elf_generic_reloc, /* special_function. */
1756 "R_ARM_THM_BF16", /* name. */
1757 false, /* partial_inplace. */
1758 0x001f0ffe, /* src_mask. */
1759 0x001f0ffe, /* dst_mask. */
1760 true), /* pcrel_offset. */
1761 HOWTO (R_ARM_THM_BF12, /* type. */
1762 0, /* rightshift. */
1763 2, /* size. */
1764 12, /* bitsize. */
1765 true, /* pc_relative. */
1766 0, /* bitpos. */
1767 complain_overflow_dont,/* do not complain_on_overflow. */
1768 bfd_elf_generic_reloc, /* special_function. */
1769 "R_ARM_THM_BF12", /* name. */
1770 false, /* partial_inplace. */
1771 0x00010ffe, /* src_mask. */
1772 0x00010ffe, /* dst_mask. */
1773 true), /* pcrel_offset. */
1774 HOWTO (R_ARM_THM_BF18, /* type. */
1775 0, /* rightshift. */
1776 2, /* size. */
1777 18, /* bitsize. */
1778 true, /* pc_relative. */
1779 0, /* bitpos. */
1780 complain_overflow_dont,/* do not complain_on_overflow. */
1781 bfd_elf_generic_reloc, /* special_function. */
1782 "R_ARM_THM_BF18", /* name. */
1783 false, /* partial_inplace. */
1784 0x007f0ffe, /* src_mask. */
1785 0x007f0ffe, /* dst_mask. */
1786 true), /* pcrel_offset. */
1789 /* 160 onwards: */
1790 static reloc_howto_type elf32_arm_howto_table_2[8] =
1792 HOWTO (R_ARM_IRELATIVE, /* type */
1793 0, /* rightshift */
1794 4, /* size */
1795 32, /* bitsize */
1796 false, /* pc_relative */
1797 0, /* bitpos */
1798 complain_overflow_bitfield,/* complain_on_overflow */
1799 bfd_elf_generic_reloc, /* special_function */
1800 "R_ARM_IRELATIVE", /* name */
1801 true, /* partial_inplace */
1802 0xffffffff, /* src_mask */
1803 0xffffffff, /* dst_mask */
1804 false), /* pcrel_offset */
1805 HOWTO (R_ARM_GOTFUNCDESC, /* type */
1806 0, /* rightshift */
1807 4, /* size */
1808 32, /* bitsize */
1809 false, /* pc_relative */
1810 0, /* bitpos */
1811 complain_overflow_bitfield,/* complain_on_overflow */
1812 bfd_elf_generic_reloc, /* special_function */
1813 "R_ARM_GOTFUNCDESC", /* name */
1814 false, /* partial_inplace */
1815 0, /* src_mask */
1816 0xffffffff, /* dst_mask */
1817 false), /* pcrel_offset */
1818 HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
1819 0, /* rightshift */
1820 4, /* size */
1821 32, /* bitsize */
1822 false, /* pc_relative */
1823 0, /* bitpos */
1824 complain_overflow_bitfield,/* complain_on_overflow */
1825 bfd_elf_generic_reloc, /* special_function */
1826 "R_ARM_GOTOFFFUNCDESC",/* name */
1827 false, /* partial_inplace */
1828 0, /* src_mask */
1829 0xffffffff, /* dst_mask */
1830 false), /* pcrel_offset */
1831 HOWTO (R_ARM_FUNCDESC, /* type */
1832 0, /* rightshift */
1833 4, /* size */
1834 32, /* bitsize */
1835 false, /* pc_relative */
1836 0, /* bitpos */
1837 complain_overflow_bitfield,/* complain_on_overflow */
1838 bfd_elf_generic_reloc, /* special_function */
1839 "R_ARM_FUNCDESC", /* name */
1840 false, /* partial_inplace */
1841 0, /* src_mask */
1842 0xffffffff, /* dst_mask */
1843 false), /* pcrel_offset */
1844 HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
1845 0, /* rightshift */
1846 4, /* size */
1847 64, /* bitsize */
1848 false, /* pc_relative */
1849 0, /* bitpos */
1850 complain_overflow_bitfield,/* complain_on_overflow */
1851 bfd_elf_generic_reloc, /* special_function */
1852 "R_ARM_FUNCDESC_VALUE",/* name */
1853 false, /* partial_inplace */
1854 0, /* src_mask */
1855 0xffffffff, /* dst_mask */
1856 false), /* pcrel_offset */
1857 HOWTO (R_ARM_TLS_GD32_FDPIC, /* type */
1858 0, /* rightshift */
1859 4, /* size */
1860 32, /* bitsize */
1861 false, /* pc_relative */
1862 0, /* bitpos */
1863 complain_overflow_bitfield,/* complain_on_overflow */
1864 bfd_elf_generic_reloc, /* special_function */
1865 "R_ARM_TLS_GD32_FDPIC",/* name */
1866 false, /* partial_inplace */
1867 0, /* src_mask */
1868 0xffffffff, /* dst_mask */
1869 false), /* pcrel_offset */
1870 HOWTO (R_ARM_TLS_LDM32_FDPIC, /* type */
1871 0, /* rightshift */
1872 4, /* size */
1873 32, /* bitsize */
1874 false, /* pc_relative */
1875 0, /* bitpos */
1876 complain_overflow_bitfield,/* complain_on_overflow */
1877 bfd_elf_generic_reloc, /* special_function */
1878 "R_ARM_TLS_LDM32_FDPIC",/* name */
1879 false, /* partial_inplace */
1880 0, /* src_mask */
1881 0xffffffff, /* dst_mask */
1882 false), /* pcrel_offset */
1883 HOWTO (R_ARM_TLS_IE32_FDPIC, /* type */
1884 0, /* rightshift */
1885 4, /* size */
1886 32, /* bitsize */
1887 false, /* pc_relative */
1888 0, /* bitpos */
1889 complain_overflow_bitfield,/* complain_on_overflow */
1890 bfd_elf_generic_reloc, /* special_function */
1891 "R_ARM_TLS_IE32_FDPIC",/* name */
1892 false, /* partial_inplace */
1893 0, /* src_mask */
1894 0xffffffff, /* dst_mask */
1895 false), /* pcrel_offset */
1898 /* 249-255 extended, currently unused, relocations: */
1899 static reloc_howto_type elf32_arm_howto_table_3[4] =
1901 HOWTO (R_ARM_RREL32, /* type */
1902 0, /* rightshift */
1903 0, /* size */
1904 0, /* bitsize */
1905 false, /* pc_relative */
1906 0, /* bitpos */
1907 complain_overflow_dont,/* complain_on_overflow */
1908 bfd_elf_generic_reloc, /* special_function */
1909 "R_ARM_RREL32", /* name */
1910 false, /* partial_inplace */
1911 0, /* src_mask */
1912 0, /* dst_mask */
1913 false), /* pcrel_offset */
1915 HOWTO (R_ARM_RABS32, /* type */
1916 0, /* rightshift */
1917 0, /* size */
1918 0, /* bitsize */
1919 false, /* pc_relative */
1920 0, /* bitpos */
1921 complain_overflow_dont,/* complain_on_overflow */
1922 bfd_elf_generic_reloc, /* special_function */
1923 "R_ARM_RABS32", /* name */
1924 false, /* partial_inplace */
1925 0, /* src_mask */
1926 0, /* dst_mask */
1927 false), /* pcrel_offset */
1929 HOWTO (R_ARM_RPC24, /* type */
1930 0, /* rightshift */
1931 0, /* size */
1932 0, /* bitsize */
1933 false, /* pc_relative */
1934 0, /* bitpos */
1935 complain_overflow_dont,/* complain_on_overflow */
1936 bfd_elf_generic_reloc, /* special_function */
1937 "R_ARM_RPC24", /* name */
1938 false, /* partial_inplace */
1939 0, /* src_mask */
1940 0, /* dst_mask */
1941 false), /* pcrel_offset */
1943 HOWTO (R_ARM_RBASE, /* type */
1944 0, /* rightshift */
1945 0, /* size */
1946 0, /* bitsize */
1947 false, /* pc_relative */
1948 0, /* bitpos */
1949 complain_overflow_dont,/* complain_on_overflow */
1950 bfd_elf_generic_reloc, /* special_function */
1951 "R_ARM_RBASE", /* name */
1952 false, /* partial_inplace */
1953 0, /* src_mask */
1954 0, /* dst_mask */
1955 false) /* pcrel_offset */
1958 static reloc_howto_type *
1959 elf32_arm_howto_from_type (unsigned int r_type)
1961 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1962 return &elf32_arm_howto_table_1[r_type];
1964 if (r_type >= R_ARM_IRELATIVE
1965 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1966 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1968 if (r_type >= R_ARM_RREL32
1969 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1970 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1972 return NULL;
1975 static bool
1976 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1977 Elf_Internal_Rela * elf_reloc)
1979 unsigned int r_type;
1981 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1982 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1984 /* xgettext:c-format */
1985 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1986 abfd, r_type);
1987 bfd_set_error (bfd_error_bad_value);
1988 return false;
1990 return true;
1993 struct elf32_arm_reloc_map
1995 bfd_reloc_code_real_type bfd_reloc_val;
1996 unsigned char elf_reloc_val;
1999 /* All entries in this list must also be present in elf32_arm_howto_table. */
2000 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
2002 {BFD_RELOC_NONE, R_ARM_NONE},
2003 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
2004 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
2005 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
2006 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
2007 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
2008 {BFD_RELOC_32, R_ARM_ABS32},
2009 {BFD_RELOC_32_PCREL, R_ARM_REL32},
2010 {BFD_RELOC_8, R_ARM_ABS8},
2011 {BFD_RELOC_16, R_ARM_ABS16},
2012 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
2013 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
2014 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
2015 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
2016 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
2017 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
2018 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
2019 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
2020 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
2021 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
2022 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
2023 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
2024 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
2025 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
2026 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
2027 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2028 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
2029 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
2030 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
2031 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
2032 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
2033 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2034 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
2035 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
2036 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
2037 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
2038 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
2039 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
2040 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2041 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2042 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2043 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2044 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2045 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2046 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2047 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2048 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2049 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2050 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2051 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2052 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2053 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2054 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2055 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2056 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2057 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2058 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2059 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2060 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2061 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2062 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2063 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2064 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2065 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2066 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2067 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2068 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2069 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2070 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2071 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2072 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2073 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2074 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2075 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2076 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2077 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2078 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2079 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2080 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2081 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2082 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2083 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2084 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2085 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2086 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2087 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2088 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2089 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2090 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2091 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2092 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2093 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2094 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2095 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2096 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2097 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2098 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2099 {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
2100 {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
2101 {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
2104 static reloc_howto_type *
2105 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2106 bfd_reloc_code_real_type code)
2108 unsigned int i;
2110 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2111 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2112 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2114 return NULL;
2117 static reloc_howto_type *
2118 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2119 const char *r_name)
2121 unsigned int i;
2123 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2124 if (elf32_arm_howto_table_1[i].name != NULL
2125 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2126 return &elf32_arm_howto_table_1[i];
2128 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2129 if (elf32_arm_howto_table_2[i].name != NULL
2130 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2131 return &elf32_arm_howto_table_2[i];
2133 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2134 if (elf32_arm_howto_table_3[i].name != NULL
2135 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2136 return &elf32_arm_howto_table_3[i];
2138 return NULL;
2141 /* Support for core dump NOTE sections. */
2143 static bool
2144 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2146 int offset;
2147 size_t size;
2149 switch (note->descsz)
2151 default:
2152 return false;
2154 case 148: /* Linux/ARM 32-bit. */
2155 /* pr_cursig */
2156 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2158 /* pr_pid */
2159 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2161 /* pr_reg */
2162 offset = 72;
2163 size = 72;
2165 break;
2168 /* Make a ".reg/999" section. */
2169 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2170 size, note->descpos + offset);
2173 static bool
2174 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2176 switch (note->descsz)
2178 default:
2179 return false;
2181 case 124: /* Linux/ARM elf_prpsinfo. */
2182 elf_tdata (abfd)->core->pid
2183 = bfd_get_32 (abfd, note->descdata + 12);
2184 elf_tdata (abfd)->core->program
2185 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2186 elf_tdata (abfd)->core->command
2187 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2190 /* Note that for some reason, a spurious space is tacked
2191 onto the end of the args in some implementations (at least
2192 one, anyway), so strip it off if it exists. */
2194 char *command = elf_tdata (abfd)->core->command;
2195 int n = strlen (command);
2197 if (0 < n && command[n - 1] == ' ')
2198 command[n - 1] = '\0';
2201 return true;
2204 static char *
2205 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2206 int note_type, ...)
2208 switch (note_type)
2210 default:
2211 return NULL;
2213 case NT_PRPSINFO:
2215 char data[124] ATTRIBUTE_NONSTRING;
2216 va_list ap;
2218 va_start (ap, note_type);
2219 memset (data, 0, sizeof (data));
2220 strncpy (data + 28, va_arg (ap, const char *), 16);
2221 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2222 DIAGNOSTIC_PUSH;
2223 /* GCC 8.0 and 8.1 warn about 80 equals destination size with
2224 -Wstringop-truncation:
2225 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
2227 DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
2228 #endif
2229 strncpy (data + 44, va_arg (ap, const char *), 80);
2230 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2231 DIAGNOSTIC_POP;
2232 #endif
2233 va_end (ap);
2235 return elfcore_write_note (abfd, buf, bufsiz,
2236 "CORE", note_type, data, sizeof (data));
2239 case NT_PRSTATUS:
2241 char data[148];
2242 va_list ap;
2243 long pid;
2244 int cursig;
2245 const void *greg;
2247 va_start (ap, note_type);
2248 memset (data, 0, sizeof (data));
2249 pid = va_arg (ap, long);
2250 bfd_put_32 (abfd, pid, data + 24);
2251 cursig = va_arg (ap, int);
2252 bfd_put_16 (abfd, cursig, data + 12);
2253 greg = va_arg (ap, const void *);
2254 memcpy (data + 72, greg, 72);
2255 va_end (ap);
2257 return elfcore_write_note (abfd, buf, bufsiz,
2258 "CORE", note_type, data, sizeof (data));
2263 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2264 #define TARGET_LITTLE_NAME "elf32-littlearm"
2265 #define TARGET_BIG_SYM arm_elf32_be_vec
2266 #define TARGET_BIG_NAME "elf32-bigarm"
2268 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2269 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2270 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2272 typedef unsigned long int insn32;
2273 typedef unsigned short int insn16;
2275 /* In lieu of proper flags, assume all EABIv4 or later objects are
2276 interworkable. */
2277 #define INTERWORK_FLAG(abfd) \
2278 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2279 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2280 || ((abfd)->flags & BFD_LINKER_CREATED))
2282 /* The linker script knows the section names for placement.
2283 The entry_names are used to do simple name mangling on the stubs.
2284 Given a function name and its type, the corresponding stub can be found.
2285 The name can be changed; the only requirement is that the %s be present. */
2286 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2287 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2289 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2290 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2292 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2293 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2295 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2296 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2298 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2299 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2301 #define STUB_ENTRY_NAME "__%s_veneer"
2303 #define CMSE_PREFIX "__acle_se_"
2305 #define CMSE_STUB_NAME ".gnu.sgstubs"
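/* Illustrative sketch, not part of the original source: the simple
   name mangling described in the comment above.  Substituting a
   function name into one of the entry-name formats yields the glue
   symbol name, e.g. "foo" -> "__foo_from_thumb".  The helper name and
   the caller-supplied buffer are assumptions made for this example.  */

static inline void
elf32_arm_example_glue_entry_name (const char *func_name,
                                   char *buf, size_t buf_len)
{
  /* THUMB2ARM_GLUE_ENTRY_NAME is "__%s_from_thumb"; the other entry
     name formats are used in the same way.  */
  snprintf (buf, buf_len, THUMB2ARM_GLUE_ENTRY_NAME, func_name);
}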
2307 /* The name of the dynamic interpreter. This is put in the .interp
2308 section. */
2309 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2311 /* FDPIC default stack size. */
2312 #define DEFAULT_STACK_SIZE 0x8000
2314 static const unsigned long tls_trampoline [] =
2316 0xe08e0000, /* add r0, lr, r0 */
2317 0xe5901004, /* ldr r1, [r0,#4] */
2318 0xe12fff11, /* bx r1 */
2321 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2323 0xe52d2004, /* push {r2} */
2324 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2325 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2326 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2327 0xe081100f, /* 2: add r1, pc */
2328 0xe12fff12, /* bx r2 */
2329 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2330 + dl_tlsdesc_lazy_resolver(GOT) */
2331 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2334 /* NOTE: [Thumb nop sequence]
2335 When adding code that transitions from Thumb to Arm, the instruction
2336 used for the alignment padding should be 0xe7fd (b .-2) rather than
2337 a nop, for performance reasons. */
2339 /* ARM FDPIC PLT entry. */
2340 /* The last 5 words contain PLT lazy fragment code and data. */
2341 static const bfd_vma elf32_arm_fdpic_plt_entry [] =
2343 0xe59fc008, /* ldr r12, .L1 */
2344 0xe08cc009, /* add r12, r12, r9 */
2345 0xe59c9004, /* ldr r9, [r12, #4] */
2346 0xe59cf000, /* ldr pc, [r12] */
2347 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2348 0x00000000, /* L2. .word foo(funcdesc_value_reloc_offset) */
2349 0xe51fc00c, /* ldr r12, [pc, #-12] */
2350 0xe92d1000, /* push {r12} */
2351 0xe599c004, /* ldr r12, [r9, #4] */
2352 0xe599f000, /* ldr pc, [r9] */
2355 /* Thumb FDPIC PLT entry. */
2356 /* The last 5 words contain PLT lazy fragment code and data. */
2357 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
2359 0xc00cf8df, /* ldr.w r12, .L1 */
2360 0x0c09eb0c, /* add.w r12, r12, r9 */
2361 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2362 0xf000f8dc, /* ldr.w pc, [r12] */
2363 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2364 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2365 0xc008f85f, /* ldr.w r12, .L2 */
2366 0xcd04f84d, /* push {r12} */
2367 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2368 0xf000f8d9, /* ldr.w pc, [r9] */
2371 #ifdef FOUR_WORD_PLT
2373 /* The first entry in a procedure linkage table looks like
2374 this. It is set up so that any shared library function that is
2375 called before the relocation has been set up calls the dynamic
2376 linker first. */
2377 static const bfd_vma elf32_arm_plt0_entry [] =
2379 0xe52de004, /* str lr, [sp, #-4]! */
2380 0xe59fe010, /* ldr lr, [pc, #16] */
2381 0xe08fe00e, /* add lr, pc, lr */
2382 0xe5bef008, /* ldr pc, [lr, #8]! */
2385 /* Subsequent entries in a procedure linkage table look like
2386 this. */
2387 static const bfd_vma elf32_arm_plt_entry [] =
2389 0xe28fc600, /* add ip, pc, #NN */
2390 0xe28cca00, /* add ip, ip, #NN */
2391 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2392 0x00000000, /* unused */
2395 #else /* not FOUR_WORD_PLT */
2397 /* The first entry in a procedure linkage table looks like
2398 this. It is set up so that any shared library function that is
2399 called before the relocation has been set up calls the dynamic
2400 linker first. */
2401 static const bfd_vma elf32_arm_plt0_entry [] =
2403 0xe52de004, /* str lr, [sp, #-4]! */
2404 0xe59fe004, /* ldr lr, [pc, #4] */
2405 0xe08fe00e, /* add lr, pc, lr */
2406 0xe5bef008, /* ldr pc, [lr, #8]! */
2407 0x00000000, /* &GOT[0] - . */
2410 /* By default subsequent entries in a procedure linkage table look like
2411 this. Offsets that don't fit into 28 bits will cause a link error. */
2412 static const bfd_vma elf32_arm_plt_entry_short [] =
2414 0xe28fc600, /* add ip, pc, #0xNN00000 */
2415 0xe28cca00, /* add ip, ip, #0xNN000 */
2416 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2419 /* When explicitly asked, we'll use this "long" entry format
2420 which can cope with arbitrary displacements. */
2421 static const bfd_vma elf32_arm_plt_entry_long [] =
2423 0xe28fc200, /* add ip, pc, #0xN0000000 */
2424 0xe28cc600, /* add ip, ip, #0xNN00000 */
2425 0xe28cca00, /* add ip, ip, #0xNN000 */
2426 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2429 static bool elf32_arm_use_long_plt_entry = false;
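/* Illustrative sketch, not part of the original source: how a GOT
   displacement that fits in 28 bits could be folded into the three
   instructions of elf32_arm_plt_entry_short above (bits 27-20 into
   the first rotated immediate, bits 19-12 into the second, and bits
   11-0 into the load offset).  The helper name is made up for this
   example; the authoritative encoding is performed where the PLT
   entries are actually written out.  */

static inline void
elf32_arm_example_fill_short_plt (bfd_vma got_displacement,
                                  bfd_vma insns[3])
{
  insns[0] = 0xe28fc600 | ((got_displacement & 0x0ff00000) >> 20);
  insns[1] = 0xe28cca00 | ((got_displacement & 0x000ff000) >> 12);
  insns[2] = 0xe5bcf000 | (got_displacement & 0x00000fff);
}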
2431 #endif /* not FOUR_WORD_PLT */
2433 /* The first entry in a procedure linkage table looks like this.
2434 It is set up so that any shared library function that is called before the
2435 relocation has been set up calls the dynamic linker first. */
2436 static const bfd_vma elf32_thumb2_plt0_entry [] =
2438 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2439 an instruction may be encoded as one or two array elements. */
2440 0xf8dfb500, /* push {lr} */
2441 0x44fee008, /* ldr.w lr, [pc, #8] */
2442 /* add lr, pc */
2443 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2444 0x00000000, /* &GOT[0] - . */
2447 /* Subsequent entries in a procedure linkage table for a Thumb-only
2448 target look like this. */
2449 static const bfd_vma elf32_thumb2_plt_entry [] =
2451 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2452 an instruction may be encoded as one or two array elements. */
2453 0x0c00f240, /* movw ip, #0xNNNN */
2454 0x0c00f2c0, /* movt ip, #0xNNNN */
2455 0xf8dc44fc, /* add ip, pc */
2456 0xe7fcf000 /* ldr.w pc, [ip] */
2457 /* b .-4 */
2460 /* The format of the first entry in the procedure linkage table
2461 for a VxWorks executable. */
2462 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2464 0xe52dc008, /* str ip,[sp,#-8]! */
2465 0xe59fc000, /* ldr ip,[pc] */
2466 0xe59cf008, /* ldr pc,[ip,#8] */
2467 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2470 /* The format of subsequent entries in a VxWorks executable. */
2471 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2473 0xe59fc000, /* ldr ip,[pc] */
2474 0xe59cf000, /* ldr pc,[ip] */
2475 0x00000000, /* .long @got */
2476 0xe59fc000, /* ldr ip,[pc] */
2477 0xea000000, /* b _PLT */
2478 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2481 /* The format of entries in a VxWorks shared library. */
2482 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2484 0xe59fc000, /* ldr ip,[pc] */
2485 0xe79cf009, /* ldr pc,[ip,r9] */
2486 0x00000000, /* .long @got */
2487 0xe59fc000, /* ldr ip,[pc] */
2488 0xe599f008, /* ldr pc,[r9,#8] */
2489 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2492 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2493 #define PLT_THUMB_STUB_SIZE 4
2494 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2496 0x4778, /* bx pc */
2497 0xe7fd /* b .-2 */
2500 /* The first entry in a procedure linkage table looks like
2501 this. It is set up so that any shared library function that is
2502 called before the relocation has been set up calls the dynamic
2503 linker first. */
2504 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2506 /* First bundle: */
2507 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2508 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2509 0xe08cc00f, /* add ip, ip, pc */
2510 0xe52dc008, /* str ip, [sp, #-8]! */
2511 /* Second bundle: */
2512 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2513 0xe59cc000, /* ldr ip, [ip] */
2514 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2515 0xe12fff1c, /* bx ip */
2516 /* Third bundle: */
2517 0xe320f000, /* nop */
2518 0xe320f000, /* nop */
2519 0xe320f000, /* nop */
2520 /* .Lplt_tail: */
2521 0xe50dc004, /* str ip, [sp, #-4] */
2522 /* Fourth bundle: */
2523 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2524 0xe59cc000, /* ldr ip, [ip] */
2525 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2526 0xe12fff1c, /* bx ip */
2528 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2530 /* Subsequent entries in a procedure linkage table look like this. */
2531 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2533 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2534 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2535 0xe08cc00f, /* add ip, ip, pc */
2536 0xea000000, /* b .Lplt_tail */
2539 /* PR 28924:
2540 There was a bug due to too high values of THM_MAX_FWD_BRANCH_OFFSET and
2541 THM2_MAX_FWD_BRANCH_OFFSET. The first macro concerns the case when Thumb-2
2542 is not available, and the second macro the case when Thumb-2 is available. Among other
2543 things, they affect the range of branches represented as BLX instructions
2544 in Encoding T2 defined in Section A8.8.25 of the ARM Architecture
2545 Reference Manual ARMv7-A and ARMv7-R edition issue C.d. Such branches are
2546 specified there to have a maximum forward offset that is a multiple of 4.
2547 Previously, the respective values defined here were multiples of 2 but not
2548 4 and they are included in comments for reference. */
2549 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2550 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2551 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 4 + 4)
2552 /* #def THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4) */
2553 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2554 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 4) + 4)
2555 /* #def THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4) */
2556 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2557 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2558 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
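/* Illustrative sketch, not part of the original source: the kind of
   range test the limits above feed into.  A long-branch stub is
   needed when the offset from the branch to its destination falls
   outside the encodable range; THUMB2 selects between the Thumb-2
   and pre-Thumb-2 limits.  The helper name is made up for this
   example; the real decision is made when stub types are chosen.  */

static inline bool
elf32_arm_example_thumb_branch_needs_stub (bfd_vma location,
                                           bfd_vma destination,
                                           bool thumb2)
{
  bfd_signed_vma offset
    = (bfd_signed_vma) destination - (bfd_signed_vma) location;
  bfd_signed_vma max_fwd
    = thumb2 ? THM2_MAX_FWD_BRANCH_OFFSET : THM_MAX_FWD_BRANCH_OFFSET;
  bfd_signed_vma max_bwd
    = thumb2 ? THM2_MAX_BWD_BRANCH_OFFSET : THM_MAX_BWD_BRANCH_OFFSET;

  return offset > max_fwd || offset < max_bwd;
}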
2560 enum stub_insn_type
2562 THUMB16_TYPE = 1,
2563 THUMB32_TYPE,
2564 ARM_TYPE,
2565 DATA_TYPE
2568 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2569 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2570 is inserted in arm_build_one_stub(). */
2571 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2572 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2573 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2574 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2575 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2576 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2577 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2578 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2580 typedef struct
2582 bfd_vma data;
2583 enum stub_insn_type type;
2584 unsigned int r_type;
2585 int reloc_addend;
2586 } insn_sequence;
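/* Illustrative sketch, not part of the original source: how the byte
   size of a stub follows from one of the insn_sequence templates
   defined below -- a THUMB16 entry occupies two bytes, while Thumb-2,
   ARM and data entries occupy four.  The helper name is made up for
   this example; the real sizing happens when stubs are laid out.  */

static inline unsigned int
elf32_arm_example_template_size (const insn_sequence *seq,
                                 unsigned int count)
{
  unsigned int i, size = 0;

  for (i = 0; i < count; i++)
    size += (seq[i].type == THUMB16_TYPE) ? 2 : 4;

  return size;
}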
2588 /* See note [Thumb nop sequence] when adding a veneer. */
2590 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2591 to reach the stub if necessary. */
2592 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2594 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2595 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2598 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2599 available. */
2600 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2602 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2603 ARM_INSN (0xe12fff1c), /* bx ip */
2604 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2607 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2608 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2610 THUMB16_INSN (0xb401), /* push {r0} */
2611 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2612 THUMB16_INSN (0x4684), /* mov ip, r0 */
2613 THUMB16_INSN (0xbc01), /* pop {r0} */
2614 THUMB16_INSN (0x4760), /* bx ip */
2615 THUMB16_INSN (0xbf00), /* nop */
2616 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2619 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2620 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2622 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2623 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(x) */
2626 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2627 M-profile architectures. */
2628 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2630 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2631 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2632 THUMB16_INSN (0x4760), /* bx ip */
2635 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2636 allowed. */
2637 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2639 THUMB16_INSN (0x4778), /* bx pc */
2640 THUMB16_INSN (0xe7fd), /* b .-2 */
2641 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2642 ARM_INSN (0xe12fff1c), /* bx ip */
2643 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2646 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2647 available. */
2648 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2650 THUMB16_INSN (0x4778), /* bx pc */
2651 THUMB16_INSN (0xe7fd), /* b .-2 */
2652 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2653 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2656 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2657 one, when the destination is close enough. */
2658 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2660 THUMB16_INSN (0x4778), /* bx pc */
2661 THUMB16_INSN (0xe7fd), /* b .-2 */
2662 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2665 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2666 blx to reach the stub if necessary. */
2667 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2669 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2670 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2671 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2674 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2675 blx to reach the stub if necessary. We cannot add into pc;
2676 it is not guaranteed to mode switch (different in ARMv6 and
2677 ARMv7). */
2678 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2680 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2681 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2682 ARM_INSN (0xe12fff1c), /* bx ip */
2683 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2686 /* V4T ARM -> ARM long branch stub, PIC. */
2687 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2689 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2690 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2691 ARM_INSN (0xe12fff1c), /* bx ip */
2692 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2695 /* V4T Thumb -> ARM long branch stub, PIC. */
2696 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2698 THUMB16_INSN (0x4778), /* bx pc */
2699 THUMB16_INSN (0xe7fd), /* b .-2 */
2700 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2701 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2702 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2705 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2706 architectures. */
2707 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2709 THUMB16_INSN (0xb401), /* push {r0} */
2710 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2711 THUMB16_INSN (0x46fc), /* mov ip, pc */
2712 THUMB16_INSN (0x4484), /* add ip, r0 */
2713 THUMB16_INSN (0xbc01), /* pop {r0} */
2714 THUMB16_INSN (0x4760), /* bx ip */
2715 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2718 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2719 allowed. */
2720 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2722 THUMB16_INSN (0x4778), /* bx pc */
2723 THUMB16_INSN (0xe7fd), /* b .-2 */
2724 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2725 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2726 ARM_INSN (0xe12fff1c), /* bx ip */
2727 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2730 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2731 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2732 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2734 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2735 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2736 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2739 /* V4T Thumb -> TLS trampoline. Lowest common denominator, which is a
2740 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2741 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2743 THUMB16_INSN (0x4778), /* bx pc */
2744 THUMB16_INSN (0xe7fd), /* b .-2 */
2745 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2746 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2747 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2750 /* NaCl ARM -> ARM long branch stub. */
2751 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2753 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2754 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2755 ARM_INSN (0xe12fff1c), /* bx ip */
2756 ARM_INSN (0xe320f000), /* nop */
2757 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2758 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2759 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2760 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2763 /* NaCl ARM -> ARM long branch stub, PIC. */
2764 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2766 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2767 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2768 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2769 ARM_INSN (0xe12fff1c), /* bx ip */
2770 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2771 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2772 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2773 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2776 /* Stub used for transition to secure state (aka SG veneer). */
2777 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2779 THUMB32_INSN (0xe97fe97f), /* sg. */
2780 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2784 /* Cortex-A8 erratum-workaround stubs. */
2786 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2787 can't use a conditional branch to reach this stub). */
2789 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2791 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2792 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2793 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2796 /* Stub used for b.w and bl.w instructions. */
2798 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2800 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2803 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2805 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2808 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2809 instruction (which switches to ARM mode) to point to this stub. Jump to the
2810 real destination using an ARM-mode branch. */
2812 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2814 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2817 /* For each section group there can be a specially created linker section
2818 to hold the stubs for that group. The name of the stub section is based
2819 upon the name of another section within that group with the suffix below
2820 applied.
2822 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2823 create what appeared to be a linker stub section when it actually
2824 contained user code/data. For example, consider this fragment:
2826 const char * stubborn_problems[] = { "np" };
2828 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2829 section called:
2831 .data.rel.local.stubborn_problems
2833 This then causes problems in elf32_arm_build_stubs() as it triggers:
2835 // Ignore non-stub sections.
2836 if (!strstr (stub_sec->name, STUB_SUFFIX))
2837 continue;
2839 And so the section would be ignored instead of being processed. Hence
2840 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2841 C identifier. */
2842 #define STUB_SUFFIX ".__stub"
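/* Illustrative sketch, not part of the original source: the suffix
   test quoted in the comment above.  A stub section's name is the
   name of its group's link section with STUB_SUFFIX appended, so a
   strstr() test identifies linker-created stub sections; with the
   old ".stub" suffix a user section such as
   ".data.rel.local.stubborn_problems" matched as well.  */

static inline bool
elf32_arm_example_is_stub_section_name (const char *sec_name)
{
  return strstr (sec_name, STUB_SUFFIX) != NULL;
}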
2844 /* One entry per long/short branch stub defined above. */
2845 #define DEF_STUBS \
2846 DEF_STUB (long_branch_any_any) \
2847 DEF_STUB (long_branch_v4t_arm_thumb) \
2848 DEF_STUB (long_branch_thumb_only) \
2849 DEF_STUB (long_branch_v4t_thumb_thumb) \
2850 DEF_STUB (long_branch_v4t_thumb_arm) \
2851 DEF_STUB (short_branch_v4t_thumb_arm) \
2852 DEF_STUB (long_branch_any_arm_pic) \
2853 DEF_STUB (long_branch_any_thumb_pic) \
2854 DEF_STUB (long_branch_v4t_thumb_thumb_pic) \
2855 DEF_STUB (long_branch_v4t_arm_thumb_pic) \
2856 DEF_STUB (long_branch_v4t_thumb_arm_pic) \
2857 DEF_STUB (long_branch_thumb_only_pic) \
2858 DEF_STUB (long_branch_any_tls_pic) \
2859 DEF_STUB (long_branch_v4t_thumb_tls_pic) \
2860 DEF_STUB (long_branch_arm_nacl) \
2861 DEF_STUB (long_branch_arm_nacl_pic) \
2862 DEF_STUB (cmse_branch_thumb_only) \
2863 DEF_STUB (a8_veneer_b_cond) \
2864 DEF_STUB (a8_veneer_b) \
2865 DEF_STUB (a8_veneer_bl) \
2866 DEF_STUB (a8_veneer_blx) \
2867 DEF_STUB (long_branch_thumb2_only) \
2868 DEF_STUB (long_branch_thumb2_only_pure)
2870 #define DEF_STUB(x) arm_stub_##x,
2871 enum elf32_arm_stub_type
2873 arm_stub_none,
2874 DEF_STUBS
2875 max_stub_type
2877 #undef DEF_STUB
2879 /* Note the first a8_veneer type. */
2880 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2882 typedef struct
2884 const insn_sequence* template_sequence;
2885 int template_size;
2886 } stub_def;
2888 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2889 static const stub_def stub_definitions[] =
2891 {NULL, 0},
2892 DEF_STUBS
2895 struct elf32_arm_stub_hash_entry
2897 /* Base hash table entry structure. */
2898 struct bfd_hash_entry root;
2900 /* The stub section. */
2901 asection *stub_sec;
2903 /* Offset within stub_sec of the beginning of this stub. */
2904 bfd_vma stub_offset;
2906 /* Given the symbol's value and its section we can determine its final
2907 value when building the stubs (so the stub knows where to jump). */
2908 bfd_vma target_value;
2909 asection *target_section;
2911 /* Same as above but for the source of the branch to the stub. Used for
2912 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2913 such, source section does not need to be recorded since Cortex-A8 erratum
2914 workaround stubs are only generated when both source and target are in the
2915 same section. */
2916 bfd_vma source_value;
2918 /* The instruction which caused this stub to be generated (only valid for
2919 Cortex-A8 erratum workaround stubs at present). */
2920 unsigned long orig_insn;
2922 /* The stub type. */
2923 enum elf32_arm_stub_type stub_type;
2924 /* Its encoding size in bytes. */
2925 int stub_size;
2926 /* Its template. */
2927 const insn_sequence *stub_template;
2928 /* The size of the template (number of entries). */
2929 int stub_template_size;
2931 /* The symbol table entry, if any, that this was derived from. */
2932 struct elf32_arm_link_hash_entry *h;
2934 /* Type of branch. */
2935 enum arm_st_branch_type branch_type;
2937 /* Where this stub is being called from, or, in the case of combined
2938 stub sections, the first input section in the group. */
2939 asection *id_sec;
2941 /* The name for the local symbol at the start of this stub. The
2942 stub name in the hash table has to be unique; this does not, so
2943 it can be friendlier. */
2944 char *output_name;
2947 /* Used to build a map of a section. This is required for mixed-endian
2948 code/data. */
2950 typedef struct elf32_elf_section_map
2952 bfd_vma vma;
2953 char type;
2955 elf32_arm_section_map;
2957 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2959 typedef enum
2961 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2962 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2963 VFP11_ERRATUM_ARM_VENEER,
2964 VFP11_ERRATUM_THUMB_VENEER
2966 elf32_vfp11_erratum_type;
2968 typedef struct elf32_vfp11_erratum_list
2970 struct elf32_vfp11_erratum_list *next;
2971 bfd_vma vma;
2972 union
2974 struct
2976 struct elf32_vfp11_erratum_list *veneer;
2977 unsigned int vfp_insn;
2978 } b;
2979 struct
2981 struct elf32_vfp11_erratum_list *branch;
2982 unsigned int id;
2983 } v;
2984 } u;
2985 elf32_vfp11_erratum_type type;
2987 elf32_vfp11_erratum_list;
2989 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2990 veneer. */
2991 typedef enum
2993 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2994 STM32L4XX_ERRATUM_VENEER
2996 elf32_stm32l4xx_erratum_type;
2998 typedef struct elf32_stm32l4xx_erratum_list
3000 struct elf32_stm32l4xx_erratum_list *next;
3001 bfd_vma vma;
3002 union
3004 struct
3006 struct elf32_stm32l4xx_erratum_list *veneer;
3007 unsigned int insn;
3008 } b;
3009 struct
3011 struct elf32_stm32l4xx_erratum_list *branch;
3012 unsigned int id;
3013 } v;
3014 } u;
3015 elf32_stm32l4xx_erratum_type type;
3017 elf32_stm32l4xx_erratum_list;
3019 typedef enum
3021 DELETE_EXIDX_ENTRY,
3022 INSERT_EXIDX_CANTUNWIND_AT_END
3024 arm_unwind_edit_type;
3026 /* A (sorted) list of edits to apply to an unwind table. */
3027 typedef struct arm_unwind_table_edit
3029 arm_unwind_edit_type type;
3030 /* Note: we sometimes want to insert an unwind entry corresponding to a
3031 section different from the one we're currently writing out, so record the
3032 (text) section this edit relates to here. */
3033 asection *linked_section;
3034 unsigned int index;
3035 struct arm_unwind_table_edit *next;
3037 arm_unwind_table_edit;
3039 typedef struct _arm_elf_section_data
3041 /* Information about mapping symbols. */
3042 struct bfd_elf_section_data elf;
3043 unsigned int mapcount;
3044 unsigned int mapsize;
3045 elf32_arm_section_map *map;
3046 /* Information about CPU errata. */
3047 unsigned int erratumcount;
3048 elf32_vfp11_erratum_list *erratumlist;
3049 unsigned int stm32l4xx_erratumcount;
3050 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
3051 unsigned int additional_reloc_count;
3052 /* Information about unwind tables. */
3053 union
3055 /* Unwind info attached to a text section. */
3056 struct
3058 asection *arm_exidx_sec;
3059 } text;
3061 /* Unwind info attached to an .ARM.exidx section. */
3062 struct
3064 arm_unwind_table_edit *unwind_edit_list;
3065 arm_unwind_table_edit *unwind_edit_tail;
3066 } exidx;
3067 } u;
3069 _arm_elf_section_data;
3071 #define elf32_arm_section_data(sec) \
3072 ((_arm_elf_section_data *) elf_section_data (sec))
3074 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3075 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3076 so may be created multiple times: we use an array of these entries whilst
3077 relaxing which we can refresh easily, then create stubs for each potentially
3078 erratum-triggering instruction once we've settled on a solution. */
3080 struct a8_erratum_fix
3082 bfd *input_bfd;
3083 asection *section;
3084 bfd_vma offset;
3085 bfd_vma target_offset;
3086 unsigned long orig_insn;
3087 char *stub_name;
3088 enum elf32_arm_stub_type stub_type;
3089 enum arm_st_branch_type branch_type;
3092 /* A table of relocs applied to branches which might trigger Cortex-A8
3093 erratum. */
3095 struct a8_erratum_reloc
3097 bfd_vma from;
3098 bfd_vma destination;
3099 struct elf32_arm_link_hash_entry *hash;
3100 const char *sym_name;
3101 unsigned int r_type;
3102 enum arm_st_branch_type branch_type;
3103 bool non_a8_stub;
3106 /* The size of the thread control block. */
3107 #define TCB_SIZE 8
3109 /* ARM-specific information about a PLT entry, over and above the usual
3110 gotplt_union. */
3111 struct arm_plt_info
3113 /* We reference count Thumb references to a PLT entry separately,
3114 so that we can emit the Thumb trampoline only if needed. */
3115 bfd_signed_vma thumb_refcount;
3117 /* Some references from Thumb code may be eliminated by BL->BLX
3118 conversion, so record them separately. */
3119 bfd_signed_vma maybe_thumb_refcount;
3121 /* How many of the recorded PLT accesses were from non-call relocations.
3122 This information is useful when deciding whether anything takes the
3123 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3124 non-call references to the function should resolve directly to the
3125 real runtime target. */
3126 unsigned int noncall_refcount;
3128 /* Since PLT entries have variable size if the Thumb prologue is
3129 used, we need to record the index into .got.plt instead of
3130 recomputing it from the PLT offset. */
3131 bfd_signed_vma got_offset;
3134 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3135 struct arm_local_iplt_info
3137 /* The information that is usually found in the generic ELF part of
3138 the hash table entry. */
3139 union gotplt_union root;
3141 /* The information that is usually found in the ARM-specific part of
3142 the hash table entry. */
3143 struct arm_plt_info arm;
3145 /* A list of all potential dynamic relocations against this symbol. */
3146 struct elf_dyn_relocs *dyn_relocs;
3149 /* Structure to handle FDPIC support for local functions. */
3150 struct fdpic_local
3152 unsigned int funcdesc_cnt;
3153 unsigned int gotofffuncdesc_cnt;
3154 int funcdesc_offset;
3157 struct elf_arm_obj_tdata
3159 struct elf_obj_tdata root;
3161 /* Zero to warn when linking objects with incompatible enum sizes. */
3162 int no_enum_size_warning;
3164 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
3165 int no_wchar_size_warning;
3167 /* The number of entries in each of the arrays in this structure.
3168 Used to avoid buffer overruns. */
3169 bfd_size_type num_entries;
3171 /* tls_type for each local got entry. */
3172 char *local_got_tls_type;
3174 /* GOTPLT entries for TLS descriptors. */
3175 bfd_vma *local_tlsdesc_gotent;
3177 /* Information for local symbols that need entries in .iplt. */
3178 struct arm_local_iplt_info **local_iplt;
3180 /* Maintains FDPIC counters and funcdesc info. */
3181 struct fdpic_local *local_fdpic_cnts;
3184 #define elf_arm_tdata(bfd) \
3185 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3187 #define elf32_arm_num_entries(bfd) \
3188 (elf_arm_tdata (bfd)->num_entries)
3190 #define elf32_arm_local_got_tls_type(bfd) \
3191 (elf_arm_tdata (bfd)->local_got_tls_type)
3193 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3194 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3196 #define elf32_arm_local_iplt(bfd) \
3197 (elf_arm_tdata (bfd)->local_iplt)
3199 #define elf32_arm_local_fdpic_cnts(bfd) \
3200 (elf_arm_tdata (bfd)->local_fdpic_cnts)
3202 #define is_arm_elf(bfd) \
3203 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3204 && elf_tdata (bfd) != NULL \
3205 && elf_object_id (bfd) == ARM_ELF_DATA)
3207 static bool
3208 elf32_arm_mkobject (bfd *abfd)
3210 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3211 ARM_ELF_DATA);
3214 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3216 /* Structure to handle FDPIC support for extern functions. */
3217 struct fdpic_global {
3218 unsigned int gotofffuncdesc_cnt;
3219 unsigned int gotfuncdesc_cnt;
3220 unsigned int funcdesc_cnt;
3221 int funcdesc_offset;
3222 int gotfuncdesc_offset;
3225 /* Arm ELF linker hash entry. */
3226 struct elf32_arm_link_hash_entry
3228 struct elf_link_hash_entry root;
3230 /* ARM-specific PLT information. */
3231 struct arm_plt_info plt;
3233 #define GOT_UNKNOWN 0
3234 #define GOT_NORMAL 1
3235 #define GOT_TLS_GD 2
3236 #define GOT_TLS_IE 4
3237 #define GOT_TLS_GDESC 8
3238 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3239 unsigned int tls_type : 8;
3241 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
3242 unsigned int is_iplt : 1;
3244 unsigned int unused : 23;
3246 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3247 starting at the end of the jump table. */
3248 bfd_vma tlsdesc_got;
3250 /* The symbol marking the real symbol location for exported thumb
3251 symbols with Arm stubs. */
3252 struct elf_link_hash_entry *export_glue;
3254 /* A pointer to the most recently used stub hash entry against this
3255 symbol. */
3256 struct elf32_arm_stub_hash_entry *stub_cache;
3258 /* Counter for FDPIC relocations against this symbol. */
3259 struct fdpic_global fdpic_cnts;
3262 /* Traverse an arm ELF linker hash table. */
3263 #define elf32_arm_link_hash_traverse(table, func, info) \
3264 (elf_link_hash_traverse \
3265 (&(table)->root, \
3266 (bool (*) (struct elf_link_hash_entry *, void *)) (func), \
3267 (info)))
3269 /* Get the ARM elf linker hash table from a link_info structure. */
3270 #define elf32_arm_hash_table(p) \
3271 ((is_elf_hash_table ((p)->hash) \
3272 && elf_hash_table_id (elf_hash_table (p)) == ARM_ELF_DATA) \
3273 ? (struct elf32_arm_link_hash_table *) (p)->hash : NULL)
3275 #define arm_stub_hash_lookup(table, string, create, copy) \
3276 ((struct elf32_arm_stub_hash_entry *) \
3277 bfd_hash_lookup ((table), (string), (create), (copy)))
3279 /* Array to keep track of which stub sections have been created, and
3280 information on stub grouping. */
3281 struct map_stub
3283 /* This is the section to which stubs in the group will be
3284 attached. */
3285 asection *link_sec;
3286 /* The stub section. */
3287 asection *stub_sec;
3290 #define elf32_arm_compute_jump_table_size(htab) \
3291 ((htab)->next_tls_desc_index * 4)
3293 /* ARM ELF linker hash table. */
3294 struct elf32_arm_link_hash_table
3296 /* The main hash table. */
3297 struct elf_link_hash_table root;
3299 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3300 bfd_size_type thumb_glue_size;
3302 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3303 bfd_size_type arm_glue_size;
3305 /* The size in bytes of section containing the ARMv4 BX veneers. */
3306 bfd_size_type bx_glue_size;
3308 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3309 veneer has been populated. */
3310 bfd_vma bx_glue_offset[15];
3312 /* The size in bytes of the section containing glue for VFP11 erratum
3313 veneers. */
3314 bfd_size_type vfp11_erratum_glue_size;
3316 /* The size in bytes of the section containing glue for STM32L4XX erratum
3317 veneers. */
3318 bfd_size_type stm32l4xx_erratum_glue_size;
3320 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3321 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3322 elf32_arm_write_section(). */
3323 struct a8_erratum_fix *a8_erratum_fixes;
3324 unsigned int num_a8_erratum_fixes;
3326 /* An arbitrary input BFD chosen to hold the glue sections. */
3327 bfd * bfd_of_glue_owner;
3329 /* Nonzero to output a BE8 image. */
3330 int byteswap_code;
3332 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3333 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3334 int target1_is_rel;
3336 /* The relocation to use for R_ARM_TARGET2 relocations. */
3337 int target2_reloc;
3339 /* 0 = Ignore R_ARM_V4BX.
3340 1 = Convert BX to MOV PC.
3341 2 = Generate v4 interworking stubs. */
3342 int fix_v4bx;
3344 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3345 int fix_cortex_a8;
3347 /* Whether we should fix the ARM1176 BLX immediate issue. */
3348 int fix_arm1176;
3350 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3351 int use_blx;
3353 /* What sort of code sequences we should look for which may trigger the
3354 VFP11 denorm erratum. */
3355 bfd_arm_vfp11_fix vfp11_fix;
3357 /* Global counter for the number of fixes we have emitted. */
3358 int num_vfp11_fixes;
3360 /* What sort of code sequences we should look for which may trigger the
3361 STM32L4XX erratum. */
3362 bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3364 /* Global counter for the number of fixes we have emitted. */
3365 int num_stm32l4xx_fixes;
3367 /* Nonzero to force PIC branch veneers. */
3368 int pic_veneer;
3370 /* The number of bytes in the initial entry in the PLT. */
3371 bfd_size_type plt_header_size;
3373 /* The number of bytes in the subsequent PLT entries. */
3374 bfd_size_type plt_entry_size;
3376 /* True if the target uses REL relocations. */
3377 bool use_rel;
3379 /* Nonzero if import library must be a secure gateway import library
3380 as per ARMv8-M Security Extensions. */
3381 int cmse_implib;
3383 /* The import library whose symbols' address must remain stable in
3384 the import library generated. */
3385 bfd *in_implib_bfd;
3387 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3388 bfd_vma next_tls_desc_index;
3390 /* How many R_ARM_TLS_DESC relocations have been generated so far. */
3391 bfd_vma num_tls_desc;
3393 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3394 asection *srelplt2;
3396 /* Offset in .plt section of tls_arm_trampoline. */
3397 bfd_vma tls_trampoline;
3399 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3400 union
3402 bfd_signed_vma refcount;
3403 bfd_vma offset;
3404 } tls_ldm_got;
3406 /* For convenience in allocate_dynrelocs. */
3407 bfd * obfd;
3409 /* The amount of space used by the reserved portion of the sgotplt
3410 section, plus whatever space is used by the jump slots. */
3411 bfd_vma sgotplt_jump_table_size;
3413 /* The stub hash table. */
3414 struct bfd_hash_table stub_hash_table;
3416 /* Linker stub bfd. */
3417 bfd *stub_bfd;
3419 /* Linker call-backs. */
3420 asection * (*add_stub_section) (const char *, asection *, asection *,
3421 unsigned int);
3422 void (*layout_sections_again) (void);
3424 /* Array to keep track of which stub sections have been created, and
3425 information on stub grouping. */
3426 struct map_stub *stub_group;
3428 /* Input stub section holding secure gateway veneers. */
3429 asection *cmse_stub_sec;
3431 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3432 start to be allocated. */
3433 bfd_vma new_cmse_stub_offset;
3435 /* Number of elements in stub_group. */
3436 unsigned int top_id;
3438 /* Assorted information used by elf32_arm_size_stubs. */
3439 unsigned int bfd_count;
3440 unsigned int top_index;
3441 asection **input_list;
3443 /* True if the target system uses FDPIC. */
3444 int fdpic_p;
3446 /* Fixup section. Used for FDPIC. */
3447 asection *srofixup;
3450 /* Add an FDPIC read-only fixup. */
3451 static void
3452 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3454 bfd_vma fixup_offset;
3456 fixup_offset = srofixup->reloc_count++ * 4;
3457 BFD_ASSERT (fixup_offset < srofixup->size);
3458 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3461 static inline int
3462 ctz (unsigned int mask)
3464 #if GCC_VERSION >= 3004
3465 return __builtin_ctz (mask);
3466 #else
3467 unsigned int i;
3469 for (i = 0; i < 8 * sizeof (mask); i++)
3471 if (mask & 0x1)
3472 break;
3473 mask = (mask >> 1);
3475 return i;
3476 #endif
3479 static inline int
3480 elf32_arm_popcount (unsigned int mask)
3482 #if GCC_VERSION >= 3004
3483 return __builtin_popcount (mask);
3484 #else
3485 unsigned int i;
3486 int sum = 0;
3488 for (i = 0; i < 8 * sizeof (mask); i++)
3490 if (mask & 0x1)
3491 sum++;
3492 mask = (mask >> 1);
3494 return sum;
3495 #endif
3498 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3499 asection *sreloc, Elf_Internal_Rela *rel);
3501 static void
3502 arm_elf_fill_funcdesc (bfd *output_bfd,
3503 struct bfd_link_info *info,
3504 int *funcdesc_offset,
3505 int dynindx,
3506 int offset,
3507 bfd_vma addr,
3508 bfd_vma dynreloc_value,
3509 bfd_vma seg)
3511 if ((*funcdesc_offset & 1) == 0)
3513 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
3514 asection *sgot = globals->root.sgot;
3516 if (bfd_link_pic (info))
3518 asection *srelgot = globals->root.srelgot;
3519 Elf_Internal_Rela outrel;
3521 outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
3522 outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
3523 outrel.r_addend = 0;
3525 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
3526 bfd_put_32 (output_bfd, addr, sgot->contents + offset);
3527 bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
3529 else
3531 struct elf_link_hash_entry *hgot = globals->root.hgot;
3532 bfd_vma got_value = hgot->root.u.def.value
3533 + hgot->root.u.def.section->output_section->vma
3534 + hgot->root.u.def.section->output_offset;
3536 arm_elf_add_rofixup (output_bfd, globals->srofixup,
3537 sgot->output_section->vma + sgot->output_offset
3538 + offset);
3539 arm_elf_add_rofixup (output_bfd, globals->srofixup,
3540 sgot->output_section->vma + sgot->output_offset
3541 + offset + 4);
3542 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
3543 bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
3545 *funcdesc_offset |= 1;
3549 /* Create an entry in an ARM ELF linker hash table. */
3551 static struct bfd_hash_entry *
3552 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3553 struct bfd_hash_table * table,
3554 const char * string)
3556 struct elf32_arm_link_hash_entry * ret =
3557 (struct elf32_arm_link_hash_entry *) entry;
3559 /* Allocate the structure if it has not already been allocated by a
3560 subclass. */
3561 if (ret == NULL)
3562 ret = (struct elf32_arm_link_hash_entry *)
3563 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3564 if (ret == NULL)
3565 return (struct bfd_hash_entry *) ret;
3567 /* Call the allocation method of the superclass. */
3568 ret = ((struct elf32_arm_link_hash_entry *)
3569 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3570 table, string));
3571 if (ret != NULL)
3573 ret->tls_type = GOT_UNKNOWN;
3574 ret->tlsdesc_got = (bfd_vma) -1;
3575 ret->plt.thumb_refcount = 0;
3576 ret->plt.maybe_thumb_refcount = 0;
3577 ret->plt.noncall_refcount = 0;
3578 ret->plt.got_offset = -1;
3579 ret->is_iplt = false;
3580 ret->export_glue = NULL;
3582 ret->stub_cache = NULL;
3584 ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3585 ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3586 ret->fdpic_cnts.funcdesc_cnt = 0;
3587 ret->fdpic_cnts.funcdesc_offset = -1;
3588 ret->fdpic_cnts.gotfuncdesc_offset = -1;
3591 return (struct bfd_hash_entry *) ret;
3594 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3595 symbols. */
3597 static bool
3598 elf32_arm_allocate_local_sym_info (bfd *abfd)
3600 if (elf_local_got_refcounts (abfd) == NULL)
3602 bfd_size_type num_syms;
3604 elf32_arm_num_entries (abfd) = 0;
3606 /* Whilst it might be tempting to allocate a single block of memory and
3607 then divide it up amongst the arrays in the elf_arm_obj_tdata

3608 structure, this interferes with the work of memory checkers looking
3609 for buffer overruns. So allocate each array individually. */
3611 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3613 elf_local_got_refcounts (abfd) = bfd_zalloc
3614 (abfd, num_syms * sizeof (* elf_local_got_refcounts (abfd)));
3616 if (elf_local_got_refcounts (abfd) == NULL)
3617 return false;
3619 elf32_arm_local_tlsdesc_gotent (abfd) = bfd_zalloc
3620 (abfd, num_syms * sizeof (* elf32_arm_local_tlsdesc_gotent (abfd)));
3622 if (elf32_arm_local_tlsdesc_gotent (abfd) == NULL)
3623 return false;
3625 elf32_arm_local_iplt (abfd) = bfd_zalloc
3626 (abfd, num_syms * sizeof (* elf32_arm_local_iplt (abfd)));
3628 if (elf32_arm_local_iplt (abfd) == NULL)
3629 return false;
3631 elf32_arm_local_fdpic_cnts (abfd) = bfd_zalloc
3632 (abfd, num_syms * sizeof (* elf32_arm_local_fdpic_cnts (abfd)));
3634 if (elf32_arm_local_fdpic_cnts (abfd) == NULL)
3635 return false;
3637 elf32_arm_local_got_tls_type (abfd) = bfd_zalloc
3638 (abfd, num_syms * sizeof (* elf32_arm_local_got_tls_type (abfd)));
3640 if (elf32_arm_local_got_tls_type (abfd) == NULL)
3641 return false;
3643 elf32_arm_num_entries (abfd) = num_syms;
3645 #if GCC_VERSION >= 3000
3646 BFD_ASSERT (__alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd))
3647 <= __alignof__ (*elf_local_got_refcounts (abfd)));
3648 BFD_ASSERT (__alignof__ (*elf32_arm_local_iplt (abfd))
3649 <= __alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd)));
3650 BFD_ASSERT (__alignof__ (*elf32_arm_local_fdpic_cnts (abfd))
3651 <= __alignof__ (*elf32_arm_local_iplt (abfd)));
3652 BFD_ASSERT (__alignof__ (*elf32_arm_local_got_tls_type (abfd))
3653 <= __alignof__ (*elf32_arm_local_fdpic_cnts (abfd)));
3654 #endif
3656 return true;
3659 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3660 to input bfd ABFD. Create the information if it doesn't already exist.
3661 Return null if an allocation fails. */
3663 static struct arm_local_iplt_info *
3664 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3666 struct arm_local_iplt_info **ptr;
3668 if (!elf32_arm_allocate_local_sym_info (abfd))
3669 return NULL;
3671 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3672 BFD_ASSERT (r_symndx < elf32_arm_num_entries (abfd));
3673 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3674 if (*ptr == NULL)
3675 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3676 return *ptr;
3679 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3680 in ABFD's symbol table. If the symbol is global, H points to its
3681 hash table entry, otherwise H is null.
3683 Return true if the symbol does have PLT information. When returning
3684 true, point *ROOT_PLT at the target-independent reference count/offset
3685 union and *ARM_PLT at the ARM-specific information. */
3687 static bool
3688 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3689 struct elf32_arm_link_hash_entry *h,
3690 unsigned long r_symndx, union gotplt_union **root_plt,
3691 struct arm_plt_info **arm_plt)
3693 struct arm_local_iplt_info *local_iplt;
3695 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3696 return false;
3698 if (h != NULL)
3700 *root_plt = &h->root.plt;
3701 *arm_plt = &h->plt;
3702 return true;
3705 if (elf32_arm_local_iplt (abfd) == NULL)
3706 return false;
3708 if (r_symndx >= elf32_arm_num_entries (abfd))
3709 return false;
3711 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3712 if (local_iplt == NULL)
3713 return false;
3715 *root_plt = &local_iplt->root;
3716 *arm_plt = &local_iplt->arm;
3717 return true;
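/* A typical caller (see arm_type_of_stub below) uses it along these
   lines -- a sketch only:

     union gotplt_union *root_plt;
     struct arm_plt_info *arm_plt;

     if (elf32_arm_get_plt_info (input_bfd, globals, hash,
				 ELF32_R_SYM (rel->r_info),
				 &root_plt, &arm_plt)
	 && root_plt->offset != (bfd_vma) -1)
       ... the call is redirected to the PLT entry at root_plt->offset ...  */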
3720 static bool using_thumb_only (struct elf32_arm_link_hash_table *globals);
3722 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3723 before it. */
3725 static bool
3726 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3727 struct arm_plt_info *arm_plt)
3729 struct elf32_arm_link_hash_table *htab;
3731 htab = elf32_arm_hash_table (info);
3733 return (!using_thumb_only (htab) && (arm_plt->thumb_refcount != 0
3734 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
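/* The main PLT entries are ARM code, so on targets that also run ARM code
   a Thumb caller needs a mode-switching stub in front of the PLT entry
   unless it can use BLX to change mode itself.  */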
3737 /* Return a pointer to the head of the dynamic reloc list that should
3738 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3739 ABFD's symbol table. Return null if an error occurs. */
3741 static struct elf_dyn_relocs **
3742 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3743 Elf_Internal_Sym *isym)
3745 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3747 struct arm_local_iplt_info *local_iplt;
3749 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3750 if (local_iplt == NULL)
3751 return NULL;
3752 return &local_iplt->dyn_relocs;
3754 else
3756 /* Track dynamic relocs needed for local syms too.
3757 We really need local syms available to do this
3758 easily. Oh well. */
3759 asection *s;
3760 void *vpp;
3762 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3763 if (s == NULL)
3764 return NULL;
3766 vpp = &elf_section_data (s)->local_dynrel;
3767 return (struct elf_dyn_relocs **) vpp;
3771 /* Initialize an entry in the stub hash table. */
3773 static struct bfd_hash_entry *
3774 stub_hash_newfunc (struct bfd_hash_entry *entry,
3775 struct bfd_hash_table *table,
3776 const char *string)
3778 /* Allocate the structure if it has not already been allocated by a
3779 subclass. */
3780 if (entry == NULL)
3782 entry = (struct bfd_hash_entry *)
3783 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3784 if (entry == NULL)
3785 return entry;
3788 /* Call the allocation method of the superclass. */
3789 entry = bfd_hash_newfunc (entry, table, string);
3790 if (entry != NULL)
3792 struct elf32_arm_stub_hash_entry *eh;
3794 /* Initialize the local fields. */
3795 eh = (struct elf32_arm_stub_hash_entry *) entry;
3796 eh->stub_sec = NULL;
3797 eh->stub_offset = (bfd_vma) -1;
3798 eh->source_value = 0;
3799 eh->target_value = 0;
3800 eh->target_section = NULL;
3801 eh->orig_insn = 0;
3802 eh->stub_type = arm_stub_none;
3803 eh->stub_size = 0;
3804 eh->stub_template = NULL;
3805 eh->stub_template_size = -1;
3806 eh->h = NULL;
3807 eh->id_sec = NULL;
3808 eh->output_name = NULL;
3811 return entry;
3814 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3815 shortcuts to them in our hash table. */
3817 static bool
3818 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3820 struct elf32_arm_link_hash_table *htab;
3822 htab = elf32_arm_hash_table (info);
3823 if (htab == NULL)
3824 return false;
3826 if (! _bfd_elf_create_got_section (dynobj, info))
3827 return false;
3829 /* Also create .rofixup. */
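    /* For FDPIC output, .rofixup records the addresses of words (such as
       the function descriptors filled in by arm_elf_fill_funcdesc) that
       the program loader must relocate when the executable is mapped;
       entries are added via arm_elf_add_rofixup.  */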
3830 if (htab->fdpic_p)
3832 htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3833 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3834 | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3835 if (htab->srofixup == NULL
3836 || !bfd_set_section_alignment (htab->srofixup, 2))
3837 return false;
3840 return true;
3843 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3845 static bool
3846 create_ifunc_sections (struct bfd_link_info *info)
3848 struct elf32_arm_link_hash_table *htab;
3849 const struct elf_backend_data *bed;
3850 bfd *dynobj;
3851 asection *s;
3852 flagword flags;
3854 htab = elf32_arm_hash_table (info);
3855 dynobj = htab->root.dynobj;
3856 bed = get_elf_backend_data (dynobj);
3857 flags = bed->dynamic_sec_flags;
3859 if (htab->root.iplt == NULL)
3861 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3862 flags | SEC_READONLY | SEC_CODE);
3863 if (s == NULL
3864 || !bfd_set_section_alignment (s, bed->plt_alignment))
3865 return false;
3866 htab->root.iplt = s;
3869 if (htab->root.irelplt == NULL)
3871 s = bfd_make_section_anyway_with_flags (dynobj,
3872 RELOC_SECTION (htab, ".iplt"),
3873 flags | SEC_READONLY);
3874 if (s == NULL
3875 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3876 return false;
3877 htab->root.irelplt = s;
3880 if (htab->root.igotplt == NULL)
3882 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3883 if (s == NULL
3884 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3885 return false;
3886 htab->root.igotplt = s;
3888 return true;
3891 /* Determine if we're dealing with a Thumb only architecture. */
3893 static bool
3894 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3896 int arch;
3897 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3898 Tag_CPU_arch_profile);
3900 if (profile)
3901 return profile == 'M';
3903 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3905 /* Force return logic to be reviewed for each new architecture. */
3906 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3908 if (arch == TAG_CPU_ARCH_V6_M
3909 || arch == TAG_CPU_ARCH_V6S_M
3910 || arch == TAG_CPU_ARCH_V7E_M
3911 || arch == TAG_CPU_ARCH_V8M_BASE
3912 || arch == TAG_CPU_ARCH_V8M_MAIN
3913 || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3914 return true;
3916 return false;
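/* For example, an object built for an M-profile core such as Cortex-M33
   carries Tag_CPU_arch_profile == 'M' and is treated as Thumb-only,
   whereas an A-profile object is not; objects without a profile attribute
   fall back to the explicit architecture list above.  */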
3919 /* Determine if we're dealing with a Thumb-2 object. */
3921 static bool
3922 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3924 int arch;
3925 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3926 Tag_THUMB_ISA_use);
3928 /* No use of thumb permitted, or a legacy thumb-1/2 definition. */
3929 if (thumb_isa < 3)
3930 return thumb_isa == 2;
3932 /* Variant of thumb is described by the architecture tag. */
3933 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3935 /* Force return logic to be reviewed for each new architecture. */
3936 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3938 return (arch == TAG_CPU_ARCH_V6T2
3939 || arch == TAG_CPU_ARCH_V7
3940 || arch == TAG_CPU_ARCH_V7E_M
3941 || arch == TAG_CPU_ARCH_V8
3942 || arch == TAG_CPU_ARCH_V8R
3943 || arch == TAG_CPU_ARCH_V8M_MAIN
3944 || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3947 /* Determine whether Thumb-2 BL instruction is available. */
3949 static bool
3950 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3952 int arch =
3953 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3955 /* Force return logic to be reviewed for each new architecture. */
3956 BFD_ASSERT (arch <= TAG_CPU_ARCH_V9);
3958 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3959 return (arch == TAG_CPU_ARCH_V6T2
3960 || arch >= TAG_CPU_ARCH_V7);
3963 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3964 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3965 hash table. */
3967 static bool
3968 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3970 struct elf32_arm_link_hash_table *htab;
3972 htab = elf32_arm_hash_table (info);
3973 if (htab == NULL)
3974 return false;
3976 if (!htab->root.sgot && !create_got_section (dynobj, info))
3977 return false;
3979 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3980 return false;
3982 if (htab->root.target_os == is_vxworks)
3984 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3985 return false;
3987 if (bfd_link_pic (info))
3989 htab->plt_header_size = 0;
3990 htab->plt_entry_size
3991 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3993 else
3995 htab->plt_header_size
3996 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3997 htab->plt_entry_size
3998 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
4001 if (elf_elfheader (dynobj))
4002 elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
4004 else
4006 /* PR ld/16017
4007 Test for thumb only architectures. Note - we cannot just call
4008 using_thumb_only() as the attributes in the output bfd have not been
4009 initialised at this point, so instead we use the input bfd. */
4010 bfd * saved_obfd = htab->obfd;
4012 htab->obfd = dynobj;
4013 if (using_thumb_only (htab))
4015 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
4016 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
4018 htab->obfd = saved_obfd;
4021 if (htab->fdpic_p) {
4022 htab->plt_header_size = 0;
4023 if (info->flags & DF_BIND_NOW)
4024 htab->plt_entry_size = 4 * (ARRAY_SIZE (elf32_arm_fdpic_plt_entry) - 5);
4025 else
4026 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry);
4029 if (!htab->root.splt
4030 || !htab->root.srelplt
4031 || !htab->root.sdynbss
4032 || (!bfd_link_pic (info) && !htab->root.srelbss))
4033 abort ();
4035 return true;
4038 /* Copy the extra info we tack onto an elf_link_hash_entry. */
4040 static void
4041 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
4042 struct elf_link_hash_entry *dir,
4043 struct elf_link_hash_entry *ind)
4045 struct elf32_arm_link_hash_entry *edir, *eind;
4047 edir = (struct elf32_arm_link_hash_entry *) dir;
4048 eind = (struct elf32_arm_link_hash_entry *) ind;
4050 if (ind->root.type == bfd_link_hash_indirect)
4052 /* Copy over PLT info. */
4053 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
4054 eind->plt.thumb_refcount = 0;
4055 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
4056 eind->plt.maybe_thumb_refcount = 0;
4057 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
4058 eind->plt.noncall_refcount = 0;
4060 /* Copy FDPIC counters. */
4061 edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
4062 edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
4063 edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;
4065 /* We should only allocate a function to .iplt once the final
4066 symbol information is known. */
4067 BFD_ASSERT (!eind->is_iplt);
4069 if (dir->got.refcount <= 0)
4071 edir->tls_type = eind->tls_type;
4072 eind->tls_type = GOT_UNKNOWN;
4076 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
4079 /* Destroy an ARM elf linker hash table. */
4081 static void
4082 elf32_arm_link_hash_table_free (bfd *obfd)
4084 struct elf32_arm_link_hash_table *ret
4085 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4087 bfd_hash_table_free (&ret->stub_hash_table);
4088 _bfd_elf_link_hash_table_free (obfd);
4091 /* Create an ARM elf linker hash table. */
4093 static struct bfd_link_hash_table *
4094 elf32_arm_link_hash_table_create (bfd *abfd)
4096 struct elf32_arm_link_hash_table *ret;
4097 size_t amt = sizeof (struct elf32_arm_link_hash_table);
4099 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4100 if (ret == NULL)
4101 return NULL;
4103 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4104 elf32_arm_link_hash_newfunc,
4105 sizeof (struct elf32_arm_link_hash_entry),
4106 ARM_ELF_DATA))
4108 free (ret);
4109 return NULL;
4112 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4113 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4114 #ifdef FOUR_WORD_PLT
4115 ret->plt_header_size = 16;
4116 ret->plt_entry_size = 16;
4117 #else
4118 ret->plt_header_size = 20;
4119 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4120 #endif
4121 ret->use_rel = true;
4122 ret->obfd = abfd;
4123 ret->fdpic_p = 0;
4125 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4126 sizeof (struct elf32_arm_stub_hash_entry)))
4128 _bfd_elf_link_hash_table_free (abfd);
4129 return NULL;
4131 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4133 return &ret->root.root;
4136 /* Determine what kind of NOPs are available. */
4138 static bool
4139 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4141 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4142 Tag_CPU_arch);
4144 /* Force return logic to be reviewed for each new architecture. */
4145 BFD_ASSERT (arch <= TAG_CPU_ARCH_V9);
4147 return (arch == TAG_CPU_ARCH_V6T2
4148 || arch == TAG_CPU_ARCH_V6K
4149 || arch == TAG_CPU_ARCH_V7
4150 || arch == TAG_CPU_ARCH_V8
4151 || arch == TAG_CPU_ARCH_V8R
4152 || arch == TAG_CPU_ARCH_V9);
4155 static bool
4156 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4158 switch (stub_type)
4160 case arm_stub_long_branch_thumb_only:
4161 case arm_stub_long_branch_thumb2_only:
4162 case arm_stub_long_branch_thumb2_only_pure:
4163 case arm_stub_long_branch_v4t_thumb_arm:
4164 case arm_stub_short_branch_v4t_thumb_arm:
4165 case arm_stub_long_branch_v4t_thumb_arm_pic:
4166 case arm_stub_long_branch_v4t_thumb_tls_pic:
4167 case arm_stub_long_branch_thumb_only_pic:
4168 case arm_stub_cmse_branch_thumb_only:
4169 return true;
4170 case arm_stub_none:
4171 BFD_FAIL ();
4172 return false;
4173 break;
4174 default:
4175 return false;
4179 /* Determine the type of stub needed, if any, for a call. */
4181 static enum elf32_arm_stub_type
4182 arm_type_of_stub (struct bfd_link_info *info,
4183 asection *input_sec,
4184 const Elf_Internal_Rela *rel,
4185 unsigned char st_type,
4186 enum arm_st_branch_type *actual_branch_type,
4187 struct elf32_arm_link_hash_entry *hash,
4188 bfd_vma destination,
4189 asection *sym_sec,
4190 bfd *input_bfd,
4191 const char *name)
4193 bfd_vma location;
4194 bfd_signed_vma branch_offset;
4195 unsigned int r_type;
4196 struct elf32_arm_link_hash_table * globals;
4197 bool thumb2, thumb2_bl, thumb_only;
4198 enum elf32_arm_stub_type stub_type = arm_stub_none;
4199 int use_plt = 0;
4200 enum arm_st_branch_type branch_type = *actual_branch_type;
4201 union gotplt_union *root_plt;
4202 struct arm_plt_info *arm_plt;
4203 int arch;
4204 int thumb2_movw;
4206 if (branch_type == ST_BRANCH_LONG)
4207 return stub_type;
4209 globals = elf32_arm_hash_table (info);
4210 if (globals == NULL)
4211 return stub_type;
4213 thumb_only = using_thumb_only (globals);
4214 thumb2 = using_thumb2 (globals);
4215 thumb2_bl = using_thumb2_bl (globals);
4217 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
4219 /* True for architectures that implement the thumb2 movw instruction. */
4220 thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);
4222 /* Determine where the call point is. */
4223 location = (input_sec->output_offset
4224 + input_sec->output_section->vma
4225 + rel->r_offset);
4227 r_type = ELF32_R_TYPE (rel->r_info);
4229 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4230 are considering a function call relocation. */
4231 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4232 || r_type == R_ARM_THM_JUMP19)
4233 && branch_type == ST_BRANCH_TO_ARM)
4234 branch_type = ST_BRANCH_TO_THUMB;
4236 /* For TLS call relocs, it is the caller's responsibility to provide
4237 the address of the appropriate trampoline. */
4238 if (r_type != R_ARM_TLS_CALL
4239 && r_type != R_ARM_THM_TLS_CALL
4240 && elf32_arm_get_plt_info (input_bfd, globals, hash,
4241 ELF32_R_SYM (rel->r_info), &root_plt,
4242 &arm_plt)
4243 && root_plt->offset != (bfd_vma) -1)
4245 asection *splt;
4247 if (hash == NULL || hash->is_iplt)
4248 splt = globals->root.iplt;
4249 else
4250 splt = globals->root.splt;
4251 if (splt != NULL)
4253 use_plt = 1;
4255 /* Note when dealing with PLT entries: the main PLT stub is in
4256 ARM mode, so if the branch is in Thumb mode, another
4257 Thumb->ARM stub will be inserted later just before the ARM
4258 PLT stub. If a long branch stub is needed, we'll add a
4259 Thumb->Arm one and branch directly to the ARM PLT entry.
4260 Here, we have to check if a pre-PLT Thumb->ARM stub
4261 is needed and if it will be close enough. */
4263 destination = (splt->output_section->vma
4264 + splt->output_offset
4265 + root_plt->offset);
4266 st_type = STT_FUNC;
4268 /* Thumb branch/call to PLT: it can become a branch to ARM
4269 or to Thumb. We must perform the same checks and
4270 corrections as in elf32_arm_final_link_relocate. */
4271 if ((r_type == R_ARM_THM_CALL)
4272 || (r_type == R_ARM_THM_JUMP24))
4274 if (globals->use_blx
4275 && r_type == R_ARM_THM_CALL
4276 && !thumb_only)
4278 /* If the Thumb BLX instruction is available, convert
4279 the BL to a BLX instruction to call the ARM-mode
4280 PLT entry. */
4281 branch_type = ST_BRANCH_TO_ARM;
4283 else
4285 if (!thumb_only)
4286 /* Target the Thumb stub before the ARM PLT entry. */
4287 destination -= PLT_THUMB_STUB_SIZE;
4288 branch_type = ST_BRANCH_TO_THUMB;
4291 else
4293 branch_type = ST_BRANCH_TO_ARM;
4297 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
4298 BFD_ASSERT (st_type != STT_GNU_IFUNC);
4300 branch_offset = (bfd_signed_vma)(destination - location);
4302 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4303 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
4305 /* Handle cases where:
4306 - this call goes too far (different Thumb/Thumb2 max
4307 distance)
4308 - it's a Thumb->Arm call and blx is not available, or it's a
4309 Thumb->Arm branch (not bl). A stub is needed in this case,
4310 but only if this call is not through a PLT entry. Indeed,
4311 PLT stubs handle mode switching already. */
4312 if ((!thumb2_bl
4313 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
4314 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
4315 || (thumb2_bl
4316 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
4317 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
4318 || (thumb2
4319 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
4320 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
4321 && (r_type == R_ARM_THM_JUMP19))
4322 || (branch_type == ST_BRANCH_TO_ARM
4323 && (((r_type == R_ARM_THM_CALL
4324 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
4325 || (r_type == R_ARM_THM_JUMP24)
4326 || (r_type == R_ARM_THM_JUMP19))
4327 && !use_plt))
4329 /* If we need to insert a Thumb-Thumb long branch stub to a
4330 PLT, use one that branches directly to the ARM PLT
4331 stub. If we pretended we'd use the pre-PLT Thumb->ARM
4332 stub, undo this now. */
4333 if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
4335 branch_type = ST_BRANCH_TO_ARM;
4336 branch_offset += PLT_THUMB_STUB_SIZE;
4339 if (branch_type == ST_BRANCH_TO_THUMB)
4341 /* Thumb to thumb. */
4342 if (!thumb_only)
4344 if (input_sec->flags & SEC_ELF_PURECODE)
4345 _bfd_error_handler
4346 (_("%pB(%pA): warning: long branch veneers used in"
4347 " section with SHF_ARM_PURECODE section"
4348 " attribute is only supported for M-profile"
4349 " targets that implement the movw instruction"),
4350 input_bfd, input_sec);
4352 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4353 /* PIC stubs. */
4354 ? ((globals->use_blx
4355 && (r_type == R_ARM_THM_CALL))
4356 /* V5T and above. Stub starts with ARM code, so
4357 we must be able to switch mode before
4358 reaching it, which is only possible for 'bl'
4359 (ie R_ARM_THM_CALL relocation). */
4360 ? arm_stub_long_branch_any_thumb_pic
4361 /* On V4T, use Thumb code only. */
4362 : arm_stub_long_branch_v4t_thumb_thumb_pic)
4364 /* non-PIC stubs. */
4365 : ((globals->use_blx
4366 && (r_type == R_ARM_THM_CALL))
4367 /* V5T and above. */
4368 ? arm_stub_long_branch_any_any
4369 /* V4T. */
4370 : arm_stub_long_branch_v4t_thumb_thumb);
4372 else
4374 if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
4375 stub_type = arm_stub_long_branch_thumb2_only_pure;
4376 else
4378 if (input_sec->flags & SEC_ELF_PURECODE)
4379 _bfd_error_handler
4380 (_("%pB(%pA): warning: long branch veneers used in"
4381 " section with SHF_ARM_PURECODE section"
4382 " attribute is only supported for M-profile"
4383 " targets that implement the movw instruction"),
4384 input_bfd, input_sec);
4386 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4387 /* PIC stub. */
4388 ? arm_stub_long_branch_thumb_only_pic
4389 /* non-PIC stub. */
4390 : (thumb2 ? arm_stub_long_branch_thumb2_only
4391 : arm_stub_long_branch_thumb_only);
4395 else
4397 if (input_sec->flags & SEC_ELF_PURECODE)
4398 _bfd_error_handler
4399 (_("%pB(%pA): warning: long branch veneers used in"
4400 " section with SHF_ARM_PURECODE section"
4401 " attribute is only supported" " for M-profile"
4402 " targets that implement the movw instruction"),
4403 input_bfd, input_sec);
4405 /* Thumb to arm. */
4406 if (sym_sec != NULL
4407 && sym_sec->owner != NULL
4408 && !INTERWORK_FLAG (sym_sec->owner))
4410 _bfd_error_handler
4411 (_("%pB(%s): warning: interworking not enabled;"
4412 " first occurrence: %pB: %s call to %s"),
4413 sym_sec->owner, name, input_bfd, "Thumb", "ARM");
4416 stub_type =
4417 (bfd_link_pic (info) | globals->pic_veneer)
4418 /* PIC stubs. */
4419 ? (r_type == R_ARM_THM_TLS_CALL
4420 /* TLS PIC stubs. */
4421 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
4422 : arm_stub_long_branch_v4t_thumb_tls_pic)
4423 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4424 /* V5T PIC and above. */
4425 ? arm_stub_long_branch_any_arm_pic
4426 /* V4T PIC stub. */
4427 : arm_stub_long_branch_v4t_thumb_arm_pic))
4429 /* non-PIC stubs. */
4430 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4431 /* V5T and above. */
4432 ? arm_stub_long_branch_any_any
4433 /* V4T. */
4434 : arm_stub_long_branch_v4t_thumb_arm);
4436 /* Handle v4t short branches. */
4437 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4438 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4439 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4440 stub_type = arm_stub_short_branch_v4t_thumb_arm;
4444 else if (r_type == R_ARM_CALL
4445 || r_type == R_ARM_JUMP24
4446 || r_type == R_ARM_PLT32
4447 || r_type == R_ARM_TLS_CALL)
4449 if (input_sec->flags & SEC_ELF_PURECODE)
4450 _bfd_error_handler
4451 (_("%pB(%pA): warning: long branch veneers used in"
4452 " section with SHF_ARM_PURECODE section"
4453 " attribute is only supported for M-profile"
4454 " targets that implement the movw instruction"),
4455 input_bfd, input_sec);
4456 if (branch_type == ST_BRANCH_TO_THUMB)
4458 /* Arm to thumb. */
4460 if (sym_sec != NULL
4461 && sym_sec->owner != NULL
4462 && !INTERWORK_FLAG (sym_sec->owner))
4464 _bfd_error_handler
4465 (_("%pB(%s): warning: interworking not enabled;"
4466 " first occurrence: %pB: %s call to %s"),
4467 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
4470 /* We have an extra 2-bytes reach because of
4471 the mode change (bit 24 (H) of BLX encoding). */
4472 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4473 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4474 || (r_type == R_ARM_CALL && !globals->use_blx)
4475 || (r_type == R_ARM_JUMP24)
4476 || (r_type == R_ARM_PLT32))
4478 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4479 /* PIC stubs. */
4480 ? ((globals->use_blx)
4481 /* V5T and above. */
4482 ? arm_stub_long_branch_any_thumb_pic
4483 /* V4T stub. */
4484 : arm_stub_long_branch_v4t_arm_thumb_pic)
4486 /* non-PIC stubs. */
4487 : ((globals->use_blx)
4488 /* V5T and above. */
4489 ? arm_stub_long_branch_any_any
4490 /* V4T. */
4491 : arm_stub_long_branch_v4t_arm_thumb);
4494 else
4496 /* Arm to arm. */
4497 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4498 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4500 stub_type =
4501 (bfd_link_pic (info) | globals->pic_veneer)
4502 /* PIC stubs. */
4503 ? (r_type == R_ARM_TLS_CALL
4504 /* TLS PIC Stub. */
4505 ? arm_stub_long_branch_any_tls_pic
4506 : (globals->root.target_os == is_nacl
4507 ? arm_stub_long_branch_arm_nacl_pic
4508 : arm_stub_long_branch_any_arm_pic))
4509 /* non-PIC stubs. */
4510 : (globals->root.target_os == is_nacl
4511 ? arm_stub_long_branch_arm_nacl
4512 : arm_stub_long_branch_any_any);
4517 /* If a stub is needed, record the actual destination type. */
4518 if (stub_type != arm_stub_none)
4519 *actual_branch_type = branch_type;
4521 return stub_type;
4524 /* Build a name for an entry in the stub hash table. */
4526 static char *
4527 elf32_arm_stub_name (const asection *input_section,
4528 const asection *sym_sec,
4529 const struct elf32_arm_link_hash_entry *hash,
4530 const Elf_Internal_Rela *rel,
4531 enum elf32_arm_stub_type stub_type)
4533 char *stub_name;
4534 bfd_size_type len;
4536 if (hash)
4538 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4539 stub_name = (char *) bfd_malloc (len);
4540 if (stub_name != NULL)
4541 sprintf (stub_name, "%08x_%s+%x_%d",
4542 input_section->id & 0xffffffff,
4543 hash->root.root.root.string,
4544 (int) rel->r_addend & 0xffffffff,
4545 (int) stub_type);
4547 else
4549 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4550 stub_name = (char *) bfd_malloc (len);
4551 if (stub_name != NULL)
4552 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4553 input_section->id & 0xffffffff,
4554 sym_sec->id & 0xffffffff,
4555 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4556 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4557 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4558 (int) rel->r_addend & 0xffffffff,
4559 (int) stub_type);
4562 return stub_name;
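/* For example, a stub for a call to printf from the section with id 0x2a,
   addend 0 and stub type 3 would be named "0000002a_printf+0_3"; for a
   local symbol the "<sym-sec-id>:<sym-index>" form is used instead, e.g.
   "0000002a_1f:42+0_3".  (The numeric values here are only
   illustrative.)  */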
4565 /* Look up an entry in the stub hash. Stub entries are cached because
4566 creating the stub name takes a bit of time. */
4568 static struct elf32_arm_stub_hash_entry *
4569 elf32_arm_get_stub_entry (const asection *input_section,
4570 const asection *sym_sec,
4571 struct elf_link_hash_entry *hash,
4572 const Elf_Internal_Rela *rel,
4573 struct elf32_arm_link_hash_table *htab,
4574 enum elf32_arm_stub_type stub_type)
4576 struct elf32_arm_stub_hash_entry *stub_entry;
4577 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4578 const asection *id_sec;
4580 if ((input_section->flags & SEC_CODE) == 0)
4581 return NULL;
4583 /* If the input section is the CMSE stubs one and it needs a long
4584 branch stub to reach its final destination, give up with an
4585 error message: this is not supported. See PR ld/24709. */
4586 if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen (CMSE_STUB_NAME)))
4588 bfd *output_bfd = htab->obfd;
4589 asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);
4591 _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
4592 "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
4593 CMSE_STUB_NAME,
4594 (uint64_t)out_sec->output_section->vma
4595 + out_sec->output_offset,
4596 (uint64_t)sym_sec->output_section->vma
4597 + sym_sec->output_offset
4598 + h->root.root.u.def.value);
4599 /* Exit, rather than leave incompletely processed
4600 relocations. */
4601 xexit (1);
4604 /* If this input section is part of a group of sections sharing one
4605 stub section, then use the id of the first section in the group.
4606 Stub names need to include a section id, as there may well be
4607 more than one stub used to reach say, printf, and we need to
4608 distinguish between them. */
4609 BFD_ASSERT (input_section->id <= htab->top_id);
4610 id_sec = htab->stub_group[input_section->id].link_sec;
4612 if (h != NULL && h->stub_cache != NULL
4613 && h->stub_cache->h == h
4614 && h->stub_cache->id_sec == id_sec
4615 && h->stub_cache->stub_type == stub_type)
4617 stub_entry = h->stub_cache;
4619 else
4621 char *stub_name;
4623 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4624 if (stub_name == NULL)
4625 return NULL;
4627 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4628 stub_name, false, false);
4629 if (h != NULL)
4630 h->stub_cache = stub_entry;
4632 free (stub_name);
4635 return stub_entry;
4638 /* Whether veneers of type STUB_TYPE need to be placed in a dedicated output
4639 section. */
4641 static bool
4642 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4644 if (stub_type >= max_stub_type)
4645 abort (); /* Should be unreachable. */
4647 switch (stub_type)
4649 case arm_stub_cmse_branch_thumb_only:
4650 return true;
4652 default:
4653 return false;
4656 abort (); /* Should be unreachable. */
4659 /* Required alignment (as a power of 2) for the dedicated section holding
4660 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4661 with input sections. */
4663 static int
4664 arm_dedicated_stub_output_section_required_alignment
4665 (enum elf32_arm_stub_type stub_type)
4667 if (stub_type >= max_stub_type)
4668 abort (); /* Should be unreachable. */
4670 switch (stub_type)
4672 /* Vectors of Secure Gateway veneers must be aligned on a 32-byte
4673 boundary. */
4674 case arm_stub_cmse_branch_thumb_only:
4675 return 5;
4677 default:
4678 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4679 return 0;
4682 abort (); /* Should be unreachable. */
4685 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4686 NULL if veneers of this type are interspersed with input sections. */
4688 static const char *
4689 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4691 if (stub_type >= max_stub_type)
4692 abort (); /* Should be unreachable. */
4694 switch (stub_type)
4696 case arm_stub_cmse_branch_thumb_only:
4697 return CMSE_STUB_NAME;
4699 default:
4700 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4701 return NULL;
4704 abort (); /* Should be unreachable. */
4707 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4708 returns the address of the hash table field in HTAB holding a pointer to the
4709 corresponding input section. Otherwise, returns NULL. */
4711 static asection **
4712 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4713 enum elf32_arm_stub_type stub_type)
4715 if (stub_type >= max_stub_type)
4716 abort (); /* Should be unreachable. */
4718 switch (stub_type)
4720 case arm_stub_cmse_branch_thumb_only:
4721 return &htab->cmse_stub_sec;
4723 default:
4724 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4725 return NULL;
4728 abort (); /* Should be unreachable. */
4731 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4732 is the section that branches into the veneer, and can be NULL if the stub should go in
4733 a dedicated output section. Returns a pointer to the stub section, and the
4734 section to which the stub section will be attached (in *LINK_SEC_P).
4735 LINK_SEC_P may be NULL. */
4737 static asection *
4738 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4739 struct elf32_arm_link_hash_table *htab,
4740 enum elf32_arm_stub_type stub_type)
4742 asection *link_sec, *out_sec, **stub_sec_p;
4743 const char *stub_sec_prefix;
4744 bool dedicated_output_section =
4745 arm_dedicated_stub_output_section_required (stub_type);
4746 int align;
4748 if (dedicated_output_section)
4750 bfd *output_bfd = htab->obfd;
4751 const char *out_sec_name =
4752 arm_dedicated_stub_output_section_name (stub_type);
4753 link_sec = NULL;
4754 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4755 stub_sec_prefix = out_sec_name;
4756 align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4757 out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4758 if (out_sec == NULL)
4760 _bfd_error_handler (_("no address assigned to the veneers output "
4761 "section %s"), out_sec_name);
4762 return NULL;
4765 else
4767 BFD_ASSERT (section->id <= htab->top_id);
4768 link_sec = htab->stub_group[section->id].link_sec;
4769 BFD_ASSERT (link_sec != NULL);
4770 stub_sec_p = &htab->stub_group[section->id].stub_sec;
4771 if (*stub_sec_p == NULL)
4772 stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4773 stub_sec_prefix = link_sec->name;
4774 out_sec = link_sec->output_section;
4775 align = htab->root.target_os == is_nacl ? 4 : 3;
4778 if (*stub_sec_p == NULL)
4780 size_t namelen;
4781 bfd_size_type len;
4782 char *s_name;
4784 namelen = strlen (stub_sec_prefix);
4785 len = namelen + sizeof (STUB_SUFFIX);
4786 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4787 if (s_name == NULL)
4788 return NULL;
4790 memcpy (s_name, stub_sec_prefix, namelen);
4791 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4792 *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4793 align);
4794 if (*stub_sec_p == NULL)
4795 return NULL;
4797 out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4798 | SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
4799 | SEC_KEEP;
4802 if (!dedicated_output_section)
4803 htab->stub_group[section->id].stub_sec = *stub_sec_p;
4805 if (link_sec_p)
4806 *link_sec_p = link_sec;
4808 return *stub_sec_p;
4811 /* Add a new stub entry to the stub hash. Not all fields of the new
4812 stub entry are initialised. */
4814 static struct elf32_arm_stub_hash_entry *
4815 elf32_arm_add_stub (const char *stub_name, asection *section,
4816 struct elf32_arm_link_hash_table *htab,
4817 enum elf32_arm_stub_type stub_type)
4819 asection *link_sec;
4820 asection *stub_sec;
4821 struct elf32_arm_stub_hash_entry *stub_entry;
4823 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4824 stub_type);
4825 if (stub_sec == NULL)
4826 return NULL;
4828 /* Enter this entry into the linker stub hash table. */
4829 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4830 true, false);
4831 if (stub_entry == NULL)
4833 if (section == NULL)
4834 section = stub_sec;
4835 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4836 section->owner, stub_name);
4837 return NULL;
4840 stub_entry->stub_sec = stub_sec;
4841 stub_entry->stub_offset = (bfd_vma) -1;
4842 stub_entry->id_sec = link_sec;
4844 return stub_entry;
4847 /* Store an Arm insn into an output section not processed by
4848 elf32_arm_write_section. */
4850 static void
4851 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4852 bfd * output_bfd, bfd_vma val, void * ptr)
4854 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4855 bfd_putl32 (val, ptr);
4856 else
4857 bfd_putb32 (val, ptr);
4860 /* Store a 16-bit Thumb insn into an output section not processed by
4861 elf32_arm_write_section. */
4863 static void
4864 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4865 bfd * output_bfd, bfd_vma val, void * ptr)
4867 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4868 bfd_putl16 (val, ptr);
4869 else
4870 bfd_putb16 (val, ptr);
4873 /* Store a Thumb2 insn into an output section not processed by
4874 elf32_arm_write_section. */
4876 static void
4877 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4878 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4880 /* Thumb-2 instructions are written as two 16-bit halfwords, most significant halfword first. */
4881 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4883 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4884 bfd_putl16 ((val & 0xffff), ptr + 2);
4886 else
4888 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4889 bfd_putb16 ((val & 0xffff), ptr + 2);
4893 /* If it's possible to change R_TYPE to a more efficient access
4894 model, return the new reloc type. */
4896 static unsigned
4897 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4898 struct elf_link_hash_entry *h)
4900 int is_local = (h == NULL);
4902 if (bfd_link_dll (info)
4903 || (h && h->root.type == bfd_link_hash_undefweak))
4904 return r_type;
4906 /* We do not support relaxations for Old TLS models. */
4907 switch (r_type)
4909 case R_ARM_TLS_GOTDESC:
4910 case R_ARM_TLS_CALL:
4911 case R_ARM_THM_TLS_CALL:
4912 case R_ARM_TLS_DESCSEQ:
4913 case R_ARM_THM_TLS_DESCSEQ:
4914 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4917 return r_type;
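/* For instance, when not producing a shared library, a GNU
   descriptor-based access (R_ARM_TLS_GOTDESC, R_ARM_TLS_CALL,
   R_ARM_THM_TLS_CALL, R_ARM_TLS_DESCSEQ or R_ARM_THM_TLS_DESCSEQ) to a
   local symbol is relaxed to the Local Exec model (R_ARM_TLS_LE32) and
   to the Initial Exec model (R_ARM_TLS_IE32) for a global symbol;
   shared-library links and undefined weak symbols are left untouched.  */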
4920 static bfd_reloc_status_type elf32_arm_final_link_relocate
4921 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4922 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4923 const char *, unsigned char, enum arm_st_branch_type,
4924 struct elf_link_hash_entry *, bool *, char **);
4926 static unsigned int
4927 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4929 switch (stub_type)
4931 case arm_stub_a8_veneer_b_cond:
4932 case arm_stub_a8_veneer_b:
4933 case arm_stub_a8_veneer_bl:
4934 return 2;
4936 case arm_stub_long_branch_any_any:
4937 case arm_stub_long_branch_v4t_arm_thumb:
4938 case arm_stub_long_branch_thumb_only:
4939 case arm_stub_long_branch_thumb2_only:
4940 case arm_stub_long_branch_thumb2_only_pure:
4941 case arm_stub_long_branch_v4t_thumb_thumb:
4942 case arm_stub_long_branch_v4t_thumb_arm:
4943 case arm_stub_short_branch_v4t_thumb_arm:
4944 case arm_stub_long_branch_any_arm_pic:
4945 case arm_stub_long_branch_any_thumb_pic:
4946 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4947 case arm_stub_long_branch_v4t_arm_thumb_pic:
4948 case arm_stub_long_branch_v4t_thumb_arm_pic:
4949 case arm_stub_long_branch_thumb_only_pic:
4950 case arm_stub_long_branch_any_tls_pic:
4951 case arm_stub_long_branch_v4t_thumb_tls_pic:
4952 case arm_stub_cmse_branch_thumb_only:
4953 case arm_stub_a8_veneer_blx:
4954 return 4;
4956 case arm_stub_long_branch_arm_nacl:
4957 case arm_stub_long_branch_arm_nacl_pic:
4958 return 16;
4960 default:
4961 abort (); /* Should be unreachable. */
4965 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4966 veneering (TRUE) or have their own symbol (FALSE). */
4968 static bool
4969 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4971 if (stub_type >= max_stub_type)
4972 abort (); /* Should be unreachable. */
4974 switch (stub_type)
4976 case arm_stub_cmse_branch_thumb_only:
4977 return true;
4979 default:
4980 return false;
4983 abort (); /* Should be unreachable. */
4986 /* Returns the padding needed for the dedicated section used by stubs of type
4987 STUB_TYPE. */
4989 static int
4990 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4992 if (stub_type >= max_stub_type)
4993 abort (); /* Should be unreachable. */
4995 switch (stub_type)
4997 case arm_stub_cmse_branch_thumb_only:
4998 return 32;
5000 default:
5001 return 0;
5004 abort (); /* Should be unreachable. */
5007 /* If veneers of type STUB_TYPE should go in a dedicated output section,
5008 returns the address of the hash table field in HTAB holding the offset at
5009 which new veneers should be laid out in the stub section. */
5011 static bfd_vma*
5012 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
5013 enum elf32_arm_stub_type stub_type)
5015 switch (stub_type)
5017 case arm_stub_cmse_branch_thumb_only:
5018 return &htab->new_cmse_stub_offset;
5020 default:
5021 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
5022 return NULL;
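/* Build a single stub: write the stub template for GEN_ENTRY into its
   stub section and apply the template's relocations against the final
   target address.  IN_ARG is the struct bfd_link_info for the link; this
   routine is run once per entry of the stub hash table.  */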
5026 static bool
5027 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
5028 void * in_arg)
5030 #define MAXRELOCS 3
5031 bool removed_sg_veneer;
5032 struct elf32_arm_stub_hash_entry *stub_entry;
5033 struct elf32_arm_link_hash_table *globals;
5034 struct bfd_link_info *info;
5035 asection *stub_sec;
5036 bfd *stub_bfd;
5037 bfd_byte *loc;
5038 bfd_vma sym_value;
5039 int template_size;
5040 int size;
5041 const insn_sequence *template_sequence;
5042 int i;
5043 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
5044 int stub_reloc_offset[MAXRELOCS] = {0, 0};
5045 int nrelocs = 0;
5046 int just_allocated = 0;
5048 /* Massage our args to the form they really have. */
5049 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5050 info = (struct bfd_link_info *) in_arg;
5052 /* Fail if the target section could not be assigned to an output
5053 section. The user should fix his linker script. */
5054 if (stub_entry->target_section->output_section == NULL
5055 && info->non_contiguous_regions)
5056 info->callbacks->einfo (_("%F%P: Could not assign `%pA' to an output section. "
5057 "Retry without --enable-non-contiguous-regions.\n"),
5058 stub_entry->target_section);
5060 globals = elf32_arm_hash_table (info);
5061 if (globals == NULL)
5062 return false;
5064 stub_sec = stub_entry->stub_sec;
5066 if ((globals->fix_cortex_a8 < 0)
5067 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
5068 /* We have to do less-strictly-aligned fixes last. */
5069 return true;
5071 /* Assign a slot at the end of section if none assigned yet. */
5072 if (stub_entry->stub_offset == (bfd_vma) -1)
5074 stub_entry->stub_offset = stub_sec->size;
5075 just_allocated = 1;
5077 loc = stub_sec->contents + stub_entry->stub_offset;
5079 stub_bfd = stub_sec->owner;
5081 /* This is the address of the stub destination. */
5082 sym_value = (stub_entry->target_value
5083 + stub_entry->target_section->output_offset
5084 + stub_entry->target_section->output_section->vma);
5086 template_sequence = stub_entry->stub_template;
5087 template_size = stub_entry->stub_template_size;
5089 size = 0;
5090 for (i = 0; i < template_size; i++)
5092 switch (template_sequence[i].type)
5094 case THUMB16_TYPE:
5096 bfd_vma data = (bfd_vma) template_sequence[i].data;
5097 if (template_sequence[i].reloc_addend != 0)
5099 /* We've borrowed the reloc_addend field to mean we should
5100 insert a condition code into this (Thumb-1 branch)
5101 instruction. See THUMB16_BCOND_INSN. */
5102 BFD_ASSERT ((data & 0xff00) == 0xd000);
5103 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5105 bfd_put_16 (stub_bfd, data, loc + size);
5106 size += 2;
5108 break;
5110 case THUMB32_TYPE:
5111 bfd_put_16 (stub_bfd,
5112 (template_sequence[i].data >> 16) & 0xffff,
5113 loc + size);
5114 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5115 loc + size + 2);
5116 if (template_sequence[i].r_type != R_ARM_NONE)
5118 stub_reloc_idx[nrelocs] = i;
5119 stub_reloc_offset[nrelocs++] = size;
5121 size += 4;
5122 break;
5124 case ARM_TYPE:
5125 bfd_put_32 (stub_bfd, template_sequence[i].data,
5126 loc + size);
5127 /* Handle cases where the target is encoded within the
5128 instruction. */
5129 if (template_sequence[i].r_type == R_ARM_JUMP24)
5131 stub_reloc_idx[nrelocs] = i;
5132 stub_reloc_offset[nrelocs++] = size;
5134 size += 4;
5135 break;
5137 case DATA_TYPE:
5138 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5139 stub_reloc_idx[nrelocs] = i;
5140 stub_reloc_offset[nrelocs++] = size;
5141 size += 4;
5142 break;
5144 default:
5145 BFD_FAIL ();
5146 return false;
5150 if (just_allocated)
5151 stub_sec->size += size;
5153 /* Stub size has already been computed in arm_size_one_stub. Check
5154 consistency. */
5155 BFD_ASSERT (size == stub_entry->stub_size);
5157 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5158 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5159 sym_value |= 1;
5161 /* Assume non-empty slots have at least one and at most MAXRELOCS entries
5162 to relocate in each stub. */
5163 removed_sg_veneer =
5164 (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5165 BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5167 for (i = 0; i < nrelocs; i++)
5169 Elf_Internal_Rela rel;
5170 bool unresolved_reloc;
5171 char *error_message;
5172 bfd_vma points_to =
5173 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5175 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5176 rel.r_info = ELF32_R_INFO (0,
5177 template_sequence[stub_reloc_idx[i]].r_type);
5178 rel.r_addend = 0;
5180 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5181 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5182 template should refer back to the instruction after the original
5183 branch. We use target_section as Cortex-A8 erratum workaround stubs
5184 are only generated when both source and target are in the same
5185 section. */
5186 points_to = stub_entry->target_section->output_section->vma
5187 + stub_entry->target_section->output_offset
5188 + stub_entry->source_value;
5190 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5191 (template_sequence[stub_reloc_idx[i]].r_type),
5192 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5193 points_to, info, stub_entry->target_section, "", STT_FUNC,
5194 stub_entry->branch_type,
5195 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5196 &error_message);
5199 return true;
5200 #undef MAXRELOCS
5203 /* Calculate the template, template size and instruction size for a stub.
5204 Return value is the instruction size. */
5206 static unsigned int
5207 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5208 const insn_sequence **stub_template,
5209 int *stub_template_size)
5211 const insn_sequence *template_sequence = NULL;
5212 int template_size = 0, i;
5213 unsigned int size;
5215 template_sequence = stub_definitions[stub_type].template_sequence;
5216 if (stub_template)
5217 *stub_template = template_sequence;
5219 template_size = stub_definitions[stub_type].template_size;
5220 if (stub_template_size)
5221 *stub_template_size = template_size;
5223 size = 0;
5224 for (i = 0; i < template_size; i++)
5226 switch (template_sequence[i].type)
5228 case THUMB16_TYPE:
5229 size += 2;
5230 break;
5232 case ARM_TYPE:
5233 case THUMB32_TYPE:
5234 case DATA_TYPE:
5235 size += 4;
5236 break;
5238 default:
5239 BFD_FAIL ();
5240 return 0;
5244 return size;
5247 /* As above, but don't actually build the stub. Just bump offset so
5248 we know stub section sizes. */
5250 static bool
5251 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5252 void *in_arg ATTRIBUTE_UNUSED)
5254 struct elf32_arm_stub_hash_entry *stub_entry;
5255 const insn_sequence *template_sequence;
5256 int template_size, size;
5258 /* Massage our args to the form they really have. */
5259 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5261 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
5262 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
5264 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5265 &template_size);
5267 /* Initialized to -1. Null size indicates an empty slot full of zeros. */
5268 if (stub_entry->stub_template_size)
5270 stub_entry->stub_size = size;
5271 stub_entry->stub_template = template_sequence;
5272 stub_entry->stub_template_size = template_size;
5275 /* Already accounted for. */
5276 if (stub_entry->stub_offset != (bfd_vma) -1)
5277 return true;
5279 size = (size + 7) & ~7;
5280 stub_entry->stub_sec->size += size;
5282 return true;
5285 /* External entry points for sizing and building linker stubs. */
5287 /* Set up various things so that we can make a list of input sections
5288 for each output section included in the link. Returns -1 on error,
5289 0 when no stubs will be needed, and 1 on success. */
int
5292 elf32_arm_setup_section_lists (bfd *output_bfd,
5293 struct bfd_link_info *info)
5295 bfd *input_bfd;
5296 unsigned int bfd_count;
5297 unsigned int top_id, top_index;
5298 asection *section;
5299 asection **input_list, **list;
5300 size_t amt;
5301 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5303 if (htab == NULL)
5304 return 0;
5306 /* Count the number of input BFDs and find the top input section id. */
5307 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5308 input_bfd != NULL;
5309 input_bfd = input_bfd->link.next)
5311 bfd_count += 1;
5312 for (section = input_bfd->sections;
5313 section != NULL;
5314 section = section->next)
5316 if (top_id < section->id)
5317 top_id = section->id;
5320 htab->bfd_count = bfd_count;
5322 amt = sizeof (struct map_stub) * (top_id + 1);
5323 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5324 if (htab->stub_group == NULL)
5325 return -1;
5326 htab->top_id = top_id;
5328 /* We can't use output_bfd->section_count here to find the top output
5329 section index as some sections may have been removed, and
5330 _bfd_strip_section_from_output doesn't renumber the indices. */
5331 for (section = output_bfd->sections, top_index = 0;
5332 section != NULL;
5333 section = section->next)
5335 if (top_index < section->index)
5336 top_index = section->index;
5339 htab->top_index = top_index;
5340 amt = sizeof (asection *) * (top_index + 1);
5341 input_list = (asection **) bfd_malloc (amt);
5342 htab->input_list = input_list;
5343 if (input_list == NULL)
5344 return -1;
5346 /* For sections we aren't interested in, mark their entries with a
5347 value we can check later. */
5348 list = input_list + top_index;
5350 *list = bfd_abs_section_ptr;
5351 while (list-- != input_list);
5353 for (section = output_bfd->sections;
5354 section != NULL;
5355 section = section->next)
5357 if ((section->flags & SEC_CODE) != 0)
5358 input_list[section->index] = NULL;
5361 return 1;
5364 /* The linker repeatedly calls this function for each input section,
5365 in the order that input sections are linked into output sections.
5366 Build lists of input sections to determine groupings between which
5367 we may insert linker stubs. */
5369 void
5370 elf32_arm_next_input_section (struct bfd_link_info *info,
5371 asection *isec)
5373 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5375 if (htab == NULL)
5376 return;
5378 if (isec->output_section->index <= htab->top_index)
5380 asection **list = htab->input_list + isec->output_section->index;
5382 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
5384 /* Steal the link_sec pointer for our list. */
5385 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5386 /* This happens to make the list in reverse order,
5387 which we reverse later. */
5388 PREV_SEC (isec) = *list;
5389 *list = isec;
5394 /* See whether we can group stub sections together. Grouping stub
5395 sections may result in fewer stubs. More importantly, we need to
5396 put all .init* and .fini* stubs at the end of the .init or
5397 .fini output sections respectively, because glibc splits the
5398 _init and _fini functions into multiple parts. Putting a stub in
5399 the middle of a function is not a good idea. */
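/* Sections are walked in output order: consecutive input sections are
   added to a group until the distance from the group's first section
   would reach STUB_GROUP_SIZE; the group's stub section is then attached
   after the last section of the group, and, unless
   STUBS_ALWAYS_AFTER_BRANCH, input sections within STUB_GROUP_SIZE bytes
   after the stubs may share them as well.  */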
5401 static void
5402 group_sections (struct elf32_arm_link_hash_table *htab,
5403 bfd_size_type stub_group_size,
5404 bool stubs_always_after_branch)
5406 asection **list = htab->input_list;
5410 asection *tail = *list;
5411 asection *head;
5413 if (tail == bfd_abs_section_ptr)
5414 continue;
5416 /* Reverse the list: we must avoid placing stubs at the
5417 beginning of the section because the beginning of the text
5418 section may be required for an interrupt vector in bare metal
5419 code. */
5420 #define NEXT_SEC PREV_SEC
5421 head = NULL;
5422 while (tail != NULL)
5424 /* Pop from tail. */
5425 asection *item = tail;
5426 tail = PREV_SEC (item);
5428 /* Push on head. */
5429 NEXT_SEC (item) = head;
5430 head = item;
5433 while (head != NULL)
5435 asection *curr;
5436 asection *next;
5437 bfd_vma stub_group_start = head->output_offset;
5438 bfd_vma end_of_next;
5440 curr = head;
5441 while (NEXT_SEC (curr) != NULL)
5443 next = NEXT_SEC (curr);
5444 end_of_next = next->output_offset + next->size;
5445 if (end_of_next - stub_group_start >= stub_group_size)
5446 /* End of NEXT is too far from start, so stop. */
5447 break;
5448 /* Add NEXT to the group. */
5449 curr = next;
5452 /* OK, the size from the start to the start of CURR is less
5453 than stub_group_size and thus can be handled by one stub
5454 section. (Or the head section is itself larger than
5455 stub_group_size, in which case we may be toast.)
5456 We should really be keeping track of the total size of
5457 stubs added here, as stubs contribute to the final output
5458 section size. */
5461 next = NEXT_SEC (head);
5462 /* Set up this stub group. */
5463 htab->stub_group[head->id].link_sec = curr;
5465 while (head != curr && (head = next) != NULL);
5467 /* But wait, there's more! Input sections up to stub_group_size
5468 bytes after the stub section can be handled by it too. */
5469 if (!stubs_always_after_branch)
5471 stub_group_start = curr->output_offset + curr->size;
5473 while (next != NULL)
5475 end_of_next = next->output_offset + next->size;
5476 if (end_of_next - stub_group_start >= stub_group_size)
5477 /* End of NEXT is too far from stubs, so stop. */
5478 break;
5479 /* Add NEXT to the stub group. */
5480 head = next;
5481 next = NEXT_SEC (head);
5482 htab->stub_group[head->id].link_sec = curr;
5485 head = next;
5488 while (list++ != htab->input_list + htab->top_index);
5490 free (htab->input_list);
5491 #undef PREV_SEC
5492 #undef NEXT_SEC
5495 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5496 erratum fix. */
5498 static int
5499 a8_reloc_compare (const void *a, const void *b)
5501 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5502 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5504 if (ra->from < rb->from)
5505 return -1;
5506 else if (ra->from > rb->from)
5507 return 1;
5508 else
5509 return 0;
5512 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5513 const char *, char **);
5515 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5516 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5517 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5518 otherwise. */
5520 static bool
5521 cortex_a8_erratum_scan (bfd *input_bfd,
5522 struct bfd_link_info *info,
5523 struct a8_erratum_fix **a8_fixes_p,
5524 unsigned int *num_a8_fixes_p,
5525 unsigned int *a8_fix_table_size_p,
5526 struct a8_erratum_reloc *a8_relocs,
5527 unsigned int num_a8_relocs,
5528 unsigned prev_num_a8_fixes,
5529 bool *stub_changed_p)
5531 asection *section;
5532 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5533 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5534 unsigned int num_a8_fixes = *num_a8_fixes_p;
5535 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5537 if (htab == NULL)
5538 return false;
5540 for (section = input_bfd->sections;
5541 section != NULL;
5542 section = section->next)
5544 bfd_byte *contents = NULL;
5545 struct _arm_elf_section_data *sec_data;
5546 unsigned int span;
5547 bfd_vma base_vma;
5549 if (elf_section_type (section) != SHT_PROGBITS
5550 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5551 || (section->flags & SEC_EXCLUDE) != 0
5552 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5553 || (section->output_section == bfd_abs_section_ptr))
5554 continue;
5556 base_vma = section->output_section->vma + section->output_offset;
5558 if (elf_section_data (section)->this_hdr.contents != NULL)
5559 contents = elf_section_data (section)->this_hdr.contents;
5560 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5561 return true;
5563 sec_data = elf32_arm_section_data (section);
5565 for (span = 0; span < sec_data->mapcount; span++)
5567 unsigned int span_start = sec_data->map[span].vma;
5568 unsigned int span_end = (span == sec_data->mapcount - 1)
5569 ? section->size : sec_data->map[span + 1].vma;
5570 unsigned int i;
5571 char span_type = sec_data->map[span].type;
5572 bool last_was_32bit = false, last_was_branch = false;
5574 if (span_type != 't')
5575 continue;
5577 /* Span is entirely within a single 4KB region: skip scanning. */
5578 if (((base_vma + span_start) & ~0xfff)
5579 == ((base_vma + span_end) & ~0xfff))
5580 continue;
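/* The erratum can only be triggered by a 32-bit branch whose two halfwords
   straddle a 4KB boundary, so a span contained in a single 4KB page cannot
   hold an affected branch.  */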
5582 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5584 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5585 * The branch target is in the same 4KB region as the
5586 first half of the branch.
5587 * The instruction before the branch is a 32-bit
5588 length non-branch instruction. */
5589 for (i = span_start; i < span_end;)
5591 unsigned int insn = bfd_getl16 (&contents[i]);
5592 bool insn_32bit = false, is_blx = false, is_b = false;
5593 bool is_bl = false, is_bcc = false, is_32bit_branch;
5595 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5596 insn_32bit = true;
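/* A halfword whose top five bits are 0b11101, 0b11110 or 0b11111 is the
   first half of a 32-bit Thumb-2 instruction; the test above matches
   exactly those three patterns.  */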
5598 if (insn_32bit)
5600 /* Load the rest of the insn (in manual-friendly order). */
5601 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5603 /* Encoding T4: B<c>.W. */
5604 is_b = (insn & 0xf800d000) == 0xf0009000;
5605 /* Encoding T1: BL<c>.W. */
5606 is_bl = (insn & 0xf800d000) == 0xf000d000;
5607 /* Encoding T2: BLX<c>.W. */
5608 is_blx = (insn & 0xf800d000) == 0xf000c000;
5609 /* Encoding T3: B<c>.W (not permitted in IT block). */
5610 is_bcc = (insn & 0xf800d000) == 0xf0008000
5611 && (insn & 0x07f00000) != 0x03800000;
5614 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5616 if (((base_vma + i) & 0xfff) == 0xffe
5617 && insn_32bit
5618 && is_32bit_branch
5619 && last_was_32bit
5620 && ! last_was_branch)
5622 bfd_signed_vma offset = 0;
5623 bool force_target_arm = false;
5624 bool force_target_thumb = false;
5625 bfd_vma target;
5626 enum elf32_arm_stub_type stub_type = arm_stub_none;
5627 struct a8_erratum_reloc key, *found;
5628 bool use_plt = false;
5630 key.from = base_vma + i;
5631 found = (struct a8_erratum_reloc *)
5632 bsearch (&key, a8_relocs, num_a8_relocs,
5633 sizeof (struct a8_erratum_reloc),
5634 &a8_reloc_compare);
5636 if (found)
5638 char *error_message = NULL;
5639 struct elf_link_hash_entry *entry;
5641 /* We don't care about the error returned from this
5642 function, only if there is glue or not. */
5643 entry = find_thumb_glue (info, found->sym_name,
5644 &error_message);
5646 if (entry)
5647 found->non_a8_stub = true;
5649 /* Keep a simpler condition, for the sake of clarity. */
5650 if (htab->root.splt != NULL && found->hash != NULL
5651 && found->hash->root.plt.offset != (bfd_vma) -1)
5652 use_plt = true;
5654 if (found->r_type == R_ARM_THM_CALL)
5656 if (found->branch_type == ST_BRANCH_TO_ARM
5657 || use_plt)
5658 force_target_arm = true;
5659 else
5660 force_target_thumb = true;
5664 /* Check if we have an offending branch instruction. */
5666 if (found && found->non_a8_stub)
5667 /* We've already made a stub for this instruction, e.g.
5668 it's a long branch or a Thumb->ARM stub. Assume that
5669 stub will suffice to work around the A8 erratum (see
5670 setting of always_after_branch above). */
5672 else if (is_bcc)
5674 offset = (insn & 0x7ff) << 1;
5675 offset |= (insn & 0x3f0000) >> 4;
5676 offset |= (insn & 0x2000) ? 0x40000 : 0;
5677 offset |= (insn & 0x800) ? 0x80000 : 0;
5678 offset |= (insn & 0x4000000) ? 0x100000 : 0;
5679 if (offset & 0x100000)
5680 offset |= ~ ((bfd_signed_vma) 0xfffff);
5681 stub_type = arm_stub_a8_veneer_b_cond;
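/* For encoding T3 the 21-bit branch offset is assembled as
   S:J2:J1:imm6:imm11:'0' (S is bit 26, J1 bit 13 and J2 bit 11 of the
   32-bit instruction) and then sign-extended, which is what the shifts and
   ORs above reconstruct.  */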
5683 else if (is_b || is_bl || is_blx)
5685 int s = (insn & 0x4000000) != 0;
5686 int j1 = (insn & 0x2000) != 0;
5687 int j2 = (insn & 0x800) != 0;
5688 int i1 = !(j1 ^ s);
5689 int i2 = !(j2 ^ s);
5691 offset = (insn & 0x7ff) << 1;
5692 offset |= (insn & 0x3ff0000) >> 4;
5693 offset |= i2 << 22;
5694 offset |= i1 << 23;
5695 offset |= s << 24;
5696 if (offset & 0x1000000)
5697 offset |= ~ ((bfd_signed_vma) 0xffffff);
5699 if (is_blx)
5700 offset &= ~ ((bfd_signed_vma) 3);
5702 stub_type = is_blx ? arm_stub_a8_veneer_blx :
5703 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
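/* For encodings T1 (BL), T2 (BLX) and T4 (B) the 25-bit offset is
   S:I1:I2:imm10:imm11:'0' with I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S),
   sign-extended; for BLX the target is additionally forced to 4-byte
   alignment, as done above.  */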
5706 if (stub_type != arm_stub_none)
5708 bfd_vma pc_for_insn = base_vma + i + 4;
5710 /* The original instruction is a BL, but the target is
5711 an ARM instruction. If we were not making a stub,
5712 the BL would have been converted to a BLX. Use the
5713 BLX stub instead in that case. */
5714 if (htab->use_blx && force_target_arm
5715 && stub_type == arm_stub_a8_veneer_bl)
5717 stub_type = arm_stub_a8_veneer_blx;
5718 is_blx = true;
5719 is_bl = false;
5721 /* Conversely, if the original instruction was
5722 BLX but the target is Thumb mode, use the BL
5723 stub. */
5724 else if (force_target_thumb
5725 && stub_type == arm_stub_a8_veneer_blx)
5727 stub_type = arm_stub_a8_veneer_bl;
5728 is_blx = false;
5729 is_bl = true;
5732 if (is_blx)
5733 pc_for_insn &= ~ ((bfd_vma) 3);
5735 /* If we found a relocation, use the proper destination,
5736 not the offset in the (unrelocated) instruction.
5737 Note this is always done if we switched the stub type
5738 above. */
5739 if (found)
5740 offset =
5741 (bfd_signed_vma) (found->destination - pc_for_insn);
5743 /* If the stub will use a Thumb-mode branch to a
5744 PLT target, redirect it to the preceding Thumb
5745 entry point. */
5746 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5747 offset -= PLT_THUMB_STUB_SIZE;
5749 target = pc_for_insn + offset;
5751 /* The BLX stub is ARM-mode code. Adjust the offset to
5752 take the different PC value (+8 instead of +4) into
5753 account. */
5754 if (stub_type == arm_stub_a8_veneer_blx)
5755 offset += 4;
5757 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5759 char *stub_name = NULL;
5761 if (num_a8_fixes == a8_fix_table_size)
5763 a8_fix_table_size *= 2;
5764 a8_fixes = (struct a8_erratum_fix *)
5765 bfd_realloc (a8_fixes,
5766 sizeof (struct a8_erratum_fix)
5767 * a8_fix_table_size);
5770 if (num_a8_fixes < prev_num_a8_fixes)
5772 /* If we're doing a subsequent scan,
5773 check if we've found the same fix as
5774 before, and try and reuse the stub
5775 name. */
5776 stub_name = a8_fixes[num_a8_fixes].stub_name;
5777 if ((a8_fixes[num_a8_fixes].section != section)
5778 || (a8_fixes[num_a8_fixes].offset != i))
5780 free (stub_name);
5781 stub_name = NULL;
5782 *stub_changed_p = true;
5786 if (!stub_name)
5788 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5789 if (stub_name != NULL)
5790 sprintf (stub_name, "%x:%x", section->id, i);
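/* The stub name is "<section id>:<offset>", both in hex: the 8+1+8+1 byte
   buffer holds two 32-bit values printed with %x, the ':' and the
   terminating NUL.  */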
5793 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5794 a8_fixes[num_a8_fixes].section = section;
5795 a8_fixes[num_a8_fixes].offset = i;
5796 a8_fixes[num_a8_fixes].target_offset =
5797 target - base_vma;
5798 a8_fixes[num_a8_fixes].orig_insn = insn;
5799 a8_fixes[num_a8_fixes].stub_name = stub_name;
5800 a8_fixes[num_a8_fixes].stub_type = stub_type;
5801 a8_fixes[num_a8_fixes].branch_type =
5802 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5804 num_a8_fixes++;
5809 i += insn_32bit ? 4 : 2;
5810 last_was_32bit = insn_32bit;
5811 last_was_branch = is_32bit_branch;
5815 if (elf_section_data (section)->this_hdr.contents == NULL)
5816 free (contents);
5819 *a8_fixes_p = a8_fixes;
5820 *num_a8_fixes_p = num_a8_fixes;
5821 *a8_fix_table_size_p = a8_fix_table_size;
5823 return false;
5826 /* Create or update a stub entry depending on whether the stub can already be
5827 found in HTAB. The stub is identified by:
5828 - its type STUB_TYPE
5829 - its source branch (note that several can share the same stub) whose
5830 section and relocation (if any) are given by SECTION and IRELA
5831 respectively
5832 - its target symbol whose input section, hash, name, value and branch type
5833 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5834 respectively
5836 If found, the value of the stub's target symbol is updated from SYM_VALUE
5837 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5838 TRUE and the stub entry is initialized.
5840 Returns the stub that was created or updated, or NULL if an error
5841 occurred. */
5843 static struct elf32_arm_stub_hash_entry *
5844 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5845 enum elf32_arm_stub_type stub_type, asection *section,
5846 Elf_Internal_Rela *irela, asection *sym_sec,
5847 struct elf32_arm_link_hash_entry *hash, char *sym_name,
5848 bfd_vma sym_value, enum arm_st_branch_type branch_type,
5849 bool *new_stub)
5851 const asection *id_sec;
5852 char *stub_name;
5853 struct elf32_arm_stub_hash_entry *stub_entry;
5854 unsigned int r_type;
5855 bool sym_claimed = arm_stub_sym_claimed (stub_type);
5857 BFD_ASSERT (stub_type != arm_stub_none);
5858 *new_stub = false;
5860 if (sym_claimed)
5861 stub_name = sym_name;
5862 else
5864 BFD_ASSERT (irela);
5865 BFD_ASSERT (section);
5866 BFD_ASSERT (section->id <= htab->top_id);
5868 /* Support for grouping stub sections. */
5869 id_sec = htab->stub_group[section->id].link_sec;
5871 /* Get the name of this stub. */
5872 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5873 stub_type);
5874 if (!stub_name)
5875 return NULL;
5878 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, false,
5879 false);
5880 /* The proper stub has already been created, just update its value. */
5881 if (stub_entry != NULL)
5883 if (!sym_claimed)
5884 free (stub_name);
5885 stub_entry->target_value = sym_value;
5886 return stub_entry;
5889 stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5890 if (stub_entry == NULL)
5892 if (!sym_claimed)
5893 free (stub_name);
5894 return NULL;
5897 stub_entry->target_value = sym_value;
5898 stub_entry->target_section = sym_sec;
5899 stub_entry->stub_type = stub_type;
5900 stub_entry->h = hash;
5901 stub_entry->branch_type = branch_type;
5903 if (sym_claimed)
5904 stub_entry->output_name = sym_name;
5905 else
5907 if (sym_name == NULL)
5908 sym_name = "unnamed";
5909 stub_entry->output_name = (char *)
5910 bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5911 + strlen (sym_name));
5912 if (stub_entry->output_name == NULL)
5914 free (stub_name);
5915 return NULL;
5918 /* For historical reasons, use the existing names for ARM-to-Thumb and
5919 Thumb-to-ARM stubs. */
5920 r_type = ELF32_R_TYPE (irela->r_info);
5921 if ((r_type == (unsigned int) R_ARM_THM_CALL
5922 || r_type == (unsigned int) R_ARM_THM_JUMP24
5923 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5924 && branch_type == ST_BRANCH_TO_ARM)
5925 sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5926 else if ((r_type == (unsigned int) R_ARM_CALL
5927 || r_type == (unsigned int) R_ARM_JUMP24)
5928 && branch_type == ST_BRANCH_TO_THUMB)
5929 sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5930 else
5931 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5934 *new_stub = true;
5935 return stub_entry;
5938 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5939 gateway veneer to transition from non-secure to secure state, and create
5940 such veneers accordingly.
5942 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5943 defines the conditions that govern Secure Gateway veneer creation for a
5944 given symbol <SYM> as follows:
5945 - it has function type
5946 - it has non-local binding
5947 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5948 same type, binding and value as <SYM> (called normal symbol).
5949 An entry function can handle secure state transition itself in which case
5950 its special symbol would have a different value from the normal symbol.
5952 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5953 entry mapping while HTAB gives the name to hash entry mapping.
5954 *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
5955 created.
5957 The return value gives whether a stub failed to be allocated. */
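/* For example, for an entry function `foo' the toolchain also emits a
   special symbol `__acle_se_foo'.  If both symbols have the same value, a
   secure gateway veneer is created for `foo'; if their values differ,
   `foo' is assumed to handle the state transition itself and no veneer is
   needed.  */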
5959 static bool
5960 cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
5961 obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
5962 int *cmse_stub_created)
5964 const struct elf_backend_data *bed;
5965 Elf_Internal_Shdr *symtab_hdr;
5966 unsigned i, j, sym_count, ext_start;
5967 Elf_Internal_Sym *cmse_sym, *local_syms;
5968 struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
5969 enum arm_st_branch_type branch_type;
5970 char *sym_name, *lsym_name;
5971 bfd_vma sym_value;
5972 asection *section;
5973 struct elf32_arm_stub_hash_entry *stub_entry;
5974 bool is_v8m, new_stub, cmse_invalid, ret = true;
5976 bed = get_elf_backend_data (input_bfd);
5977 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5978 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
5979 ext_start = symtab_hdr->sh_info;
5980 is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
5981 && out_attr[Tag_CPU_arch_profile].i == 'M');
5983 local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
5984 if (local_syms == NULL)
5985 local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5986 symtab_hdr->sh_info, 0, NULL, NULL,
5987 NULL);
5988 if (symtab_hdr->sh_info && local_syms == NULL)
5989 return false;
5991 /* Scan symbols. */
5992 for (i = 0; i < sym_count; i++)
5994 cmse_invalid = false;
5996 if (i < ext_start)
5998 cmse_sym = &local_syms[i];
5999 sym_name = bfd_elf_string_from_elf_section (input_bfd,
6000 symtab_hdr->sh_link,
6001 cmse_sym->st_name);
6002 if (!sym_name || !startswith (sym_name, CMSE_PREFIX))
6003 continue;
6005 /* Special symbol with local binding. */
6006 cmse_invalid = true;
6008 else
6010 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
6011 if (cmse_hash == NULL)
6012 continue;
6014 sym_name = (char *) cmse_hash->root.root.root.string;
6015 if (!startswith (sym_name, CMSE_PREFIX))
6016 continue;
6018 /* Special symbol has incorrect binding or type. */
6019 if ((cmse_hash->root.root.type != bfd_link_hash_defined
6020 && cmse_hash->root.root.type != bfd_link_hash_defweak)
6021 || cmse_hash->root.type != STT_FUNC)
6022 cmse_invalid = true;
6025 if (!is_v8m)
6027 _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
6028 "ARMv8-M architecture or later"),
6029 input_bfd, sym_name);
6030 is_v8m = true; /* Avoid multiple warnings. */
6031 ret = false;
6034 if (cmse_invalid)
6036 _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
6037 " a global or weak function symbol"),
6038 input_bfd, sym_name);
6039 ret = false;
6040 if (i < ext_start)
6041 continue;
6044 sym_name += strlen (CMSE_PREFIX);
6045 hash = (struct elf32_arm_link_hash_entry *)
6046 elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);
6048 /* No associated normal symbol or it is neither global nor weak. */
6049 if (!hash
6050 || (hash->root.root.type != bfd_link_hash_defined
6051 && hash->root.root.type != bfd_link_hash_defweak)
6052 || hash->root.type != STT_FUNC)
6054 /* Initialize here to avoid warning about use of possibly
6055 uninitialized variable. */
6056 j = 0;
6058 if (!hash)
6060 /* Searching for a normal symbol with local binding. */
6061 for (; j < ext_start; j++)
6063 lsym_name =
6064 bfd_elf_string_from_elf_section (input_bfd,
6065 symtab_hdr->sh_link,
6066 local_syms[j].st_name);
6067 if (!strcmp (sym_name, lsym_name))
6068 break;
6072 if (hash || j < ext_start)
6074 _bfd_error_handler
6075 (_("%pB: invalid standard symbol `%s'; it must be "
6076 "a global or weak function symbol"),
6077 input_bfd, sym_name);
6079 else
6080 _bfd_error_handler
6081 (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
6082 ret = false;
6083 if (!hash)
6084 continue;
6087 sym_value = hash->root.root.u.def.value;
6088 section = hash->root.root.u.def.section;
6090 if (cmse_hash->root.root.u.def.section != section)
6092 _bfd_error_handler
6093 (_("%pB: `%s' and its special symbol are in different sections"),
6094 input_bfd, sym_name);
6095 ret = false;
6097 if (cmse_hash->root.root.u.def.value != sym_value)
6098 continue; /* Ignore: could be an entry function starting with SG. */
6100 /* If this section is a link-once section that will be discarded, then
6101 don't create any stubs. */
6102 if (section->output_section == NULL)
6104 _bfd_error_handler
6105 (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
6106 continue;
6109 if (hash->root.size == 0)
6111 _bfd_error_handler
6112 (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
6113 ret = false;
6116 if (!ret)
6117 continue;
6118 branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6119 stub_entry
6120 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6121 NULL, NULL, section, hash, sym_name,
6122 sym_value, branch_type, &new_stub);
6124 if (stub_entry == NULL)
6125 ret = false;
6126 else
6128 BFD_ASSERT (new_stub);
6129 (*cmse_stub_created)++;
6133 if (!symtab_hdr->contents)
6134 free (local_syms);
6135 return ret;
6138 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6139 code entry function, i.e. can be called from non-secure code without using a
6140 veneer. */
6142 static bool
6143 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6145 bfd_byte contents[4];
6146 uint32_t first_insn;
6147 asection *section;
6148 file_ptr offset;
6149 bfd *abfd;
6151 /* Defined symbol of function type. */
6152 if (hash->root.root.type != bfd_link_hash_defined
6153 && hash->root.root.type != bfd_link_hash_defweak)
6154 return false;
6155 if (hash->root.type != STT_FUNC)
6156 return false;
6158 /* Read first instruction. */
6159 section = hash->root.root.u.def.section;
6160 abfd = section->owner;
6161 offset = hash->root.root.u.def.value - section->vma;
6162 if (!bfd_get_section_contents (abfd, section, contents, offset,
6163 sizeof (contents)))
6164 return false;
6166 first_insn = bfd_get_32 (abfd, contents);
6168 /* Starts with an SG instruction. */
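/* The SG instruction is encoded as the two identical halfwords
   0xe97f 0xe97f, hence the single 32-bit comparison below.  */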
6169 return first_insn == 0xe97fe97f;
6172 /* Output the name (in the symbol table) of the veneer GEN_ENTRY if it is a new
6173 secure gateway veneer (i.e. the veneer was not in the input import library)
6174 and there is no output import library (GEN_INFO->out_implib_bfd is NULL). */
6176 static bool
6177 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6179 struct elf32_arm_stub_hash_entry *stub_entry;
6180 struct bfd_link_info *info;
6182 /* Massage our args to the form they really have. */
6183 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6184 info = (struct bfd_link_info *) gen_info;
6186 if (info->out_implib_bfd)
6187 return true;
6189 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6190 return true;
6192 if (stub_entry->stub_offset == (bfd_vma) -1)
6193 _bfd_error_handler (" %s", stub_entry->output_name);
6195 return true;
6198 /* Set the offset of each secure gateway veneer so that its address remains
6199 identical to the one in the input import library referred to by
6200 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6201 (present in the input import library but absent from the executable being
6202 linked) or if new veneers appeared and there is no output import library
6203 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6204 number of secure gateway veneers found in the input import library).
6206 The function returns whether an error occurred. If no error occurred,
6207 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6208 and this function, and HTAB->new_cmse_stub_offset is set to the offset just
6209 past the highest veneer observed, so that new veneers can be laid out after it. */
6211 static bool
6212 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6213 struct elf32_arm_link_hash_table *htab,
6214 int *cmse_stub_created)
6216 long symsize;
6217 char *sym_name;
6218 flagword flags;
6219 long i, symcount;
6220 bfd *in_implib_bfd;
6221 asection *stub_out_sec;
6222 bool ret = true;
6223 Elf_Internal_Sym *intsym;
6224 const char *out_sec_name;
6225 bfd_size_type cmse_stub_size;
6226 asymbol **sympp = NULL, *sym;
6227 struct elf32_arm_link_hash_entry *hash;
6228 const insn_sequence *cmse_stub_template;
6229 struct elf32_arm_stub_hash_entry *stub_entry;
6230 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6231 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6232 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6234 /* No input secure gateway import library. */
6235 if (!htab->in_implib_bfd)
6236 return true;
6238 in_implib_bfd = htab->in_implib_bfd;
6239 if (!htab->cmse_implib)
6241 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6242 "Gateway import libraries"), in_implib_bfd);
6243 return false;
6246 /* Get symbol table size. */
6247 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6248 if (symsize < 0)
6249 return false;
6251 /* Read in the input secure gateway import library's symbol table. */
6252 sympp = (asymbol **) bfd_malloc (symsize);
6253 if (sympp == NULL)
6254 return false;
6256 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6257 if (symcount < 0)
6259 ret = false;
6260 goto free_sym_buf;
6263 htab->new_cmse_stub_offset = 0;
6264 cmse_stub_size =
6265 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6266 &cmse_stub_template,
6267 &cmse_stub_template_size);
6268 out_sec_name =
6269 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6270 stub_out_sec =
6271 bfd_get_section_by_name (htab->obfd, out_sec_name);
6272 if (stub_out_sec != NULL)
6273 cmse_stub_sec_vma = stub_out_sec->vma;
6275 /* Set the addresses of veneers mentioned in the input secure gateway import
6276 library's symbol table. */
6277 for (i = 0; i < symcount; i++)
6279 sym = sympp[i];
6280 flags = sym->flags;
6281 sym_name = (char *) bfd_asymbol_name (sym);
6282 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6284 if (sym->section != bfd_abs_section_ptr
6285 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6286 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6287 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6288 != ST_BRANCH_TO_THUMB))
6290 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6291 "symbol should be absolute, global and "
6292 "refer to Thumb functions"),
6293 in_implib_bfd, sym_name);
6294 ret = false;
6295 continue;
6298 veneer_value = bfd_asymbol_value (sym);
6299 stub_offset = veneer_value - cmse_stub_sec_vma;
6300 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6301 false, false);
6302 hash = (struct elf32_arm_link_hash_entry *)
6303 elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);
6305 /* A stub entry should have been created by cmse_scan, or the symbol should
6306 be that of a secure function callable from non-secure code. */
6307 if (!stub_entry && !hash)
6309 bool new_stub;
6311 _bfd_error_handler
6312 (_("entry function `%s' disappeared from secure code"), sym_name);
6313 hash = (struct elf32_arm_link_hash_entry *)
6314 elf_link_hash_lookup (&(htab)->root, sym_name, true, true, true);
6315 stub_entry
6316 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6317 NULL, NULL, bfd_abs_section_ptr, hash,
6318 sym_name, veneer_value,
6319 ST_BRANCH_TO_THUMB, &new_stub);
6320 if (stub_entry == NULL)
6321 ret = false;
6322 else
6324 BFD_ASSERT (new_stub);
6325 new_cmse_stubs_created++;
6326 (*cmse_stub_created)++;
6328 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6329 stub_entry->stub_offset = stub_offset;
6331 /* Symbol found is not callable from non-secure code. */
6332 else if (!stub_entry)
6334 if (!cmse_entry_fct_p (hash))
6336 _bfd_error_handler (_("`%s' refers to a non entry function"),
6337 sym_name);
6338 ret = false;
6340 continue;
6342 else
6344 /* Only stubs for SG veneers should have been created. */
6345 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6347 /* Check visibility hasn't changed. */
6348 if (!!(flags & BSF_GLOBAL)
6349 != (hash->root.root.type == bfd_link_hash_defined))
6350 _bfd_error_handler
6351 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6352 sym_name);
6354 stub_entry->stub_offset = stub_offset;
6357 /* Size should match that of an SG veneer. */
6358 if (intsym->st_size != cmse_stub_size)
6360 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6361 in_implib_bfd, sym_name);
6362 ret = false;
6365 /* Previous veneer address is before current SG veneer section. */
6366 if (veneer_value < cmse_stub_sec_vma)
6368 /* Avoid offset underflow. */
6369 if (stub_entry)
6370 stub_entry->stub_offset = 0;
6371 stub_offset = 0;
6372 ret = false;
6375 /* Complain if stub offset not a multiple of stub size. */
6376 if (stub_offset % cmse_stub_size)
6378 _bfd_error_handler
6379 (_("offset of veneer for entry function `%s' not a multiple of "
6380 "its size"), sym_name);
6381 ret = false;
6384 if (!ret)
6385 continue;
6387 new_cmse_stubs_created--;
6388 if (veneer_value < cmse_stub_array_start)
6389 cmse_stub_array_start = veneer_value;
6390 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6391 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6392 htab->new_cmse_stub_offset = next_cmse_stub_offset;
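/* (cmse_stub_size + 7) & ~7 rounds the veneer size up to a multiple of
   eight, so new_cmse_stub_offset ends up just past the highest veneer
   inherited from the import library and newly created veneers are laid
   out after it.  */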
6395 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6397 BFD_ASSERT (new_cmse_stubs_created > 0);
6398 _bfd_error_handler
6399 (_("new entry function(s) introduced but no output import library "
6400 "specified:"));
6401 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6404 if (cmse_stub_array_start != cmse_stub_sec_vma)
6406 _bfd_error_handler
6407 (_("start address of `%s' is different from previous link"),
6408 out_sec_name);
6409 ret = false;
6412 free_sym_buf:
6413 free (sympp);
6414 return ret;
6417 /* Determine and set the size of the stub section for a final link.
6419 The basic idea here is to examine all the relocations looking for
6420 PC-relative calls to a target that is unreachable with a "bl"
6421 instruction. */
6423 bool
6424 elf32_arm_size_stubs (bfd *output_bfd,
6425 bfd *stub_bfd,
6426 struct bfd_link_info *info,
6427 bfd_signed_vma group_size,
6428 asection * (*add_stub_section) (const char *, asection *,
6429 asection *,
6430 unsigned int),
6431 void (*layout_sections_again) (void))
6433 bool ret = true;
6434 obj_attribute *out_attr;
6435 int cmse_stub_created = 0;
6436 bfd_size_type stub_group_size;
6437 bool m_profile, stubs_always_after_branch, first_veneer_scan = true;
6438 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6439 struct a8_erratum_fix *a8_fixes = NULL;
6440 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6441 struct a8_erratum_reloc *a8_relocs = NULL;
6442 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6444 if (htab == NULL)
6445 return false;
6447 if (htab->fix_cortex_a8)
6449 a8_fixes = (struct a8_erratum_fix *)
6450 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6451 a8_relocs = (struct a8_erratum_reloc *)
6452 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6455 /* Propagate mach to stub bfd, because it may not have been
6456 finalized when we created stub_bfd. */
6457 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6458 bfd_get_mach (output_bfd));
6460 /* Stash our params away. */
6461 htab->stub_bfd = stub_bfd;
6462 htab->add_stub_section = add_stub_section;
6463 htab->layout_sections_again = layout_sections_again;
6464 stubs_always_after_branch = group_size < 0;
6466 out_attr = elf_known_obj_attributes_proc (output_bfd);
6467 m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6469 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6470 as the first half of a 32-bit branch straddling two 4K pages. This is a
6471 crude way of enforcing that. */
6472 if (htab->fix_cortex_a8)
6473 stubs_always_after_branch = 1;
6475 if (group_size < 0)
6476 stub_group_size = -group_size;
6477 else
6478 stub_group_size = group_size;
6480 if (stub_group_size == 1)
6482 /* Default values. */
6483 /* The Thumb branch range of +-4MB has to be used as the default
6484 maximum size (a given section can contain both ARM and Thumb
6485 code, so the worst case has to be taken into account).
6487 This value is 24K less than that, which allows for 2025
6488 12-byte stubs. If we exceed that, then we will fail to link.
6489 The user will have to relink with an explicit group size
6490 option. */
6491 stub_group_size = 4170000;
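/* 4MB is 4194304 bytes; 4194304 - 4170000 = 24304, i.e. roughly 24K of
   headroom, enough for 2025 twelve-byte stubs (2025 * 12 = 24300).  */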
6494 group_sections (htab, stub_group_size, stubs_always_after_branch);
6496 /* If we're applying the cortex A8 fix, we need to determine the
6497 program header size now, because we cannot change it later --
6498 that could alter section placements. Notice the A8 erratum fix
6499 ends up requiring the section addresses to remain unchanged
6500 modulo the page size. That's something we cannot represent
6501 inside BFD, and we don't want to force the section alignment to
6502 be the page size. */
6503 if (htab->fix_cortex_a8)
6504 (*htab->layout_sections_again) ();
6506 while (1)
6508 bfd *input_bfd;
6509 unsigned int bfd_indx;
6510 asection *stub_sec;
6511 enum elf32_arm_stub_type stub_type;
6512 bool stub_changed = false;
6513 unsigned prev_num_a8_fixes = num_a8_fixes;
6515 num_a8_fixes = 0;
6516 for (input_bfd = info->input_bfds, bfd_indx = 0;
6517 input_bfd != NULL;
6518 input_bfd = input_bfd->link.next, bfd_indx++)
6520 Elf_Internal_Shdr *symtab_hdr;
6521 asection *section;
6522 Elf_Internal_Sym *local_syms = NULL;
6524 if (!is_arm_elf (input_bfd))
6525 continue;
6526 if ((input_bfd->flags & DYNAMIC) != 0
6527 && (elf_sym_hashes (input_bfd) == NULL
6528 || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
6529 continue;
6531 num_a8_relocs = 0;
6533 /* We'll need the symbol table in a second. */
6534 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6535 if (symtab_hdr->sh_info == 0)
6536 continue;
6538 /* Limit the scan of symbols to object files whose profile is
6539 Microcontroller, so as not to hinder performance in the general case. */
6540 if (m_profile && first_veneer_scan)
6542 struct elf_link_hash_entry **sym_hashes;
6544 sym_hashes = elf_sym_hashes (input_bfd);
6545 if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6546 &cmse_stub_created))
6547 goto error_ret_free_local;
6549 if (cmse_stub_created != 0)
6550 stub_changed = true;
6553 /* Walk over each section attached to the input bfd. */
6554 for (section = input_bfd->sections;
6555 section != NULL;
6556 section = section->next)
6558 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6560 /* If there aren't any relocs, then there's nothing more
6561 to do. */
6562 if ((section->flags & SEC_RELOC) == 0
6563 || section->reloc_count == 0
6564 || (section->flags & SEC_CODE) == 0)
6565 continue;
6567 /* If this section is a link-once section that will be
6568 discarded, then don't create any stubs. */
6569 if (section->output_section == NULL
6570 || section->output_section->owner != output_bfd)
6571 continue;
6573 /* Get the relocs. */
6574 internal_relocs
6575 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6576 NULL, info->keep_memory);
6577 if (internal_relocs == NULL)
6578 goto error_ret_free_local;
6580 /* Now examine each relocation. */
6581 irela = internal_relocs;
6582 irelaend = irela + section->reloc_count;
6583 for (; irela < irelaend; irela++)
6585 unsigned int r_type, r_indx;
6586 asection *sym_sec;
6587 bfd_vma sym_value;
6588 bfd_vma destination;
6589 struct elf32_arm_link_hash_entry *hash;
6590 const char *sym_name;
6591 unsigned char st_type;
6592 enum arm_st_branch_type branch_type;
6593 bool created_stub = false;
6595 r_type = ELF32_R_TYPE (irela->r_info);
6596 r_indx = ELF32_R_SYM (irela->r_info);
6598 if (r_type >= (unsigned int) R_ARM_max)
6600 bfd_set_error (bfd_error_bad_value);
6601 error_ret_free_internal:
6602 if (elf_section_data (section)->relocs == NULL)
6603 free (internal_relocs);
6604 /* Fall through. */
6605 error_ret_free_local:
6606 if (symtab_hdr->contents != (unsigned char *) local_syms)
6607 free (local_syms);
6608 return false;
6611 hash = NULL;
6612 if (r_indx >= symtab_hdr->sh_info)
6613 hash = elf32_arm_hash_entry
6614 (elf_sym_hashes (input_bfd)
6615 [r_indx - symtab_hdr->sh_info]);
6617 /* Only look for stubs on branch instructions, or
6618 non-relaxed TLSCALL. */
6619 if ((r_type != (unsigned int) R_ARM_CALL)
6620 && (r_type != (unsigned int) R_ARM_THM_CALL)
6621 && (r_type != (unsigned int) R_ARM_JUMP24)
6622 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6623 && (r_type != (unsigned int) R_ARM_THM_XPC22)
6624 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6625 && (r_type != (unsigned int) R_ARM_PLT32)
6626 && !((r_type == (unsigned int) R_ARM_TLS_CALL
6627 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6628 && r_type == (elf32_arm_tls_transition
6629 (info, r_type,
6630 (struct elf_link_hash_entry *) hash))
6631 && ((hash ? hash->tls_type
6632 : (elf32_arm_local_got_tls_type
6633 (input_bfd)[r_indx]))
6634 & GOT_TLS_GDESC) != 0))
6635 continue;
6637 /* Now determine the call target, its name, value,
6638 section. */
6639 sym_sec = NULL;
6640 sym_value = 0;
6641 destination = 0;
6642 sym_name = NULL;
6644 if (r_type == (unsigned int) R_ARM_TLS_CALL
6645 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6647 /* A non-relaxed TLS call. The target is the
6648 plt-resident trampoline, which has nothing to do
6649 with the symbol. */
6650 BFD_ASSERT (htab->tls_trampoline > 0);
6651 sym_sec = htab->root.splt;
6652 sym_value = htab->tls_trampoline;
6653 hash = 0;
6654 st_type = STT_FUNC;
6655 branch_type = ST_BRANCH_TO_ARM;
6657 else if (!hash)
6659 /* It's a local symbol. */
6660 Elf_Internal_Sym *sym;
6662 if (local_syms == NULL)
6664 local_syms
6665 = (Elf_Internal_Sym *) symtab_hdr->contents;
6666 if (local_syms == NULL)
6667 local_syms
6668 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6669 symtab_hdr->sh_info, 0,
6670 NULL, NULL, NULL);
6671 if (local_syms == NULL)
6672 goto error_ret_free_internal;
6675 sym = local_syms + r_indx;
6676 if (sym->st_shndx == SHN_UNDEF)
6677 sym_sec = bfd_und_section_ptr;
6678 else if (sym->st_shndx == SHN_ABS)
6679 sym_sec = bfd_abs_section_ptr;
6680 else if (sym->st_shndx == SHN_COMMON)
6681 sym_sec = bfd_com_section_ptr;
6682 else
6683 sym_sec =
6684 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6686 if (!sym_sec)
6687 /* This is an undefined symbol. It can never
6688 be resolved. */
6689 continue;
6691 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6692 sym_value = sym->st_value;
6693 destination = (sym_value + irela->r_addend
6694 + sym_sec->output_offset
6695 + sym_sec->output_section->vma);
6696 st_type = ELF_ST_TYPE (sym->st_info);
6697 branch_type =
6698 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6699 sym_name
6700 = bfd_elf_string_from_elf_section (input_bfd,
6701 symtab_hdr->sh_link,
6702 sym->st_name);
6704 else
6706 /* It's an external symbol. */
6707 while (hash->root.root.type == bfd_link_hash_indirect
6708 || hash->root.root.type == bfd_link_hash_warning)
6709 hash = ((struct elf32_arm_link_hash_entry *)
6710 hash->root.root.u.i.link);
6712 if (hash->root.root.type == bfd_link_hash_defined
6713 || hash->root.root.type == bfd_link_hash_defweak)
6715 sym_sec = hash->root.root.u.def.section;
6716 sym_value = hash->root.root.u.def.value;
6718 struct elf32_arm_link_hash_table *globals =
6719 elf32_arm_hash_table (info);
6721 /* For a destination in a shared library,
6722 use the PLT stub as target address to
6723 decide whether a branch stub is
6724 needed. */
6725 if (globals != NULL
6726 && globals->root.splt != NULL
6727 && hash != NULL
6728 && hash->root.plt.offset != (bfd_vma) -1)
6730 sym_sec = globals->root.splt;
6731 sym_value = hash->root.plt.offset;
6732 if (sym_sec->output_section != NULL)
6733 destination = (sym_value
6734 + sym_sec->output_offset
6735 + sym_sec->output_section->vma);
6737 else if (sym_sec->output_section != NULL)
6738 destination = (sym_value + irela->r_addend
6739 + sym_sec->output_offset
6740 + sym_sec->output_section->vma);
6742 else if ((hash->root.root.type == bfd_link_hash_undefined)
6743 || (hash->root.root.type == bfd_link_hash_undefweak))
6745 /* For a shared library, use the PLT stub as
6746 target address to decide whether a long
6747 branch stub is needed.
6748 For absolute code, they cannot be handled. */
6749 struct elf32_arm_link_hash_table *globals =
6750 elf32_arm_hash_table (info);
6752 if (globals != NULL
6753 && globals->root.splt != NULL
6754 && hash != NULL
6755 && hash->root.plt.offset != (bfd_vma) -1)
6757 sym_sec = globals->root.splt;
6758 sym_value = hash->root.plt.offset;
6759 if (sym_sec->output_section != NULL)
6760 destination = (sym_value
6761 + sym_sec->output_offset
6762 + sym_sec->output_section->vma);
6764 else
6765 continue;
6767 else
6769 bfd_set_error (bfd_error_bad_value);
6770 goto error_ret_free_internal;
6772 st_type = hash->root.type;
6773 branch_type =
6774 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6775 sym_name = hash->root.root.root.string;
6780 bool new_stub;
6781 struct elf32_arm_stub_hash_entry *stub_entry;
6783 /* Determine what (if any) linker stub is needed. */
6784 stub_type = arm_type_of_stub (info, section, irela,
6785 st_type, &branch_type,
6786 hash, destination, sym_sec,
6787 input_bfd, sym_name);
6788 if (stub_type == arm_stub_none)
6789 break;
6791 /* We've either created a stub for this reloc already,
6792 or we are about to. */
6793 stub_entry =
6794 elf32_arm_create_stub (htab, stub_type, section, irela,
6795 sym_sec, hash,
6796 (char *) sym_name, sym_value,
6797 branch_type, &new_stub);
6799 created_stub = stub_entry != NULL;
6800 if (!created_stub)
6801 goto error_ret_free_internal;
6802 else if (!new_stub)
6803 break;
6804 else
6805 stub_changed = true;
6807 while (0);
6809 /* Look for relocations which might trigger Cortex-A8
6810 erratum. */
6811 if (htab->fix_cortex_a8
6812 && (r_type == (unsigned int) R_ARM_THM_JUMP24
6813 || r_type == (unsigned int) R_ARM_THM_JUMP19
6814 || r_type == (unsigned int) R_ARM_THM_CALL
6815 || r_type == (unsigned int) R_ARM_THM_XPC22))
6817 bfd_vma from = section->output_section->vma
6818 + section->output_offset
6819 + irela->r_offset;
6821 if ((from & 0xfff) == 0xffe)
6823 /* Found a candidate. Note we haven't checked the
6824 destination is within 4K here: if we do so (and
6825 don't create an entry in a8_relocs) we can't tell
6826 that a branch should have been relocated when
6827 scanning later. */
6828 if (num_a8_relocs == a8_reloc_table_size)
6830 a8_reloc_table_size *= 2;
6831 a8_relocs = (struct a8_erratum_reloc *)
6832 bfd_realloc (a8_relocs,
6833 sizeof (struct a8_erratum_reloc)
6834 * a8_reloc_table_size);
6837 a8_relocs[num_a8_relocs].from = from;
6838 a8_relocs[num_a8_relocs].destination = destination;
6839 a8_relocs[num_a8_relocs].r_type = r_type;
6840 a8_relocs[num_a8_relocs].branch_type = branch_type;
6841 a8_relocs[num_a8_relocs].sym_name = sym_name;
6842 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6843 a8_relocs[num_a8_relocs].hash = hash;
6845 num_a8_relocs++;
6850 /* We're done with the internal relocs, free them. */
6851 if (elf_section_data (section)->relocs == NULL)
6852 free (internal_relocs);
6855 if (htab->fix_cortex_a8)
6857 /* Sort relocs which might apply to Cortex-A8 erratum. */
6858 qsort (a8_relocs, num_a8_relocs,
6859 sizeof (struct a8_erratum_reloc),
6860 &a8_reloc_compare);
6862 /* Scan for branches which might trigger Cortex-A8 erratum. */
6863 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6864 &num_a8_fixes, &a8_fix_table_size,
6865 a8_relocs, num_a8_relocs,
6866 prev_num_a8_fixes, &stub_changed)
6867 != 0)
6868 goto error_ret_free_local;
6871 if (local_syms != NULL
6872 && symtab_hdr->contents != (unsigned char *) local_syms)
6874 if (!info->keep_memory)
6875 free (local_syms);
6876 else
6877 symtab_hdr->contents = (unsigned char *) local_syms;
6881 if (first_veneer_scan
6882 && !set_cmse_veneer_addr_from_implib (info, htab,
6883 &cmse_stub_created))
6884 ret = false;
6886 if (prev_num_a8_fixes != num_a8_fixes)
6887 stub_changed = true;
6889 if (!stub_changed)
6890 break;
6892 /* OK, we've added some stubs. Find out the new size of the
6893 stub sections. */
6894 for (stub_sec = htab->stub_bfd->sections;
6895 stub_sec != NULL;
6896 stub_sec = stub_sec->next)
6898 /* Ignore non-stub sections. */
6899 if (!strstr (stub_sec->name, STUB_SUFFIX))
6900 continue;
6902 stub_sec->size = 0;
6905 /* Add new SG veneers after those already in the input import
6906 library. */
6907 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6908 stub_type++)
6910 bfd_vma *start_offset_p;
6911 asection **stub_sec_p;
6913 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6914 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6915 if (start_offset_p == NULL)
6916 continue;
6918 BFD_ASSERT (stub_sec_p != NULL);
6919 if (*stub_sec_p != NULL)
6920 (*stub_sec_p)->size = *start_offset_p;
6923 /* Compute stub section size, considering padding. */
6924 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6925 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6926 stub_type++)
6928 int size, padding;
6929 asection **stub_sec_p;
6931 padding = arm_dedicated_stub_section_padding (stub_type);
6932 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6933 /* Skip if no stub input section or no stub section padding
6934 required. */
6935 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6936 continue;
6937 /* Stub section padding required but no dedicated section. */
6938 BFD_ASSERT (stub_sec_p);
6940 size = (*stub_sec_p)->size;
6941 size = (size + padding - 1) & ~(padding - 1);
6942 (*stub_sec_p)->size = size;
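/* The expression above rounds SIZE up to the next multiple of PADDING;
   the mask form relies on the padding being a power of two.  */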
6945 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6946 if (htab->fix_cortex_a8)
6947 for (i = 0; i < num_a8_fixes; i++)
6949 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6950 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6952 if (stub_sec == NULL)
6953 return false;
6955 stub_sec->size
6956 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6957 NULL);
6961 /* Ask the linker to do its stuff. */
6962 (*htab->layout_sections_again) ();
6963 first_veneer_scan = false;
6966 /* Add stubs for Cortex-A8 erratum fixes now. */
6967 if (htab->fix_cortex_a8)
6969 for (i = 0; i < num_a8_fixes; i++)
6971 struct elf32_arm_stub_hash_entry *stub_entry;
6972 char *stub_name = a8_fixes[i].stub_name;
6973 asection *section = a8_fixes[i].section;
6974 unsigned int section_id = a8_fixes[i].section->id;
6975 asection *link_sec = htab->stub_group[section_id].link_sec;
6976 asection *stub_sec = htab->stub_group[section_id].stub_sec;
6977 const insn_sequence *template_sequence;
6978 int template_size, size = 0;
6980 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6981 true, false);
6982 if (stub_entry == NULL)
6984 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6985 section->owner, stub_name);
6986 return false;
6989 stub_entry->stub_sec = stub_sec;
6990 stub_entry->stub_offset = (bfd_vma) -1;
6991 stub_entry->id_sec = link_sec;
6992 stub_entry->stub_type = a8_fixes[i].stub_type;
6993 stub_entry->source_value = a8_fixes[i].offset;
6994 stub_entry->target_section = a8_fixes[i].section;
6995 stub_entry->target_value = a8_fixes[i].target_offset;
6996 stub_entry->orig_insn = a8_fixes[i].orig_insn;
6997 stub_entry->branch_type = a8_fixes[i].branch_type;
6999 size = find_stub_size_and_template (a8_fixes[i].stub_type,
7000 &template_sequence,
7001 &template_size);
7003 stub_entry->stub_size = size;
7004 stub_entry->stub_template = template_sequence;
7005 stub_entry->stub_template_size = template_size;
7008 /* Stash the Cortex-A8 erratum fix array for use later in
7009 elf32_arm_write_section(). */
7010 htab->a8_erratum_fixes = a8_fixes;
7011 htab->num_a8_erratum_fixes = num_a8_fixes;
7013 else
7015 htab->a8_erratum_fixes = NULL;
7016 htab->num_a8_erratum_fixes = 0;
7018 return ret;
7021 /* Build all the stubs associated with the current output file. The
7022 stubs are kept in a hash table attached to the main linker hash
7023 table. We also set up the .plt entries for statically linked PIC
7024 functions here. This function is called via arm_elf_finish in the
7025 linker. */
7027 bool
7028 elf32_arm_build_stubs (struct bfd_link_info *info)
7030 asection *stub_sec;
7031 struct bfd_hash_table *table;
7032 enum elf32_arm_stub_type stub_type;
7033 struct elf32_arm_link_hash_table *htab;
7035 htab = elf32_arm_hash_table (info);
7036 if (htab == NULL)
7037 return false;
7039 for (stub_sec = htab->stub_bfd->sections;
7040 stub_sec != NULL;
7041 stub_sec = stub_sec->next)
7043 bfd_size_type size;
7045 /* Ignore non-stub sections. */
7046 if (!strstr (stub_sec->name, STUB_SUFFIX))
7047 continue;
7049 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
7050 must at least be done for stub sections requiring padding and for SG
7051 veneers, to ensure that non-secure code branching to a removed SG
7052 veneer causes an error. */
7053 size = stub_sec->size;
7054 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
7055 if (stub_sec->contents == NULL && size != 0)
7056 return false;
7058 stub_sec->size = 0;
7061 /* Add new SG veneers after those already in the input import library. */
7062 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7064 bfd_vma *start_offset_p;
7065 asection **stub_sec_p;
7067 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7068 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7069 if (start_offset_p == NULL)
7070 continue;
7072 BFD_ASSERT (stub_sec_p != NULL);
7073 if (*stub_sec_p != NULL)
7074 (*stub_sec_p)->size = *start_offset_p;
7077 /* Build the stubs as directed by the stub hash table. */
7078 table = &htab->stub_hash_table;
7079 bfd_hash_traverse (table, arm_build_one_stub, info);
7080 if (htab->fix_cortex_a8)
7082 /* Place the Cortex-A8 stubs last. */
7083 htab->fix_cortex_a8 = -1;
7084 bfd_hash_traverse (table, arm_build_one_stub, info);
7087 return true;
7090 /* Locate the Thumb encoded calling stub for NAME. */
7092 static struct elf_link_hash_entry *
7093 find_thumb_glue (struct bfd_link_info *link_info,
7094 const char *name,
7095 char **error_message)
7097 char *tmp_name;
7098 struct elf_link_hash_entry *hash;
7099 struct elf32_arm_link_hash_table *hash_table;
7101 /* We need a pointer to the armelf specific hash table. */
7102 hash_table = elf32_arm_hash_table (link_info);
7103 if (hash_table == NULL)
7104 return NULL;
7106 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7107 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7109 BFD_ASSERT (tmp_name);
7111 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7113 hash = elf_link_hash_lookup
7114 (&(hash_table)->root, tmp_name, false, false, true);
7116 if (hash == NULL)
7118 *error_message = bfd_asprintf (_("unable to find %s glue '%s' for '%s'"),
7119 "Thumb", tmp_name, name);
7120 if (*error_message == NULL)
7121 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7124 free (tmp_name);
7126 return hash;
7129 /* Locate the ARM encoded calling stub for NAME. */
7131 static struct elf_link_hash_entry *
7132 find_arm_glue (struct bfd_link_info *link_info,
7133 const char *name,
7134 char **error_message)
7136 char *tmp_name;
7137 struct elf_link_hash_entry *myh;
7138 struct elf32_arm_link_hash_table *hash_table;
7140 /* We need a pointer to the elfarm specific hash table. */
7141 hash_table = elf32_arm_hash_table (link_info);
7142 if (hash_table == NULL)
7143 return NULL;
7145 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7146 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7147 BFD_ASSERT (tmp_name);
7149 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7151 myh = elf_link_hash_lookup
7152 (&(hash_table)->root, tmp_name, false, false, true);
7154 if (myh == NULL)
7156 *error_message = bfd_asprintf (_("unable to find %s glue '%s' for '%s'"),
7157 "ARM", tmp_name, name);
7158 if (*error_message == NULL)
7159 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7161 free (tmp_name);
7163 return myh;
7166 /* ARM->Thumb glue (static images):
7168 .arm
7169 __func_from_arm:
7170 ldr r12, __func_addr
7171 bx r12
7172 __func_addr:
7173 .word func @ behave as if you saw an ARM_32 reloc.
7175 (v5t static images)
7176 .arm
7177 __func_from_arm:
7178 ldr pc, __func_addr
7179 __func_addr:
7180 .word func @ behave as if you saw an ARM_32 reloc.
7182 (relocatable images)
7183 .arm
7184 __func_from_arm:
7185 ldr r12, __func_offset
7186 add r12, r12, pc
7187 bx r12
7188 __func_offset:
7189 .word func - . */
7191 #define ARM2THUMB_STATIC_GLUE_SIZE 12
7192 static const insn32 a2t1_ldr_insn = 0xe59fc000;
7193 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
7194 static const insn32 a2t3_func_addr_insn = 0x00000001;
7196 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
7197 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
7198 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
7200 #define ARM2THUMB_PIC_GLUE_SIZE 16
7201 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
7202 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
7203 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
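/* Decoded, the constants above are: 0xe59fc000 "ldr r12, [pc, #0]",
   0xe12fff1c "bx r12", 0xe51ff004 "ldr pc, [pc, #-4]", 0xe59fc004
   "ldr r12, [pc, #4]" and 0xe08cc00f "add r12, r12, pc", matching the glue
   sequences shown above.  The *_func_addr_insn words are placeholders,
   presumably overwritten with the target address when the glue is
   emitted.  */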
7205 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
7207 .thumb .thumb
7208 .align 2 .align 2
7209 __func_from_thumb: __func_from_thumb:
7210 bx pc push {r6, lr}
7211 nop ldr r6, __func_addr
7212 .arm mov lr, pc
7213 b func bx r6
7214 .arm
7215 ;; back_to_thumb
7216 ldmia r13! {r6, lr}
7217 bx lr
7218 __func_addr:
7219 .word func */
7221 #define THUMB2ARM_GLUE_SIZE 8
7222 static const insn16 t2a1_bx_pc_insn = 0x4778;
7223 static const insn16 t2a2_noop_insn = 0x46c0;
7224 static const insn32 t2a3_b_insn = 0xea000000;
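/* Likewise, 0x4778 is the Thumb "bx pc", 0x46c0 the Thumb nop
   ("mov r8, r8") and 0xea000000 an ARM "b" whose zero offset is presumably
   patched to reach the target when the glue is emitted.  */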
7226 #define VFP11_ERRATUM_VENEER_SIZE 8
7227 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
7228 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
7230 #define ARM_BX_VENEER_SIZE 12
7231 static const insn32 armbx1_tst_insn = 0xe3100001;
7232 static const insn32 armbx2_moveq_insn = 0x01a0f000;
7233 static const insn32 armbx3_bx_insn = 0xe12fff10;
7235 #ifndef ELFARM_NABI_C_INCLUDED
7236 static void
7237 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7239 asection * s;
7240 bfd_byte * contents;
7242 if (size == 0)
7244 /* Do not include empty glue sections in the output. */
7245 if (abfd != NULL)
7247 s = bfd_get_linker_section (abfd, name);
7248 if (s != NULL)
7249 s->flags |= SEC_EXCLUDE;
7251 return;
7254 BFD_ASSERT (abfd != NULL);
7256 s = bfd_get_linker_section (abfd, name);
7257 BFD_ASSERT (s != NULL);
7259 contents = (bfd_byte *) bfd_zalloc (abfd, size);
7261 BFD_ASSERT (s->size == size);
7262 s->contents = contents;
7265 bool
7266 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7268 struct elf32_arm_link_hash_table * globals;
7270 globals = elf32_arm_hash_table (info);
7271 BFD_ASSERT (globals != NULL);
7273 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7274 globals->arm_glue_size,
7275 ARM2THUMB_GLUE_SECTION_NAME);
7277 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7278 globals->thumb_glue_size,
7279 THUMB2ARM_GLUE_SECTION_NAME);
7281 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7282 globals->vfp11_erratum_glue_size,
7283 VFP11_ERRATUM_VENEER_SECTION_NAME);
7285 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7286 globals->stm32l4xx_erratum_glue_size,
7287 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7289 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7290 globals->bx_glue_size,
7291 ARM_BX_GLUE_SECTION_NAME);
7293 return true;
7296 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7297 Returns the symbol identifying the stub. */
7299 static struct elf_link_hash_entry *
7300 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7301 struct elf_link_hash_entry * h)
7303 const char * name = h->root.root.string;
7304 asection * s;
7305 char * tmp_name;
7306 struct elf_link_hash_entry * myh;
7307 struct bfd_link_hash_entry * bh;
7308 struct elf32_arm_link_hash_table * globals;
7309 bfd_vma val;
7310 bfd_size_type size;
7312 globals = elf32_arm_hash_table (link_info);
7313 BFD_ASSERT (globals != NULL);
7314 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7316 s = bfd_get_linker_section
7317 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7319 BFD_ASSERT (s != NULL);
7321 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7322 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7323 BFD_ASSERT (tmp_name);
7325 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7327 myh = elf_link_hash_lookup
7328 (&(globals)->root, tmp_name, false, false, true);
7330 if (myh != NULL)
7332 /* We've already seen this guy. */
7333 free (tmp_name);
7334 return myh;
7337 /* The only trick here is using hash_table->arm_glue_size as the value.
7338 Even though the section isn't allocated yet, this is where we will be
7339 putting it. The +1 on the value marks that the stub has not been
7340 output yet - not that it is a Thumb function. */
7341 bh = NULL;
7342 val = globals->arm_glue_size + 1;
7343 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7344 tmp_name, BSF_GLOBAL, s, val,
7345 NULL, true, false, &bh);
7347 myh = (struct elf_link_hash_entry *) bh;
7348 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7349 myh->forced_local = 1;
7351 free (tmp_name);
7353 if (bfd_link_pic (link_info)
7354 || globals->pic_veneer)
7355 size = ARM2THUMB_PIC_GLUE_SIZE;
7356 else if (globals->use_blx)
7357 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7358 else
7359 size = ARM2THUMB_STATIC_GLUE_SIZE;
7361 s->size += size;
7362 globals->arm_glue_size += size;
7364 return myh;
7367 /* Allocate space for ARMv4 BX veneers. */
7369 static void
7370 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7372 asection * s;
7373 struct elf32_arm_link_hash_table *globals;
7374 char *tmp_name;
7375 struct elf_link_hash_entry *myh;
7376 struct bfd_link_hash_entry *bh;
7377 bfd_vma val;
7379 /* BX PC does not need a veneer. */
7380 if (reg == 15)
7381 return;
7383 globals = elf32_arm_hash_table (link_info);
7384 BFD_ASSERT (globals != NULL);
7385 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7387 /* Check if this veneer has already been allocated. */
7388 if (globals->bx_glue_offset[reg])
7389 return;
7391 s = bfd_get_linker_section
7392 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7394 BFD_ASSERT (s != NULL);
7396 /* Add symbol for veneer. */
7397 tmp_name = (char *)
7398 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7399 BFD_ASSERT (tmp_name);
7401 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7403 myh = elf_link_hash_lookup
7404 (&(globals)->root, tmp_name, false, false, false);
7406 BFD_ASSERT (myh == NULL);
7408 bh = NULL;
7409 val = globals->bx_glue_size;
7410 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7411 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7412 NULL, true, false, &bh);
7414 myh = (struct elf_link_hash_entry *) bh;
7415 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7416 myh->forced_local = 1;
7418 s->size += ARM_BX_VENEER_SIZE;
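/* Record the veneer's offset. The low two bits are used as flags: bit 1
   marks that space has been allocated here, and bit 0 is set later, in
   elf32_arm_bx_glue, once the veneer contents have been written. */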
7419 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7420 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7424 /* Add an entry to the code/data map for section SEC. */
7426 static void
7427 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7429 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7430 unsigned int newidx;
7432 if (sec_data->map == NULL)
7434 sec_data->map = (elf32_arm_section_map *)
7435 bfd_malloc (sizeof (elf32_arm_section_map));
7436 sec_data->mapcount = 0;
7437 sec_data->mapsize = 1;
7440 newidx = sec_data->mapcount++;
7442 if (sec_data->mapcount > sec_data->mapsize)
7444 sec_data->mapsize *= 2;
7445 sec_data->map = (elf32_arm_section_map *)
7446 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7447 * sizeof (elf32_arm_section_map));
7450 if (sec_data->map)
7452 sec_data->map[newidx].vma = vma;
7453 sec_data->map[newidx].type = type;
7458 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7459 veneers are handled for now. */
7461 static bfd_vma
7462 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7463 elf32_vfp11_erratum_list *branch,
7464 bfd *branch_bfd,
7465 asection *branch_sec,
7466 unsigned int offset)
7468 asection *s;
7469 struct elf32_arm_link_hash_table *hash_table;
7470 char *tmp_name;
7471 struct elf_link_hash_entry *myh;
7472 struct bfd_link_hash_entry *bh;
7473 bfd_vma val;
7474 struct _arm_elf_section_data *sec_data;
7475 elf32_vfp11_erratum_list *newerr;
7477 hash_table = elf32_arm_hash_table (link_info);
7478 BFD_ASSERT (hash_table != NULL);
7479 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7481 s = bfd_get_linker_section
7482 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7484 BFD_ASSERT (s != NULL);
7486 sec_data = elf32_arm_section_data (s);
7488 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7489 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7490 BFD_ASSERT (tmp_name);
7492 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7493 hash_table->num_vfp11_fixes);
7495 myh = elf_link_hash_lookup
7496 (&(hash_table)->root, tmp_name, false, false, false);
7498 BFD_ASSERT (myh == NULL);
7500 bh = NULL;
7501 val = hash_table->vfp11_erratum_glue_size;
7502 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7503 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7504 NULL, true, false, &bh);
7506 myh = (struct elf_link_hash_entry *) bh;
7507 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7508 myh->forced_local = 1;
7510 /* Link veneer back to calling location. */
7511 sec_data->erratumcount += 1;
7512 newerr = (elf32_vfp11_erratum_list *)
7513 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7515 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7516 newerr->vma = -1;
7517 newerr->u.v.branch = branch;
7518 newerr->u.v.id = hash_table->num_vfp11_fixes;
7519 branch->u.b.veneer = newerr;
7521 newerr->next = sec_data->erratumlist;
7522 sec_data->erratumlist = newerr;
7524 /* A symbol for the return from the veneer. */
7525 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7526 hash_table->num_vfp11_fixes);
7528 myh = elf_link_hash_lookup
7529 (&(hash_table)->root, tmp_name, false, false, false);
7531 if (myh != NULL)
7532 abort ();
7534 bh = NULL;
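/* The veneer returns to the instruction following the erratum site,
   hence offset + 4. */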
7535 val = offset + 4;
7536 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7537 branch_sec, val, NULL, true, false, &bh);
7539 myh = (struct elf_link_hash_entry *) bh;
7540 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7541 myh->forced_local = 1;
7543 free (tmp_name);
7545 /* Generate a mapping symbol for the veneer section, and explicitly add an
7546 entry for that symbol to the code/data map for the section. */
7547 if (hash_table->vfp11_erratum_glue_size == 0)
7549 bh = NULL;
7550 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7551 ever requires this erratum fix. */
7552 _bfd_generic_link_add_one_symbol (link_info,
7553 hash_table->bfd_of_glue_owner, "$a",
7554 BSF_LOCAL, s, 0, NULL,
7555 true, false, &bh);
7557 myh = (struct elf_link_hash_entry *) bh;
7558 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7559 myh->forced_local = 1;
7561 /* The elf32_arm_init_maps function only cares about symbols from input
7562 BFDs. We must make a note of this generated mapping symbol
7563 ourselves so that code byteswapping works properly in
7564 elf32_arm_write_section. */
7565 elf32_arm_section_map_add (s, 'a', 0);
7568 s->size += VFP11_ERRATUM_VENEER_SIZE;
7569 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7570 hash_table->num_vfp11_fixes++;
7572 /* The offset of the veneer. */
7573 return val;
7576 /* Record information about a STM32L4XX STM erratum veneer. Only Thumb-mode
7577 veneers need to be handled, since the erratum only affects Cortex-M parts. */
7579 static bfd_vma
7580 record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
7581 elf32_stm32l4xx_erratum_list *branch,
7582 bfd *branch_bfd,
7583 asection *branch_sec,
7584 unsigned int offset,
7585 bfd_size_type veneer_size)
7587 asection *s;
7588 struct elf32_arm_link_hash_table *hash_table;
7589 char *tmp_name;
7590 struct elf_link_hash_entry *myh;
7591 struct bfd_link_hash_entry *bh;
7592 bfd_vma val;
7593 struct _arm_elf_section_data *sec_data;
7594 elf32_stm32l4xx_erratum_list *newerr;
7596 hash_table = elf32_arm_hash_table (link_info);
7597 BFD_ASSERT (hash_table != NULL);
7598 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7600 s = bfd_get_linker_section
7601 (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7603 BFD_ASSERT (s != NULL);
7605 sec_data = elf32_arm_section_data (s);
7607 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7608 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7609 BFD_ASSERT (tmp_name);
7611 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7612 hash_table->num_stm32l4xx_fixes);
7614 myh = elf_link_hash_lookup
7615 (&(hash_table)->root, tmp_name, false, false, false);
7617 BFD_ASSERT (myh == NULL);
7619 bh = NULL;
7620 val = hash_table->stm32l4xx_erratum_glue_size;
7621 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7622 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7623 NULL, true, false, &bh);
7625 myh = (struct elf_link_hash_entry *) bh;
7626 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7627 myh->forced_local = 1;
7629 /* Link veneer back to calling location. */
7630 sec_data->stm32l4xx_erratumcount += 1;
7631 newerr = (elf32_stm32l4xx_erratum_list *)
7632 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
7634 newerr->type = STM32L4XX_ERRATUM_VENEER;
7635 newerr->vma = -1;
7636 newerr->u.v.branch = branch;
7637 newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
7638 branch->u.b.veneer = newerr;
7640 newerr->next = sec_data->stm32l4xx_erratumlist;
7641 sec_data->stm32l4xx_erratumlist = newerr;
7643 /* A symbol for the return from the veneer. */
7644 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7645 hash_table->num_stm32l4xx_fixes);
7647 myh = elf_link_hash_lookup
7648 (&(hash_table)->root, tmp_name, false, false, false);
7650 if (myh != NULL)
7651 abort ();
7653 bh = NULL;
7654 val = offset + 4;
7655 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7656 branch_sec, val, NULL, true, false, &bh);
7658 myh = (struct elf_link_hash_entry *) bh;
7659 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7660 myh->forced_local = 1;
7662 free (tmp_name);
7664 /* Generate a mapping symbol for the veneer section, and explicitly add an
7665 entry for that symbol to the code/data map for the section. */
7666 if (hash_table->stm32l4xx_erratum_glue_size == 0)
7668 bh = NULL;
7669 /* Creates a THUMB symbol since there is no other choice. */
7670 _bfd_generic_link_add_one_symbol (link_info,
7671 hash_table->bfd_of_glue_owner, "$t",
7672 BSF_LOCAL, s, 0, NULL,
7673 true, false, &bh);
7675 myh = (struct elf_link_hash_entry *) bh;
7676 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7677 myh->forced_local = 1;
7679 /* The elf32_arm_init_maps function only cares about symbols from input
7680 BFDs. We must make a note of this generated mapping symbol
7681 ourselves so that code byteswapping works properly in
7682 elf32_arm_write_section. */
7683 elf32_arm_section_map_add (s, 't', 0);
7686 s->size += veneer_size;
7687 hash_table->stm32l4xx_erratum_glue_size += veneer_size;
7688 hash_table->num_stm32l4xx_fixes++;
7690 /* The offset of the veneer. */
7691 return val;
7694 #define ARM_GLUE_SECTION_FLAGS \
7695 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7696 | SEC_READONLY | SEC_LINKER_CREATED)
7698 /* Create a fake section for use by the ARM backend of the linker. */
7700 static bool
7701 arm_make_glue_section (bfd * abfd, const char * name)
7703 asection * sec;
7705 sec = bfd_get_linker_section (abfd, name);
7706 if (sec != NULL)
7707 /* Already made. */
7708 return true;
7710 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7712 if (sec == NULL
7713 || !bfd_set_section_alignment (sec, 2))
7714 return false;
7716 /* Set the gc mark to prevent the section from being removed by garbage
7717 collection, despite the fact that no relocs refer to this section. */
7718 sec->gc_mark = 1;
7720 return true;
7723 /* Set size of .plt entries. This function is called from the
7724 linker scripts in ld/emultempl/{armelf}.em. */
7726 void
7727 bfd_elf32_arm_use_long_plt (void)
7729 elf32_arm_use_long_plt_entry = true;
7732 /* Add the glue sections to ABFD. This function is called from the
7733 linker scripts in ld/emultempl/{armelf}.em. */
7735 bool
7736 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7737 struct bfd_link_info *info)
7739 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7740 bool dostm32l4xx = globals
7741 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7742 bool addglue;
7744 /* If we are only performing a partial
7745 link do not bother adding the glue. */
7746 if (bfd_link_relocatable (info))
7747 return true;
7749 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7750 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7751 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7752 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7754 if (!dostm32l4xx)
7755 return addglue;
7757 return addglue
7758 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7761 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7762 ensures they are not marked for deletion by
7763 strip_excluded_output_sections () when veneers are going to be created
7764 later. Not doing so would trigger an assert on empty section size in
7765 lang_size_sections_1 (). */
7767 void
7768 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7770 enum elf32_arm_stub_type stub_type;
7772 /* If we are only performing a partial
7773 link do not bother adding the glue. */
7774 if (bfd_link_relocatable (info))
7775 return;
7777 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7779 asection *out_sec;
7780 const char *out_sec_name;
7782 if (!arm_dedicated_stub_output_section_required (stub_type))
7783 continue;
7785 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7786 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7787 if (out_sec != NULL)
7788 out_sec->flags |= SEC_KEEP;
7792 /* Select a BFD to be used to hold the sections used by the glue code.
7793 This function is called from the linker scripts in ld/emultempl/
7794 {armelf/pe}.em. */
7796 bool
7797 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7799 struct elf32_arm_link_hash_table *globals;
7801 /* If we are only performing a partial link
7802 do not bother getting a bfd to hold the glue. */
7803 if (bfd_link_relocatable (info))
7804 return true;
7806 /* Make sure we don't attach the glue sections to a dynamic object. */
7807 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7809 globals = elf32_arm_hash_table (info);
7810 BFD_ASSERT (globals != NULL);
7812 if (globals->bfd_of_glue_owner != NULL)
7813 return true;
7815 /* Save the bfd for later use. */
7816 globals->bfd_of_glue_owner = abfd;
7818 return true;
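/* Decide whether BLX may be used for ARM/Thumb interworking, based on the
   output's Tag_CPU_arch build attribute. When the ARM1176 BLX erratum
   workaround is enabled, BLX is only used for v6T2 or architectures newer
   than v6K; otherwise any architecture later than v4T may use it. */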
7821 static void
7822 check_use_blx (struct elf32_arm_link_hash_table *globals)
7824 int cpu_arch;
7826 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7827 Tag_CPU_arch);
7829 if (globals->fix_arm1176)
7831 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7832 globals->use_blx = 1;
7834 else
7836 if (cpu_arch > TAG_CPU_ARCH_V4T)
7837 globals->use_blx = 1;
7841 bool
7842 bfd_elf32_arm_process_before_allocation (bfd *abfd,
7843 struct bfd_link_info *link_info)
7845 Elf_Internal_Shdr *symtab_hdr;
7846 Elf_Internal_Rela *internal_relocs = NULL;
7847 Elf_Internal_Rela *irel, *irelend;
7848 bfd_byte *contents = NULL;
7850 asection *sec;
7851 struct elf32_arm_link_hash_table *globals;
7853 /* If we are only performing a partial link do not bother
7854 to construct any glue. */
7855 if (bfd_link_relocatable (link_info))
7856 return true;
7858 /* Here we have a bfd that is to be included on the link. We have a
7859 hook to do reloc rummaging, before section sizes are nailed down. */
7860 globals = elf32_arm_hash_table (link_info);
7861 BFD_ASSERT (globals != NULL);
7863 check_use_blx (globals);
7865 if (globals->byteswap_code && !bfd_big_endian (abfd))
7867 _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7868 abfd);
7869 return false;
7872 /* PR 5398: If we have not decided to include any loadable sections in
7873 the output then we will not have a glue owner bfd. This is OK, it
7874 just means that there is nothing else for us to do here. */
7875 if (globals->bfd_of_glue_owner == NULL)
7876 return true;
7878 /* Rummage around all the relocs and map the glue vectors. */
7879 sec = abfd->sections;
7881 if (sec == NULL)
7882 return true;
7884 for (; sec != NULL; sec = sec->next)
7886 if (sec->reloc_count == 0)
7887 continue;
7889 if ((sec->flags & SEC_EXCLUDE) != 0
7890 || (sec->flags & SEC_HAS_CONTENTS) == 0)
7891 continue;
7893 symtab_hdr = & elf_symtab_hdr (abfd);
7895 /* Load the relocs. */
7896 internal_relocs
7897 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, false);
7899 if (internal_relocs == NULL)
7900 goto error_return;
7902 irelend = internal_relocs + sec->reloc_count;
7903 for (irel = internal_relocs; irel < irelend; irel++)
7905 long r_type;
7906 unsigned long r_index;
7908 struct elf_link_hash_entry *h;
7910 r_type = ELF32_R_TYPE (irel->r_info);
7911 r_index = ELF32_R_SYM (irel->r_info);
7913 /* These are the only relocation types we care about. */
7914 if ( r_type != R_ARM_PC24
7915 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
7916 continue;
7918 /* Get the section contents if we haven't done so already. */
7919 if (contents == NULL)
7921 /* Get cached copy if it exists. */
7922 if (elf_section_data (sec)->this_hdr.contents != NULL)
7923 contents = elf_section_data (sec)->this_hdr.contents;
7924 else
7926 /* Go get them off disk. */
7927 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7928 goto error_return;
7932 if (r_type == R_ARM_V4BX)
7934 int reg;
7936 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
7937 record_arm_bx_glue (link_info, reg);
7938 continue;
7941 /* If the relocation is not against a symbol it cannot concern us. */
7942 h = NULL;
7944 /* We don't care about local symbols. */
7945 if (r_index < symtab_hdr->sh_info)
7946 continue;
7948 /* This is an external symbol. */
7949 r_index -= symtab_hdr->sh_info;
7950 h = (struct elf_link_hash_entry *)
7951 elf_sym_hashes (abfd)[r_index];
7953 /* If the relocation is against a static symbol it must be within
7954 the current section and so cannot be a cross ARM/Thumb relocation. */
7955 if (h == NULL)
7956 continue;
7958 /* If the call will go through a PLT entry then we do not need
7959 glue. */
7960 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
7961 continue;
7963 switch (r_type)
7965 case R_ARM_PC24:
7966 /* This one is a call from arm code. We need to look up
7967 the target of the call. If it is a thumb target, we
7968 insert glue. */
7969 if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
7970 == ST_BRANCH_TO_THUMB)
7971 record_arm_to_thumb_glue (link_info, h);
7972 break;
7974 default:
7975 abort ();
7979 if (elf_section_data (sec)->this_hdr.contents != contents)
7980 free (contents);
7981 contents = NULL;
7983 if (elf_section_data (sec)->relocs != internal_relocs)
7984 free (internal_relocs);
7985 internal_relocs = NULL;
7988 return true;
7990 error_return:
7991 if (elf_section_data (sec)->this_hdr.contents != contents)
7992 free (contents);
7993 if (elf_section_data (sec)->relocs != internal_relocs)
7994 free (internal_relocs);
7996 return false;
7998 #endif
8001 /* Initialise maps of ARM/Thumb/data for input BFDs. */
8003 void
8004 bfd_elf32_arm_init_maps (bfd *abfd)
8006 Elf_Internal_Sym *isymbuf;
8007 Elf_Internal_Shdr *hdr;
8008 unsigned int i, localsyms;
8010 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
8011 if (! is_arm_elf (abfd))
8012 return;
8014 if ((abfd->flags & DYNAMIC) != 0)
8015 return;
8017 hdr = & elf_symtab_hdr (abfd);
8018 localsyms = hdr->sh_info;
8020 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
8021 should contain the number of local symbols, which should come before any
8022 global symbols. Mapping symbols are always local. */
8023 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
8024 NULL);
8026 /* No internal symbols read? Skip this BFD. */
8027 if (isymbuf == NULL)
8028 return;
8030 for (i = 0; i < localsyms; i++)
8032 Elf_Internal_Sym *isym = &isymbuf[i];
8033 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
8034 const char *name;
8036 if (sec != NULL
8037 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
8039 name = bfd_elf_string_from_elf_section (abfd,
8040 hdr->sh_link, isym->st_name);
8042 if (bfd_is_arm_special_symbol_name (name,
8043 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
8044 elf32_arm_section_map_add (sec, name[1], isym->st_value);
8050 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8051 say what they wanted. */
8053 void
8054 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8056 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8057 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8059 if (globals == NULL)
8060 return;
8062 if (globals->fix_cortex_a8 == -1)
8064 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8065 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8066 && (out_attr[Tag_CPU_arch_profile].i == 'A'
8067 || out_attr[Tag_CPU_arch_profile].i == 0))
8068 globals->fix_cortex_a8 = 1;
8069 else
8070 globals->fix_cortex_a8 = 0;
8075 void
8076 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8078 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8079 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8081 if (globals == NULL)
8082 return;
8083 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8084 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8086 switch (globals->vfp11_fix)
8088 case BFD_ARM_VFP11_FIX_DEFAULT:
8089 case BFD_ARM_VFP11_FIX_NONE:
8090 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8091 break;
8093 default:
8094 /* Give a warning, but do as the user requests anyway. */
8095 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8096 "workaround is not necessary for target architecture"), obfd);
8099 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8100 /* For earlier architectures, we might need the workaround, but do not
8101 enable it by default. If the user is running with broken hardware, they
8102 must enable the erratum fix explicitly. */
8103 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8106 void
8107 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8109 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8110 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8112 if (globals == NULL)
8113 return;
8115 /* We assume only Cortex-M4 may require the fix. */
8116 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8117 || out_attr[Tag_CPU_arch_profile].i != 'M')
8119 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8120 /* Give a warning, but do as the user requests anyway. */
8121 _bfd_error_handler
8122 (_("%pB: warning: selected STM32L4XX erratum "
8123 "workaround is not necessary for target architecture"), obfd);
8127 enum bfd_arm_vfp11_pipe
8129 VFP11_FMAC,
8130 VFP11_LS,
8131 VFP11_DS,
8132 VFP11_BAD
8135 /* Return a VFP register number. This is encoded as RX:X for single-precision
8136 registers, or X:RX for double-precision registers, where RX is the group of
8137 four bits in the instruction encoding and X is the single extension bit.
8138 RX and X fields are specified using their lowest (starting) bit. The return
8139 value is:
8141 0...31: single-precision registers s0...s31
8142 32...63: double-precision registers d0...d31.
8144 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8145 encounter VFP3 instructions, so we allow the full range for DP registers. */
8147 static unsigned int
8148 bfd_arm_vfp11_regno (unsigned int insn, bool is_double, unsigned int rx,
8149 unsigned int x)
8151 if (is_double)
8152 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8153 else
8154 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8157 /* Set bits in *WMASK according to a register number REG as encoded by
8158 bfd_arm_vfp11_regno(). Ignore d16-d31. */
8160 static void
8161 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
8163 if (reg < 32)
8164 *wmask |= 1 << reg;
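/* A double-precision register dN (32 <= REG < 48) occupies two consecutive
   single-precision slots, so mark both bits; d16-d31 are ignored. */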
8165 else if (reg < 48)
8166 *wmask |= 3 << ((reg - 32) * 2);
8169 /* Return TRUE if WMASK overwrites anything in REGS. */
8171 static bool
8172 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8174 int i;
8176 for (i = 0; i < numregs; i++)
8178 unsigned int reg = regs[i];
8180 if (reg < 32 && (wmask & (1 << reg)) != 0)
8181 return true;
8183 reg -= 32;
8185 if (reg >= 16)
8186 continue;
8188 if ((wmask & (3 << (reg * 2))) != 0)
8189 return true;
8192 return false;
8195 /* In this function, we're interested in two things: finding input registers
8196 for VFP data-processing instructions, and finding the set of registers which
8197 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8198 hold the written set, so FLDM etc. are easy to deal with (we're only
8199 interested in 32 SP registers or 16 DP registers, due to the VFP version
8200 implemented by the chip in question). DP registers are marked by setting
8201 both SP registers in the write mask. */
8203 static enum bfd_arm_vfp11_pipe
8204 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
8205 int *numregs)
8207 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
8208 bool is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
8210 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
8212 unsigned int pqrs;
8213 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8214 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8216 pqrs = ((insn & 0x00800000) >> 20)
8217 | ((insn & 0x00300000) >> 19)
8218 | ((insn & 0x00000040) >> 6);
8220 switch (pqrs)
8222 case 0: /* fmac[sd]. */
8223 case 1: /* fnmac[sd]. */
8224 case 2: /* fmsc[sd]. */
8225 case 3: /* fnmsc[sd]. */
8226 vpipe = VFP11_FMAC;
8227 bfd_arm_vfp11_write_mask (destmask, fd);
8228 regs[0] = fd;
8229 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
8230 regs[2] = fm;
8231 *numregs = 3;
8232 break;
8234 case 4: /* fmul[sd]. */
8235 case 5: /* fnmul[sd]. */
8236 case 6: /* fadd[sd]. */
8237 case 7: /* fsub[sd]. */
8238 vpipe = VFP11_FMAC;
8239 goto vfp_binop;
8241 case 8: /* fdiv[sd]. */
8242 vpipe = VFP11_DS;
8243 vfp_binop:
8244 bfd_arm_vfp11_write_mask (destmask, fd);
8245 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
8246 regs[1] = fm;
8247 *numregs = 2;
8248 break;
8250 case 15: /* extended opcode. */
8252 unsigned int extn = ((insn >> 15) & 0x1e)
8253 | ((insn >> 7) & 1);
8255 switch (extn)
8257 case 0: /* fcpy[sd]. */
8258 case 1: /* fabs[sd]. */
8259 case 2: /* fneg[sd]. */
8260 case 8: /* fcmp[sd]. */
8261 case 9: /* fcmpe[sd]. */
8262 case 10: /* fcmpz[sd]. */
8263 case 11: /* fcmpez[sd]. */
8264 case 16: /* fuito[sd]. */
8265 case 17: /* fsito[sd]. */
8266 case 24: /* ftoui[sd]. */
8267 case 25: /* ftouiz[sd]. */
8268 case 26: /* ftosi[sd]. */
8269 case 27: /* ftosiz[sd]. */
8270 /* These instructions will not bounce due to underflow. */
8271 *numregs = 0;
8272 vpipe = VFP11_FMAC;
8273 break;
8275 case 3: /* fsqrt[sd]. */
8276 /* fsqrt cannot underflow, but it can (perhaps) overwrite
8277 registers to cause the erratum in previous instructions. */
8278 bfd_arm_vfp11_write_mask (destmask, fd);
8279 vpipe = VFP11_DS;
8280 break;
8282 case 15: /* fcvt{ds,sd}. */
8284 int rnum = 0;
8286 bfd_arm_vfp11_write_mask (destmask, fd);
8288 /* Only FCVTSD can underflow. */
8289 if ((insn & 0x100) != 0)
8290 regs[rnum++] = fm;
8292 *numregs = rnum;
8294 vpipe = VFP11_FMAC;
8296 break;
8298 default:
8299 return VFP11_BAD;
8302 break;
8304 default:
8305 return VFP11_BAD;
8308 /* Two-register transfer. */
8309 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
8311 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8313 if ((insn & 0x100000) == 0)
8315 if (is_double)
8316 bfd_arm_vfp11_write_mask (destmask, fm);
8317 else
8319 bfd_arm_vfp11_write_mask (destmask, fm);
8320 bfd_arm_vfp11_write_mask (destmask, fm + 1);
8324 vpipe = VFP11_LS;
8326 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
8328 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8329 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
8331 switch (puw)
8333 case 0: /* Two-reg transfer. We should catch these above. */
8334 abort ();
8336 case 2: /* fldm[sdx]. */
8337 case 3:
8338 case 5:
8340 unsigned int i, offset = insn & 0xff;
8342 if (is_double)
8343 offset >>= 1;
8345 for (i = fd; i < fd + offset; i++)
8346 bfd_arm_vfp11_write_mask (destmask, i);
8348 break;
8350 case 4: /* fld[sd]. */
8351 case 6:
8352 bfd_arm_vfp11_write_mask (destmask, fd);
8353 break;
8355 default:
8356 return VFP11_BAD;
8359 vpipe = VFP11_LS;
8361 /* Single-register transfer. Note L==0. */
8362 else if ((insn & 0x0f100e10) == 0x0e000a10)
8364 unsigned int opcode = (insn >> 21) & 7;
8365 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
8367 switch (opcode)
8369 case 0: /* fmsr/fmdlr. */
8370 case 1: /* fmdhr. */
8371 /* Mark fmdhr and fmdlr as writing to the whole of the DP
8372 destination register. I don't know if this is exactly right,
8373 but it is the conservative choice. */
8374 bfd_arm_vfp11_write_mask (destmask, fn);
8375 break;
8377 case 7: /* fmxr. */
8378 break;
8381 vpipe = VFP11_LS;
8384 return vpipe;
8388 static int elf32_arm_compare_mapping (const void * a, const void * b);
8391 /* Look for potentially-troublesome code sequences which might trigger the
8392 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8393 (available from ARM) for details of the erratum. A short version is
8394 described in ld.texinfo. */
8396 bool
8397 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8399 asection *sec;
8400 bfd_byte *contents = NULL;
8401 int state = 0;
8402 int regs[3], numregs = 0;
8403 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8404 int use_vector;
8406 if (globals == NULL)
8407 return false;
8408 use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8409 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8410 The states transition as follows:
8412 0 -> 1 (vector) or 0 -> 2 (scalar)
8413 A VFP FMAC-pipeline instruction has been seen. Fill
8414 regs[0]..regs[numregs-1] with its input operands. Remember this
8415 instruction in 'first_fmac'.
8417 1 -> 2
8418 Any instruction, except for a VFP instruction which overwrites
8419 regs[*].
8421 1 -> 3 [ -> 0 ] or
8422 2 -> 3 [ -> 0 ]
8423 A VFP instruction has been seen which overwrites any of regs[*].
8424 We must make a veneer! Reset state to 0 before examining next
8425 instruction.
8427 2 -> 0
8428 If we fail to match anything in state 2, reset to state 0 and reset
8429 the instruction pointer to the instruction after 'first_fmac'.
8431 If the VFP11 vector mode is in use, there must be at least two unrelated
8432 instructions between anti-dependent VFP11 instructions to properly avoid
8433 triggering the erratum, hence the use of the extra state 1. */
8435 /* If we are only performing a partial link do not bother
8436 to construct any glue. */
8437 if (bfd_link_relocatable (link_info))
8438 return true;
8440 /* Skip if this bfd does not correspond to an ELF image. */
8441 if (! is_arm_elf (abfd))
8442 return true;
8444 /* We should have chosen a fix type by the time we get here. */
8445 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8447 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8448 return true;
8450 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8451 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8452 return true;
8454 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8456 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8457 struct _arm_elf_section_data *sec_data;
8459 /* If we don't have executable progbits, we're not interested in this
8460 section. Also skip if section is to be excluded. */
8461 if (elf_section_type (sec) != SHT_PROGBITS
8462 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8463 || (sec->flags & SEC_EXCLUDE) != 0
8464 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8465 || sec->output_section == bfd_abs_section_ptr
8466 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8467 continue;
8469 sec_data = elf32_arm_section_data (sec);
8471 if (sec_data->mapcount == 0)
8472 continue;
8474 if (elf_section_data (sec)->this_hdr.contents != NULL)
8475 contents = elf_section_data (sec)->this_hdr.contents;
8476 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8477 goto error_return;
8479 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8480 elf32_arm_compare_mapping);
8482 for (span = 0; span < sec_data->mapcount; span++)
8484 unsigned int span_start = sec_data->map[span].vma;
8485 unsigned int span_end = (span == sec_data->mapcount - 1)
8486 ? sec->size : sec_data->map[span + 1].vma;
8487 char span_type = sec_data->map[span].type;
8489 /* FIXME: Only ARM mode is supported at present. We may need to
8490 support Thumb-2 mode also at some point. */
8491 if (span_type != 'a')
8492 continue;
8494 for (i = span_start; i < span_end;)
8496 unsigned int next_i = i + 4;
8497 unsigned int insn = bfd_big_endian (abfd)
8498 ? (((unsigned) contents[i] << 24)
8499 | (contents[i + 1] << 16)
8500 | (contents[i + 2] << 8)
8501 | contents[i + 3])
8502 : (((unsigned) contents[i + 3] << 24)
8503 | (contents[i + 2] << 16)
8504 | (contents[i + 1] << 8)
8505 | contents[i]);
8506 unsigned int writemask = 0;
8507 enum bfd_arm_vfp11_pipe vpipe;
8509 switch (state)
8511 case 0:
8512 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8513 &numregs);
8514 /* I'm assuming the VFP11 erratum can trigger with denorm
8515 operands on either the FMAC or the DS pipeline. This might
8516 lead to slightly overenthusiastic veneer insertion. */
8517 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8519 state = use_vector ? 1 : 2;
8520 first_fmac = i;
8521 veneer_of_insn = insn;
8523 break;
8525 case 1:
8527 int other_regs[3], other_numregs;
8528 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8529 other_regs,
8530 &other_numregs);
8531 if (vpipe != VFP11_BAD
8532 && bfd_arm_vfp11_antidependency (writemask, regs,
8533 numregs))
8534 state = 3;
8535 else
8536 state = 2;
8538 break;
8540 case 2:
8542 int other_regs[3], other_numregs;
8543 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8544 other_regs,
8545 &other_numregs);
8546 if (vpipe != VFP11_BAD
8547 && bfd_arm_vfp11_antidependency (writemask, regs,
8548 numregs))
8549 state = 3;
8550 else
8552 state = 0;
8553 next_i = first_fmac + 4;
8556 break;
8558 case 3:
8559 abort (); /* Should be unreachable. */
8562 if (state == 3)
8564 elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
8565 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8567 elf32_arm_section_data (sec)->erratumcount += 1;
8569 newerr->u.b.vfp_insn = veneer_of_insn;
8571 switch (span_type)
8573 case 'a':
8574 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8575 break;
8577 default:
8578 abort ();
8581 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8582 first_fmac);
8584 newerr->vma = -1;
8586 newerr->next = sec_data->erratumlist;
8587 sec_data->erratumlist = newerr;
8589 state = 0;
8592 i = next_i;
8596 if (elf_section_data (sec)->this_hdr.contents != contents)
8597 free (contents);
8598 contents = NULL;
8601 return true;
8603 error_return:
8604 if (elf_section_data (sec)->this_hdr.contents != contents)
8605 free (contents);
8607 return false;
8610 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8611 after sections have been laid out, using specially-named symbols. */
8613 void
8614 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8615 struct bfd_link_info *link_info)
8617 asection *sec;
8618 struct elf32_arm_link_hash_table *globals;
8619 char *tmp_name;
8621 if (bfd_link_relocatable (link_info))
8622 return;
8624 /* Skip if this bfd does not correspond to an ELF image. */
8625 if (! is_arm_elf (abfd))
8626 return;
8628 globals = elf32_arm_hash_table (link_info);
8629 if (globals == NULL)
8630 return;
8632 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8633 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8634 BFD_ASSERT (tmp_name);
8636 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8638 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8639 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8641 for (; errnode != NULL; errnode = errnode->next)
8643 struct elf_link_hash_entry *myh;
8644 bfd_vma vma;
8646 switch (errnode->type)
8648 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8649 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8650 /* Find veneer symbol. */
8651 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8652 errnode->u.b.veneer->u.v.id);
8654 myh = elf_link_hash_lookup
8655 (&(globals)->root, tmp_name, false, false, true);
8657 if (myh == NULL)
8658 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8659 abfd, "VFP11", tmp_name);
8661 vma = myh->root.u.def.section->output_section->vma
8662 + myh->root.u.def.section->output_offset
8663 + myh->root.u.def.value;
8665 errnode->u.b.veneer->vma = vma;
8666 break;
8668 case VFP11_ERRATUM_ARM_VENEER:
8669 case VFP11_ERRATUM_THUMB_VENEER:
8670 /* Find return location. */
8671 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8672 errnode->u.v.id);
8674 myh = elf_link_hash_lookup
8675 (&(globals)->root, tmp_name, false, false, true);
8677 if (myh == NULL)
8678 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8679 abfd, "VFP11", tmp_name);
8681 vma = myh->root.u.def.section->output_section->vma
8682 + myh->root.u.def.section->output_offset
8683 + myh->root.u.def.value;
8685 errnode->u.v.branch->vma = vma;
8686 break;
8688 default:
8689 abort ();
8694 free (tmp_name);
8697 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8698 return locations after sections have been laid out, using
8699 specially-named symbols. */
8701 void
8702 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8703 struct bfd_link_info *link_info)
8705 asection *sec;
8706 struct elf32_arm_link_hash_table *globals;
8707 char *tmp_name;
8709 if (bfd_link_relocatable (link_info))
8710 return;
8712 /* Skip if this bfd does not correspond to an ELF image. */
8713 if (! is_arm_elf (abfd))
8714 return;
8716 globals = elf32_arm_hash_table (link_info);
8717 if (globals == NULL)
8718 return;
8720 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8721 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8722 BFD_ASSERT (tmp_name);
8724 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8726 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8727 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8729 for (; errnode != NULL; errnode = errnode->next)
8731 struct elf_link_hash_entry *myh;
8732 bfd_vma vma;
8734 switch (errnode->type)
8736 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8737 /* Find veneer symbol. */
8738 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8739 errnode->u.b.veneer->u.v.id);
8741 myh = elf_link_hash_lookup
8742 (&(globals)->root, tmp_name, false, false, true);
8744 if (myh == NULL)
8745 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8746 abfd, "STM32L4XX", tmp_name);
8748 vma = myh->root.u.def.section->output_section->vma
8749 + myh->root.u.def.section->output_offset
8750 + myh->root.u.def.value;
8752 errnode->u.b.veneer->vma = vma;
8753 break;
8755 case STM32L4XX_ERRATUM_VENEER:
8756 /* Find return location. */
8757 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8758 errnode->u.v.id);
8760 myh = elf_link_hash_lookup
8761 (&(globals)->root, tmp_name, false, false, true);
8763 if (myh == NULL)
8764 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8765 abfd, "STM32L4XX", tmp_name);
8767 vma = myh->root.u.def.section->output_section->vma
8768 + myh->root.u.def.section->output_offset
8769 + myh->root.u.def.value;
8771 errnode->u.v.branch->vma = vma;
8772 break;
8774 default:
8775 abort ();
8780 free (tmp_name);
8783 static inline bool
8784 is_thumb2_ldmia (const insn32 insn)
8786 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8787 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8788 return (insn & 0xffd02000) == 0xe8900000;
8791 static inline bool
8792 is_thumb2_ldmdb (const insn32 insn)
8794 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8795 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8796 return (insn & 0xffd02000) == 0xe9100000;
8799 static inline bool
8800 is_thumb2_vldm (const insn32 insn)
8802 /* A6.5 Extension register load or store instruction
8803 A7.7.229
8804 We look for SP 32-bit and DP 64-bit registers.
8805 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8806 <list> is consecutive 64-bit registers
8807 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8808 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8809 <list> is consecutive 32-bit registers
8810 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8811 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8812 if PUW=010 || PUW=011 || PUW=101 VLDM. */
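/* In the tests below, (insn << 7) >> 28 extracts bits 24..21 (P, U, D
   and W); masking with 0xd ignores the D bit. */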
8813 return
8814 (((insn & 0xfe100f00) == 0xec100b00) ||
8815 ((insn & 0xfe100f00) == 0xec100a00))
8816 && /* (IA without !). */
8817 (((((insn << 7) >> 28) & 0xd) == 0x4)
8818 /* (IA with !), includes VPOP (when reg number is SP). */
8819 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8820 /* (DB with !). */
8821 || ((((insn << 7) >> 28) & 0xd) == 0x9));
8824 /* STM STM32L4XX erratum: This function assumes that it receives an LDM or
8825 VLDM opcode and:
8826 - computes the number and the mode of memory accesses
8827 - decides if the replacement should be done:
8828 . replaces only if > 8-word accesses
8829 . or (testing purposes only) replaces all accesses. */
8831 static bool
8832 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8833 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8835 int nb_words = 0;
8837 /* The field encoding the register list is the same for both LDMIA
8838 and LDMDB encodings. */
8839 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8840 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8841 else if (is_thumb2_vldm (insn))
8842 nb_words = (insn & 0xff);
8844 /* DEFAULT mode accounts for the real bug condition situation,
8845 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8846 return (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT
8847 ? nb_words > 8
8848 : stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL);
8851 /* Look for potentially-troublesome code sequences which might trigger
8852 the STM STM32L4XX erratum. */
8854 bool
8855 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8856 struct bfd_link_info *link_info)
8858 asection *sec;
8859 bfd_byte *contents = NULL;
8860 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8862 if (globals == NULL)
8863 return false;
8865 /* If we are only performing a partial link do not bother
8866 to construct any glue. */
8867 if (bfd_link_relocatable (link_info))
8868 return true;
8870 /* Skip if this bfd does not correspond to an ELF image. */
8871 if (! is_arm_elf (abfd))
8872 return true;
8874 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8875 return true;
8877 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8878 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8879 return true;
8881 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8883 unsigned int i, span;
8884 struct _arm_elf_section_data *sec_data;
8886 /* If we don't have executable progbits, we're not interested in this
8887 section. Also skip if section is to be excluded. */
8888 if (elf_section_type (sec) != SHT_PROGBITS
8889 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8890 || (sec->flags & SEC_EXCLUDE) != 0
8891 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8892 || sec->output_section == bfd_abs_section_ptr
8893 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8894 continue;
8896 sec_data = elf32_arm_section_data (sec);
8898 if (sec_data->mapcount == 0)
8899 continue;
8901 if (elf_section_data (sec)->this_hdr.contents != NULL)
8902 contents = elf_section_data (sec)->this_hdr.contents;
8903 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8904 goto error_return;
8906 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8907 elf32_arm_compare_mapping);
8909 for (span = 0; span < sec_data->mapcount; span++)
8911 unsigned int span_start = sec_data->map[span].vma;
8912 unsigned int span_end = (span == sec_data->mapcount - 1)
8913 ? sec->size : sec_data->map[span + 1].vma;
8914 char span_type = sec_data->map[span].type;
8915 int itblock_current_pos = 0;
8917 /* Only Thumb-2 code needs to be handled by this Cortex-M4 specific
8918 fix; ARM-mode spans (span_type == 'a') should not occur here, so
8919 skip anything that is not a Thumb code span. */
8920 if (span_type != 't')
8921 continue;
8923 for (i = span_start; i < span_end;)
8925 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8926 bool insn_32bit = false;
8927 bool is_ldm = false;
8928 bool is_vldm = false;
8929 bool is_not_last_in_it_block = false;
8931 /* The first 16 bits of a 32-bit Thumb-2 instruction have
8932 opcode[15..13] = 0b111, with op1 (opcode[12..11]) being anything
8933 other than 0b00.
8934 See the 32-bit Thumb instruction encoding. */
8935 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8936 insn_32bit = true;
8938 /* Compute the predicate that tells if the instruction
8939 is concerned by the IT block
8940 - Creates an error if there is a ldm that is not
8941 last in the IT block thus cannot be replaced
8942 - Otherwise we can create a branch at the end of the
8943 IT block, it will be controlled naturally by IT
8944 with the proper pseudo-predicate
8945 - So the only interesting predicate is the one that
8946 tells that we are not on the last item of an IT
8947 block. */
8948 if (itblock_current_pos != 0)
8949 is_not_last_in_it_block = !!--itblock_current_pos;
8951 if (insn_32bit)
8953 /* Load the rest of the insn (in manual-friendly order). */
8954 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8955 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8956 is_vldm = is_thumb2_vldm (insn);
8958 /* Veneers are created for (v)ldm depending on
8959 option flags and memory accesses conditions; but
8960 if the instruction is not the last instruction of
8961 an IT block, we cannot create a jump there, so we
8962 bail out. */
8963 if ((is_ldm || is_vldm)
8964 && stm32l4xx_need_create_replacing_stub
8965 (insn, globals->stm32l4xx_fix))
8967 if (is_not_last_in_it_block)
8969 _bfd_error_handler
8970 /* xgettext:c-format */
8971 (_("%pB(%pA+%#x): error: multiple load detected"
8972 " in non-last IT block instruction:"
8973 " STM32L4XX veneer cannot be generated; "
8974 "use gcc option -mrestrict-it to generate"
8975 " only one instruction per IT block"),
8976 abfd, sec, i);
8978 else
8980 elf32_stm32l4xx_erratum_list *newerr =
8981 (elf32_stm32l4xx_erratum_list *)
8982 bfd_zmalloc
8983 (sizeof (elf32_stm32l4xx_erratum_list));
8985 elf32_arm_section_data (sec)
8986 ->stm32l4xx_erratumcount += 1;
8987 newerr->u.b.insn = insn;
8988 /* We create only thumb branches. */
8989 newerr->type =
8990 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
8991 record_stm32l4xx_erratum_veneer
8992 (link_info, newerr, abfd, sec,
8994 is_ldm ?
8995 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
8996 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
8997 newerr->vma = -1;
8998 newerr->next = sec_data->stm32l4xx_erratumlist;
8999 sec_data->stm32l4xx_erratumlist = newerr;
9003 else
9005 /* A7.7.37 IT p208
9006 IT blocks are only encoded in T1
9007 Encoding T1: IT{x{y{z}}} <firstcond>
9008 1 0 1 1 - 1 1 1 1 - firstcond - mask
9009 if mask = '0000' then see 'related encodings'
9010 We don't deal with UNPREDICTABLE, just ignore these.
9011 There can be no nested IT blocks so an IT block
9012 is naturally a new one for which it is worth
9013 computing its size. */
9014 bool is_newitblock = ((insn & 0xff00) == 0xbf00)
9015 && ((insn & 0x000f) != 0x0000);
9016 /* If we have a new IT block we compute its size. */
9017 if (is_newitblock)
9019 /* Compute the number of instructions controlled
9020 by the IT block, it will be used to decide
9021 whether we are inside an IT block or not. */
9022 unsigned int mask = insn & 0x000f;
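/* The lowest set bit of the mask terminates the condition list, so
   the IT block controls 4 - ctz (mask) following instructions. */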
9023 itblock_current_pos = 4 - ctz (mask);
9027 i += insn_32bit ? 4 : 2;
9031 if (elf_section_data (sec)->this_hdr.contents != contents)
9032 free (contents);
9033 contents = NULL;
9036 return true;
9038 error_return:
9039 if (elf_section_data (sec)->this_hdr.contents != contents)
9040 free (contents);
9042 return false;
9045 /* Set target relocation values needed during linking. */
9047 void
9048 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
9049 struct bfd_link_info *link_info,
9050 struct elf32_arm_params *params)
9052 struct elf32_arm_link_hash_table *globals;
9054 globals = elf32_arm_hash_table (link_info);
9055 if (globals == NULL)
9056 return;
9058 globals->target1_is_rel = params->target1_is_rel;
9059 if (globals->fdpic_p)
9060 globals->target2_reloc = R_ARM_GOT32;
9061 else if (strcmp (params->target2_type, "rel") == 0)
9062 globals->target2_reloc = R_ARM_REL32;
9063 else if (strcmp (params->target2_type, "abs") == 0)
9064 globals->target2_reloc = R_ARM_ABS32;
9065 else if (strcmp (params->target2_type, "got-rel") == 0)
9066 globals->target2_reloc = R_ARM_GOT_PREL;
9067 else
9069 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9070 params->target2_type);
9072 globals->fix_v4bx = params->fix_v4bx;
9073 globals->use_blx |= params->use_blx;
9074 globals->vfp11_fix = params->vfp11_denorm_fix;
9075 globals->stm32l4xx_fix = params->stm32l4xx_fix;
9076 if (globals->fdpic_p)
9077 globals->pic_veneer = 1;
9078 else
9079 globals->pic_veneer = params->pic_veneer;
9080 globals->fix_cortex_a8 = params->fix_cortex_a8;
9081 globals->fix_arm1176 = params->fix_arm1176;
9082 globals->cmse_implib = params->cmse_implib;
9083 globals->in_implib_bfd = params->in_implib_bfd;
9085 BFD_ASSERT (is_arm_elf (output_bfd));
9086 elf_arm_tdata (output_bfd)->no_enum_size_warning
9087 = params->no_enum_size_warning;
9088 elf_arm_tdata (output_bfd)->no_wchar_size_warning
9089 = params->no_wchar_size_warning;
9092 /* Replace the target offset of a Thumb bl or b.w instruction. */
9094 static void
9095 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9097 bfd_vma upper;
9098 bfd_vma lower;
9099 int reloc_sign;
9101 BFD_ASSERT ((offset & 1) == 0);
9103 upper = bfd_get_16 (abfd, insn);
9104 lower = bfd_get_16 (abfd, insn + 2);
9105 reloc_sign = (offset < 0) ? 1 : 0;
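/* Thumb-2 BL/B.W stores the offset as S:I1:I2:imm10:imm11:'0': S and
   imm10 go in the first halfword, while the second halfword holds
   J1 = (~I1) ^ S, J2 = (~I2) ^ S and imm11. */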
9106 upper = (upper & ~(bfd_vma) 0x7ff)
9107 | ((offset >> 12) & 0x3ff)
9108 | (reloc_sign << 10);
9109 lower = (lower & ~(bfd_vma) 0x2fff)
9110 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9111 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9112 | ((offset >> 1) & 0x7ff);
9113 bfd_put_16 (abfd, upper, insn);
9114 bfd_put_16 (abfd, lower, insn + 2);
9117 /* Thumb code calling an ARM function. */
9119 static int
9120 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
9121 const char * name,
9122 bfd * input_bfd,
9123 bfd * output_bfd,
9124 asection * input_section,
9125 bfd_byte * hit_data,
9126 asection * sym_sec,
9127 bfd_vma offset,
9128 bfd_signed_vma addend,
9129 bfd_vma val,
9130 char **error_message)
9132 asection * s = 0;
9133 bfd_vma my_offset;
9134 long int ret_offset;
9135 struct elf_link_hash_entry * myh;
9136 struct elf32_arm_link_hash_table * globals;
9138 myh = find_thumb_glue (info, name, error_message);
9139 if (myh == NULL)
9140 return false;
9142 globals = elf32_arm_hash_table (info);
9143 BFD_ASSERT (globals != NULL);
9144 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9146 my_offset = myh->root.u.def.value;
9148 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9149 THUMB2ARM_GLUE_SECTION_NAME);
9151 BFD_ASSERT (s != NULL);
9152 BFD_ASSERT (s->contents != NULL);
9153 BFD_ASSERT (s->output_section != NULL);
9155 if ((my_offset & 0x01) == 0x01)
9157 if (sym_sec != NULL
9158 && sym_sec->owner != NULL
9159 && !INTERWORK_FLAG (sym_sec->owner))
9161 _bfd_error_handler
9162 (_("%pB(%s): warning: interworking not enabled;"
9163 " first occurrence: %pB: %s call to %s"),
9164 sym_sec->owner, name, input_bfd, "Thumb", "ARM");
9166 return false;
9169 --my_offset;
9170 myh->root.u.def.value = my_offset;
9172 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
9173 s->contents + my_offset);
9175 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
9176 s->contents + my_offset + 2);
9178 ret_offset =
9179 /* Address of destination of the stub. */
9180 ((bfd_signed_vma) val)
9181 - ((bfd_signed_vma)
9182 /* Offset from the start of the current section
9183 to the start of the stubs. */
9184 (s->output_offset
9185 /* Offset of the start of this stub from the start of the stubs. */
9186 + my_offset
9187 /* Address of the start of the current section. */
9188 + s->output_section->vma)
9189 /* The branch instruction is 4 bytes into the stub. */
9191 /* ARM branches work from the pc of the instruction + 8. */
9192 + 8);
9194 put_arm_insn (globals, output_bfd,
9195 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
9196 s->contents + my_offset + 4);
9199 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
9201 /* Now go back and fix up the original BL insn to point to here. */
9202 ret_offset =
9203 /* Address of where the stub is located. */
9204 (s->output_section->vma + s->output_offset + my_offset)
9205 /* Address of where the BL is located. */
9206 - (input_section->output_section->vma + input_section->output_offset
9207 + offset)
9208 /* Addend in the relocation. */
9209 - addend
9210 /* Biassing for PC-relative addressing. */
9211 - 8;
9213 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
9215 return true;
9218 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
9220 static struct elf_link_hash_entry *
9221 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
9222 const char * name,
9223 bfd * input_bfd,
9224 bfd * output_bfd,
9225 asection * sym_sec,
9226 bfd_vma val,
9227 asection * s,
9228 char ** error_message)
9230 bfd_vma my_offset;
9231 long int ret_offset;
9232 struct elf_link_hash_entry * myh;
9233 struct elf32_arm_link_hash_table * globals;
9235 myh = find_arm_glue (info, name, error_message);
9236 if (myh == NULL)
9237 return NULL;
9239 globals = elf32_arm_hash_table (info);
9240 BFD_ASSERT (globals != NULL);
9241 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9243 my_offset = myh->root.u.def.value;
9245 if ((my_offset & 0x01) == 0x01)
9247 if (sym_sec != NULL
9248 && sym_sec->owner != NULL
9249 && !INTERWORK_FLAG (sym_sec->owner))
9251 _bfd_error_handler
9252 (_("%pB(%s): warning: interworking not enabled;"
9253 " first occurrence: %pB: %s call to %s"),
9254 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
9257 --my_offset;
9258 myh->root.u.def.value = my_offset;
9260 if (bfd_link_pic (info)
9261 || globals->pic_veneer)
9263 /* For position-independent output (or a PIC veneer) we can't use absolute
9264 addresses, so construct the address from a relative offset. */
9265 /* TODO: If the offset is small it's probably worth
9266 constructing the address with adds. */
9267 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
9268 s->contents + my_offset);
9269 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
9270 s->contents + my_offset + 4);
9271 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
9272 s->contents + my_offset + 8);
9273 /* Adjust the offset by 4 for the position of the add,
9274 and 8 for the pipeline offset. */
9275 ret_offset = (val - (s->output_offset
9276 + s->output_section->vma
9277 + my_offset + 12))
9278 | 1;
9279 bfd_put_32 (output_bfd, ret_offset,
9280 s->contents + my_offset + 12);
9282 else if (globals->use_blx)
9284 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
9285 s->contents + my_offset);
9287 /* It's a thumb address. Add the low order bit. */
9288 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
9289 s->contents + my_offset + 4);
9291 else
9293 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
9294 s->contents + my_offset);
9296 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
9297 s->contents + my_offset + 4);
9299 /* It's a thumb address. Add the low order bit. */
9300 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
9301 s->contents + my_offset + 8);
9303 my_offset += 12;
9307 BFD_ASSERT (my_offset <= globals->arm_glue_size);
9309 return myh;
9312 /* Arm code calling a Thumb function. */
9314 static int
9315 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
9316 const char * name,
9317 bfd * input_bfd,
9318 bfd * output_bfd,
9319 asection * input_section,
9320 bfd_byte * hit_data,
9321 asection * sym_sec,
9322 bfd_vma offset,
9323 bfd_signed_vma addend,
9324 bfd_vma val,
9325 char **error_message)
9327 unsigned long int tmp;
9328 bfd_vma my_offset;
9329 asection * s;
9330 long int ret_offset;
9331 struct elf_link_hash_entry * myh;
9332 struct elf32_arm_link_hash_table * globals;
9334 globals = elf32_arm_hash_table (info);
9335 BFD_ASSERT (globals != NULL);
9336 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9338 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9339 ARM2THUMB_GLUE_SECTION_NAME);
9340 BFD_ASSERT (s != NULL);
9341 BFD_ASSERT (s->contents != NULL);
9342 BFD_ASSERT (s->output_section != NULL);
9344 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
9345 sym_sec, val, s, error_message);
9346 if (!myh)
9347 return false;
9349 my_offset = myh->root.u.def.value;
9350 tmp = bfd_get_32 (input_bfd, hit_data);
9351 tmp = tmp & 0xFF000000;
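/* Keep the condition code and opcode bits of the original B/BL; the
   24-bit word offset to the stub is filled in below. */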
9353 /* Somehow these are both 4 too far, so subtract 8. */
9354 ret_offset = (s->output_offset
9355 + my_offset
9356 + s->output_section->vma
9357 - (input_section->output_offset
9358 + input_section->output_section->vma
9359 + offset + addend)
9360 - 8);
9362 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
9364 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
9366 return true;
9369 /* Populate Arm stub for an exported Thumb function. */
9371 static bool
9372 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
9374 struct bfd_link_info * info = (struct bfd_link_info *) inf;
9375 asection * s;
9376 struct elf_link_hash_entry * myh;
9377 struct elf32_arm_link_hash_entry *eh;
9378 struct elf32_arm_link_hash_table * globals;
9379 asection *sec;
9380 bfd_vma val;
9381 char *error_message;
9383 eh = elf32_arm_hash_entry (h);
9384 /* Allocate stubs for exported Thumb functions on v4t. */
9385 if (eh->export_glue == NULL)
9386 return true;
9388 globals = elf32_arm_hash_table (info);
9389 BFD_ASSERT (globals != NULL);
9390 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9392 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9393 ARM2THUMB_GLUE_SECTION_NAME);
9394 BFD_ASSERT (s != NULL);
9395 BFD_ASSERT (s->contents != NULL);
9396 BFD_ASSERT (s->output_section != NULL);
9398 sec = eh->export_glue->root.u.def.section;
9400 BFD_ASSERT (sec->output_section != NULL);
9402 val = eh->export_glue->root.u.def.value + sec->output_offset
9403 + sec->output_section->vma;
9405 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
9406 h->root.u.def.section->owner,
9407 globals->obfd, sec, val, s,
9408 &error_message);
9409 BFD_ASSERT (myh);
9410 return true;
9413 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
9415 static bfd_vma
9416 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9418 bfd_byte *p;
9419 bfd_vma glue_addr;
9420 asection *s;
9421 struct elf32_arm_link_hash_table *globals;
9423 globals = elf32_arm_hash_table (info);
9424 BFD_ASSERT (globals != NULL);
9425 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9427 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9428 ARM_BX_GLUE_SECTION_NAME);
9429 BFD_ASSERT (s != NULL);
9430 BFD_ASSERT (s->contents != NULL);
9431 BFD_ASSERT (s->output_section != NULL);
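/* The low two bits of bx_glue_offset are flags: bit 1 is set once space
   for the veneer has been reserved, and bit 0 is set below once its
   contents have been written out.  */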
9433 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9435 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9437 if ((globals->bx_glue_offset[reg] & 1) == 0)
9439 p = s->contents + glue_addr;
9440 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9441 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9442 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9443 globals->bx_glue_offset[reg] |= 1;
9446 return glue_addr + s->output_section->vma + s->output_offset;
9449 /* Generate Arm stubs for exported Thumb symbols. */
9450 static void
9451 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9452 struct bfd_link_info *link_info)
9454 struct elf32_arm_link_hash_table * globals;
9456 if (link_info == NULL)
9457 /* Ignore this if we are not called by the ELF backend linker. */
9458 return;
9460 globals = elf32_arm_hash_table (link_info);
9461 if (globals == NULL)
9462 return;
9464 /* If blx is available then exported Thumb symbols are OK and there is
9465 nothing to do. */
9466 if (globals->use_blx)
9467 return;
9469 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9470 link_info);
9473 /* Reserve space for COUNT dynamic relocations in relocation section
9474 SRELOC. */
9476 static void
9477 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9478 bfd_size_type count)
9480 struct elf32_arm_link_hash_table *htab;
9482 htab = elf32_arm_hash_table (info);
9483 BFD_ASSERT (htab->root.dynamic_sections_created);
9484 if (sreloc == NULL)
9485 abort ();
9486 sreloc->size += RELOC_SIZE (htab) * count;
9489 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9490 dynamic, the relocations should go in SRELOC, otherwise they should
9491 go in the special .rel.iplt section. */
9493 static void
9494 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9495 bfd_size_type count)
9497 struct elf32_arm_link_hash_table *htab;
9499 htab = elf32_arm_hash_table (info);
9500 if (!htab->root.dynamic_sections_created)
9501 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9502 else
9504 BFD_ASSERT (sreloc != NULL);
9505 sreloc->size += RELOC_SIZE (htab) * count;
9509 /* Add relocation REL to the end of relocation section SRELOC. */
9511 static void
9512 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9513 asection *sreloc, Elf_Internal_Rela *rel)
9515 bfd_byte *loc;
9516 struct elf32_arm_link_hash_table *htab;
9518 htab = elf32_arm_hash_table (info);
9519 if (!htab->root.dynamic_sections_created
9520 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9521 sreloc = htab->root.irelplt;
9522 if (sreloc == NULL)
9523 abort ();
9524 loc = sreloc->contents;
9525 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9526 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9527 abort ();
9528 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9531 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9532 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9533 to .plt. */
9535 static void
9536 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
9537 bool is_iplt_entry,
9538 union gotplt_union *root_plt,
9539 struct arm_plt_info *arm_plt)
9541 struct elf32_arm_link_hash_table *htab;
9542 asection *splt;
9543 asection *sgotplt;
9545 htab = elf32_arm_hash_table (info);
9547 if (is_iplt_entry)
9549 splt = htab->root.iplt;
9550 sgotplt = htab->root.igotplt;
9552 /* NaCl uses a special first entry in .iplt too. */
9553 if (htab->root.target_os == is_nacl && splt->size == 0)
9554 splt->size += htab->plt_header_size;
9556 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
9557 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
9559 else
9561 splt = htab->root.splt;
9562 sgotplt = htab->root.sgotplt;
9564 if (htab->fdpic_p)
9566 /* Allocate room for R_ARM_FUNCDESC_VALUE. */
9567 /* For lazy binding, relocations will be put into .rel.plt;
9568 otherwise they go into .rel.got. */
9569 /* FIXME: we do not currently support lazy binding, so put it in .rel.got. */
9570 if (info->flags & DF_BIND_NOW)
9571 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
9572 else
9573 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9575 else
9577 /* Allocate room for an R_ARM_JUMP_SLOT relocation in .rel.plt. */
9578 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9581 /* If this is the first .plt entry, make room for the special
9582 first entry. */
9583 if (splt->size == 0)
9584 splt->size += htab->plt_header_size;
9586 htab->next_tls_desc_index++;
9589 /* Allocate the PLT entry itself, including any leading Thumb stub. */
9590 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9591 splt->size += PLT_THUMB_STUB_SIZE;
9592 root_plt->offset = splt->size;
9593 splt->size += htab->plt_entry_size;
9595 /* We also need to make an entry in the .got.plt section, which
9596 will be placed in the .got section by the linker script. */
9597 if (is_iplt_entry)
9598 arm_plt->got_offset = sgotplt->size;
9599 else
9600 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
9601 if (htab->fdpic_p)
9602 /* Function descriptor takes 64 bits in GOT. */
9603 sgotplt->size += 8;
9604 else
9605 sgotplt->size += 4;
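/* The two helpers below scatter the low and high 16 bits of a value into
   the imm4:imm12 immediate fields of ARM MOVW and MOVT instructions;
   e.g. arm_movw_immediate (0x1234abcd) yields 0xa0bcd, i.e. imm4 = 0xa
   and imm12 = 0xbcd.  */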
9608 static bfd_vma
9609 arm_movw_immediate (bfd_vma value)
9611 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9614 static bfd_vma
9615 arm_movt_immediate (bfd_vma value)
9617 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9620 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9621 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9622 Otherwise, DYNINDX is the index of the symbol in the dynamic
9623 symbol table and SYM_VALUE is undefined.
9625 ROOT_PLT points to the offset of the PLT entry from the start of its
9626 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9627 bookkeeping information.
9629 Returns FALSE if there was a problem. */
9631 static bool
9632 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
9633 union gotplt_union *root_plt,
9634 struct arm_plt_info *arm_plt,
9635 int dynindx, bfd_vma sym_value)
9637 struct elf32_arm_link_hash_table *htab;
9638 asection *sgot;
9639 asection *splt;
9640 asection *srel;
9641 bfd_byte *loc;
9642 bfd_vma plt_index;
9643 Elf_Internal_Rela rel;
9644 bfd_vma got_header_size;
9646 htab = elf32_arm_hash_table (info);
9648 /* Pick the appropriate sections and sizes. */
9649 if (dynindx == -1)
9651 splt = htab->root.iplt;
9652 sgot = htab->root.igotplt;
9653 srel = htab->root.irelplt;
9655 /* There are no reserved entries in .igot.plt, and no special
9656 first entry in .iplt. */
9657 got_header_size = 0;
9659 else
9661 splt = htab->root.splt;
9662 sgot = htab->root.sgotplt;
9663 srel = htab->root.srelplt;
9665 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
9667 BFD_ASSERT (splt != NULL && srel != NULL);
9669 bfd_vma got_offset, got_address, plt_address;
9670 bfd_vma got_displacement, initial_got_entry;
9671 bfd_byte * ptr;
9673 BFD_ASSERT (sgot != NULL);
9675 /* Get the offset into the .(i)got.plt table of the entry that
9676 corresponds to this function. */
9677 got_offset = (arm_plt->got_offset & -2);
9679 /* Get the index in the procedure linkage table which
9680 corresponds to this symbol. This is the index of this symbol
9681 in all the symbols for which we are making plt entries.
9682 After the reserved .got.plt entries, all symbols appear in
9683 the same order as in .plt. */
9684 if (htab->fdpic_p)
9685 /* Function descriptor takes 8 bytes. */
9686 plt_index = (got_offset - got_header_size) / 8;
9687 else
9688 plt_index = (got_offset - got_header_size) / 4;
9690 /* Calculate the address of the GOT entry. */
9691 got_address = (sgot->output_section->vma
9692 + sgot->output_offset
9693 + got_offset);
9695 /* ...and the address of the PLT entry. */
9696 plt_address = (splt->output_section->vma
9697 + splt->output_offset
9698 + root_plt->offset);
9700 ptr = splt->contents + root_plt->offset;
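/* Emit the PLT entry using whichever template matches the target:
   VxWorks (shared or executable), NaCl, FDPIC, Thumb-2-only, or the
   default short/long ARM entry.  */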
9701 if (htab->root.target_os == is_vxworks && bfd_link_pic (info))
9703 unsigned int i;
9704 bfd_vma val;
9706 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9708 val = elf32_arm_vxworks_shared_plt_entry[i];
9709 if (i == 2)
9710 val |= got_address - sgot->output_section->vma;
9711 if (i == 5)
9712 val |= plt_index * RELOC_SIZE (htab);
9713 if (i == 2 || i == 5)
9714 bfd_put_32 (output_bfd, val, ptr);
9715 else
9716 put_arm_insn (htab, output_bfd, val, ptr);
9719 else if (htab->root.target_os == is_vxworks)
9721 unsigned int i;
9722 bfd_vma val;
9724 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9726 val = elf32_arm_vxworks_exec_plt_entry[i];
9727 if (i == 2)
9728 val |= got_address;
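/* Word 4 branches back to the start of .plt (the PLT header); fill in
   the backwards offset in the branch instruction's 24-bit field.  */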
9729 if (i == 4)
9730 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
9731 if (i == 5)
9732 val |= plt_index * RELOC_SIZE (htab);
9733 if (i == 2 || i == 5)
9734 bfd_put_32 (output_bfd, val, ptr);
9735 else
9736 put_arm_insn (htab, output_bfd, val, ptr);
9739 loc = (htab->srelplt2->contents
9740 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
9742 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9743 referencing the GOT for this PLT entry. */
9744 rel.r_offset = plt_address + 8;
9745 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
9746 rel.r_addend = got_offset;
9747 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9748 loc += RELOC_SIZE (htab);
9750 /* Create the R_ARM_ABS32 relocation referencing the
9751 beginning of the PLT for this GOT entry. */
9752 rel.r_offset = got_address;
9753 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
9754 rel.r_addend = 0;
9755 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9757 else if (htab->root.target_os == is_nacl)
9759 /* Calculate the displacement between the PLT slot and the
9760 common tail that's part of the special initial PLT slot. */
9761 int32_t tail_displacement
9762 = ((splt->output_section->vma + splt->output_offset
9763 + ARM_NACL_PLT_TAIL_OFFSET)
9764 - (plt_address + htab->plt_entry_size + 4));
9765 BFD_ASSERT ((tail_displacement & 3) == 0);
9766 tail_displacement >>= 2;
9768 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
9769 || (-tail_displacement & 0xff000000) == 0);
9771 /* Calculate the displacement between the PLT slot and the entry
9772 in the GOT. The offset accounts for the value produced by
9773 adding to pc in the penultimate instruction of the PLT stub. */
9774 got_displacement = (got_address
9775 - (plt_address + htab->plt_entry_size));
9777 /* NaCl does not support interworking at all. */
9778 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
9780 put_arm_insn (htab, output_bfd,
9781 elf32_arm_nacl_plt_entry[0]
9782 | arm_movw_immediate (got_displacement),
9783 ptr + 0);
9784 put_arm_insn (htab, output_bfd,
9785 elf32_arm_nacl_plt_entry[1]
9786 | arm_movt_immediate (got_displacement),
9787 ptr + 4);
9788 put_arm_insn (htab, output_bfd,
9789 elf32_arm_nacl_plt_entry[2],
9790 ptr + 8);
9791 put_arm_insn (htab, output_bfd,
9792 elf32_arm_nacl_plt_entry[3]
9793 | (tail_displacement & 0x00ffffff),
9794 ptr + 12);
9796 else if (htab->fdpic_p)
9798 const bfd_vma *plt_entry = using_thumb_only (htab)
9799 ? elf32_arm_fdpic_thumb_plt_entry
9800 : elf32_arm_fdpic_plt_entry;
9802 /* Fill in the Thumb stub if needed. */
9803 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9805 put_thumb_insn (htab, output_bfd,
9806 elf32_arm_plt_thumb_stub[0], ptr - 4);
9807 put_thumb_insn (htab, output_bfd,
9808 elf32_arm_plt_thumb_stub[1], ptr - 2);
9810 /* As we are using 32 bit instructions even for the Thumb
9811 version, we have to use 'put_arm_insn' instead of
9812 'put_thumb_insn'. */
9813 put_arm_insn (htab, output_bfd, plt_entry[0], ptr + 0);
9814 put_arm_insn (htab, output_bfd, plt_entry[1], ptr + 4);
9815 put_arm_insn (htab, output_bfd, plt_entry[2], ptr + 8);
9816 put_arm_insn (htab, output_bfd, plt_entry[3], ptr + 12);
9817 bfd_put_32 (output_bfd, got_offset, ptr + 16);
9819 if (!(info->flags & DF_BIND_NOW))
9821 /* funcdesc_value_reloc_offset. */
9822 bfd_put_32 (output_bfd,
9823 htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
9824 ptr + 20);
9825 put_arm_insn (htab, output_bfd, plt_entry[6], ptr + 24);
9826 put_arm_insn (htab, output_bfd, plt_entry[7], ptr + 28);
9827 put_arm_insn (htab, output_bfd, plt_entry[8], ptr + 32);
9828 put_arm_insn (htab, output_bfd, plt_entry[9], ptr + 36);
9831 else if (using_thumb_only (htab))
9833 /* PR ld/16017: Generate thumb only PLT entries. */
9834 if (!using_thumb2 (htab))
9836 /* FIXME: We ought to be able to generate thumb-1 PLT
9837 instructions... */
9838 _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9839 output_bfd);
9840 return false;
9843 /* Calculate the displacement between the PLT slot and the entry in
9844 the GOT. The 12-byte offset accounts for the value produced by
9845 adding to pc in the 3rd instruction of the PLT stub. */
9846 got_displacement = got_address - (plt_address + 12);
9848 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9849 instead of 'put_thumb_insn'. */
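/* The displacement's 16-bit halves are scattered into the
   imm4:i:imm3:imm8 fields of the MOVW/MOVT pair that starts the entry.  */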
9850 put_arm_insn (htab, output_bfd,
9851 elf32_thumb2_plt_entry[0]
9852 | ((got_displacement & 0x000000ff) << 16)
9853 | ((got_displacement & 0x00000700) << 20)
9854 | ((got_displacement & 0x00000800) >> 1)
9855 | ((got_displacement & 0x0000f000) >> 12),
9856 ptr + 0);
9857 put_arm_insn (htab, output_bfd,
9858 elf32_thumb2_plt_entry[1]
9859 | ((got_displacement & 0x00ff0000) )
9860 | ((got_displacement & 0x07000000) << 4)
9861 | ((got_displacement & 0x08000000) >> 17)
9862 | ((got_displacement & 0xf0000000) >> 28),
9863 ptr + 4);
9864 put_arm_insn (htab, output_bfd,
9865 elf32_thumb2_plt_entry[2],
9866 ptr + 8);
9867 put_arm_insn (htab, output_bfd,
9868 elf32_thumb2_plt_entry[3],
9869 ptr + 12);
9871 else
9873 /* Calculate the displacement between the PLT slot and the
9874 entry in the GOT. The eight-byte offset accounts for the
9875 value produced by adding to pc in the first instruction
9876 of the PLT stub. */
9877 got_displacement = got_address - (plt_address + 8);
9879 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9881 put_thumb_insn (htab, output_bfd,
9882 elf32_arm_plt_thumb_stub[0], ptr - 4);
9883 put_thumb_insn (htab, output_bfd,
9884 elf32_arm_plt_thumb_stub[1], ptr - 2);
9887 if (!elf32_arm_use_long_plt_entry)
9889 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
9891 put_arm_insn (htab, output_bfd,
9892 elf32_arm_plt_entry_short[0]
9893 | ((got_displacement & 0x0ff00000) >> 20),
9894 ptr + 0);
9895 put_arm_insn (htab, output_bfd,
9896 elf32_arm_plt_entry_short[1]
9897 | ((got_displacement & 0x000ff000) >> 12),
9898 ptr + 4);
9899 put_arm_insn (htab, output_bfd,
9900 elf32_arm_plt_entry_short[2]
9901 | (got_displacement & 0x00000fff),
9902 ptr + 8);
9903 #ifdef FOUR_WORD_PLT
9904 bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
9905 #endif
9907 else
9909 put_arm_insn (htab, output_bfd,
9910 elf32_arm_plt_entry_long[0]
9911 | ((got_displacement & 0xf0000000) >> 28),
9912 ptr + 0);
9913 put_arm_insn (htab, output_bfd,
9914 elf32_arm_plt_entry_long[1]
9915 | ((got_displacement & 0x0ff00000) >> 20),
9916 ptr + 4);
9917 put_arm_insn (htab, output_bfd,
9918 elf32_arm_plt_entry_long[2]
9919 | ((got_displacement & 0x000ff000) >> 12),
9920 ptr + 8);
9921 put_arm_insn (htab, output_bfd,
9922 elf32_arm_plt_entry_long[3]
9923 | (got_displacement & 0x00000fff),
9924 ptr + 12);
9928 /* Fill in the entry in the .rel(a).(i)plt section. */
9929 rel.r_offset = got_address;
9930 rel.r_addend = 0;
9931 if (dynindx == -1)
9933 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9934 The dynamic linker or static executable then calls SYM_VALUE
9935 to determine the correct run-time value of the .igot.plt entry. */
9936 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9937 initial_got_entry = sym_value;
9939 else
9941 /* For FDPIC we will have to resolve an R_ARM_FUNCDESC_VALUE
9942 relocation used by the PLT entry. */
9943 if (htab->fdpic_p)
9945 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
9946 initial_got_entry = 0;
9948 else
9950 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
9951 initial_got_entry = (splt->output_section->vma
9952 + splt->output_offset);
9954 /* PR ld/16017
9955 When thumb only we need to set the LSB for any address that
9956 will be used with an interworking branch instruction. */
9957 if (using_thumb_only (htab))
9958 initial_got_entry |= 1;
9962 /* Fill in the entry in the global offset table. */
9963 bfd_put_32 (output_bfd, initial_got_entry,
9964 sgot->contents + got_offset);
9966 if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
9968 /* Setup initial funcdesc value. */
9969 /* FIXME: we don't support lazy binding because there is a
9970 race condition between both words getting written and
9971 some other thread attempting to read them. The ARM
9972 architecture does not have an atomic 64 bit load/store
9973 instruction that could be used to prevent it; it is
9974 recommended that threaded FDPIC applications run with the
9975 LD_BIND_NOW environment variable set. */
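/* The descriptor's initial entry address points at the lazy-binding
   code emitted at offset 0x18 into this PLT entry above; the second
   word is left unresolved (see the TODO below).  */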
9976 bfd_put_32 (output_bfd, plt_address + 0x18,
9977 sgot->contents + got_offset);
9978 bfd_put_32 (output_bfd, -1 /*TODO*/,
9979 sgot->contents + got_offset + 4);
9982 if (dynindx == -1)
9983 elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
9984 else
9986 if (htab->fdpic_p)
9988 /* For FDPIC we put PLT relocations into .rel.got when not
9989 lazy binding, otherwise into .rel.plt. For now, we don't
9990 support lazy binding, so put it in .rel.got. */
9991 if (info->flags & DF_BIND_NOW)
9992 elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelgot, &rel);
9993 else
9994 elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelplt, &rel);
9996 else
9998 loc = srel->contents + plt_index * RELOC_SIZE (htab);
9999 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
10003 return true;
10006 /* Some relocations map to different relocations depending on the
10007 target. Return the real relocation. */
10009 static int
10010 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
10011 int r_type)
10013 switch (r_type)
10015 case R_ARM_TARGET1:
10016 if (globals->target1_is_rel)
10017 return R_ARM_REL32;
10018 else
10019 return R_ARM_ABS32;
10021 case R_ARM_TARGET2:
10022 return globals->target2_reloc;
10024 default:
10025 return r_type;
10029 /* Return the base VMA address which should be subtracted from real addresses
10030 when resolving @dtpoff relocation.
10031 This is PT_TLS segment p_vaddr. */
10033 static bfd_vma
10034 dtpoff_base (struct bfd_link_info *info)
10036 /* If tls_sec is NULL, we should have signalled an error already. */
10037 if (elf_hash_table (info)->tls_sec == NULL)
10038 return 0;
10039 return elf_hash_table (info)->tls_sec->vma;
10042 /* Return the relocation value for @tpoff relocation
10043 if STT_TLS virtual address is ADDRESS. */
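/* ARM uses TLS variant 1: the thread pointer points at the TCB and the
   static TLS block follows it, so the offset is the address within the
   TLS segment plus TCB_SIZE rounded up to the segment's alignment.  */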
10045 static bfd_vma
10046 tpoff (struct bfd_link_info *info, bfd_vma address)
10048 struct elf_link_hash_table *htab = elf_hash_table (info);
10049 bfd_vma base;
10051 /* If tls_sec is NULL, we should have signalled an error already. */
10052 if (htab->tls_sec == NULL)
10053 return 0;
10054 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10055 return address - htab->tls_sec->vma + base;
10058 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10059 VALUE is the relocation value. */
10061 static bfd_reloc_status_type
10062 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10064 if (value > 0xfff)
10065 return bfd_reloc_overflow;
10067 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10068 bfd_put_32 (abfd, value, data);
10069 return bfd_reloc_ok;
10072 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10073 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10074 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10076 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10077 is to then call final_link_relocate. Return other values in the
10078 case of error.
10080 FIXME: When --emit-relocs is in effect, we'll emit relocs describing
10081 the pre-relaxed code. It would be nice if the relocs were updated
10082 to match the optimization. */
10084 static bfd_reloc_status_type
10085 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
10086 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
10087 Elf_Internal_Rela *rel, unsigned long is_local)
10089 unsigned long insn;
10091 switch (ELF32_R_TYPE (rel->r_info))
10093 default:
10094 return bfd_reloc_notsupported;
10096 case R_ARM_TLS_GOTDESC:
10097 if (is_local)
10098 insn = 0;
10099 else
10101 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10102 if (insn & 1)
10103 insn -= 5; /* THUMB */
10104 else
10105 insn -= 8; /* ARM */
10107 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10108 return bfd_reloc_continue;
10110 case R_ARM_THM_TLS_DESCSEQ:
10111 /* Thumb insn. */
10112 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
10113 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
10115 if (is_local)
10116 /* nop */
10117 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10119 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
10121 if (is_local)
10122 /* nop */
10123 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10124 else
10125 /* ldr rx,[ry] */
10126 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
10128 else if ((insn & 0xff87) == 0x4780) /* blx rx */
10130 if (is_local)
10131 /* nop */
10132 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10133 else
10134 /* mov r0, rx */
10135 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
10136 contents + rel->r_offset);
10138 else
10140 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10141 /* It's a 32 bit instruction, fetch the rest of it for
10142 error generation. */
10143 insn = (insn << 16)
10144 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
10145 _bfd_error_handler
10146 /* xgettext:c-format */
10147 (_("%pB(%pA+%#" PRIx64 "): "
10148 "unexpected %s instruction '%#lx' in TLS trampoline"),
10149 input_bfd, input_sec, (uint64_t) rel->r_offset,
10150 "Thumb", insn);
10151 return bfd_reloc_notsupported;
10153 break;
10155 case R_ARM_TLS_DESCSEQ:
10156 /* arm insn. */
10157 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10158 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10160 if (is_local)
10161 /* mov rx, ry */
10162 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
10163 contents + rel->r_offset);
10165 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10167 if (is_local)
10168 /* nop */
10169 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10170 else
10171 /* ldr rx,[ry] */
10172 bfd_put_32 (input_bfd, insn & 0xfffff000,
10173 contents + rel->r_offset);
10175 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
10177 if (is_local)
10178 /* nop */
10179 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10180 else
10181 /* mov r0, rx */
10182 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
10183 contents + rel->r_offset);
10185 else
10187 _bfd_error_handler
10188 /* xgettext:c-format */
10189 (_("%pB(%pA+%#" PRIx64 "): "
10190 "unexpected %s instruction '%#lx' in TLS trampoline"),
10191 input_bfd, input_sec, (uint64_t) rel->r_offset,
10192 "ARM", insn);
10193 return bfd_reloc_notsupported;
10195 break;
10197 case R_ARM_TLS_CALL:
10198 /* GD->IE relaxation, turn the instruction into 'nop' or
10199 'ldr r0, [pc,r0]' */
10200 insn = is_local ? 0xe1a00000 : 0xe79f0000;
10201 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10202 break;
10204 case R_ARM_THM_TLS_CALL:
10205 /* GD->IE relaxation. */
10206 if (!is_local)
10207 /* add r0,pc; ldr r0, [r0] */
10208 insn = 0x44786800;
10209 else if (using_thumb2 (globals))
10210 /* nop.w */
10211 insn = 0xf3af8000;
10212 else
10213 /* nop; nop */
10214 insn = 0xbf00bf00;
10216 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
10217 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
10218 break;
10220 return bfd_reloc_ok;
10223 /* For a given value of n, calculate the value of G_n as required to
10224 deal with group relocations. We return it in the form of an
10225 encoded constant-and-rotation, together with the final residual. If n is
10226 specified as less than zero, then final_residual is filled with the
10227 input value and no further action is performed. */
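/* For example, with VALUE == 0x12345678 and N == 0 this returns 0x548
   (0x48 rotated right by 10, i.e. G_0 == 0x12000000) and leaves
   0x00345678 in *FINAL_RESIDUAL.  */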
10229 static bfd_vma
10230 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10232 int current_n;
10233 bfd_vma g_n;
10234 bfd_vma encoded_g_n = 0;
10235 bfd_vma residual = value; /* Also known as Y_n. */
10237 for (current_n = 0; current_n <= n; current_n++)
10239 int shift;
10241 /* Calculate which part of the value to mask. */
10242 if (residual == 0)
10243 shift = 0;
10244 else
10246 int msb;
10248 /* Determine the most significant bit in the residual and
10249 align the resulting value to a 2-bit boundary. */
10250 for (msb = 30; msb >= 0; msb -= 2)
10251 if (residual & (3u << msb))
10252 break;
10254 /* The desired shift is now (msb - 6), or zero, whichever
10255 is the greater. */
10256 shift = msb - 6;
10257 if (shift < 0)
10258 shift = 0;
10261 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10262 g_n = residual & (0xff << shift);
10263 encoded_g_n = (g_n >> shift)
10264 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10266 /* Calculate the residual for the next time around. */
10267 residual &= ~g_n;
10270 *final_residual = residual;
10272 return encoded_g_n;
10275 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10276 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
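/* The opcode field is bits 21-24 of an ARM data-processing instruction:
   ADD is 0b0100 (1 << 23 once masked), SUB is 0b0010 (1 << 22).  */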
10278 static int
10279 identify_add_or_sub (bfd_vma insn)
10281 int opcode = insn & 0x1e00000;
10283 if (opcode == 1 << 23) /* ADD */
10284 return 1;
10286 if (opcode == 1 << 22) /* SUB */
10287 return -1;
10289 return 0;
10292 /* Perform a relocation as part of a final link. */
10294 static bfd_reloc_status_type
10295 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10296 bfd * input_bfd,
10297 bfd * output_bfd,
10298 asection * input_section,
10299 bfd_byte * contents,
10300 Elf_Internal_Rela * rel,
10301 bfd_vma value,
10302 struct bfd_link_info * info,
10303 asection * sym_sec,
10304 const char * sym_name,
10305 unsigned char st_type,
10306 enum arm_st_branch_type branch_type,
10307 struct elf_link_hash_entry * h,
10308 bool * unresolved_reloc_p,
10309 char ** error_message)
10311 unsigned long r_type = howto->type;
10312 unsigned long r_symndx;
10313 bfd_byte * hit_data = contents + rel->r_offset;
10314 bfd_vma * local_got_offsets;
10315 bfd_vma * local_tlsdesc_gotents;
10316 asection * sgot;
10317 asection * splt;
10318 asection * sreloc = NULL;
10319 asection * srelgot;
10320 bfd_vma addend;
10321 bfd_signed_vma signed_addend;
10322 unsigned char dynreloc_st_type;
10323 bfd_vma dynreloc_value;
10324 struct elf32_arm_link_hash_table * globals;
10325 struct elf32_arm_link_hash_entry *eh;
10326 union gotplt_union *root_plt;
10327 struct arm_plt_info *arm_plt;
10328 bfd_vma plt_offset;
10329 bfd_vma gotplt_offset;
10330 bool has_iplt_entry;
10331 bool resolved_to_zero;
10333 globals = elf32_arm_hash_table (info);
10334 if (globals == NULL)
10335 return bfd_reloc_notsupported;
10337 BFD_ASSERT (is_arm_elf (input_bfd));
10338 BFD_ASSERT (howto != NULL);
10340 /* Some relocation types map to different relocations depending on the
10341 target. We pick the right one here. */
10342 r_type = arm_real_reloc_type (globals, r_type);
10344 /* It is possible to have linker relaxations on some TLS access
10345 models. Update our information here. */
10346 r_type = elf32_arm_tls_transition (info, r_type, h);
10348 if (r_type != howto->type)
10349 howto = elf32_arm_howto_from_type (r_type);
10351 eh = (struct elf32_arm_link_hash_entry *) h;
10352 sgot = globals->root.sgot;
10353 local_got_offsets = elf_local_got_offsets (input_bfd);
10354 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10356 if (globals->root.dynamic_sections_created)
10357 srelgot = globals->root.srelgot;
10358 else
10359 srelgot = NULL;
10361 r_symndx = ELF32_R_SYM (rel->r_info);
10363 if (globals->use_rel)
10365 bfd_vma sign;
10367 switch (bfd_get_reloc_size (howto))
10369 case 1: addend = bfd_get_8 (input_bfd, hit_data); break;
10370 case 2: addend = bfd_get_16 (input_bfd, hit_data); break;
10371 case 4: addend = bfd_get_32 (input_bfd, hit_data); break;
10372 default: addend = 0; break;
10374 /* Note: the addend and signed_addend calculated here are
10375 incorrect for any split field. */
10376 addend &= howto->src_mask;
10377 sign = howto->src_mask & ~(howto->src_mask >> 1);
10378 signed_addend = (addend ^ sign) - sign;
10379 signed_addend = (bfd_vma) signed_addend << howto->rightshift;
10380 addend <<= howto->rightshift;
10382 else
10383 addend = signed_addend = rel->r_addend;
10385 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10386 are resolving a function call relocation. */
10387 if (using_thumb_only (globals)
10388 && (r_type == R_ARM_THM_CALL
10389 || r_type == R_ARM_THM_JUMP24)
10390 && branch_type == ST_BRANCH_TO_ARM)
10391 branch_type = ST_BRANCH_TO_THUMB;
10393 /* Record the symbol information that should be used in dynamic
10394 relocations. */
10395 dynreloc_st_type = st_type;
10396 dynreloc_value = value;
10397 if (branch_type == ST_BRANCH_TO_THUMB)
10398 dynreloc_value |= 1;
10400 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10401 VALUE appropriately for relocations that we resolve at link time. */
10402 has_iplt_entry = false;
10403 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10404 &arm_plt)
10405 && root_plt->offset != (bfd_vma) -1)
10407 plt_offset = root_plt->offset;
10408 gotplt_offset = arm_plt->got_offset;
10410 if (h == NULL || eh->is_iplt)
10412 has_iplt_entry = true;
10413 splt = globals->root.iplt;
10415 /* Populate .iplt entries here, because not all of them will
10416 be seen by finish_dynamic_symbol. The lower bit is set if
10417 we have already populated the entry. */
10418 if (plt_offset & 1)
10419 plt_offset--;
10420 else
10422 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10423 -1, dynreloc_value))
10424 root_plt->offset |= 1;
10425 else
10426 return bfd_reloc_notsupported;
10429 /* Static relocations always resolve to the .iplt entry. */
10430 st_type = STT_FUNC;
10431 value = (splt->output_section->vma
10432 + splt->output_offset
10433 + plt_offset);
10434 branch_type = ST_BRANCH_TO_ARM;
10436 /* If there are non-call relocations that resolve to the .iplt
10437 entry, then all dynamic ones must too. */
10438 if (arm_plt->noncall_refcount != 0)
10440 dynreloc_st_type = st_type;
10441 dynreloc_value = value;
10444 else
10445 /* We populate the .plt entry in finish_dynamic_symbol. */
10446 splt = globals->root.splt;
10448 else
10450 splt = NULL;
10451 plt_offset = (bfd_vma) -1;
10452 gotplt_offset = (bfd_vma) -1;
10455 resolved_to_zero = (h != NULL
10456 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10458 switch (r_type)
10460 case R_ARM_NONE:
10461 /* We don't need to find a value for this symbol. It's just a
10462 marker. */
10463 *unresolved_reloc_p = false;
10464 return bfd_reloc_ok;
10466 case R_ARM_ABS12:
10467 if (globals->root.target_os != is_vxworks)
10468 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10469 /* Fall through. */
10471 case R_ARM_PC24:
10472 case R_ARM_ABS32:
10473 case R_ARM_ABS32_NOI:
10474 case R_ARM_REL32:
10475 case R_ARM_REL32_NOI:
10476 case R_ARM_CALL:
10477 case R_ARM_JUMP24:
10478 case R_ARM_XPC25:
10479 case R_ARM_PREL31:
10480 case R_ARM_PLT32:
10481 /* Handle relocations which should use the PLT entry. ABS32/REL32
10482 will use the symbol's value, which may point to a PLT entry, but we
10483 don't need to handle that here. If we created a PLT entry, all
10484 branches in this object should go to it, except if the PLT is too
10485 far away, in which case a long branch stub should be inserted. */
10486 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10487 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10488 && r_type != R_ARM_CALL
10489 && r_type != R_ARM_JUMP24
10490 && r_type != R_ARM_PLT32)
10491 && plt_offset != (bfd_vma) -1)
10493 /* If we've created a .plt section, and assigned a PLT entry
10494 to this function, it must either be a STT_GNU_IFUNC reference
10495 or not be known to bind locally. In other cases, we should
10496 have cleared the PLT entry by now. */
10497 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10499 value = (splt->output_section->vma
10500 + splt->output_offset
10501 + plt_offset);
10502 *unresolved_reloc_p = false;
10503 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10504 contents, rel->r_offset, value,
10505 rel->r_addend);
10508 /* When generating a shared library or PIE, these relocations
10509 are copied into the output file to be resolved at run time. */
10510 if ((bfd_link_pic (info)
10511 || globals->fdpic_p)
10512 && (input_section->flags & SEC_ALLOC)
10513 && !(globals->root.target_os == is_vxworks
10514 && strcmp (input_section->output_section->name,
10515 ".tls_vars") == 0)
10516 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10517 || !SYMBOL_CALLS_LOCAL (info, h))
10518 && !(input_bfd == globals->stub_bfd
10519 && strstr (input_section->name, STUB_SUFFIX))
10520 && (h == NULL
10521 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10522 && !resolved_to_zero)
10523 || h->root.type != bfd_link_hash_undefweak)
10524 && r_type != R_ARM_PC24
10525 && r_type != R_ARM_CALL
10526 && r_type != R_ARM_JUMP24
10527 && r_type != R_ARM_PREL31
10528 && r_type != R_ARM_PLT32)
10530 Elf_Internal_Rela outrel;
10531 bool skip, relocate;
10532 int isrofixup = 0;
10534 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10535 && !h->def_regular)
10537 char *v = _("shared object");
10539 if (bfd_link_executable (info))
10540 v = _("PIE executable");
10542 _bfd_error_handler
10543 (_("%pB: relocation %s against external or undefined symbol `%s'"
10544 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10545 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10546 return bfd_reloc_notsupported;
10549 *unresolved_reloc_p = false;
10551 if (sreloc == NULL && globals->root.dynamic_sections_created)
10553 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10554 ! globals->use_rel);
10556 if (sreloc == NULL)
10557 return bfd_reloc_notsupported;
10560 skip = false;
10561 relocate = false;
10563 outrel.r_addend = addend;
10564 outrel.r_offset =
10565 _bfd_elf_section_offset (output_bfd, info, input_section,
10566 rel->r_offset);
10567 if (outrel.r_offset == (bfd_vma) -1)
10568 skip = true;
10569 else if (outrel.r_offset == (bfd_vma) -2)
10570 skip = true, relocate = true;
10571 outrel.r_offset += (input_section->output_section->vma
10572 + input_section->output_offset);
10574 if (skip)
10575 memset (&outrel, 0, sizeof outrel);
10576 else if (h != NULL
10577 && h->dynindx != -1
10578 && (!bfd_link_pic (info)
10579 || !(bfd_link_pie (info)
10580 || SYMBOLIC_BIND (info, h))
10581 || !h->def_regular))
10582 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10583 else
10585 int symbol;
10587 /* This symbol is local, or marked to become local. */
10588 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10589 || (globals->fdpic_p && !bfd_link_pic (info)));
10590 /* On SVR4-ish systems, the dynamic loader cannot
10591 relocate the text and data segments independently,
10592 so the symbol does not matter. */
10593 symbol = 0;
10594 if (dynreloc_st_type == STT_GNU_IFUNC)
10595 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10596 to the .iplt entry. Instead, every non-call reference
10597 must use an R_ARM_IRELATIVE relocation to obtain the
10598 correct run-time address. */
10599 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10600 else if (globals->fdpic_p && !bfd_link_pic (info))
10601 isrofixup = 1;
10602 else
10603 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10604 if (globals->use_rel)
10605 relocate = true;
10606 else
10607 outrel.r_addend += dynreloc_value;
10610 if (isrofixup)
10611 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
10612 else
10613 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10615 /* If this reloc is against an external symbol, we do not want to
10616 fiddle with the addend. Otherwise, we need to include the symbol
10617 value so that it becomes an addend for the dynamic reloc. */
10618 if (! relocate)
10619 return bfd_reloc_ok;
10621 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10622 contents, rel->r_offset,
10623 dynreloc_value, (bfd_vma) 0);
10625 else switch (r_type)
10627 case R_ARM_ABS12:
10628 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10630 case R_ARM_XPC25: /* Arm BLX instruction. */
10631 case R_ARM_CALL:
10632 case R_ARM_JUMP24:
10633 case R_ARM_PC24: /* Arm B/BL instruction. */
10634 case R_ARM_PLT32:
10636 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10638 if (r_type == R_ARM_XPC25)
10640 /* Check for Arm calling Arm function. */
10641 /* FIXME: Should we translate the instruction into a BL
10642 instruction instead ? */
10643 if (branch_type != ST_BRANCH_TO_THUMB)
10644 _bfd_error_handler
10645 (_("\%pB: warning: %s BLX instruction targets"
10646 " %s function '%s'"),
10647 input_bfd, "ARM",
10648 "ARM", h ? h->root.root.string : "(local)");
10650 else if (r_type == R_ARM_PC24)
10652 /* Check for Arm calling Thumb function. */
10653 if (branch_type == ST_BRANCH_TO_THUMB)
10655 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10656 output_bfd, input_section,
10657 hit_data, sym_sec, rel->r_offset,
10658 signed_addend, value,
10659 error_message))
10660 return bfd_reloc_ok;
10661 else
10662 return bfd_reloc_dangerous;
10666 /* Check if a stub has to be inserted because the
10667 destination is too far or we are changing mode. */
10668 if ( r_type == R_ARM_CALL
10669 || r_type == R_ARM_JUMP24
10670 || r_type == R_ARM_PLT32)
10672 enum elf32_arm_stub_type stub_type = arm_stub_none;
10673 struct elf32_arm_link_hash_entry *hash;
10675 hash = (struct elf32_arm_link_hash_entry *) h;
10676 stub_type = arm_type_of_stub (info, input_section, rel,
10677 st_type, &branch_type,
10678 hash, value, sym_sec,
10679 input_bfd, sym_name);
10681 if (stub_type != arm_stub_none)
10683 /* The target is out of reach, so redirect the
10684 branch to the local stub for this function. */
10685 stub_entry = elf32_arm_get_stub_entry (input_section,
10686 sym_sec, h,
10687 rel, globals,
10688 stub_type);
10690 if (stub_entry != NULL)
10691 value = (stub_entry->stub_offset
10692 + stub_entry->stub_sec->output_offset
10693 + stub_entry->stub_sec->output_section->vma);
10695 if (plt_offset != (bfd_vma) -1)
10696 *unresolved_reloc_p = false;
10699 else
10701 /* If the call goes through a PLT entry, make sure to
10702 check distance to the right destination address. */
10703 if (plt_offset != (bfd_vma) -1)
10705 value = (splt->output_section->vma
10706 + splt->output_offset
10707 + plt_offset);
10708 *unresolved_reloc_p = false;
10709 /* The PLT entry is in ARM mode, regardless of the
10710 target function. */
10711 branch_type = ST_BRANCH_TO_ARM;
10716 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10717 where:
10718 S is the address of the symbol in the relocation.
10719 P is address of the instruction being relocated.
10720 A is the addend (extracted from the instruction) in bytes.
10722 S is held in 'value'.
10723 P is the base address of the section containing the
10724 instruction plus the offset of the reloc into that
10725 section, ie:
10726 (input_section->output_section->vma +
10727 input_section->output_offset +
10728 rel->r_offset).
10729 A is the addend, converted into bytes, ie:
10730 (signed_addend * 4)
10732 Note: None of these operations have knowledge of the pipeline
10733 size of the processor, thus it is up to the assembler to
10734 encode this information into the addend. */
10735 value -= (input_section->output_section->vma
10736 + input_section->output_offset);
10737 value -= rel->r_offset;
10738 value += signed_addend;
10740 signed_addend = value;
10741 signed_addend >>= howto->rightshift;
10743 /* A branch to an undefined weak symbol is turned into a jump to
10744 the next instruction unless a PLT entry will be created.
10745 Do the same for local undefined symbols (but not for STN_UNDEF).
10746 The jump to the next instruction is optimized as a NOP depending
10747 on the architecture. */
10748 if (h ? (h->root.type == bfd_link_hash_undefweak
10749 && plt_offset == (bfd_vma) -1)
10750 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10752 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10754 if (arch_has_arm_nop (globals))
10755 value |= 0x0320f000;
10756 else
10757 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10759 else
10761 /* Perform a signed range check. */
10762 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10763 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10764 return bfd_reloc_overflow;
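/* Bit 1 of the target cannot be encoded in the 24-bit word-aligned
   offset field; for a BLX it is carried in the H bit (bit 24) instead,
   set below.  */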
10766 addend = (value & 2);
10768 value = (signed_addend & howto->dst_mask)
10769 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10771 if (r_type == R_ARM_CALL)
10773 /* Set the H bit in the BLX instruction. */
10774 if (branch_type == ST_BRANCH_TO_THUMB)
10776 if (addend)
10777 value |= (1 << 24);
10778 else
10779 value &= ~(bfd_vma)(1 << 24);
10782 /* Select the correct instruction (BL or BLX). */
10783 /* Only if we are not handling a BL to a stub. In this
10784 case, mode switching is performed by the stub. */
10785 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10786 value |= (1 << 28);
10787 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10789 value &= ~(bfd_vma)(1 << 28);
10790 value |= (1 << 24);
10795 break;
10797 case R_ARM_ABS32:
10798 value += addend;
10799 if (branch_type == ST_BRANCH_TO_THUMB)
10800 value |= 1;
10801 break;
10803 case R_ARM_ABS32_NOI:
10804 value += addend;
10805 break;
10807 case R_ARM_REL32:
10808 value += addend;
10809 if (branch_type == ST_BRANCH_TO_THUMB)
10810 value |= 1;
10811 value -= (input_section->output_section->vma
10812 + input_section->output_offset + rel->r_offset);
10813 break;
10815 case R_ARM_REL32_NOI:
10816 value += addend;
10817 value -= (input_section->output_section->vma
10818 + input_section->output_offset + rel->r_offset);
10819 break;
10821 case R_ARM_PREL31:
10822 value -= (input_section->output_section->vma
10823 + input_section->output_offset + rel->r_offset);
10824 value += signed_addend;
10825 if (! h || h->root.type != bfd_link_hash_undefweak)
10827 /* Check for overflow. */
10828 if ((value ^ (value >> 1)) & (1 << 30))
10829 return bfd_reloc_overflow;
10831 value &= 0x7fffffff;
10832 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10833 if (branch_type == ST_BRANCH_TO_THUMB)
10834 value |= 1;
10835 break;
10838 bfd_put_32 (input_bfd, value, hit_data);
10839 return bfd_reloc_ok;
10841 case R_ARM_ABS8:
10842 value += addend;
10844 /* There is no way to tell whether the user intended to use a signed or
10845 unsigned addend. When checking for overflow we accept either,
10846 as specified by the AAELF. */
10847 if ((long) value > 0xff || (long) value < -0x80)
10848 return bfd_reloc_overflow;
10850 bfd_put_8 (input_bfd, value, hit_data);
10851 return bfd_reloc_ok;
10853 case R_ARM_ABS16:
10854 value += addend;
10856 /* See comment for R_ARM_ABS8. */
10857 if ((long) value > 0xffff || (long) value < -0x8000)
10858 return bfd_reloc_overflow;
10860 bfd_put_16 (input_bfd, value, hit_data);
10861 return bfd_reloc_ok;
10863 case R_ARM_THM_ABS5:
10864 /* Support ldr and str instructions for the thumb. */
10865 if (globals->use_rel)
10867 /* Need to refetch addend. */
10868 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10869 /* ??? Need to determine shift amount from operand size. */
10870 addend >>= howto->rightshift;
10872 value += addend;
10874 /* ??? Isn't value unsigned? */
10875 if ((long) value > 0x1f || (long) value < -0x10)
10876 return bfd_reloc_overflow;
10878 /* ??? Value needs to be properly shifted into place first. */
10879 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10880 bfd_put_16 (input_bfd, value, hit_data);
10881 return bfd_reloc_ok;
10883 case R_ARM_THM_ALU_PREL_11_0:
10884 /* Corresponds to: addw reg, pc, #offset (and similarly for subw). */
10886 bfd_vma insn;
10887 bfd_signed_vma relocation;
10889 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10890 | bfd_get_16 (input_bfd, hit_data + 2);
10892 if (globals->use_rel)
10894 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10895 | ((insn & (1 << 26)) >> 15);
10896 if (insn & 0xf00000)
10897 signed_addend = -signed_addend;
10900 relocation = value + signed_addend;
10901 relocation -= Pa (input_section->output_section->vma
10902 + input_section->output_offset
10903 + rel->r_offset);
10905 /* PR 21523: Use an absolute value. The user of this reloc will
10906 have already selected an ADD or SUB insn appropriately. */
10907 value = llabs (relocation);
10909 if (value >= 0x1000)
10910 return bfd_reloc_overflow;
10912 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10913 if (branch_type == ST_BRANCH_TO_THUMB)
10914 value |= 1;
10916 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10917 | ((value & 0x700) << 4)
10918 | ((value & 0x800) << 15);
10919 if (relocation < 0)
10920 insn |= 0xa00000;
10922 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10923 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10925 return bfd_reloc_ok;
10928 case R_ARM_THM_PC8:
10929 /* PR 10073: This reloc is not generated by the GNU toolchain,
10930 but it is supported for compatibility with third party libraries
10931 generated by other compilers, specifically the ARM/IAR. */
10933 bfd_vma insn;
10934 bfd_signed_vma relocation;
10936 insn = bfd_get_16 (input_bfd, hit_data);
10938 if (globals->use_rel)
10939 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) - 4;
10941 relocation = value + addend;
10942 relocation -= Pa (input_section->output_section->vma
10943 + input_section->output_offset
10944 + rel->r_offset);
10946 value = relocation;
10948 /* We do not check for overflow of this reloc. Although strictly
10949 speaking this is incorrect, it appears to be necessary in order
10950 to work with IAR generated relocs. Since GCC and GAS do not
10951 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10952 a problem for them. */
10953 value &= 0x3fc;
10955 insn = (insn & 0xff00) | (value >> 2);
10957 bfd_put_16 (input_bfd, insn, hit_data);
10959 return bfd_reloc_ok;
10962 case R_ARM_THM_PC12:
10963 /* Corresponds to: ldr.w reg, [pc, #offset]. */
10965 bfd_vma insn;
10966 bfd_signed_vma relocation;
10968 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10969 | bfd_get_16 (input_bfd, hit_data + 2);
10971 if (globals->use_rel)
10973 signed_addend = insn & 0xfff;
10974 if (!(insn & (1 << 23)))
10975 signed_addend = -signed_addend;
10978 relocation = value + signed_addend;
10979 relocation -= Pa (input_section->output_section->vma
10980 + input_section->output_offset
10981 + rel->r_offset);
10983 value = relocation;
10985 if (value >= 0x1000)
10986 return bfd_reloc_overflow;
10988 insn = (insn & 0xff7ff000) | value;
10989 if (relocation >= 0)
10990 insn |= (1 << 23);
10992 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10993 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10995 return bfd_reloc_ok;
10998 case R_ARM_THM_XPC22:
10999 case R_ARM_THM_CALL:
11000 case R_ARM_THM_JUMP24:
11001 /* Thumb BL (branch long instruction). */
11003 bfd_vma relocation;
11004 bfd_vma reloc_sign;
11005 bool overflow = false;
11006 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11007 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11008 bfd_signed_vma reloc_signed_max;
11009 bfd_signed_vma reloc_signed_min;
11010 bfd_vma check;
11011 bfd_signed_vma signed_check;
11012 int bitsize;
11013 const int thumb2 = using_thumb2 (globals);
11014 const int thumb2_bl = using_thumb2_bl (globals);
11016 /* A branch to an undefined weak symbol is turned into a jump to
11017 the next instruction unless a PLT entry will be created.
11018 The jump to the next instruction is optimized as a NOP.W for
11019 Thumb-2 enabled architectures. */
11020 if (h && h->root.type == bfd_link_hash_undefweak
11021 && plt_offset == (bfd_vma) -1)
11023 if (thumb2)
11025 bfd_put_16 (input_bfd, 0xf3af, hit_data);
11026 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
11028 else
11030 bfd_put_16 (input_bfd, 0xe000, hit_data);
11031 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
11033 return bfd_reloc_ok;
11036 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
11037 with Thumb-1) involving the J1 and J2 bits. */
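/* The branch offset is sign_extend (S:I1:I2:imm10:imm11:0), where
   I1 = NOT (J1 XOR S) and I2 = NOT (J2 XOR S).  */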
11038 if (globals->use_rel)
11040 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
11041 bfd_vma upper = upper_insn & 0x3ff;
11042 bfd_vma lower = lower_insn & 0x7ff;
11043 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
11044 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
11045 bfd_vma i1 = j1 ^ s ? 0 : 1;
11046 bfd_vma i2 = j2 ^ s ? 0 : 1;
11048 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
11049 /* Sign extend. */
11050 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
11052 signed_addend = addend;
11055 if (r_type == R_ARM_THM_XPC22)
11057 /* Check for Thumb to Thumb call. */
11058 /* FIXME: Should we translate the instruction into a BL
11059 instruction instead ? */
11060 if (branch_type == ST_BRANCH_TO_THUMB)
11061 _bfd_error_handler
11062 (_("%pB: warning: %s BLX instruction targets"
11063 " %s function '%s'"),
11064 input_bfd, "Thumb",
11065 "Thumb", h ? h->root.root.string : "(local)");
11067 else
11069 /* If it is not a call to Thumb, assume call to Arm.
11070 If it is a call relative to a section name, then it is not a
11071 function call at all, but rather a long jump. Calls through
11072 the PLT do not require stubs. */
11073 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
11075 if (globals->use_blx && r_type == R_ARM_THM_CALL)
11077 /* Convert BL to BLX. */
11078 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11080 else if (( r_type != R_ARM_THM_CALL)
11081 && (r_type != R_ARM_THM_JUMP24))
11083 if (elf32_thumb_to_arm_stub
11084 (info, sym_name, input_bfd, output_bfd, input_section,
11085 hit_data, sym_sec, rel->r_offset, signed_addend, value,
11086 error_message))
11087 return bfd_reloc_ok;
11088 else
11089 return bfd_reloc_dangerous;
11092 else if (branch_type == ST_BRANCH_TO_THUMB
11093 && globals->use_blx
11094 && r_type == R_ARM_THM_CALL)
11096 /* Make sure this is a BL. */
11097 lower_insn |= 0x1800;
11101 enum elf32_arm_stub_type stub_type = arm_stub_none;
11102 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
11104 /* Check if a stub has to be inserted because the destination
11105 is too far. */
11106 struct elf32_arm_stub_hash_entry *stub_entry;
11107 struct elf32_arm_link_hash_entry *hash;
11109 hash = (struct elf32_arm_link_hash_entry *) h;
11111 stub_type = arm_type_of_stub (info, input_section, rel,
11112 st_type, &branch_type,
11113 hash, value, sym_sec,
11114 input_bfd, sym_name);
11116 if (stub_type != arm_stub_none)
11118 /* The target is out of reach or we are changing modes, so
11119 redirect the branch to the local stub for this
11120 function. */
11121 stub_entry = elf32_arm_get_stub_entry (input_section,
11122 sym_sec, h,
11123 rel, globals,
11124 stub_type);
11125 if (stub_entry != NULL)
11127 value = (stub_entry->stub_offset
11128 + stub_entry->stub_sec->output_offset
11129 + stub_entry->stub_sec->output_section->vma);
11131 if (plt_offset != (bfd_vma) -1)
11132 *unresolved_reloc_p = false;
11135 /* If this call becomes a call to Arm, force BLX. */
11136 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
11138 if ((stub_entry
11139 && !arm_stub_is_thumb (stub_entry->stub_type))
11140 || branch_type != ST_BRANCH_TO_THUMB)
11141 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11146 /* Handle calls via the PLT. */
11147 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
11149 value = (splt->output_section->vma
11150 + splt->output_offset
11151 + plt_offset);
11153 if (globals->use_blx
11154 && r_type == R_ARM_THM_CALL
11155 && ! using_thumb_only (globals))
11157 /* If the Thumb BLX instruction is available, convert
11158 the BL to a BLX instruction to call the ARM-mode
11159 PLT entry. */
11160 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11161 branch_type = ST_BRANCH_TO_ARM;
11163 else
11165 if (! using_thumb_only (globals))
11166 /* Target the Thumb stub before the ARM PLT entry. */
11167 value -= PLT_THUMB_STUB_SIZE;
11168 branch_type = ST_BRANCH_TO_THUMB;
11170 *unresolved_reloc_p = false;
11173 relocation = value + signed_addend;
11175 relocation -= (input_section->output_section->vma
11176 + input_section->output_offset
11177 + rel->r_offset);
11179 check = relocation >> howto->rightshift;
11181 /* If this is a signed value, the rightshift just dropped
11182 leading 1 bits (assuming twos complement). */
11183 if ((bfd_signed_vma) relocation >= 0)
11184 signed_check = check;
11185 else
11186 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
11188 /* Calculate the permissible maximum and minimum values for
11189 this relocation according to whether we're relocating for
11190 Thumb-2 or not. */
11191 bitsize = howto->bitsize;
11192 if (!thumb2_bl)
11193 bitsize -= 2;
11194 reloc_signed_max = (1 << (bitsize - 1)) - 1;
11195 reloc_signed_min = ~reloc_signed_max;
11197 /* Assumes two's complement. */
11198 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11199 overflow = true;
11201 if ((lower_insn & 0x5000) == 0x4000)
11202 /* For a BLX instruction, make sure that the relocation is rounded up
11203 to a word boundary. This follows the semantics of the instruction
11204 which specifies that bit 1 of the target address will come from bit
11205 1 of the base address. */
11206 relocation = (relocation + 2) & ~ 3;
11208 /* Put RELOCATION back into the insn. Assumes two's complement.
11209 We use the Thumb-2 encoding, which is safe even if dealing with
11210 a Thumb-1 instruction by virtue of our overflow check above. */
11211 reloc_sign = (signed_check < 0) ? 1 : 0;
11212 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
11213 | ((relocation >> 12) & 0x3ff)
11214 | (reloc_sign << 10);
11215 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
11216 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
11217 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
11218 | ((relocation >> 1) & 0x7ff);
11220 /* Put the relocated value back in the object file: */
11221 bfd_put_16 (input_bfd, upper_insn, hit_data);
11222 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11224 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11226 break;
11228 case R_ARM_THM_JUMP19:
11229 /* Thumb32 conditional branch instruction. */
11231 bfd_vma relocation;
11232 bool overflow = false;
11233 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11234 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11235 bfd_signed_vma reloc_signed_max = 0xffffe;
11236 bfd_signed_vma reloc_signed_min = -0x100000;
11237 bfd_signed_vma signed_check;
11238 enum elf32_arm_stub_type stub_type = arm_stub_none;
11239 struct elf32_arm_stub_hash_entry *stub_entry;
11240 struct elf32_arm_link_hash_entry *hash;
11242 /* Need to refetch the addend, reconstruct the top three bits,
11243 and squish the two 11 bit pieces together. */
11244 if (globals->use_rel)
11246 bfd_vma S = (upper_insn & 0x0400) >> 10;
11247 bfd_vma upper = (upper_insn & 0x003f);
11248 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
11249 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
11250 bfd_vma lower = (lower_insn & 0x07ff);
11252 upper |= J1 << 6;
11253 upper |= J2 << 7;
11254 upper |= (!S) << 8;
11255 upper -= 0x0100; /* Sign extend. */
11257 addend = (upper << 12) | (lower << 1);
11258 signed_addend = addend;
11261 /* Handle calls via the PLT. */
11262 if (plt_offset != (bfd_vma) -1)
11264 value = (splt->output_section->vma
11265 + splt->output_offset
11266 + plt_offset);
11267 /* Target the Thumb stub before the ARM PLT entry. */
11268 value -= PLT_THUMB_STUB_SIZE;
11269 *unresolved_reloc_p = false;
11272 hash = (struct elf32_arm_link_hash_entry *)h;
11274 stub_type = arm_type_of_stub (info, input_section, rel,
11275 st_type, &branch_type,
11276 hash, value, sym_sec,
11277 input_bfd, sym_name);
11278 if (stub_type != arm_stub_none)
11280 stub_entry = elf32_arm_get_stub_entry (input_section,
11281 sym_sec, h,
11282 rel, globals,
11283 stub_type);
11284 if (stub_entry != NULL)
11286 value = (stub_entry->stub_offset
11287 + stub_entry->stub_sec->output_offset
11288 + stub_entry->stub_sec->output_section->vma);
11292 relocation = value + signed_addend;
11293 relocation -= (input_section->output_section->vma
11294 + input_section->output_offset
11295 + rel->r_offset);
11296 signed_check = (bfd_signed_vma) relocation;
11298 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11299 overflow = true;
11301 /* Put RELOCATION back into the insn. */
11303 bfd_vma S = (relocation & 0x00100000) >> 20;
11304 bfd_vma J2 = (relocation & 0x00080000) >> 19;
11305 bfd_vma J1 = (relocation & 0x00040000) >> 18;
11306 bfd_vma hi = (relocation & 0x0003f000) >> 12;
11307 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
11309 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
11310 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
11313 /* Put the relocated value back in the object file: */
11314 bfd_put_16 (input_bfd, upper_insn, hit_data);
11315 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11317 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11320 case R_ARM_THM_JUMP11:
11321 case R_ARM_THM_JUMP8:
11322 case R_ARM_THM_JUMP6:
11323 /* Thumb B (branch) instruction. */
11325 bfd_signed_vma relocation;
11326 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
11327 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
11328 bfd_signed_vma signed_check;
11330 /* CBZ cannot jump backward. */
11331 if (r_type == R_ARM_THM_JUMP6)
11333 reloc_signed_min = 0;
11334 if (globals->use_rel)
11335 signed_addend = ((addend & 0x200) >> 3) | ((addend & 0xf8) >> 2);
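/* CBZ/CBNZ encode the (forward-only) offset as i:imm5:'0', with i in bit 9
   and imm5 in bits 3-7 of the insn; reassemble it here.  */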
11338 relocation = value + signed_addend;
11340 relocation -= (input_section->output_section->vma
11341 + input_section->output_offset
11342 + rel->r_offset);
11344 relocation >>= howto->rightshift;
11345 signed_check = relocation;
11347 if (r_type == R_ARM_THM_JUMP6)
11348 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
11349 else
11350 relocation &= howto->dst_mask;
11351 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
11353 bfd_put_16 (input_bfd, relocation, hit_data);
11355 /* Assumes two's complement. */
11356 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11357 return bfd_reloc_overflow;
11359 return bfd_reloc_ok;
11362 case R_ARM_ALU_PCREL7_0:
11363 case R_ARM_ALU_PCREL15_8:
11364 case R_ARM_ALU_PCREL23_15:
11366 bfd_vma insn;
11367 bfd_vma relocation;
11369 insn = bfd_get_32 (input_bfd, hit_data);
11370 if (globals->use_rel)
11372 /* Extract the addend. */
11373 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11374 signed_addend = addend;
11376 relocation = value + signed_addend;
11378 relocation -= (input_section->output_section->vma
11379 + input_section->output_offset
11380 + rel->r_offset);
11381 insn = (insn & ~0xfff)
11382 | ((howto->bitpos << 7) & 0xf00)
11383 | ((relocation >> howto->bitpos) & 0xff);
11384 bfd_put_32 (input_bfd, insn, hit_data);
11386 return bfd_reloc_ok;
11388 case R_ARM_GNU_VTINHERIT:
11389 case R_ARM_GNU_VTENTRY:
11390 return bfd_reloc_ok;
11392 case R_ARM_GOTOFF32:
11393 /* Relocation is relative to the start of the
11394 global offset table. */
11396 BFD_ASSERT (sgot != NULL);
11397 if (sgot == NULL)
11398 return bfd_reloc_notsupported;
11400 /* If we are addressing a Thumb function, we need to adjust the
11401 address by one, so that attempts to call the function pointer will
11402 correctly interpret it as Thumb code. */
11403 if (branch_type == ST_BRANCH_TO_THUMB)
11404 value += 1;
11406 /* Note that sgot->output_offset is not involved in this
11407 calculation. We always want the start of .got. If we
11408 define _GLOBAL_OFFSET_TABLE in a different way, as is
11409 permitted by the ABI, we might have to change this
11410 calculation. */
11411 value -= sgot->output_section->vma;
11412 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11413 contents, rel->r_offset, value,
11414 rel->r_addend);
11416 case R_ARM_GOTPC:
11417 /* Use global offset table as symbol value. */
11418 BFD_ASSERT (sgot != NULL);
11420 if (sgot == NULL)
11421 return bfd_reloc_notsupported;
11423 *unresolved_reloc_p = false;
11424 value = sgot->output_section->vma;
11425 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11426 contents, rel->r_offset, value,
11427 rel->r_addend);
11429 case R_ARM_GOT32:
11430 case R_ARM_GOT_PREL:
11431 /* Relocation is to the entry for this symbol in the
11432 global offset table. */
11433 if (sgot == NULL)
11434 return bfd_reloc_notsupported;
11436 if (dynreloc_st_type == STT_GNU_IFUNC
11437 && plt_offset != (bfd_vma) -1
11438 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
11440 /* We have a relocation against a locally-binding STT_GNU_IFUNC
11441 symbol, and the relocation resolves directly to the runtime
11442 target rather than to the .iplt entry. This means that any
11443 .got entry would be the same value as the .igot.plt entry,
11444 so there's no point creating both. */
11445 sgot = globals->root.igotplt;
11446 value = sgot->output_offset + gotplt_offset;
11448 else if (h != NULL)
11450 bfd_vma off;
11452 off = h->got.offset;
11453 BFD_ASSERT (off != (bfd_vma) -1);
11454 if ((off & 1) != 0)
11456 /* We have already processed one GOT relocation against
11457 this symbol. */
11458 off &= ~1;
11459 if (globals->root.dynamic_sections_created
11460 && !SYMBOL_REFERENCES_LOCAL (info, h))
11461 *unresolved_reloc_p = false;
11463 else
11465 Elf_Internal_Rela outrel;
11466 int isrofixup = 0;
11468 if (((h->dynindx != -1) || globals->fdpic_p)
11469 && !SYMBOL_REFERENCES_LOCAL (info, h))
11471 /* If the symbol doesn't resolve locally in a static
11472 object, we have an undefined reference. If the
11473 symbol doesn't resolve locally in a dynamic object,
11474 it should be resolved by the dynamic linker. */
11475 if (globals->root.dynamic_sections_created)
11477 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11478 *unresolved_reloc_p = false;
11480 else
11481 outrel.r_info = 0;
11482 outrel.r_addend = 0;
11484 else
11486 if (dynreloc_st_type == STT_GNU_IFUNC)
11487 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11488 else if (bfd_link_pic (info)
11489 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
11490 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11491 else
11493 outrel.r_info = 0;
11494 if (globals->fdpic_p)
11495 isrofixup = 1;
11497 outrel.r_addend = dynreloc_value;
11500 /* The GOT entry is initialized to zero by default.
11501 See if we should install a different value. */
11502 if (outrel.r_addend != 0
11503 && (globals->use_rel || outrel.r_info == 0))
11505 bfd_put_32 (output_bfd, outrel.r_addend,
11506 sgot->contents + off);
11507 outrel.r_addend = 0;
11510 if (isrofixup)
11511 arm_elf_add_rofixup (output_bfd,
11512 elf32_arm_hash_table (info)->srofixup,
11513 sgot->output_section->vma
11514 + sgot->output_offset + off);
11516 else if (outrel.r_info != 0)
11518 outrel.r_offset = (sgot->output_section->vma
11519 + sgot->output_offset
11520 + off);
11521 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11524 h->got.offset |= 1;
11526 value = sgot->output_offset + off;
11528 else
11530 bfd_vma off;
11532 BFD_ASSERT (local_got_offsets != NULL
11533 && local_got_offsets[r_symndx] != (bfd_vma) -1);
11535 off = local_got_offsets[r_symndx];
11537 /* The offset must always be a multiple of 4. We use the
11538 least significant bit to record whether we have already
11539 generated the necessary reloc. */
11540 if ((off & 1) != 0)
11541 off &= ~1;
11542 else
11544 Elf_Internal_Rela outrel;
11545 int isrofixup = 0;
11547 if (dynreloc_st_type == STT_GNU_IFUNC)
11548 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11549 else if (bfd_link_pic (info))
11550 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11551 else
11553 outrel.r_info = 0;
11554 if (globals->fdpic_p)
11555 isrofixup = 1;
11558 /* The GOT entry is initialized to zero by default.
11559 See if we should install a different value. */
11560 if (globals->use_rel || outrel.r_info == 0)
11561 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
11563 if (isrofixup)
11564 arm_elf_add_rofixup (output_bfd,
11565 globals->srofixup,
11566 sgot->output_section->vma
11567 + sgot->output_offset + off);
11569 else if (outrel.r_info != 0)
11571 outrel.r_addend = addend + dynreloc_value;
11572 outrel.r_offset = (sgot->output_section->vma
11573 + sgot->output_offset
11574 + off);
11575 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11578 local_got_offsets[r_symndx] |= 1;
11581 value = sgot->output_offset + off;
11583 if (r_type != R_ARM_GOT32)
11584 value += sgot->output_section->vma;
11586 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11587 contents, rel->r_offset, value,
11588 rel->r_addend);
11590 case R_ARM_TLS_LDO32:
11591 value = value - dtpoff_base (info);
11593 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11594 contents, rel->r_offset, value,
11595 rel->r_addend);
11597 case R_ARM_TLS_LDM32:
11598 case R_ARM_TLS_LDM32_FDPIC:
11600 bfd_vma off;
11602 if (sgot == NULL)
11603 abort ();
11605 off = globals->tls_ldm_got.offset;
11607 if ((off & 1) != 0)
11608 off &= ~1;
11609 else
11611 /* If we don't know the module number, create a relocation
11612 for it. */
11613 if (bfd_link_dll (info))
11615 Elf_Internal_Rela outrel;
11617 if (srelgot == NULL)
11618 abort ();
11620 outrel.r_addend = 0;
11621 outrel.r_offset = (sgot->output_section->vma
11622 + sgot->output_offset + off);
11623 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
11625 if (globals->use_rel)
11626 bfd_put_32 (output_bfd, outrel.r_addend,
11627 sgot->contents + off);
11629 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11631 else
11632 bfd_put_32 (output_bfd, 1, sgot->contents + off);
11634 globals->tls_ldm_got.offset |= 1;
11637 if (r_type == R_ARM_TLS_LDM32_FDPIC)
11639 bfd_put_32 (output_bfd,
11640 globals->root.sgot->output_offset + off,
11641 contents + rel->r_offset);
11643 return bfd_reloc_ok;
11645 else
11647 value = sgot->output_section->vma + sgot->output_offset + off
11648 - (input_section->output_section->vma
11649 + input_section->output_offset + rel->r_offset);
11651 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11652 contents, rel->r_offset, value,
11653 rel->r_addend);
11657 case R_ARM_TLS_CALL:
11658 case R_ARM_THM_TLS_CALL:
11659 case R_ARM_TLS_GD32:
11660 case R_ARM_TLS_GD32_FDPIC:
11661 case R_ARM_TLS_IE32:
11662 case R_ARM_TLS_IE32_FDPIC:
11663 case R_ARM_TLS_GOTDESC:
11664 case R_ARM_TLS_DESCSEQ:
11665 case R_ARM_THM_TLS_DESCSEQ:
11667 bfd_vma off, offplt;
11668 int indx = 0;
11669 char tls_type;
11671 BFD_ASSERT (sgot != NULL);
11673 if (h != NULL)
11675 bool dyn;
11676 dyn = globals->root.dynamic_sections_created;
11677 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11678 bfd_link_pic (info),
11680 && (!bfd_link_pic (info)
11681 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11683 *unresolved_reloc_p = false;
11684 indx = h->dynindx;
11686 off = h->got.offset;
11687 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11688 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11690 else
11692 BFD_ASSERT (local_got_offsets != NULL);
11694 if (r_symndx >= elf32_arm_num_entries (input_bfd))
11696 _bfd_error_handler (_("\
11697 %pB: expected symbol index in range 0..%lu but found local symbol with index %lu"),
11698 input_bfd,
11699 (unsigned long) elf32_arm_num_entries (input_bfd),
11700 r_symndx);
11701 return false;
11703 off = local_got_offsets[r_symndx];
11704 offplt = local_tlsdesc_gotents[r_symndx];
11705 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11708 /* Linker relaxation happens from one of the
11709 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11710 if (ELF32_R_TYPE (rel->r_info) != r_type)
11711 tls_type = GOT_TLS_IE;
11713 BFD_ASSERT (tls_type != GOT_UNKNOWN);
11715 if ((off & 1) != 0)
11716 off &= ~1;
11717 else
11719 bool need_relocs = false;
11720 Elf_Internal_Rela outrel;
11721 int cur_off = off;
11723 /* The GOT entries have not been initialized yet. Do it
11724 now, and emit any relocations. If both an IE GOT and a
11725 GD GOT are necessary, we emit the GD first. */
11727 if ((bfd_link_dll (info) || indx != 0)
11728 && (h == NULL
11729 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11730 && !resolved_to_zero)
11731 || h->root.type != bfd_link_hash_undefweak))
11733 need_relocs = true;
11734 BFD_ASSERT (srelgot != NULL);
11737 if (tls_type & GOT_TLS_GDESC)
11739 bfd_byte *loc;
11741 /* We should have relaxed, unless this is an undefined
11742 weak symbol. */
11743 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11744 || bfd_link_dll (info));
11745 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11746 <= globals->root.sgotplt->size);
11748 outrel.r_addend = 0;
11749 outrel.r_offset = (globals->root.sgotplt->output_section->vma
11750 + globals->root.sgotplt->output_offset
11751 + offplt
11752 + globals->sgotplt_jump_table_size);
11754 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11755 sreloc = globals->root.srelplt;
11756 loc = sreloc->contents;
11757 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11758 BFD_ASSERT (loc + RELOC_SIZE (globals)
11759 <= sreloc->contents + sreloc->size);
11761 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11763 /* For globals, the first word in the relocation gets
11764 the relocation index and the top bit set, or zero,
11765 if we're binding now. For locals, it gets the
11766 symbol's offset in the tls section. */
11767 bfd_put_32 (output_bfd,
11768 !h ? value - elf_hash_table (info)->tls_sec->vma
11769 : info->flags & DF_BIND_NOW ? 0
11770 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11771 globals->root.sgotplt->contents + offplt
11772 + globals->sgotplt_jump_table_size);
11774 /* Second word in the relocation is always zero. */
11775 bfd_put_32 (output_bfd, 0,
11776 globals->root.sgotplt->contents + offplt
11777 + globals->sgotplt_jump_table_size + 4);
11779 if (tls_type & GOT_TLS_GD)
11781 if (need_relocs)
11783 outrel.r_addend = 0;
11784 outrel.r_offset = (sgot->output_section->vma
11785 + sgot->output_offset
11786 + cur_off);
11787 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11789 if (globals->use_rel)
11790 bfd_put_32 (output_bfd, outrel.r_addend,
11791 sgot->contents + cur_off);
11793 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11795 if (indx == 0)
11796 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11797 sgot->contents + cur_off + 4);
11798 else
11800 outrel.r_addend = 0;
11801 outrel.r_info = ELF32_R_INFO (indx,
11802 R_ARM_TLS_DTPOFF32);
11803 outrel.r_offset += 4;
11805 if (globals->use_rel)
11806 bfd_put_32 (output_bfd, outrel.r_addend,
11807 sgot->contents + cur_off + 4);
11809 elf32_arm_add_dynreloc (output_bfd, info,
11810 srelgot, &outrel);
11813 else
11815 /* If we are not emitting relocations for a
11816 general dynamic reference, then we must be in a
11817 static link or an executable link with the
11818 symbol binding locally. Mark it as belonging
11819 to module 1, the executable. */
11820 bfd_put_32 (output_bfd, 1,
11821 sgot->contents + cur_off);
11822 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11823 sgot->contents + cur_off + 4);
11826 cur_off += 8;
11829 if (tls_type & GOT_TLS_IE)
11831 if (need_relocs)
11833 if (indx == 0)
11834 outrel.r_addend = value - dtpoff_base (info);
11835 else
11836 outrel.r_addend = 0;
11837 outrel.r_offset = (sgot->output_section->vma
11838 + sgot->output_offset
11839 + cur_off);
11840 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11842 if (globals->use_rel)
11843 bfd_put_32 (output_bfd, outrel.r_addend,
11844 sgot->contents + cur_off);
11846 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11848 else
11849 bfd_put_32 (output_bfd, tpoff (info, value),
11850 sgot->contents + cur_off);
11851 cur_off += 4;
11854 if (h != NULL)
11855 h->got.offset |= 1;
11856 else
11857 local_got_offsets[r_symndx] |= 1;
11860 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11861 off += 8;
11862 else if (tls_type & GOT_TLS_GDESC)
11863 off = offplt;
11865 if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
11866 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL)
11868 bfd_signed_vma offset;
11869 /* TLS stubs are ARM mode. The original symbol is a
11870 data object, so branch_type is bogus. */
11871 branch_type = ST_BRANCH_TO_ARM;
11872 enum elf32_arm_stub_type stub_type
11873 = arm_type_of_stub (info, input_section, rel,
11874 st_type, &branch_type,
11875 (struct elf32_arm_link_hash_entry *)h,
11876 globals->tls_trampoline, globals->root.splt,
11877 input_bfd, sym_name);
11879 if (stub_type != arm_stub_none)
11881 struct elf32_arm_stub_hash_entry *stub_entry
11882 = elf32_arm_get_stub_entry
11883 (input_section, globals->root.splt, 0, rel,
11884 globals, stub_type);
11885 offset = (stub_entry->stub_offset
11886 + stub_entry->stub_sec->output_offset
11887 + stub_entry->stub_sec->output_section->vma);
11889 else
11890 offset = (globals->root.splt->output_section->vma
11891 + globals->root.splt->output_offset
11892 + globals->tls_trampoline);
11894 if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL)
11896 unsigned long inst;
11898 offset -= (input_section->output_section->vma
11899 + input_section->output_offset
11900 + rel->r_offset + 8);
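/* Convert the byte offset to a 24-bit word offset and merge it with the
   opcode: 0xeb...... is BL, 0xfa...... is the immediate form of BLX.  */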
11902 inst = offset >> 2;
11903 inst &= 0x00ffffff;
11904 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11906 else
11908 /* Thumb blx encodes the offset in a complicated
11909 fashion. */
11910 unsigned upper_insn, lower_insn;
11911 unsigned neg;
11913 offset -= (input_section->output_section->vma
11914 + input_section->output_offset
11915 + rel->r_offset + 4);
11917 if (stub_type != arm_stub_none
11918 && arm_stub_is_thumb (stub_type))
11920 lower_insn = 0xd000;
11922 else
11924 lower_insn = 0xc000;
11925 /* Round up the offset to a word boundary. */
11926 offset = (offset + 2) & ~2;
11929 neg = offset < 0;
11930 upper_insn = (0xf000
11931 | ((offset >> 12) & 0x3ff)
11932 | (neg << 10));
11933 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
11934 | (((!((offset >> 22) & 1)) ^ neg) << 11)
11935 | ((offset >> 1) & 0x7ff);
11936 bfd_put_16 (input_bfd, upper_insn, hit_data);
11937 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11938 return bfd_reloc_ok;
11941 /* These relocations need special care: besides the fact
11942 that they point somewhere in .gotplt, the addend must be
11943 adjusted according to the type of instruction
11944 they refer to. */
11945 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
11947 unsigned long data, insn;
11948 unsigned thumb;
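/* The word currently at the relocated address holds the distance back to the
   referencing instruction, with bit 0 flagging a Thumb-mode reference.  */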
11950 data = bfd_get_signed_32 (input_bfd, hit_data);
11951 thumb = data & 1;
11952 data &= ~1ul;
11954 if (thumb)
11956 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
11957 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
11958 insn = (insn << 16)
11959 | bfd_get_16 (input_bfd,
11960 contents + rel->r_offset - data + 2);
11961 if ((insn & 0xf800c000) == 0xf000c000)
11962 /* bl/blx */
11963 value = -6;
11964 else if ((insn & 0xffffff00) == 0x4400)
11965 /* add */
11966 value = -5;
11967 else
11969 _bfd_error_handler
11970 /* xgettext:c-format */
11971 (_("%pB(%pA+%#" PRIx64 "): "
11972 "unexpected %s instruction '%#lx' "
11973 "referenced by TLS_GOTDESC"),
11974 input_bfd, input_section, (uint64_t) rel->r_offset,
11975 "Thumb", insn);
11976 return bfd_reloc_notsupported;
11979 else
11981 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
11983 switch (insn >> 24)
11985 case 0xeb: /* bl */
11986 case 0xfa: /* blx */
11987 value = -4;
11988 break;
11990 case 0xe0: /* add */
11991 value = -8;
11992 break;
11994 default:
11995 _bfd_error_handler
11996 /* xgettext:c-format */
11997 (_("%pB(%pA+%#" PRIx64 "): "
11998 "unexpected %s instruction '%#lx' "
11999 "referenced by TLS_GOTDESC"),
12000 input_bfd, input_section, (uint64_t) rel->r_offset,
12001 "ARM", insn);
12002 return bfd_reloc_notsupported;
12006 value += ((globals->root.sgotplt->output_section->vma
12007 + globals->root.sgotplt->output_offset + off)
12008 - (input_section->output_section->vma
12009 + input_section->output_offset
12010 + rel->r_offset)
12011 + globals->sgotplt_jump_table_size);
12013 else
12014 value = ((globals->root.sgot->output_section->vma
12015 + globals->root.sgot->output_offset + off)
12016 - (input_section->output_section->vma
12017 + input_section->output_offset + rel->r_offset));
12019 if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
12020 r_type == R_ARM_TLS_IE32_FDPIC))
12022 /* For FDPIC relocations, resolve to the offset of the GOT
12023 entry from the start of GOT. */
12024 bfd_put_32 (output_bfd,
12025 globals->root.sgot->output_offset + off,
12026 contents + rel->r_offset);
12028 return bfd_reloc_ok;
12030 else
12032 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12033 contents, rel->r_offset, value,
12034 rel->r_addend);
12038 case R_ARM_TLS_LE32:
12039 if (bfd_link_dll (info))
12041 _bfd_error_handler
12042 /* xgettext:c-format */
12043 (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12044 "in shared object"),
12045 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12046 return bfd_reloc_notsupported;
12048 else
12049 value = tpoff (info, value);
12051 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12052 contents, rel->r_offset, value,
12053 rel->r_addend);
12055 case R_ARM_V4BX:
12056 if (globals->fix_v4bx)
12058 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12060 /* Ensure that we have a BX instruction. */
12061 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12063 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12065 /* Branch to veneer. */
12066 bfd_vma glue_addr;
12067 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12068 glue_addr -= input_section->output_section->vma
12069 + input_section->output_offset
12070 + rel->r_offset + 8;
12071 insn = (insn & 0xf0000000) | 0x0a000000
12072 | ((glue_addr >> 2) & 0x00ffffff);
12074 else
12076 /* Preserve Rm (lowest four bits) and the condition code
12077 (highest four bits). Other bits encode MOV PC,Rm. */
12078 insn = (insn & 0xf000000f) | 0x01a0f000;
12081 bfd_put_32 (input_bfd, insn, hit_data);
12083 return bfd_reloc_ok;
12085 case R_ARM_MOVW_ABS_NC:
12086 case R_ARM_MOVT_ABS:
12087 case R_ARM_MOVW_PREL_NC:
12088 case R_ARM_MOVT_PREL:
12089 /* Until we properly support segment-base-relative addressing,
12090 we assume the segment base to be zero, as for the group relocations.
12091 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12092 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12093 case R_ARM_MOVW_BREL_NC:
12094 case R_ARM_MOVW_BREL:
12095 case R_ARM_MOVT_BREL:
12097 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12099 if (globals->use_rel)
12101 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12102 signed_addend = (addend ^ 0x8000) - 0x8000;
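/* The MOVW/MOVT immediate is split into imm4 (insn bits 16-19) and imm12
   (bits 0-11); the XOR/subtract above sign-extends the reassembled
   16-bit value.  */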
12105 value += signed_addend;
12107 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12108 value -= (input_section->output_section->vma
12109 + input_section->output_offset + rel->r_offset);
12111 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12112 return bfd_reloc_overflow;
12114 if (branch_type == ST_BRANCH_TO_THUMB)
12115 value |= 1;
12117 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12118 || r_type == R_ARM_MOVT_BREL)
12119 value >>= 16;
12121 insn &= 0xfff0f000;
12122 insn |= value & 0xfff;
12123 insn |= (value & 0xf000) << 4;
12124 bfd_put_32 (input_bfd, insn, hit_data);
12126 return bfd_reloc_ok;
12128 case R_ARM_THM_MOVW_ABS_NC:
12129 case R_ARM_THM_MOVT_ABS:
12130 case R_ARM_THM_MOVW_PREL_NC:
12131 case R_ARM_THM_MOVT_PREL:
12132 /* Until we properly support segment-base-relative addressing,
12133 we assume the segment base to be zero, as for the above relocations.
12134 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12135 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12136 as R_ARM_THM_MOVT_ABS. */
12137 case R_ARM_THM_MOVW_BREL_NC:
12138 case R_ARM_THM_MOVW_BREL:
12139 case R_ARM_THM_MOVT_BREL:
12141 bfd_vma insn;
12143 insn = bfd_get_16 (input_bfd, hit_data) << 16;
12144 insn |= bfd_get_16 (input_bfd, hit_data + 2);
12146 if (globals->use_rel)
12148 addend = ((insn >> 4) & 0xf000)
12149 | ((insn >> 15) & 0x0800)
12150 | ((insn >> 4) & 0x0700)
12151 | (insn & 0x00ff);
12152 signed_addend = (addend ^ 0x8000) - 0x8000;
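/* The Thumb MOVW/MOVT immediate is reassembled above from the
   imm4:i:imm3:imm8 fields and then sign-extended from 16 bits.  */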
12155 value += signed_addend;
12157 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12158 value -= (input_section->output_section->vma
12159 + input_section->output_offset + rel->r_offset);
12161 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12162 return bfd_reloc_overflow;
12164 if (branch_type == ST_BRANCH_TO_THUMB)
12165 value |= 1;
12167 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12168 || r_type == R_ARM_THM_MOVT_BREL)
12169 value >>= 16;
12171 insn &= 0xfbf08f00;
12172 insn |= (value & 0xf000) << 4;
12173 insn |= (value & 0x0800) << 15;
12174 insn |= (value & 0x0700) << 4;
12175 insn |= (value & 0x00ff);
12177 bfd_put_16 (input_bfd, insn >> 16, hit_data);
12178 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12180 return bfd_reloc_ok;
12182 case R_ARM_ALU_PC_G0_NC:
12183 case R_ARM_ALU_PC_G1_NC:
12184 case R_ARM_ALU_PC_G0:
12185 case R_ARM_ALU_PC_G1:
12186 case R_ARM_ALU_PC_G2:
12187 case R_ARM_ALU_SB_G0_NC:
12188 case R_ARM_ALU_SB_G1_NC:
12189 case R_ARM_ALU_SB_G0:
12190 case R_ARM_ALU_SB_G1:
12191 case R_ARM_ALU_SB_G2:
12193 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12194 bfd_vma pc = input_section->output_section->vma
12195 + input_section->output_offset + rel->r_offset;
12196 /* sb is the origin of the *segment* containing the symbol. */
12197 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12198 bfd_vma residual;
12199 bfd_vma g_n;
12200 bfd_signed_vma signed_value;
12201 int group = 0;
12203 /* Determine which group of bits to select. */
12204 switch (r_type)
12206 case R_ARM_ALU_PC_G0_NC:
12207 case R_ARM_ALU_PC_G0:
12208 case R_ARM_ALU_SB_G0_NC:
12209 case R_ARM_ALU_SB_G0:
12210 group = 0;
12211 break;
12213 case R_ARM_ALU_PC_G1_NC:
12214 case R_ARM_ALU_PC_G1:
12215 case R_ARM_ALU_SB_G1_NC:
12216 case R_ARM_ALU_SB_G1:
12217 group = 1;
12218 break;
12220 case R_ARM_ALU_PC_G2:
12221 case R_ARM_ALU_SB_G2:
12222 group = 2;
12223 break;
12225 default:
12226 abort ();
12229 /* If REL, extract the addend from the insn. If RELA, it will
12230 have already been fetched for us. */
12231 if (globals->use_rel)
12233 int negative;
12234 bfd_vma constant = insn & 0xff;
12235 bfd_vma rotation = (insn & 0xf00) >> 8;
12237 if (rotation == 0)
12238 signed_addend = constant;
12239 else
12241 /* Compensate for the fact that in the instruction, the
12242 rotation is stored in multiples of 2 bits. */
12243 rotation *= 2;
12245 /* Rotate "constant" right by "rotation" bits. */
12246 signed_addend = (constant >> rotation) |
12247 (constant << (8 * sizeof (bfd_vma) - rotation));
12250 /* Determine if the instruction is an ADD or a SUB.
12251 (For REL, this determines the sign of the addend.) */
12252 negative = identify_add_or_sub (insn);
12253 if (negative == 0)
12255 _bfd_error_handler
12256 /* xgettext:c-format */
12257 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12258 "are allowed for ALU group relocations"),
12259 input_bfd, input_section, (uint64_t) rel->r_offset);
12260 return bfd_reloc_overflow;
12263 signed_addend *= negative;
12266 /* Compute the value (X) to go in the place. */
12267 if (r_type == R_ARM_ALU_PC_G0_NC
12268 || r_type == R_ARM_ALU_PC_G1_NC
12269 || r_type == R_ARM_ALU_PC_G0
12270 || r_type == R_ARM_ALU_PC_G1
12271 || r_type == R_ARM_ALU_PC_G2)
12272 /* PC relative. */
12273 signed_value = value - pc + signed_addend;
12274 else
12275 /* Section base relative. */
12276 signed_value = value - sb + signed_addend;
12278 /* If the target symbol is a Thumb function, then set the
12279 Thumb bit in the address. */
12280 if (branch_type == ST_BRANCH_TO_THUMB)
12281 signed_value |= 1;
12283 /* Calculate the value of the relevant G_n, in encoded
12284 constant-with-rotation format. */
12285 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12286 group, &residual);
12288 /* Check for overflow if required. */
12289 if ((r_type == R_ARM_ALU_PC_G0
12290 || r_type == R_ARM_ALU_PC_G1
12291 || r_type == R_ARM_ALU_PC_G2
12292 || r_type == R_ARM_ALU_SB_G0
12293 || r_type == R_ARM_ALU_SB_G1
12294 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12296 _bfd_error_handler
12297 /* xgettext:c-format */
12298 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12299 "splitting %#" PRIx64 " for group relocation %s"),
12300 input_bfd, input_section, (uint64_t) rel->r_offset,
12301 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12302 howto->name);
12303 return bfd_reloc_overflow;
12306 /* Mask out the value and the ADD/SUB part of the opcode; take care
12307 not to destroy the S bit. */
12308 insn &= 0xff1ff000;
12310 /* Set the opcode according to whether the value to go in the
12311 place is negative. */
12312 if (signed_value < 0)
12313 insn |= 1 << 22;
12314 else
12315 insn |= 1 << 23;
12317 /* Encode the offset. */
12318 insn |= g_n;
12320 bfd_put_32 (input_bfd, insn, hit_data);
12322 return bfd_reloc_ok;
12324 case R_ARM_LDR_PC_G0:
12325 case R_ARM_LDR_PC_G1:
12326 case R_ARM_LDR_PC_G2:
12327 case R_ARM_LDR_SB_G0:
12328 case R_ARM_LDR_SB_G1:
12329 case R_ARM_LDR_SB_G2:
12331 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12332 bfd_vma pc = input_section->output_section->vma
12333 + input_section->output_offset + rel->r_offset;
12334 /* sb is the origin of the *segment* containing the symbol. */
12335 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12336 bfd_vma residual;
12337 bfd_signed_vma signed_value;
12338 int group = 0;
12340 /* Determine which groups of bits to calculate. */
12341 switch (r_type)
12343 case R_ARM_LDR_PC_G0:
12344 case R_ARM_LDR_SB_G0:
12345 group = 0;
12346 break;
12348 case R_ARM_LDR_PC_G1:
12349 case R_ARM_LDR_SB_G1:
12350 group = 1;
12351 break;
12353 case R_ARM_LDR_PC_G2:
12354 case R_ARM_LDR_SB_G2:
12355 group = 2;
12356 break;
12358 default:
12359 abort ();
12362 /* If REL, extract the addend from the insn. If RELA, it will
12363 have already been fetched for us. */
12364 if (globals->use_rel)
12366 int negative = (insn & (1 << 23)) ? 1 : -1;
12367 signed_addend = negative * (insn & 0xfff);
12370 /* Compute the value (X) to go in the place. */
12371 if (r_type == R_ARM_LDR_PC_G0
12372 || r_type == R_ARM_LDR_PC_G1
12373 || r_type == R_ARM_LDR_PC_G2)
12374 /* PC relative. */
12375 signed_value = value - pc + signed_addend;
12376 else
12377 /* Section base relative. */
12378 signed_value = value - sb + signed_addend;
12380 /* Calculate the value of the relevant G_{n-1} to obtain
12381 the residual at that stage. */
12382 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12383 group - 1, &residual);
12385 /* Check for overflow. */
12386 if (residual >= 0x1000)
12388 _bfd_error_handler
12389 /* xgettext:c-format */
12390 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12391 "splitting %#" PRIx64 " for group relocation %s"),
12392 input_bfd, input_section, (uint64_t) rel->r_offset,
12393 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12394 howto->name);
12395 return bfd_reloc_overflow;
12398 /* Mask out the value and U bit. */
12399 insn &= 0xff7ff000;
12401 /* Set the U bit if the value to go in the place is non-negative. */
12402 if (signed_value >= 0)
12403 insn |= 1 << 23;
12405 /* Encode the offset. */
12406 insn |= residual;
12408 bfd_put_32 (input_bfd, insn, hit_data);
12410 return bfd_reloc_ok;
12412 case R_ARM_LDRS_PC_G0:
12413 case R_ARM_LDRS_PC_G1:
12414 case R_ARM_LDRS_PC_G2:
12415 case R_ARM_LDRS_SB_G0:
12416 case R_ARM_LDRS_SB_G1:
12417 case R_ARM_LDRS_SB_G2:
12419 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12420 bfd_vma pc = input_section->output_section->vma
12421 + input_section->output_offset + rel->r_offset;
12422 /* sb is the origin of the *segment* containing the symbol. */
12423 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12424 bfd_vma residual;
12425 bfd_signed_vma signed_value;
12426 int group = 0;
12428 /* Determine which groups of bits to calculate. */
12429 switch (r_type)
12431 case R_ARM_LDRS_PC_G0:
12432 case R_ARM_LDRS_SB_G0:
12433 group = 0;
12434 break;
12436 case R_ARM_LDRS_PC_G1:
12437 case R_ARM_LDRS_SB_G1:
12438 group = 1;
12439 break;
12441 case R_ARM_LDRS_PC_G2:
12442 case R_ARM_LDRS_SB_G2:
12443 group = 2;
12444 break;
12446 default:
12447 abort ();
12450 /* If REL, extract the addend from the insn. If RELA, it will
12451 have already been fetched for us. */
12452 if (globals->use_rel)
12454 int negative = (insn & (1 << 23)) ? 1 : -1;
12455 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
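/* Addressing mode 3 (LDRH/LDRSH/LDRSB etc.) splits the 8-bit offset into two
   nibbles, at bits 8-11 and 0-3; bit 23 is the U (add/subtract) bit.  */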
12458 /* Compute the value (X) to go in the place. */
12459 if (r_type == R_ARM_LDRS_PC_G0
12460 || r_type == R_ARM_LDRS_PC_G1
12461 || r_type == R_ARM_LDRS_PC_G2)
12462 /* PC relative. */
12463 signed_value = value - pc + signed_addend;
12464 else
12465 /* Section base relative. */
12466 signed_value = value - sb + signed_addend;
12468 /* Calculate the value of the relevant G_{n-1} to obtain
12469 the residual at that stage. */
12470 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12471 group - 1, &residual);
12473 /* Check for overflow. */
12474 if (residual >= 0x100)
12476 _bfd_error_handler
12477 /* xgettext:c-format */
12478 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12479 "splitting %#" PRIx64 " for group relocation %s"),
12480 input_bfd, input_section, (uint64_t) rel->r_offset,
12481 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12482 howto->name);
12483 return bfd_reloc_overflow;
12486 /* Mask out the value and U bit. */
12487 insn &= 0xff7ff0f0;
12489 /* Set the U bit if the value to go in the place is non-negative. */
12490 if (signed_value >= 0)
12491 insn |= 1 << 23;
12493 /* Encode the offset. */
12494 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12496 bfd_put_32 (input_bfd, insn, hit_data);
12498 return bfd_reloc_ok;
12500 case R_ARM_LDC_PC_G0:
12501 case R_ARM_LDC_PC_G1:
12502 case R_ARM_LDC_PC_G2:
12503 case R_ARM_LDC_SB_G0:
12504 case R_ARM_LDC_SB_G1:
12505 case R_ARM_LDC_SB_G2:
12507 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12508 bfd_vma pc = input_section->output_section->vma
12509 + input_section->output_offset + rel->r_offset;
12510 /* sb is the origin of the *segment* containing the symbol. */
12511 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12512 bfd_vma residual;
12513 bfd_signed_vma signed_value;
12514 int group = 0;
12516 /* Determine which groups of bits to calculate. */
12517 switch (r_type)
12519 case R_ARM_LDC_PC_G0:
12520 case R_ARM_LDC_SB_G0:
12521 group = 0;
12522 break;
12524 case R_ARM_LDC_PC_G1:
12525 case R_ARM_LDC_SB_G1:
12526 group = 1;
12527 break;
12529 case R_ARM_LDC_PC_G2:
12530 case R_ARM_LDC_SB_G2:
12531 group = 2;
12532 break;
12534 default:
12535 abort ();
12538 /* If REL, extract the addend from the insn. If RELA, it will
12539 have already been fetched for us. */
12540 if (globals->use_rel)
12542 int negative = (insn & (1 << 23)) ? 1 : -1;
12543 signed_addend = negative * ((insn & 0xff) << 2);
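/* LDC stores the offset as an 8-bit word count, so scale it by four;
   bit 23 is the U (add/subtract) bit.  */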
12546 /* Compute the value (X) to go in the place. */
12547 if (r_type == R_ARM_LDC_PC_G0
12548 || r_type == R_ARM_LDC_PC_G1
12549 || r_type == R_ARM_LDC_PC_G2)
12550 /* PC relative. */
12551 signed_value = value - pc + signed_addend;
12552 else
12553 /* Section base relative. */
12554 signed_value = value - sb + signed_addend;
12556 /* Calculate the value of the relevant G_{n-1} to obtain
12557 the residual at that stage. */
12558 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12559 group - 1, &residual);
12561 /* Check for overflow. (The absolute value to go in the place must be
12562 divisible by four and, after having been divided by four, must
12563 fit in eight bits.) */
12564 if ((residual & 0x3) != 0 || residual >= 0x400)
12566 _bfd_error_handler
12567 /* xgettext:c-format */
12568 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12569 "splitting %#" PRIx64 " for group relocation %s"),
12570 input_bfd, input_section, (uint64_t) rel->r_offset,
12571 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12572 howto->name);
12573 return bfd_reloc_overflow;
12576 /* Mask out the value and U bit. */
12577 insn &= 0xff7fff00;
12579 /* Set the U bit if the value to go in the place is non-negative. */
12580 if (signed_value >= 0)
12581 insn |= 1 << 23;
12583 /* Encode the offset. */
12584 insn |= residual >> 2;
12586 bfd_put_32 (input_bfd, insn, hit_data);
12588 return bfd_reloc_ok;
12590 case R_ARM_THM_ALU_ABS_G0_NC:
12591 case R_ARM_THM_ALU_ABS_G1_NC:
12592 case R_ARM_THM_ALU_ABS_G2_NC:
12593 case R_ARM_THM_ALU_ABS_G3_NC:
12595 const int shift_array[4] = {0, 8, 16, 24};
12596 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12597 bfd_vma addr = value;
12598 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
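/* Each of the G0..G3 relocations selects one byte of the address, hence the
   shift by 0, 8, 16 or 24 bits.  */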
12600 /* Compute address. */
12601 if (globals->use_rel)
12602 signed_addend = insn & 0xff;
12603 addr += signed_addend;
12604 if (branch_type == ST_BRANCH_TO_THUMB)
12605 addr |= 1;
12606 /* Clean imm8 insn. */
12607 insn &= 0xff00;
12608 /* And update with correct part of address. */
12609 insn |= (addr >> shift) & 0xff;
12610 /* Update insn. */
12611 bfd_put_16 (input_bfd, insn, hit_data);
12614 *unresolved_reloc_p = false;
12615 return bfd_reloc_ok;
12617 case R_ARM_GOTOFFFUNCDESC:
12619 if (h == NULL)
12621 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
12622 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12624 if (r_symndx >= elf32_arm_num_entries (input_bfd))
12626 * error_message = _("local symbol index too big");
12627 return bfd_reloc_dangerous;
12630 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12631 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12632 bfd_vma seg = -1;
12634 if (bfd_link_pic (info) && dynindx == 0)
12636 * error_message = _("no dynamic index information available");
12637 return bfd_reloc_dangerous;
12640 /* Resolve relocation. */
12641 bfd_put_32 (output_bfd, (offset + sgot->output_offset),
12642 contents + rel->r_offset);
12643 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12644 not done yet. */
12645 arm_elf_fill_funcdesc (output_bfd, info,
12646 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12647 dynindx, offset, addr, dynreloc_value, seg);
12649 else
12651 int dynindx;
12652 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12653 bfd_vma addr;
12654 bfd_vma seg = -1;
12656 /* For static binaries, sym_sec can be null. */
12657 if (sym_sec)
12659 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12660 addr = dynreloc_value - sym_sec->output_section->vma;
12662 else
12664 dynindx = 0;
12665 addr = 0;
12668 if (bfd_link_pic (info) && dynindx == 0)
12670 * error_message = _("no dynamic index information available");
12671 return bfd_reloc_dangerous;
12674 /* This case cannot occur since funcdesc is allocated by
12675 the dynamic loader, so we cannot resolve the relocation. */
12676 if (h->dynindx != -1)
12678 * error_message = _("invalid dynamic index");
12679 return bfd_reloc_dangerous;
12682 /* Resolve relocation. */
12683 bfd_put_32 (output_bfd, (offset + sgot->output_offset),
12684 contents + rel->r_offset);
12685 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12686 arm_elf_fill_funcdesc (output_bfd, info,
12687 &eh->fdpic_cnts.funcdesc_offset,
12688 dynindx, offset, addr, dynreloc_value, seg);
12691 *unresolved_reloc_p = false;
12692 return bfd_reloc_ok;
12694 case R_ARM_GOTFUNCDESC:
12696 if (h != NULL)
12698 Elf_Internal_Rela outrel;
12700 /* Resolve relocation. */
12701 bfd_put_32 (output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12702 + sgot->output_offset),
12703 contents + rel->r_offset);
12704 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12705 if (h->dynindx == -1)
12707 int dynindx;
12708 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12709 bfd_vma addr;
12710 bfd_vma seg = -1;
12712 /* For static binaries sym_sec can be null. */
12713 if (sym_sec)
12715 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12716 addr = dynreloc_value - sym_sec->output_section->vma;
12718 else
12720 dynindx = 0;
12721 addr = 0;
12724 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12725 arm_elf_fill_funcdesc (output_bfd, info,
12726 &eh->fdpic_cnts.funcdesc_offset,
12727 dynindx, offset, addr, dynreloc_value, seg);
12730 /* Add a dynamic relocation on GOT entry if not already done. */
12731 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12733 if (h->dynindx == -1)
12735 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12736 if (h->root.type == bfd_link_hash_undefweak)
12737 bfd_put_32 (output_bfd, 0, sgot->contents
12738 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12739 else
12740 bfd_put_32 (output_bfd, sgot->output_section->vma
12741 + sgot->output_offset
12742 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12743 sgot->contents
12744 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12746 else
12748 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12750 outrel.r_offset = sgot->output_section->vma
12751 + sgot->output_offset
12752 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12753 outrel.r_addend = 0;
12754 if (h->dynindx == -1 && !bfd_link_pic (info))
12755 if (h->root.type == bfd_link_hash_undefweak)
12756 arm_elf_add_rofixup (output_bfd, globals->srofixup, -1);
12757 else
12758 arm_elf_add_rofixup (output_bfd, globals->srofixup,
12759 outrel.r_offset);
12760 else
12761 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12762 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12765 else
12767 /* Such a relocation against a static function should not have been
12768 emitted by the compiler. */
12769 return bfd_reloc_notsupported;
12772 *unresolved_reloc_p = false;
12773 return bfd_reloc_ok;
12775 case R_ARM_FUNCDESC:
12777 if (h == NULL)
12779 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
12780 Elf_Internal_Rela outrel;
12781 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12783 if (r_symndx >= elf32_arm_num_entries (input_bfd))
12785 * error_message = _("local symbol index too big");
12786 return bfd_reloc_dangerous;
12789 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12790 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12791 bfd_vma seg = -1;
12793 if (bfd_link_pic (info) && dynindx == 0)
12795 * error_message = _("dynamic index information not available");
12796 return bfd_reloc_dangerous;
12799 /* Replace static FUNCDESC relocation with a
12800 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12801 executable. */
12802 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12803 outrel.r_offset = input_section->output_section->vma
12804 + input_section->output_offset + rel->r_offset;
12805 outrel.r_addend = 0;
12806 if (bfd_link_pic (info))
12807 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12808 else
12809 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
12811 bfd_put_32 (input_bfd, sgot->output_section->vma
12812 + sgot->output_offset + offset, hit_data);
12814 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12815 arm_elf_fill_funcdesc (output_bfd, info,
12816 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12817 dynindx, offset, addr, dynreloc_value, seg);
12819 else
12821 if (h->dynindx == -1)
12823 int dynindx;
12824 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12825 bfd_vma addr;
12826 bfd_vma seg = -1;
12827 Elf_Internal_Rela outrel;
12829 /* For static binaries sym_sec can be null. */
12830 if (sym_sec)
12832 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12833 addr = dynreloc_value - sym_sec->output_section->vma;
12835 else
12837 dynindx = 0;
12838 addr = 0;
12841 if (bfd_link_pic (info) && dynindx == 0)
12842 abort ();
12844 /* Replace static FUNCDESC relocation with a
12845 R_ARM_RELATIVE dynamic relocation. */
12846 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12847 outrel.r_offset = input_section->output_section->vma
12848 + input_section->output_offset + rel->r_offset;
12849 outrel.r_addend = 0;
12850 if (bfd_link_pic (info))
12851 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12852 else
12853 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
12855 bfd_put_32 (input_bfd, sgot->output_section->vma
12856 + sgot->output_offset + offset, hit_data);
12858 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12859 arm_elf_fill_funcdesc (output_bfd, info,
12860 &eh->fdpic_cnts.funcdesc_offset,
12861 dynindx, offset, addr, dynreloc_value, seg);
12863 else
12865 Elf_Internal_Rela outrel;
12867 /* Add a dynamic relocation. */
12868 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12869 outrel.r_offset = input_section->output_section->vma
12870 + input_section->output_offset + rel->r_offset;
12871 outrel.r_addend = 0;
12872 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12876 *unresolved_reloc_p = false;
12877 return bfd_reloc_ok;
12879 case R_ARM_THM_BF16:
12881 bfd_vma relocation;
12882 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12883 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12885 if (globals->use_rel)
12887 bfd_vma immA = (upper_insn & 0x001f);
12888 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12889 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12890 addend = (immA << 12);
12891 addend |= (immB << 2);
12892 addend |= (immC << 1);
12893 addend |= 1;
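/* The branch offset is reassembled from the immA (5 bits), immB (10 bits)
   and immC (1 bit) fields of the insn.  */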
12894 /* Sign extend. */
12895 signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
12898 relocation = value + signed_addend;
12899 relocation -= (input_section->output_section->vma
12900 + input_section->output_offset
12901 + rel->r_offset);
12903 /* Put RELOCATION back into the insn. */
12905 bfd_vma immA = (relocation & 0x0001f000) >> 12;
12906 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12907 bfd_vma immC = (relocation & 0x00000002) >> 1;
12909 upper_insn = (upper_insn & 0xffe0) | immA;
12910 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12913 /* Put the relocated value back in the object file: */
12914 bfd_put_16 (input_bfd, upper_insn, hit_data);
12915 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12917 return bfd_reloc_ok;
12920 case R_ARM_THM_BF12:
12922 bfd_vma relocation;
12923 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12924 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12926 if (globals->use_rel)
12928 bfd_vma immA = (upper_insn & 0x0001);
12929 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12930 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12931 addend = (immA << 12);
12932 addend |= (immB << 2);
12933 addend |= (immC << 1);
12934 addend |= 1;
12935 /* Sign extend. */
12936 addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
12937 signed_addend = addend;
12940 relocation = value + signed_addend;
12941 relocation -= (input_section->output_section->vma
12942 + input_section->output_offset
12943 + rel->r_offset);
12945 /* Put RELOCATION back into the insn. */
12947 bfd_vma immA = (relocation & 0x00001000) >> 12;
12948 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12949 bfd_vma immC = (relocation & 0x00000002) >> 1;
12951 upper_insn = (upper_insn & 0xfffe) | immA;
12952 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12955 /* Put the relocated value back in the object file: */
12956 bfd_put_16 (input_bfd, upper_insn, hit_data);
12957 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12959 return bfd_reloc_ok;
12962 case R_ARM_THM_BF18:
12964 bfd_vma relocation;
12965 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12966 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12968 if (globals->use_rel)
12970 bfd_vma immA = (upper_insn & 0x007f);
12971 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12972 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12973 addend = (immA << 12);
12974 addend |= (immB << 2);
12975 addend |= (immC << 1);
12976 addend |= 1;
12977 /* Sign extend. */
12978 addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
12979 signed_addend = addend;
12982 relocation = value + signed_addend;
12983 relocation -= (input_section->output_section->vma
12984 + input_section->output_offset
12985 + rel->r_offset);
12987 /* Put RELOCATION back into the insn. */
12989 bfd_vma immA = (relocation & 0x0007f000) >> 12;
12990 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12991 bfd_vma immC = (relocation & 0x00000002) >> 1;
12993 upper_insn = (upper_insn & 0xff80) | immA;
12994 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12997 /* Put the relocated value back in the object file: */
12998 bfd_put_16 (input_bfd, upper_insn, hit_data);
12999 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
13001 return bfd_reloc_ok;
13004 default:
13005 return bfd_reloc_notsupported;
13009 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
13010 static void
13011 arm_add_to_rel (bfd * abfd,
13012 bfd_byte * address,
13013 reloc_howto_type * howto,
13014 bfd_signed_vma increment)
13016 bfd_signed_vma addend;
13018 if (howto->type == R_ARM_THM_CALL
13019 || howto->type == R_ARM_THM_JUMP24)
13021 int upper_insn, lower_insn;
13022 int upper, lower;
13024 upper_insn = bfd_get_16 (abfd, address);
13025 lower_insn = bfd_get_16 (abfd, address + 2);
13026 upper = upper_insn & 0x7ff;
13027 lower = lower_insn & 0x7ff;
13029 addend = (upper << 12) | (lower << 1);
13030 addend += increment;
13031 addend >>= 1;
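/* Split the halfword-scaled offset back into the 11-bit upper and lower
   halves of the Thumb BL instruction pair.  */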
13033 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
13034 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
13036 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
13037 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
13039 else
13041 bfd_vma contents;
13043 contents = bfd_get_32 (abfd, address);
13045 /* Get the (signed) value from the instruction. */
13046 addend = contents & howto->src_mask;
13047 if (addend & ((howto->src_mask + 1) >> 1))
13049 bfd_signed_vma mask;
13051 mask = -1;
13052 mask &= ~ howto->src_mask;
13053 addend |= mask;
13056 /* Add in the increment (which is a byte value). */
13057 switch (howto->type)
13059 default:
13060 addend += increment;
13061 break;
13063 case R_ARM_PC24:
13064 case R_ARM_PLT32:
13065 case R_ARM_CALL:
13066 case R_ARM_JUMP24:
13067 addend *= bfd_get_reloc_size (howto);
13068 addend += increment;
13070 /* Should we check for overflow here ? */
13072 /* Drop any undesired bits. */
13073 addend >>= howto->rightshift;
13074 break;
13077 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
13079 bfd_put_32 (abfd, contents, address);
13083 #define IS_ARM_TLS_RELOC(R_TYPE) \
13084 ((R_TYPE) == R_ARM_TLS_GD32 \
13085 || (R_TYPE) == R_ARM_TLS_GD32_FDPIC \
13086 || (R_TYPE) == R_ARM_TLS_LDO32 \
13087 || (R_TYPE) == R_ARM_TLS_LDM32 \
13088 || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC \
13089 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
13090 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
13091 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
13092 || (R_TYPE) == R_ARM_TLS_LE32 \
13093 || (R_TYPE) == R_ARM_TLS_IE32 \
13094 || (R_TYPE) == R_ARM_TLS_IE32_FDPIC \
13095 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
13097 /* Specific set of relocations for the GNU TLS dialect. */
13098 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
13099 ((R_TYPE) == R_ARM_TLS_GOTDESC \
13100 || (R_TYPE) == R_ARM_TLS_CALL \
13101 || (R_TYPE) == R_ARM_THM_TLS_CALL \
13102 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
13103 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13105 /* Relocate an ARM ELF section. */
13107 static int
13108 elf32_arm_relocate_section (bfd * output_bfd,
13109 struct bfd_link_info * info,
13110 bfd * input_bfd,
13111 asection * input_section,
13112 bfd_byte * contents,
13113 Elf_Internal_Rela * relocs,
13114 Elf_Internal_Sym * local_syms,
13115 asection ** local_sections)
13117 Elf_Internal_Shdr *symtab_hdr;
13118 struct elf_link_hash_entry **sym_hashes;
13119 Elf_Internal_Rela *rel;
13120 Elf_Internal_Rela *relend;
13121 const char *name;
13122 struct elf32_arm_link_hash_table * globals;
13124 globals = elf32_arm_hash_table (info);
13125 if (globals == NULL)
13126 return false;
13128 symtab_hdr = & elf_symtab_hdr (input_bfd);
13129 sym_hashes = elf_sym_hashes (input_bfd);
13131 rel = relocs;
13132 relend = relocs + input_section->reloc_count;
13133 for (; rel < relend; rel++)
13135 int r_type;
13136 reloc_howto_type * howto;
13137 unsigned long r_symndx;
13138 Elf_Internal_Sym * sym;
13139 asection * sec;
13140 struct elf_link_hash_entry * h;
13141 bfd_vma relocation;
13142 bfd_reloc_status_type r;
13143 arelent bfd_reloc;
13144 char sym_type;
13145 bool unresolved_reloc = false;
13146 char *error_message = NULL;
13148 r_symndx = ELF32_R_SYM (rel->r_info);
13149 r_type = ELF32_R_TYPE (rel->r_info);
13150 r_type = arm_real_reloc_type (globals, r_type);
13152 if ( r_type == R_ARM_GNU_VTENTRY
13153 || r_type == R_ARM_GNU_VTINHERIT)
13154 continue;
13156 howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
13158 if (howto == NULL)
13159 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
13161 h = NULL;
13162 sym = NULL;
13163 sec = NULL;
13165 if (r_symndx < symtab_hdr->sh_info)
13167 sym = local_syms + r_symndx;
13168 sym_type = ELF32_ST_TYPE (sym->st_info);
13169 sec = local_sections[r_symndx];
13171 /* An object file might have a reference to a local
13172 undefined symbol. This is a daft object file, but we
13173 should at least do something about it. V4BX & NONE
13174 relocations do not use the symbol and are explicitly
13175 allowed to use the undefined symbol, so allow those.
13176 Likewise for relocations against STN_UNDEF. */
13177 if (r_type != R_ARM_V4BX
13178 && r_type != R_ARM_NONE
13179 && r_symndx != STN_UNDEF
13180 && bfd_is_und_section (sec)
13181 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
13182 (*info->callbacks->undefined_symbol)
13183 (info, bfd_elf_string_from_elf_section
13184 (input_bfd, symtab_hdr->sh_link, sym->st_name),
13185 input_bfd, input_section,
13186 rel->r_offset, true);
13188 if (globals->use_rel)
13190 relocation = (sec->output_section->vma
13191 + sec->output_offset
13192 + sym->st_value);
13193 if (!bfd_link_relocatable (info)
13194 && (sec->flags & SEC_MERGE)
13195 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13197 asection *msec;
13198 bfd_vma addend, value;
13200 switch (r_type)
13202 case R_ARM_MOVW_ABS_NC:
13203 case R_ARM_MOVT_ABS:
13204 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13205 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
13206 addend = (addend ^ 0x8000) - 0x8000;
13207 break;
13209 case R_ARM_THM_MOVW_ABS_NC:
13210 case R_ARM_THM_MOVT_ABS:
13211 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
13212 << 16;
13213 value |= bfd_get_16 (input_bfd,
13214 contents + rel->r_offset + 2);
13215 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
13216 | ((value & 0x04000000) >> 15);
13217 addend = (addend ^ 0x8000) - 0x8000;
13218 break;
13220 default:
13221 if (howto->rightshift
13222 || (howto->src_mask & (howto->src_mask + 1)))
13224 _bfd_error_handler
13225 /* xgettext:c-format */
13226 (_("%pB(%pA+%#" PRIx64 "): "
13227 "%s relocation against SEC_MERGE section"),
13228 input_bfd, input_section,
13229 (uint64_t) rel->r_offset, howto->name);
13230 return false;
13233 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13235 /* Get the (signed) value from the instruction. */
13236 addend = value & howto->src_mask;
13237 if (addend & ((howto->src_mask + 1) >> 1))
13239 bfd_signed_vma mask;
13241 mask = -1;
13242 mask &= ~ howto->src_mask;
13243 addend |= mask;
13245 break;
13248 msec = sec;
13249 addend =
13250 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13251 - relocation;
13252 addend += msec->output_section->vma + msec->output_offset;
13254 /* Cases here must match those in the preceding
13255 switch statement. */
13256 switch (r_type)
13258 case R_ARM_MOVW_ABS_NC:
13259 case R_ARM_MOVT_ABS:
13260 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13261 | (addend & 0xfff);
13262 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13263 break;
13265 case R_ARM_THM_MOVW_ABS_NC:
13266 case R_ARM_THM_MOVT_ABS:
13267 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13268 | (addend & 0xff) | ((addend & 0x0800) << 15);
13269 bfd_put_16 (input_bfd, value >> 16,
13270 contents + rel->r_offset);
13271 bfd_put_16 (input_bfd, value,
13272 contents + rel->r_offset + 2);
13273 break;
13275 default:
13276 value = (value & ~ howto->dst_mask)
13277 | (addend & howto->dst_mask);
13278 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13279 break;
13283 else
13284 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13286 else
13288 bool warned, ignored;
13290 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13291 r_symndx, symtab_hdr, sym_hashes,
13292 h, sec, relocation,
13293 unresolved_reloc, warned, ignored);
13295 sym_type = h->type;
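/* For relocs against symbols in sections discarded by the linker (e.g. removed link-once sections), this macro zeroes the affected contents and skips the reloc. */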
13298 if (sec != NULL && discarded_section (sec))
13299 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13300 rel, 1, relend, howto, 0, contents);
13302 if (bfd_link_relocatable (info))
13304 /* This is a relocatable link. We don't have to change
13305 anything, unless the reloc is against a section symbol,
13306 in which case we have to adjust according to where the
13307 section symbol winds up in the output section. */
13308 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13310 if (globals->use_rel)
13311 arm_add_to_rel (input_bfd, contents + rel->r_offset,
13312 howto, (bfd_signed_vma) sec->output_offset);
13313 else
13314 rel->r_addend += sec->output_offset;
13316 continue;
13319 if (h != NULL)
13320 name = h->root.root.string;
13321 else
13323 name = (bfd_elf_string_from_elf_section
13324 (input_bfd, symtab_hdr->sh_link, sym->st_name));
13325 if (name == NULL || *name == '\0')
13326 name = bfd_section_name (sec);
13329 if (r_symndx != STN_UNDEF
13330 && r_type != R_ARM_NONE
13331 && (h == NULL
13332 || h->root.type == bfd_link_hash_defined
13333 || h->root.type == bfd_link_hash_defweak)
13334 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13336 _bfd_error_handler
13337 ((sym_type == STT_TLS
13338 /* xgettext:c-format */
13339 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13340 /* xgettext:c-format */
13341 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13342 input_bfd,
13343 input_section,
13344 (uint64_t) rel->r_offset,
13345 howto->name,
13346 name);
13349 /* We call elf32_arm_final_link_relocate unless we're completely
13350 done, i.e., the relaxation produced the final output we want,
13351 and we won't let anybody mess with it. Also, we have to do
13352 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13353 both in relaxed and non-relaxed cases. */
13354 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13355 || (IS_ARM_TLS_GNU_RELOC (r_type)
13356 && !((h ? elf32_arm_hash_entry (h)->tls_type :
13357 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13358 & GOT_TLS_GDESC)))
13360 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13361 contents, rel, h == NULL);
13362 /* This may have been marked unresolved because it came from
13363 a shared library. But we've just dealt with that. */
13364 unresolved_reloc = 0;
13366 else
13367 r = bfd_reloc_continue;
13369 if (r == bfd_reloc_continue)
13371 unsigned char branch_type =
13372 h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13373 : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13375 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13376 input_section, contents, rel,
13377 relocation, info, sec, name,
13378 sym_type, branch_type, h,
13379 &unresolved_reloc,
13380 &error_message);
13383 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13384 because such sections are not SEC_ALLOC and thus ld.so will
13385 not process them. */
13386 if (unresolved_reloc
13387 && !((input_section->flags & SEC_DEBUGGING) != 0
13388 && h->def_dynamic)
13389 && _bfd_elf_section_offset (output_bfd, info, input_section,
13390 rel->r_offset) != (bfd_vma) -1)
13392 _bfd_error_handler
13393 /* xgettext:c-format */
13394 (_("%pB(%pA+%#" PRIx64 "): "
13395 "unresolvable %s relocation against symbol `%s'"),
13396 input_bfd,
13397 input_section,
13398 (uint64_t) rel->r_offset,
13399 howto->name,
13400 h->root.root.string);
13401 return false;
13404 if (r != bfd_reloc_ok)
13406 switch (r)
13408 case bfd_reloc_overflow:
13409 /* If the overflowing reloc was to an undefined symbol,
13410 we have already printed one error message and there
13411 is no point complaining again. */
13412 if (!h || h->root.type != bfd_link_hash_undefined)
13413 (*info->callbacks->reloc_overflow)
13414 (info, (h ? &h->root : NULL), name, howto->name,
13415 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13416 break;
13418 case bfd_reloc_undefined:
13419 (*info->callbacks->undefined_symbol)
13420 (info, name, input_bfd, input_section, rel->r_offset, true);
13421 break;
13423 case bfd_reloc_outofrange:
13424 error_message = _("out of range");
13425 goto common_error;
13427 case bfd_reloc_notsupported:
13428 error_message = _("unsupported relocation");
13429 goto common_error;
13431 case bfd_reloc_dangerous:
13432 /* error_message should already be set. */
13433 goto common_error;
13435 default:
13436 error_message = _("unknown error");
13437 /* Fall through. */
13439 common_error:
13440 BFD_ASSERT (error_message != NULL);
13441 (*info->callbacks->reloc_dangerous)
13442 (info, error_message, input_bfd, input_section, rel->r_offset);
13443 break;
13448 return true;
13451 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13452 adds the edit to the start of the list. (The list must be built in order of
13453 ascending TINDEX: the function's callers are primarily responsible for
13454 maintaining that condition). */
13456 static void
13457 add_unwind_table_edit (arm_unwind_table_edit **head,
13458 arm_unwind_table_edit **tail,
13459 arm_unwind_edit_type type,
13460 asection *linked_section,
13461 unsigned int tindex)
13463 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13464 xmalloc (sizeof (arm_unwind_table_edit));
13466 new_edit->type = type;
13467 new_edit->linked_section = linked_section;
13468 new_edit->index = tindex;
13470 if (tindex > 0)
13472 new_edit->next = NULL;
13474 if (*tail)
13475 (*tail)->next = new_edit;
13477 (*tail) = new_edit;
13479 if (!*head)
13480 (*head) = new_edit;
13482 else
13484 new_edit->next = *head;
13486 if (!*tail)
13487 *tail = new_edit;
13489 *head = new_edit;
13493 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13495 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
13497 static void
13498 adjust_exidx_size (asection *exidx_sec, int adjust)
13500 asection *out_sec;
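/* Remember the section's original size before the first adjustment. */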
13502 if (!exidx_sec->rawsize)
13503 exidx_sec->rawsize = exidx_sec->size;
13505 bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
13506 out_sec = exidx_sec->output_section;
13507 /* Adjust size of output section. */
13508 bfd_set_section_size (out_sec, out_sec->size + adjust);
13511 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13513 static void
13514 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
13516 struct _arm_elf_section_data *exidx_arm_data;
13518 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13519 add_unwind_table_edit
13520 (&exidx_arm_data->u.exidx.unwind_edit_list,
13521 &exidx_arm_data->u.exidx.unwind_edit_tail,
13522 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13524 exidx_arm_data->additional_reloc_count++;
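/* Each .ARM.exidx entry occupies two 32-bit words, so the new CANTUNWIND marker adds 8 bytes to the section. */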
13526 adjust_exidx_size (exidx_sec, 8);
13529 /* Scan .ARM.exidx tables, and create a list describing edits which should be
13530 made to those tables, such that:
13532 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13533 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13534 codes which have been inlined into the index).
13536 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
13538 The edits are applied when the tables are written
13539 (in elf32_arm_write_section). */
13541 bool
13542 elf32_arm_fix_exidx_coverage (asection **text_section_order,
13543 unsigned int num_text_sections,
13544 struct bfd_link_info *info,
13545 bool merge_exidx_entries)
13547 bfd *inp;
13548 unsigned int last_second_word = 0, i;
13549 asection *last_exidx_sec = NULL;
13550 asection *last_text_sec = NULL;
13551 int last_unwind_type = -1;
13553 /* Walk over all EXIDX sections, and create backlinks from the corresponding
13554 text sections. */
13555 for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
13557 asection *sec;
13559 for (sec = inp->sections; sec != NULL; sec = sec->next)
13561 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
13562 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
13564 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
13565 continue;
13567 if (elf_sec->linked_to)
13569 Elf_Internal_Shdr *linked_hdr
13570 = &elf_section_data (elf_sec->linked_to)->this_hdr;
13571 struct _arm_elf_section_data *linked_sec_arm_data
13572 = get_arm_elf_section_data (linked_hdr->bfd_section);
13574 if (linked_sec_arm_data == NULL)
13575 continue;
13577 /* Link this .ARM.exidx section back from the text section it
13578 describes. */
13579 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
13584 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
13585 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
13586 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
13588 for (i = 0; i < num_text_sections; i++)
13590 asection *sec = text_section_order[i];
13591 asection *exidx_sec;
13592 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
13593 struct _arm_elf_section_data *exidx_arm_data;
13594 bfd_byte *contents = NULL;
13595 int deleted_exidx_bytes = 0;
13596 bfd_vma j;
13597 arm_unwind_table_edit *unwind_edit_head = NULL;
13598 arm_unwind_table_edit *unwind_edit_tail = NULL;
13599 Elf_Internal_Shdr *hdr;
13600 bfd *ibfd;
13602 if (arm_data == NULL)
13603 continue;
13605 exidx_sec = arm_data->u.text.arm_exidx_sec;
13606 if (exidx_sec == NULL)
13608 /* Section has no unwind data. */
13609 if (last_unwind_type == 0 || !last_exidx_sec)
13610 continue;
13612 /* Ignore zero sized sections. */
13613 if (sec->size == 0)
13614 continue;
13616 insert_cantunwind_after (last_text_sec, last_exidx_sec);
13617 last_unwind_type = 0;
13618 continue;
13621 /* Skip /DISCARD/ sections. */
13622 if (bfd_is_abs_section (exidx_sec->output_section))
13623 continue;
13625 hdr = &elf_section_data (exidx_sec)->this_hdr;
13626 if (hdr->sh_type != SHT_ARM_EXIDX)
13627 continue;
13629 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13630 if (exidx_arm_data == NULL)
13631 continue;
13633 ibfd = exidx_sec->owner;
13635 if (hdr->contents != NULL)
13636 contents = hdr->contents;
13637 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
13638 /* An error? */
13639 continue;
13641 if (last_unwind_type > 0)
13643 unsigned int first_word = bfd_get_32 (ibfd, contents);
13644 /* Add cantunwind if first unwind item does not match section
13645 start. */
13646 if (first_word != sec->vma)
13648 insert_cantunwind_after (last_text_sec, last_exidx_sec);
13649 last_unwind_type = 0;
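/* Scan the index table: each entry is two 32-bit words, the function start offset followed by either EXIDX_CANTUNWIND (1), an inlined unwind description (top bit set), or an offset to an entry in .ARM.extab. */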
13653 for (j = 0; j < hdr->sh_size; j += 8)
13655 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
13656 int unwind_type;
13657 int elide = 0;
13659 /* An EXIDX_CANTUNWIND entry. */
13660 if (second_word == 1)
13662 if (last_unwind_type == 0)
13663 elide = 1;
13664 unwind_type = 0;
13666 /* Inlined unwinding data. Merge if equal to previous. */
13667 else if ((second_word & 0x80000000) != 0)
13669 if (merge_exidx_entries
13670 && last_second_word == second_word && last_unwind_type == 1)
13671 elide = 1;
13672 unwind_type = 1;
13673 last_second_word = second_word;
13675 /* Normal table entry. In theory we could merge these too,
13676 but duplicate entries are likely to be much less common. */
13677 else
13678 unwind_type = 2;
13680 if (elide && !bfd_link_relocatable (info))
13682 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
13683 DELETE_EXIDX_ENTRY, NULL, j / 8);
13685 deleted_exidx_bytes += 8;
13688 last_unwind_type = unwind_type;
13691 /* Free contents if we allocated it ourselves. */
13692 if (contents != hdr->contents)
13693 free (contents);
13695 /* Record edits to be applied later (in elf32_arm_write_section). */
13696 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
13697 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
13699 if (deleted_exidx_bytes > 0)
13700 adjust_exidx_size (exidx_sec, - deleted_exidx_bytes);
13702 last_exidx_sec = exidx_sec;
13703 last_text_sec = sec;
13706 /* Add terminating CANTUNWIND entry. */
13707 if (!bfd_link_relocatable (info) && last_exidx_sec
13708 && last_unwind_type != 0)
13709 insert_cantunwind_after (last_text_sec, last_exidx_sec);
13711 return true;
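/* Write the linker-created glue section NAME owned by IBFD out to the output BFD, if it is present and not excluded. */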
13714 static bool
13715 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13716 bfd *ibfd, const char *name)
13718 asection *sec, *osec;
13720 sec = bfd_get_linker_section (ibfd, name);
13721 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13722 return true;
13724 osec = sec->output_section;
13725 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13726 return true;
13728 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13729 sec->output_offset, sec->size))
13730 return false;
13732 return true;
13735 static bool
13736 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13738 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13739 asection *sec, *osec;
13741 if (globals == NULL)
13742 return false;
13744 /* Invoke the regular ELF backend linker to do all the work. */
13745 if (!bfd_elf_final_link (abfd, info))
13746 return false;
13748 /* Process stub sections (e.g. BE8 encoding, ...). */
13749 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13750 unsigned int i;
13751 for (i=0; i<htab->top_id; i++)
13753 sec = htab->stub_group[i].stub_sec;
13754 /* Only process it once, in its link_sec slot. */
13755 if (sec && i == htab->stub_group[i].link_sec->id)
13757 osec = sec->output_section;
13758 elf32_arm_write_section (abfd, info, sec, sec->contents);
13759 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13760 sec->output_offset, sec->size))
13761 return false;
13765 /* Write out any glue sections now that we have created all the
13766 stubs. */
13767 if (globals->bfd_of_glue_owner != NULL)
13769 if (! elf32_arm_output_glue_section (info, abfd,
13770 globals->bfd_of_glue_owner,
13771 ARM2THUMB_GLUE_SECTION_NAME))
13772 return false;
13774 if (! elf32_arm_output_glue_section (info, abfd,
13775 globals->bfd_of_glue_owner,
13776 THUMB2ARM_GLUE_SECTION_NAME))
13777 return false;
13779 if (! elf32_arm_output_glue_section (info, abfd,
13780 globals->bfd_of_glue_owner,
13781 VFP11_ERRATUM_VENEER_SECTION_NAME))
13782 return false;
13784 if (! elf32_arm_output_glue_section (info, abfd,
13785 globals->bfd_of_glue_owner,
13786 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13787 return false;
13789 if (! elf32_arm_output_glue_section (info, abfd,
13790 globals->bfd_of_glue_owner,
13791 ARM_BX_GLUE_SECTION_NAME))
13792 return false;
13795 return true;
13798 /* Return a best guess for the machine number based on the attributes. */
13800 static unsigned int
13801 bfd_arm_get_mach_from_attributes (bfd * abfd)
13803 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13805 switch (arch)
13807 case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
13808 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13809 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13810 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13812 case TAG_CPU_ARCH_V5TE:
13814 char * name;
13816 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13817 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13819 if (name)
13821 if (strcmp (name, "IWMMXT2") == 0)
13822 return bfd_mach_arm_iWMMXt2;
13824 if (strcmp (name, "IWMMXT") == 0)
13825 return bfd_mach_arm_iWMMXt;
13827 if (strcmp (name, "XSCALE") == 0)
13829 int wmmx;
13831 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13832 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13833 switch (wmmx)
13835 case 1: return bfd_mach_arm_iWMMXt;
13836 case 2: return bfd_mach_arm_iWMMXt2;
13837 default: return bfd_mach_arm_XScale;
13842 return bfd_mach_arm_5TE;
13845 case TAG_CPU_ARCH_V5TEJ:
13846 return bfd_mach_arm_5TEJ;
13847 case TAG_CPU_ARCH_V6:
13848 return bfd_mach_arm_6;
13849 case TAG_CPU_ARCH_V6KZ:
13850 return bfd_mach_arm_6KZ;
13851 case TAG_CPU_ARCH_V6T2:
13852 return bfd_mach_arm_6T2;
13853 case TAG_CPU_ARCH_V6K:
13854 return bfd_mach_arm_6K;
13855 case TAG_CPU_ARCH_V7:
13856 return bfd_mach_arm_7;
13857 case TAG_CPU_ARCH_V6_M:
13858 return bfd_mach_arm_6M;
13859 case TAG_CPU_ARCH_V6S_M:
13860 return bfd_mach_arm_6SM;
13861 case TAG_CPU_ARCH_V7E_M:
13862 return bfd_mach_arm_7EM;
13863 case TAG_CPU_ARCH_V8:
13864 return bfd_mach_arm_8;
13865 case TAG_CPU_ARCH_V8R:
13866 return bfd_mach_arm_8R;
13867 case TAG_CPU_ARCH_V8M_BASE:
13868 return bfd_mach_arm_8M_BASE;
13869 case TAG_CPU_ARCH_V8M_MAIN:
13870 return bfd_mach_arm_8M_MAIN;
13871 case TAG_CPU_ARCH_V8_1M_MAIN:
13872 return bfd_mach_arm_8_1M_MAIN;
13873 case TAG_CPU_ARCH_V9:
13874 return bfd_mach_arm_9;
13876 default:
13877 /* Force entry to be added for any new known Tag_CPU_arch value. */
13878 BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
13880 /* Unknown Tag_CPU_arch value. */
13881 return bfd_mach_arm_unknown;
13885 /* Set the right machine number. */
13887 static bool
13888 elf32_arm_object_p (bfd *abfd)
13890 unsigned int mach;
13892 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13894 if (mach == bfd_mach_arm_unknown)
13896 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13897 mach = bfd_mach_arm_ep9312;
13898 else
13899 mach = bfd_arm_get_mach_from_attributes (abfd);
13902 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13903 return true;
13906 /* Function to keep ARM specific flags in the ELF header. */
13908 static bool
13909 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13911 if (elf_flags_init (abfd)
13912 && elf_elfheader (abfd)->e_flags != flags)
13914 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13916 if (flags & EF_ARM_INTERWORK)
13917 _bfd_error_handler
13918 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13919 abfd);
13920 else
13921 _bfd_error_handler
13922 (_("warning: clearing the interworking flag of %pB due to outside request"),
13923 abfd);
13926 else
13928 elf_elfheader (abfd)->e_flags = flags;
13929 elf_flags_init (abfd) = true;
13932 return true;
13935 /* Copy backend specific data from one object module to another. */
13937 static bool
13938 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13940 flagword in_flags;
13941 flagword out_flags;
13943 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13944 return true;
13946 in_flags = elf_elfheader (ibfd)->e_flags;
13947 out_flags = elf_elfheader (obfd)->e_flags;
13949 if (elf_flags_init (obfd)
13950 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13951 && in_flags != out_flags)
13953 /* Cannot mix APCS26 and APCS32 code. */
13954 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13955 return false;
13957 /* Cannot mix float APCS and non-float APCS code. */
13958 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13959 return false;
13961 /* If the src and dest have different interworking flags
13962 then turn off the interworking bit. */
13963 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13965 if (out_flags & EF_ARM_INTERWORK)
13966 _bfd_error_handler
13967 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13968 obfd, ibfd);
13970 in_flags &= ~EF_ARM_INTERWORK;
13973 /* Likewise for PIC, though don't warn for this case. */
13974 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13975 in_flags &= ~EF_ARM_PIC;
13978 elf_elfheader (obfd)->e_flags = in_flags;
13979 elf_flags_init (obfd) = true;
13981 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13984 /* Values for Tag_ABI_PCS_R9_use. */
13985 enum
13987 AEABI_R9_V6,
13988 AEABI_R9_SB,
13989 AEABI_R9_TLS,
13990 AEABI_R9_unused
13993 /* Values for Tag_ABI_PCS_RW_data. */
13994 enum
13996 AEABI_PCS_RW_data_absolute,
13997 AEABI_PCS_RW_data_PCrel,
13998 AEABI_PCS_RW_data_SBrel,
13999 AEABI_PCS_RW_data_unused
14002 /* Values for Tag_ABI_enum_size. */
14003 enum
14005 AEABI_enum_unused,
14006 AEABI_enum_short,
14007 AEABI_enum_wide,
14008 AEABI_enum_forced_wide
14011 /* Determine whether an object attribute tag takes an integer, a
14012 string or both. */
14014 static int
14015 elf32_arm_obj_attrs_arg_type (int tag)
14017 if (tag == Tag_compatibility)
14018 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
14019 else if (tag == Tag_nodefaults)
14020 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
14021 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
14022 return ATTR_TYPE_FLAG_STR_VAL;
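/* For the remaining tags the EABI convention applies: tags below 32 take an integer value; above that, odd-numbered tags take a string and even-numbered tags take an integer. */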
14023 else if (tag < 32)
14024 return ATTR_TYPE_FLAG_INT_VAL;
14025 else
14026 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
14029 /* The ABI defines that Tag_conformance should be emitted first, and that
14030 Tag_nodefaults should be second (if either is defined). This sets those
14031 two positions, and bumps up the position of all the remaining tags to
14032 compensate. */
14033 static int
14034 elf32_arm_obj_attrs_order (int num)
14036 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
14037 return Tag_conformance;
14038 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
14039 return Tag_nodefaults;
14040 if ((num - 2) < Tag_nodefaults)
14041 return num - 2;
14042 if ((num - 1) < Tag_conformance)
14043 return num - 1;
14044 return num;
14047 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
14048 static bool
14049 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
14051 if ((tag & 127) < 64)
14053 _bfd_error_handler
14054 (_("%pB: unknown mandatory EABI object attribute %d"),
14055 abfd, tag);
14056 bfd_set_error (bfd_error_bad_value);
14057 return false;
14059 else
14061 _bfd_error_handler
14062 (_("warning: %pB: unknown EABI object attribute %d"),
14063 abfd, tag);
14064 return true;
14068 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
14069 Returns -1 if no architecture could be read. */
14071 static int
14072 get_secondary_compatible_arch (bfd *abfd)
14074 obj_attribute *attr =
14075 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14077 /* Note: the tag and its argument below are uleb128 values, though
14078 currently-defined values fit in one byte for each. */
14079 if (attr->s
14080 && attr->s[0] == Tag_CPU_arch
14081 && (attr->s[1] & 128) != 128
14082 && attr->s[2] == 0)
14083 return attr->s[1];
14085 /* This tag is "safely ignorable", so don't complain if it looks funny. */
14086 return -1;
14089 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14090 The tag is removed if ARCH is -1. */
14092 static void
14093 set_secondary_compatible_arch (bfd *abfd, int arch)
14095 obj_attribute *attr =
14096 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14098 if (arch == -1)
14100 attr->s = NULL;
14101 return;
14104 /* Note: the tag and its argument below are uleb128 values, though
14105 currently-defined values fit in one byte for each. */
14106 if (!attr->s)
14107 attr->s = (char *) bfd_alloc (abfd, 3);
14108 attr->s[0] = Tag_CPU_arch;
14109 attr->s[1] = arch;
14110 attr->s[2] = '\0';
14113 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
14114 into account. */
14116 static int
14117 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
14118 int newtag, int secondary_compat, const char* name_table[])
14120 #define T(X) TAG_CPU_ARCH_##X
14121 int tagl, tagh, result;
14122 const int v6t2[] =
14124 T(V6T2), /* PRE_V4. */
14125 T(V6T2), /* V4. */
14126 T(V6T2), /* V4T. */
14127 T(V6T2), /* V5T. */
14128 T(V6T2), /* V5TE. */
14129 T(V6T2), /* V5TEJ. */
14130 T(V6T2), /* V6. */
14131 T(V7), /* V6KZ. */
14132 T(V6T2) /* V6T2. */
14134 const int v6k[] =
14136 T(V6K), /* PRE_V4. */
14137 T(V6K), /* V4. */
14138 T(V6K), /* V4T. */
14139 T(V6K), /* V5T. */
14140 T(V6K), /* V5TE. */
14141 T(V6K), /* V5TEJ. */
14142 T(V6K), /* V6. */
14143 T(V6KZ), /* V6KZ. */
14144 T(V7), /* V6T2. */
14145 T(V6K) /* V6K. */
14147 const int v7[] =
14149 T(V7), /* PRE_V4. */
14150 T(V7), /* V4. */
14151 T(V7), /* V4T. */
14152 T(V7), /* V5T. */
14153 T(V7), /* V5TE. */
14154 T(V7), /* V5TEJ. */
14155 T(V7), /* V6. */
14156 T(V7), /* V6KZ. */
14157 T(V7), /* V6T2. */
14158 T(V7), /* V6K. */
14159 T(V7) /* V7. */
14161 const int v6_m[] =
14163 -1, /* PRE_V4. */
14164 -1, /* V4. */
14165 T(V6K), /* V4T. */
14166 T(V6K), /* V5T. */
14167 T(V6K), /* V5TE. */
14168 T(V6K), /* V5TEJ. */
14169 T(V6K), /* V6. */
14170 T(V6KZ), /* V6KZ. */
14171 T(V7), /* V6T2. */
14172 T(V6K), /* V6K. */
14173 T(V7), /* V7. */
14174 T(V6_M) /* V6_M. */
14176 const int v6s_m[] =
14178 -1, /* PRE_V4. */
14179 -1, /* V4. */
14180 T(V6K), /* V4T. */
14181 T(V6K), /* V5T. */
14182 T(V6K), /* V5TE. */
14183 T(V6K), /* V5TEJ. */
14184 T(V6K), /* V6. */
14185 T(V6KZ), /* V6KZ. */
14186 T(V7), /* V6T2. */
14187 T(V6K), /* V6K. */
14188 T(V7), /* V7. */
14189 T(V6S_M), /* V6_M. */
14190 T(V6S_M) /* V6S_M. */
14192 const int v7e_m[] =
14194 -1, /* PRE_V4. */
14195 -1, /* V4. */
14196 T(V7E_M), /* V4T. */
14197 T(V7E_M), /* V5T. */
14198 T(V7E_M), /* V5TE. */
14199 T(V7E_M), /* V5TEJ. */
14200 T(V7E_M), /* V6. */
14201 T(V7E_M), /* V6KZ. */
14202 T(V7E_M), /* V6T2. */
14203 T(V7E_M), /* V6K. */
14204 T(V7E_M), /* V7. */
14205 T(V7E_M), /* V6_M. */
14206 T(V7E_M), /* V6S_M. */
14207 T(V7E_M) /* V7E_M. */
14209 const int v8[] =
14211 T(V8), /* PRE_V4. */
14212 T(V8), /* V4. */
14213 T(V8), /* V4T. */
14214 T(V8), /* V5T. */
14215 T(V8), /* V5TE. */
14216 T(V8), /* V5TEJ. */
14217 T(V8), /* V6. */
14218 T(V8), /* V6KZ. */
14219 T(V8), /* V6T2. */
14220 T(V8), /* V6K. */
14221 T(V8), /* V7. */
14222 T(V8), /* V6_M. */
14223 T(V8), /* V6S_M. */
14224 T(V8), /* V7E_M. */
14225 T(V8), /* V8. */
14226 T(V8), /* V8-R. */
14227 T(V8), /* V8-M.BASE. */
14228 T(V8), /* V8-M.MAIN. */
14229 T(V8), /* V8.1. */
14230 T(V8), /* V8.2. */
14231 T(V8), /* V8.3. */
14232 T(V8), /* V8.1-M.MAIN. */
14234 const int v8r[] =
14236 T(V8R), /* PRE_V4. */
14237 T(V8R), /* V4. */
14238 T(V8R), /* V4T. */
14239 T(V8R), /* V5T. */
14240 T(V8R), /* V5TE. */
14241 T(V8R), /* V5TEJ. */
14242 T(V8R), /* V6. */
14243 T(V8R), /* V6KZ. */
14244 T(V8R), /* V6T2. */
14245 T(V8R), /* V6K. */
14246 T(V8R), /* V7. */
14247 T(V8R), /* V6_M. */
14248 T(V8R), /* V6S_M. */
14249 T(V8R), /* V7E_M. */
14250 T(V8), /* V8. */
14251 T(V8R), /* V8R. */
14253 const int v8m_baseline[] =
14255 -1, /* PRE_V4. */
14256 -1, /* V4. */
14257 -1, /* V4T. */
14258 -1, /* V5T. */
14259 -1, /* V5TE. */
14260 -1, /* V5TEJ. */
14261 -1, /* V6. */
14262 -1, /* V6KZ. */
14263 -1, /* V6T2. */
14264 -1, /* V6K. */
14265 -1, /* V7. */
14266 T(V8M_BASE), /* V6_M. */
14267 T(V8M_BASE), /* V6S_M. */
14268 -1, /* V7E_M. */
14269 -1, /* V8. */
14270 -1, /* V8R. */
14271 T(V8M_BASE) /* V8-M BASELINE. */
14273 const int v8m_mainline[] =
14275 -1, /* PRE_V4. */
14276 -1, /* V4. */
14277 -1, /* V4T. */
14278 -1, /* V5T. */
14279 -1, /* V5TE. */
14280 -1, /* V5TEJ. */
14281 -1, /* V6. */
14282 -1, /* V6KZ. */
14283 -1, /* V6T2. */
14284 -1, /* V6K. */
14285 T(V8M_MAIN), /* V7. */
14286 T(V8M_MAIN), /* V6_M. */
14287 T(V8M_MAIN), /* V6S_M. */
14288 T(V8M_MAIN), /* V7E_M. */
14289 -1, /* V8. */
14290 -1, /* V8R. */
14291 T(V8M_MAIN), /* V8-M BASELINE. */
14292 T(V8M_MAIN) /* V8-M MAINLINE. */
14294 const int v8_1m_mainline[] =
14296 -1, /* PRE_V4. */
14297 -1, /* V4. */
14298 -1, /* V4T. */
14299 -1, /* V5T. */
14300 -1, /* V5TE. */
14301 -1, /* V5TEJ. */
14302 -1, /* V6. */
14303 -1, /* V6KZ. */
14304 -1, /* V6T2. */
14305 -1, /* V6K. */
14306 T(V8_1M_MAIN), /* V7. */
14307 T(V8_1M_MAIN), /* V6_M. */
14308 T(V8_1M_MAIN), /* V6S_M. */
14309 T(V8_1M_MAIN), /* V7E_M. */
14310 -1, /* V8. */
14311 -1, /* V8R. */
14312 T(V8_1M_MAIN), /* V8-M BASELINE. */
14313 T(V8_1M_MAIN), /* V8-M MAINLINE. */
14314 -1, /* Unused (18). */
14315 -1, /* Unused (19). */
14316 -1, /* Unused (20). */
14317 T(V8_1M_MAIN) /* V8.1-M MAINLINE. */
14319 const int v9[] =
14321 T(V9), /* PRE_V4. */
14322 T(V9), /* V4. */
14323 T(V9), /* V4T. */
14324 T(V9), /* V5T. */
14325 T(V9), /* V5TE. */
14326 T(V9), /* V5TEJ. */
14327 T(V9), /* V6. */
14328 T(V9), /* V6KZ. */
14329 T(V9), /* V6T2. */
14330 T(V9), /* V6K. */
14331 T(V9), /* V7. */
14332 T(V9), /* V6_M. */
14333 T(V9), /* V6S_M. */
14334 T(V9), /* V7E_M. */
14335 T(V9), /* V8. */
14336 T(V9), /* V8-R. */
14337 T(V9), /* V8-M.BASE. */
14338 T(V9), /* V8-M.MAIN. */
14339 T(V9), /* V8.1. */
14340 T(V9), /* V8.2. */
14341 T(V9), /* V8.3. */
14342 T(V9), /* V8.1-M.MAIN. */
14343 T(V9), /* V9. */
14345 const int v4t_plus_v6_m[] =
14347 -1, /* PRE_V4. */
14348 -1, /* V4. */
14349 T(V4T), /* V4T. */
14350 T(V5T), /* V5T. */
14351 T(V5TE), /* V5TE. */
14352 T(V5TEJ), /* V5TEJ. */
14353 T(V6), /* V6. */
14354 T(V6KZ), /* V6KZ. */
14355 T(V6T2), /* V6T2. */
14356 T(V6K), /* V6K. */
14357 T(V7), /* V7. */
14358 T(V6_M), /* V6_M. */
14359 T(V6S_M), /* V6S_M. */
14360 T(V7E_M), /* V7E_M. */
14361 T(V8), /* V8. */
14362 -1, /* V8R. */
14363 T(V8M_BASE), /* V8-M BASELINE. */
14364 T(V8M_MAIN), /* V8-M MAINLINE. */
14365 -1, /* Unused (18). */
14366 -1, /* Unused (19). */
14367 -1, /* Unused (20). */
14368 T(V8_1M_MAIN), /* V8.1-M MAINLINE. */
14369 T(V9), /* V9. */
14370 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
14372 const int *comb[] =
14374 v6t2,
14375 v6k,
14376 v7,
14377 v6_m,
14378 v6s_m,
14379 v7e_m,
14380 v8,
14381 v8r,
14382 v8m_baseline,
14383 v8m_mainline,
14384 NULL,
14385 NULL,
14386 NULL,
14387 v8_1m_mainline,
14388 v9,
14389 /* Pseudo-architecture. */
14390 v4t_plus_v6_m
14393 /* Check we've not got a higher architecture than we know about. */
14395 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
14397 _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
14398 return -1;
14401 /* Override old tag if we have a Tag_also_compatible_with on the output. */
14403 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
14404 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
14405 oldtag = T(V4T_PLUS_V6_M);
14407 /* And override the new tag if we have a Tag_also_compatible_with on the
14408 input. */
14410 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
14411 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
14412 newtag = T(V4T_PLUS_V6_M);
14414 tagl = (oldtag < newtag) ? oldtag : newtag;
14415 result = tagh = (oldtag > newtag) ? oldtag : newtag;
14417 /* Architectures before V6KZ add features monotonically. */
14418 if (tagh <= TAG_CPU_ARCH_V6KZ)
14419 return result;
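/* Otherwise look the combination up in comb[]: rows are indexed by the higher tag (offset from V6T2), columns by the lower tag; a NULL row or a -1 entry marks an invalid combination. */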
14421 result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
14423 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14424 as the canonical version. */
14425 if (result == T(V4T_PLUS_V6_M))
14427 result = T(V4T);
14428 *secondary_compat_out = T(V6_M);
14430 else
14431 *secondary_compat_out = -1;
14433 if (result == -1)
14435 _bfd_error_handler (_("error: conflicting CPU architectures %s vs %s in %pB"),
14436 name_table[oldtag], name_table[newtag], ibfd);
14437 return -1;
14440 return result;
14441 #undef T
14444 /* Query attributes object to see if integer divide instructions may be
14445 present in an object. */
14446 static bool
14447 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14449 int arch = attr[Tag_CPU_arch].i;
14450 int profile = attr[Tag_CPU_arch_profile].i;
14452 switch (attr[Tag_DIV_use].i)
14454 case 0:
14455 /* Integer divide allowed if instruction contained in architecture. */
14456 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14457 return true;
14458 else if (arch >= TAG_CPU_ARCH_V7E_M)
14459 return true;
14460 else
14461 return false;
14463 case 1:
14464 /* Integer divide explicitly prohibited. */
14465 return false;
14467 default:
14468 /* Unrecognised case - treat as allowing divide everywhere. */
14469 case 2:
14470 /* Integer divide allowed in ARM state. */
14471 return true;
14475 /* Query attributes object to see if integer divide instructions are
14476 forbidden to be in the object. This is not the inverse of
14477 elf32_arm_attributes_accept_div. */
14478 static bool
14479 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14481 return attr[Tag_DIV_use].i == 1;
14484 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14485 are conflicting attributes. */
14487 static bool
14488 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14490 bfd *obfd = info->output_bfd;
14491 obj_attribute *in_attr;
14492 obj_attribute *out_attr;
14493 /* Some tags have 0 = don't care, 1 = strong requirement,
14494 2 = weak requirement. */
14495 static const int order_021[3] = {0, 2, 1};
14496 int i;
14497 bool result = true;
14498 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14500 /* Skip the linker stubs file. This preserves previous behavior
14501 of accepting unknown attributes in the first input file - but
14502 is that a bug? */
14503 if (ibfd->flags & BFD_LINKER_CREATED)
14504 return true;
14506 /* Skip any input that has no attribute section.
14507 This makes it possible to link object files without an attribute
14508 section with any others. */
14509 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14510 return true;
14512 if (!elf_known_obj_attributes_proc (obfd)[0].i)
14514 /* This is the first object. Copy the attributes. */
14515 _bfd_elf_copy_obj_attributes (ibfd, obfd);
14517 out_attr = elf_known_obj_attributes_proc (obfd);
14519 /* Use the Tag_null value to indicate the attributes have been
14520 initialized. */
14521 out_attr[0].i = 1;
14523 /* We do not output objects with Tag_MPextension_use_legacy - we move
14524 the attribute's value to Tag_MPextension_use. */
14525 if (out_attr[Tag_MPextension_use_legacy].i != 0)
14527 if (out_attr[Tag_MPextension_use].i != 0
14528 && out_attr[Tag_MPextension_use_legacy].i
14529 != out_attr[Tag_MPextension_use].i)
14531 _bfd_error_handler
14532 (_("Error: %pB has both the current and legacy "
14533 "Tag_MPextension_use attributes"), ibfd);
14534 result = false;
14537 out_attr[Tag_MPextension_use] =
14538 out_attr[Tag_MPextension_use_legacy];
14539 out_attr[Tag_MPextension_use_legacy].type = 0;
14540 out_attr[Tag_MPextension_use_legacy].i = 0;
14543 /* PR 28859 and 28848: Handle the case where the first input file,
14544 eg crti.o, has a Tag_ABI_HardFP_use of 3 but no Tag_FP_arch set.
14545 Using Tag_ABI_HardFP_use in this way is deprecated, so reset the
14546 attribute to zero.
14547 FIXME: Should we handle other non-zero values of Tag_ABI_HardFP_use ? */
14548 if (out_attr[Tag_ABI_HardFP_use].i == 3 && out_attr[Tag_FP_arch].i == 0)
14549 out_attr[Tag_ABI_HardFP_use].i = 0;
14551 return result;
14554 in_attr = elf_known_obj_attributes_proc (ibfd);
14555 out_attr = elf_known_obj_attributes_proc (obfd);
14556 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14557 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14559 /* Ignore mismatches if the object doesn't use floating point or is
14560 floating point ABI independent. */
14561 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14562 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14563 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14564 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14565 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14566 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14568 _bfd_error_handler
14569 (_("error: %pB uses VFP register arguments, %pB does not"),
14570 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14571 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14572 result = false;
14576 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14578 /* Merge this attribute with existing attributes. */
14579 switch (i)
14581 case Tag_CPU_raw_name:
14582 case Tag_CPU_name:
14583 /* These are merged after Tag_CPU_arch. */
14584 break;
14586 case Tag_ABI_optimization_goals:
14587 case Tag_ABI_FP_optimization_goals:
14588 /* Use the first value seen. */
14589 break;
14591 case Tag_CPU_arch:
14593 int secondary_compat = -1, secondary_compat_out = -1;
14594 unsigned int saved_out_attr = out_attr[i].i;
14595 int arch_attr;
14596 static const char *name_table[] =
14598 /* These aren't real CPU names, but we can't guess
14599 that from the architecture version alone. */
14600 "Pre v4",
14601 "ARM v4",
14602 "ARM v4T",
14603 "ARM v5T",
14604 "ARM v5TE",
14605 "ARM v5TEJ",
14606 "ARM v6",
14607 "ARM v6KZ",
14608 "ARM v6T2",
14609 "ARM v6K",
14610 "ARM v7",
14611 "ARM v6-M",
14612 "ARM v6S-M",
14613 "ARM v7E-M",
14614 "ARM v8",
14615 "ARM v8-R",
14616 "ARM v8-M.baseline",
14617 "ARM v8-M.mainline",
14618 "ARM v8.1-A",
14619 "ARM v8.2-A",
14620 "ARM v8.3-A",
14621 "ARM v8.1-M.mainline",
14622 "ARM v9",
14625 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14626 secondary_compat = get_secondary_compatible_arch (ibfd);
14627 secondary_compat_out = get_secondary_compatible_arch (obfd);
14628 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14629 &secondary_compat_out,
14630 in_attr[i].i,
14631 secondary_compat,
14632 name_table);
14634 /* Return with error if failed to merge. */
14635 if (arch_attr == -1)
14636 return false;
14638 out_attr[i].i = arch_attr;
14640 set_secondary_compatible_arch (obfd, secondary_compat_out);
14642 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14643 if (out_attr[i].i == saved_out_attr)
14644 ; /* Leave the names alone. */
14645 else if (out_attr[i].i == in_attr[i].i)
14647 /* The output architecture has been changed to match the
14648 input architecture. Use the input names. */
14649 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14650 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14651 : NULL;
14652 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14653 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14654 : NULL;
14656 else
14658 out_attr[Tag_CPU_name].s = NULL;
14659 out_attr[Tag_CPU_raw_name].s = NULL;
14662 /* If we still don't have a value for Tag_CPU_name,
14663 make one up now. Tag_CPU_raw_name remains blank. */
14664 if (out_attr[Tag_CPU_name].s == NULL
14665 && out_attr[i].i < ARRAY_SIZE (name_table))
14666 out_attr[Tag_CPU_name].s =
14667 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14669 break;
14671 case Tag_ARM_ISA_use:
14672 case Tag_THUMB_ISA_use:
14673 case Tag_WMMX_arch:
14674 case Tag_Advanced_SIMD_arch:
14675 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14676 case Tag_ABI_FP_rounding:
14677 case Tag_ABI_FP_exceptions:
14678 case Tag_ABI_FP_user_exceptions:
14679 case Tag_ABI_FP_number_model:
14680 case Tag_FP_HP_extension:
14681 case Tag_CPU_unaligned_access:
14682 case Tag_T2EE_use:
14683 case Tag_MPextension_use:
14684 case Tag_MVE_arch:
14685 case Tag_PAC_extension:
14686 case Tag_BTI_extension:
14687 case Tag_BTI_use:
14688 case Tag_PACRET_use:
14689 /* Use the largest value specified. */
14690 if (in_attr[i].i > out_attr[i].i)
14691 out_attr[i].i = in_attr[i].i;
14692 break;
14694 case Tag_ABI_align_preserved:
14695 case Tag_ABI_PCS_RO_data:
14696 /* Use the smallest value specified. */
14697 if (in_attr[i].i < out_attr[i].i)
14698 out_attr[i].i = in_attr[i].i;
14699 break;
14701 case Tag_ABI_align_needed:
14702 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14703 && (in_attr[Tag_ABI_align_preserved].i == 0
14704 || out_attr[Tag_ABI_align_preserved].i == 0))
14706 /* This error message should be enabled once all non-conformant
14707 binaries in the toolchain have had the attributes set
14708 properly.
14709 _bfd_error_handler
14710 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14711 obfd, ibfd);
14712 result = false; */
14714 /* Fall through. */
14715 case Tag_ABI_FP_denormal:
14716 case Tag_ABI_PCS_GOT_use:
14717 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14718 value if greater than 2 (for future-proofing). */
14719 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14720 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14721 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14722 out_attr[i].i = in_attr[i].i;
14723 break;
14725 case Tag_Virtualization_use:
14726 /* The virtualization tag effectively stores two bits of
14727 information: the intended use of TrustZone (in bit 0), and the
14728 intended use of Virtualization (in bit 1). */
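/* Differing non-zero values that both fit in those two bits are merged by taking the union of the intents, i.e. the result is 3. */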
14729 if (out_attr[i].i == 0)
14730 out_attr[i].i = in_attr[i].i;
14731 else if (in_attr[i].i != 0
14732 && in_attr[i].i != out_attr[i].i)
14734 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14735 out_attr[i].i = 3;
14736 else
14738 _bfd_error_handler
14739 (_("error: %pB: unable to merge virtualization attributes "
14740 "with %pB"),
14741 obfd, ibfd);
14742 result = false;
14745 break;
14747 case Tag_CPU_arch_profile:
14748 if (out_attr[i].i != in_attr[i].i)
14750 /* 0 will merge with anything.
14751 'A' and 'S' merge to 'A'.
14752 'R' and 'S' merge to 'R'.
14753 'M' and 'A|R|S' is an error. */
14754 if (out_attr[i].i == 0
14755 || (out_attr[i].i == 'S'
14756 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14757 out_attr[i].i = in_attr[i].i;
14758 else if (in_attr[i].i == 0
14759 || (in_attr[i].i == 'S'
14760 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14761 ; /* Do nothing. */
14762 else
14764 _bfd_error_handler
14765 (_("error: %pB: conflicting architecture profiles %c/%c"),
14766 ibfd,
14767 in_attr[i].i ? in_attr[i].i : '0',
14768 out_attr[i].i ? out_attr[i].i : '0');
14769 result = false;
14772 break;
14774 case Tag_DSP_extension:
14775 /* No need to change the output value if either:
14776 - the input architecture is ARMv5T or earlier (does not have DSP), or
14777 - the input is M profile, is not ARMv7E-M, and does not have DSP. */
14778 if (in_attr[Tag_CPU_arch].i <= 3
14779 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14780 && in_attr[Tag_CPU_arch].i != 13
14781 && in_attr[i].i == 0))
14782 ; /* Do nothing. */
14783 /* The output value should be 0 if DSP is part of the output architecture,
14784 i.e. the output architecture is ARMv5TE or later and
14785 has an A, R or S profile or is ARMv7E-M. */
14786 else if (out_attr[Tag_CPU_arch].i >= 4
14787 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14788 || out_attr[Tag_CPU_arch_profile].i == 'R'
14789 || out_attr[Tag_CPU_arch_profile].i == 'S'
14790 || out_attr[Tag_CPU_arch].i == 13))
14791 out_attr[i].i = 0;
14792 /* Otherwise, DSP instructions are added and not part of output
14793 architecture. */
14794 else
14795 out_attr[i].i = 1;
14796 break;
14798 case Tag_FP_arch:
14800 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14801 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14802 when it's 0. It might mean absence of FP hardware if
14803 Tag_FP_arch is zero. */
14805 #define VFP_VERSION_COUNT 9
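/* Table of {FP ISA version, number of D registers} indexed by Tag_FP_arch value; entry 0 means no FP hardware requirement. */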
14806 static const struct
14808 int ver;
14809 int regs;
14810 } vfp_versions[VFP_VERSION_COUNT] =
14812 {0, 0},
14813 {1, 16},
14814 {2, 16},
14815 {3, 32},
14816 {3, 16},
14817 {4, 32},
14818 {4, 16},
14819 {8, 32},
14820 {8, 16}
14822 int ver;
14823 int regs;
14824 int newval;
14826 /* If the output has no requirement about FP hardware,
14827 follow the requirement of the input. */
14828 if (out_attr[i].i == 0)
14830 /* This assert is still reasonable, we shouldn't
14831 produce the suspicious build attribute
14832 combination (See below for in_attr). */
14833 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14834 out_attr[i].i = in_attr[i].i;
14835 out_attr[Tag_ABI_HardFP_use].i
14836 = in_attr[Tag_ABI_HardFP_use].i;
14837 break;
14839 /* If the input has no requirement about FP hardware, do
14840 nothing. */
14841 else if (in_attr[i].i == 0)
14843 /* We used to assert that Tag_ABI_HardFP_use was
14844 zero here, but we should never assert when
14845 consuming an object file that has suspicious
14846 build attributes. The single precision variant
14847 of 'no FP architecture' is still 'no FP
14848 architecture', so we just ignore the tag in this
14849 case. */
14850 break;
14853 /* Both the input and the output have nonzero Tag_FP_arch.
14854 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14856 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14857 do nothing. */
14858 if (in_attr[Tag_ABI_HardFP_use].i == 0
14859 && out_attr[Tag_ABI_HardFP_use].i == 0)
14861 /* If the input and the output have different Tag_ABI_HardFP_use,
14862 the combination of them is 0 (implied by Tag_FP_arch). */
14863 else if (in_attr[Tag_ABI_HardFP_use].i
14864 != out_attr[Tag_ABI_HardFP_use].i)
14865 out_attr[Tag_ABI_HardFP_use].i = 0;
14867 /* Now we can handle Tag_FP_arch. */
14869 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14870 pick the biggest. */
14871 if (in_attr[i].i >= VFP_VERSION_COUNT
14872 && in_attr[i].i > out_attr[i].i)
14874 out_attr[i] = in_attr[i];
14875 break;
14877 /* The output uses the superset of input features
14878 (ISA version) and registers. */
14879 ver = vfp_versions[in_attr[i].i].ver;
14880 if (ver < vfp_versions[out_attr[i].i].ver)
14881 ver = vfp_versions[out_attr[i].i].ver;
14882 regs = vfp_versions[in_attr[i].i].regs;
14883 if (regs < vfp_versions[out_attr[i].i].regs)
14884 regs = vfp_versions[out_attr[i].i].regs;
14885 /* This assumes all possible supersets are also valid
14886 options. */
14887 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14889 if (regs == vfp_versions[newval].regs
14890 && ver == vfp_versions[newval].ver)
14891 break;
14893 out_attr[i].i = newval;
14895 break;
14896 case Tag_PCS_config:
14897 if (out_attr[i].i == 0)
14898 out_attr[i].i = in_attr[i].i;
14899 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14901 /* It's sometimes ok to mix different configs, so this is only
14902 a warning. */
14903 _bfd_error_handler
14904 (_("warning: %pB: conflicting platform configuration"), ibfd);
14906 break;
14907 case Tag_ABI_PCS_R9_use:
14908 if (in_attr[i].i != out_attr[i].i
14909 && out_attr[i].i != AEABI_R9_unused
14910 && in_attr[i].i != AEABI_R9_unused)
14912 _bfd_error_handler
14913 (_("error: %pB: conflicting use of R9"), ibfd);
14914 result = false;
14916 if (out_attr[i].i == AEABI_R9_unused)
14917 out_attr[i].i = in_attr[i].i;
14918 break;
14919 case Tag_ABI_PCS_RW_data:
14920 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14921 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14922 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14924 _bfd_error_handler
14925 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14926 ibfd);
14927 result = false;
14929 /* Use the smallest value specified. */
14930 if (in_attr[i].i < out_attr[i].i)
14931 out_attr[i].i = in_attr[i].i;
14932 break;
14933 case Tag_ABI_PCS_wchar_t:
14934 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14935 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14937 _bfd_error_handler
14938 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14939 ibfd, in_attr[i].i, out_attr[i].i);
14941 else if (in_attr[i].i && !out_attr[i].i)
14942 out_attr[i].i = in_attr[i].i;
14943 break;
14944 case Tag_ABI_enum_size:
14945 if (in_attr[i].i != AEABI_enum_unused)
14947 if (out_attr[i].i == AEABI_enum_unused
14948 || out_attr[i].i == AEABI_enum_forced_wide)
14950 /* The existing object is compatible with anything.
14951 Use whatever requirements the new object has. */
14952 out_attr[i].i = in_attr[i].i;
14954 else if (in_attr[i].i != AEABI_enum_forced_wide
14955 && out_attr[i].i != in_attr[i].i
14956 && !elf_arm_tdata (obfd)->no_enum_size_warning)
14958 static const char *aeabi_enum_names[] =
14959 { "", "variable-size", "32-bit", "" };
14960 const char *in_name =
14961 in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
14962 ? aeabi_enum_names[in_attr[i].i]
14963 : "<unknown>";
14964 const char *out_name =
14965 out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
14966 ? aeabi_enum_names[out_attr[i].i]
14967 : "<unknown>";
14968 _bfd_error_handler
14969 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14970 ibfd, in_name, out_name);
14973 break;
14974 case Tag_ABI_VFP_args:
14975 /* Already done. */
14976 break;
14977 case Tag_ABI_WMMX_args:
14978 if (in_attr[i].i != out_attr[i].i)
14980 _bfd_error_handler
14981 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14982 ibfd, obfd);
14983 result = false;
14985 break;
14986 case Tag_compatibility:
14987 /* Merged in target-independent code. */
14988 break;
14989 case Tag_ABI_HardFP_use:
14990 /* This is handled along with Tag_FP_arch. */
14991 break;
14992 case Tag_ABI_FP_16bit_format:
14993 if (in_attr[i].i != 0 && out_attr[i].i != 0)
14995 if (in_attr[i].i != out_attr[i].i)
14997 _bfd_error_handler
14998 (_("error: fp16 format mismatch between %pB and %pB"),
14999 ibfd, obfd);
15000 result = false;
15003 if (in_attr[i].i != 0)
15004 out_attr[i].i = in_attr[i].i;
15005 break;
15007 case Tag_DIV_use:
15008 /* A value of zero on input means that the divide instruction may
15009 be used if available in the base architecture as specified via
15010 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
15011 the user did not want divide instructions. A value of 2
15012 explicitly means that divide instructions were allowed in ARM
15013 and Thumb state. */
15014 if (in_attr[i].i == out_attr[i].i)
15015 /* Do nothing. */ ;
15016 else if (elf32_arm_attributes_forbid_div (in_attr)
15017 && !elf32_arm_attributes_accept_div (out_attr))
15018 out_attr[i].i = 1;
15019 else if (elf32_arm_attributes_forbid_div (out_attr)
15020 && elf32_arm_attributes_accept_div (in_attr))
15021 out_attr[i].i = in_attr[i].i;
15022 else if (in_attr[i].i == 2)
15023 out_attr[i].i = in_attr[i].i;
15024 break;
15026 case Tag_MPextension_use_legacy:
15027 /* We don't output objects with Tag_MPextension_use_legacy - we
15028 move the value to Tag_MPextension_use. */
15029 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
15031 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
15033 _bfd_error_handler
15034 (_("%pB has both the current and legacy "
15035 "Tag_MPextension_use attributes"),
15036 ibfd);
15037 result = false;
15041 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
15042 out_attr[Tag_MPextension_use] = in_attr[i];
15044 break;
15046 case Tag_nodefaults:
15047 /* This tag is set if it exists, but the value is unused (and is
15048 typically zero). We don't actually need to do anything here -
15049 the merge happens automatically when the type flags are merged
15050 below. */
15051 break;
15052 case Tag_also_compatible_with:
15053 /* Already done in Tag_CPU_arch. */
15054 break;
15055 case Tag_conformance:
15056 /* Keep the attribute if it matches. Throw it away otherwise.
15057 No attribute means no claim to conform. */
15058 if (!in_attr[i].s || !out_attr[i].s
15059 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
15060 out_attr[i].s = NULL;
15061 break;
15063 default:
15064 result
15065 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
15068 /* If out_attr was copied from in_attr then it won't have a type yet. */
15069 if (in_attr[i].type && !out_attr[i].type)
15070 out_attr[i].type = in_attr[i].type;
15073 /* Merge Tag_compatibility attributes and any common GNU ones. */
15074 if (!_bfd_elf_merge_object_attributes (ibfd, info))
15075 return false;
15077 /* Check for any attributes not known on ARM. */
15078 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
15080 return result;
15084 /* Return TRUE if the two EABI versions are compatible. */
15086 static bool
15087 elf32_arm_versions_compatible (unsigned iver, unsigned over)
15089 /* v4 and v5 are the same spec before and after it was released,
15090 so allow mixing them. */
15091 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
15092 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
15093 return true;
15095 return (iver == over);
15098 /* Merge backend specific data from an object file to the output
15099 object file when linking. */
15101 static bool
15102 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
15104 /* Display the flags field. */
15106 static bool
15107 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
15109 FILE * file = (FILE *) ptr;
15110 unsigned long flags;
15112 BFD_ASSERT (abfd != NULL && ptr != NULL);
15114 /* Print normal ELF private data. */
15115 _bfd_elf_print_private_bfd_data (abfd, ptr);
15117 flags = elf_elfheader (abfd)->e_flags;
15118 /* Ignore init flag - it may not be set, despite the flags field
15119 containing valid data. */
15121 fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);
15123 switch (EF_ARM_EABI_VERSION (flags))
15125 case EF_ARM_EABI_UNKNOWN:
15126 /* The following flag bits are GNU extensions and not part of the
15127 official ARM ELF extended ABI. Hence they are only decoded if
15128 the EABI version is not set. */
15129 if (flags & EF_ARM_INTERWORK)
15130 fprintf (file, _(" [interworking enabled]"));
15132 if (flags & EF_ARM_APCS_26)
15133 fprintf (file, " [APCS-26]");
15134 else
15135 fprintf (file, " [APCS-32]");
15137 if (flags & EF_ARM_VFP_FLOAT)
15138 fprintf (file, _(" [VFP float format]"));
15139 else if (flags & EF_ARM_MAVERICK_FLOAT)
15140 fprintf (file, _(" [Maverick float format]"));
15141 else
15142 fprintf (file, _(" [FPA float format]"));
15144 if (flags & EF_ARM_APCS_FLOAT)
15145 fprintf (file, _(" [floats passed in float registers]"));
15147 if (flags & EF_ARM_PIC)
15148 fprintf (file, _(" [position independent]"));
15150 if (flags & EF_ARM_NEW_ABI)
15151 fprintf (file, _(" [new ABI]"));
15153 if (flags & EF_ARM_OLD_ABI)
15154 fprintf (file, _(" [old ABI]"));
15156 if (flags & EF_ARM_SOFT_FLOAT)
15157 fprintf (file, _(" [software FP]"));
15159 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
15160 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
15161 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
15162 | EF_ARM_MAVERICK_FLOAT);
15163 break;
15165 case EF_ARM_EABI_VER1:
15166 fprintf (file, _(" [Version1 EABI]"));
15168 if (flags & EF_ARM_SYMSARESORTED)
15169 fprintf (file, _(" [sorted symbol table]"));
15170 else
15171 fprintf (file, _(" [unsorted symbol table]"));
15173 flags &= ~ EF_ARM_SYMSARESORTED;
15174 break;
15176 case EF_ARM_EABI_VER2:
15177 fprintf (file, _(" [Version2 EABI]"));
15179 if (flags & EF_ARM_SYMSARESORTED)
15180 fprintf (file, _(" [sorted symbol table]"));
15181 else
15182 fprintf (file, _(" [unsorted symbol table]"));
15184 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
15185 fprintf (file, _(" [dynamic symbols use segment index]"));
15187 if (flags & EF_ARM_MAPSYMSFIRST)
15188 fprintf (file, _(" [mapping symbols precede others]"));
15190 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
15191 | EF_ARM_MAPSYMSFIRST);
15192 break;
15194 case EF_ARM_EABI_VER3:
15195 fprintf (file, _(" [Version3 EABI]"));
15196 break;
15198 case EF_ARM_EABI_VER4:
15199 fprintf (file, _(" [Version4 EABI]"));
15200 goto eabi;
15202 case EF_ARM_EABI_VER5:
15203 fprintf (file, _(" [Version5 EABI]"));
15205 if (flags & EF_ARM_ABI_FLOAT_SOFT)
15206 fprintf (file, _(" [soft-float ABI]"));
15208 if (flags & EF_ARM_ABI_FLOAT_HARD)
15209 fprintf (file, _(" [hard-float ABI]"));
15211 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
15213 eabi:
15214 if (flags & EF_ARM_BE8)
15215 fprintf (file, _(" [BE8]"));
15217 if (flags & EF_ARM_LE8)
15218 fprintf (file, _(" [LE8]"));
15220 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
15221 break;
15223 default:
15224 fprintf (file, _(" <EABI version unrecognised>"));
15225 break;
15228 flags &= ~ EF_ARM_EABIMASK;
15230 if (flags & EF_ARM_RELEXEC)
15231 fprintf (file, _(" [relocatable executable]"));
15233 if (flags & EF_ARM_PIC)
15234 fprintf (file, _(" [position independent]"));
15236 if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
15237 fprintf (file, _(" [FDPIC ABI supplement]"));
15239 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);
15241 if (flags)
15242 fprintf (file, _(" <Unrecognised flag bits set>"));
15244 fputc ('\n', file);
15246 return true;
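/* As a rough illustration: an object with e_flags == 0x05000400 would have
   the line "private flags = 0x5000400: [Version5 EABI] [hard-float ABI]"
   included in this output.  */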
15249 static int
15250 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15252 switch (ELF_ST_TYPE (elf_sym->st_info))
15254 case STT_ARM_TFUNC:
15255 return ELF_ST_TYPE (elf_sym->st_info);
15257 case STT_ARM_16BIT:
15258 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15259 This allows us to distinguish between data used by Thumb instructions
15260 and non-data (which is probably code) inside Thumb regions of an
15261 executable. */
15262 if (type != STT_OBJECT && type != STT_TLS)
15263 return ELF_ST_TYPE (elf_sym->st_info);
15264 break;
15266 default:
15267 break;
15270 return type;
15273 static asection *
15274 elf32_arm_gc_mark_hook (asection *sec,
15275 struct bfd_link_info *info,
15276 Elf_Internal_Rela *rel,
15277 struct elf_link_hash_entry *h,
15278 Elf_Internal_Sym *sym)
15280 if (h != NULL)
15281 switch (ELF32_R_TYPE (rel->r_info))
15283 case R_ARM_GNU_VTINHERIT:
15284 case R_ARM_GNU_VTENTRY:
15285 return NULL;
15288 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15291 /* Look through the relocs for a section during the first phase. */
15293 static bool
15294 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15295 asection *sec, const Elf_Internal_Rela *relocs)
15297 Elf_Internal_Shdr *symtab_hdr;
15298 struct elf_link_hash_entry **sym_hashes;
15299 const Elf_Internal_Rela *rel;
15300 const Elf_Internal_Rela *rel_end;
15301 bfd *dynobj;
15302 asection *sreloc;
15303 struct elf32_arm_link_hash_table *htab;
15304 bool call_reloc_p;
15305 bool may_become_dynamic_p;
15306 bool may_need_local_target_p;
15307 unsigned long nsyms;
15309 if (bfd_link_relocatable (info))
15310 return true;
15312 BFD_ASSERT (is_arm_elf (abfd));
15314 htab = elf32_arm_hash_table (info);
15315 if (htab == NULL)
15316 return false;
15318 sreloc = NULL;
15320 if (htab->root.dynobj == NULL)
15321 htab->root.dynobj = abfd;
15322 if (!create_ifunc_sections (info))
15323 return false;
15325 dynobj = htab->root.dynobj;
15327 symtab_hdr = & elf_symtab_hdr (abfd);
15328 sym_hashes = elf_sym_hashes (abfd);
15329 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15331 rel_end = relocs + sec->reloc_count;
15332 for (rel = relocs; rel < rel_end; rel++)
15334 Elf_Internal_Sym *isym;
15335 struct elf_link_hash_entry *h;
15336 struct elf32_arm_link_hash_entry *eh;
15337 unsigned int r_symndx;
15338 int r_type;
15340 r_symndx = ELF32_R_SYM (rel->r_info);
15341 r_type = ELF32_R_TYPE (rel->r_info);
15342 r_type = arm_real_reloc_type (htab, r_type);
15344 if (r_symndx >= nsyms
15345 /* PR 9934: It is possible to have relocations that do not
15346 refer to symbols, thus it is also possible to have an
15347 object file containing relocations but no symbol table. */
15348 && (r_symndx > STN_UNDEF || nsyms > 0))
15350 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15351 r_symndx);
15352 return false;
15355 h = NULL;
15356 isym = NULL;
15357 if (nsyms > 0)
15359 if (r_symndx < symtab_hdr->sh_info)
15361 /* A local symbol. */
15362 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
15363 abfd, r_symndx);
15364 if (isym == NULL)
15365 return false;
15367 else
15369 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15370 while (h->root.type == bfd_link_hash_indirect
15371 || h->root.type == bfd_link_hash_warning)
15372 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15376 eh = (struct elf32_arm_link_hash_entry *) h;
15378 call_reloc_p = false;
15379 may_become_dynamic_p = false;
15380 may_need_local_target_p = false;
15382 /* Could be done earlier, if h were already available. */
15383 r_type = elf32_arm_tls_transition (info, r_type, h);
15384 switch (r_type)
15386 case R_ARM_GOTOFFFUNCDESC:
15388 if (h == NULL)
15390 if (!elf32_arm_allocate_local_sym_info (abfd))
15391 return false;
15392 if (r_symndx >= elf32_arm_num_entries (abfd))
15393 return false;
15394 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].gotofffuncdesc_cnt += 1;
15395 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
15397 else
15399 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15402 break;
15404 case R_ARM_GOTFUNCDESC:
15406 if (h == NULL)
15408 /* Such a relocation is not supposed to be generated
15409 by gcc on a static function; it could be handled
15410 here if that ever becomes necessary. */
15411 return false;
15413 else
15415 eh->fdpic_cnts.gotfuncdesc_cnt++;
15418 break;
15420 case R_ARM_FUNCDESC:
15422 if (h == NULL)
15424 if (!elf32_arm_allocate_local_sym_info (abfd))
15425 return false;
15426 if (r_symndx >= elf32_arm_num_entries (abfd))
15427 return false;
15428 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_cnt += 1;
15429 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
15431 else
15433 eh->fdpic_cnts.funcdesc_cnt++;
15436 break;
15438 case R_ARM_GOT32:
15439 case R_ARM_GOT_PREL:
15440 case R_ARM_TLS_GD32:
15441 case R_ARM_TLS_GD32_FDPIC:
15442 case R_ARM_TLS_IE32:
15443 case R_ARM_TLS_IE32_FDPIC:
15444 case R_ARM_TLS_GOTDESC:
15445 case R_ARM_TLS_DESCSEQ:
15446 case R_ARM_THM_TLS_DESCSEQ:
15447 case R_ARM_TLS_CALL:
15448 case R_ARM_THM_TLS_CALL:
15449 /* This symbol requires a global offset table entry. */
15451 int tls_type, old_tls_type;
15453 switch (r_type)
15455 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15456 case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15458 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15459 case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15461 case R_ARM_TLS_GOTDESC:
15462 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15463 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15464 tls_type = GOT_TLS_GDESC; break;
15466 default: tls_type = GOT_NORMAL; break;
15469 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15470 info->flags |= DF_STATIC_TLS;
15472 if (h != NULL)
15474 h->got.refcount++;
15475 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15477 else
15479 /* This is a global offset table entry for a local symbol. */
15480 if (!elf32_arm_allocate_local_sym_info (abfd))
15481 return false;
15482 if (r_symndx >= elf32_arm_num_entries (abfd))
15484 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15485 r_symndx);
15486 return false;
15489 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15490 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15493 /* If a variable is accessed with both tls methods, two
15494 slots may be created. */
15495 if (GOT_TLS_GD_ANY_P (old_tls_type)
15496 && GOT_TLS_GD_ANY_P (tls_type))
15497 tls_type |= old_tls_type;
15499 /* We will already have issued an error message if there
15500 is a TLS/non-TLS mismatch, based on the symbol
15501 type. So just combine any TLS types needed. */
15502 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15503 && tls_type != GOT_NORMAL)
15504 tls_type |= old_tls_type;
15506 /* If the symbol is accessed with both the IE and GDESC
15507 methods, we're able to relax. Turn off the GDESC flag
15508 without disturbing any other kind of TLS type that
15509 may be involved. */
15510 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15511 tls_type &= ~GOT_TLS_GDESC;
15513 if (old_tls_type != tls_type)
15515 if (h != NULL)
15516 elf32_arm_hash_entry (h)->tls_type = tls_type;
15517 else
15518 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15521 /* Fall through. */
15523 case R_ARM_TLS_LDM32:
15524 case R_ARM_TLS_LDM32_FDPIC:
15525 if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15526 htab->tls_ldm_got.refcount++;
15527 /* Fall through. */
15529 case R_ARM_GOTOFF32:
15530 case R_ARM_GOTPC:
15531 if (htab->root.sgot == NULL
15532 && !create_got_section (htab->root.dynobj, info))
15533 return false;
15534 break;
15536 case R_ARM_PC24:
15537 case R_ARM_PLT32:
15538 case R_ARM_CALL:
15539 case R_ARM_JUMP24:
15540 case R_ARM_PREL31:
15541 case R_ARM_THM_CALL:
15542 case R_ARM_THM_JUMP24:
15543 case R_ARM_THM_JUMP19:
15544 call_reloc_p = true;
15545 may_need_local_target_p = true;
15546 break;
15548 case R_ARM_ABS12:
15549 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15550 ldr __GOTT_INDEX__ offsets. */
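/* On VxWorks such a reloc may therefore need to become dynamic, so it is
   handled like R_ARM_ABS32 below (via jump_over), bypassing the MOVW/MOVT
   shared-object diagnostic.  */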
15551 if (htab->root.target_os != is_vxworks)
15553 may_need_local_target_p = true;
15554 break;
15556 else goto jump_over;
15558 /* Fall through. */
15560 case R_ARM_MOVW_ABS_NC:
15561 case R_ARM_MOVT_ABS:
15562 case R_ARM_THM_MOVW_ABS_NC:
15563 case R_ARM_THM_MOVT_ABS:
15564 if (bfd_link_pic (info))
15566 _bfd_error_handler
15567 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15568 abfd, elf32_arm_howto_table_1[r_type].name,
15569 (h) ? h->root.root.string : "a local symbol");
15570 bfd_set_error (bfd_error_bad_value);
15571 return false;
15574 /* Fall through. */
15575 case R_ARM_ABS32:
15576 case R_ARM_ABS32_NOI:
15577 jump_over:
15578 if (h != NULL && bfd_link_executable (info))
15580 h->pointer_equality_needed = 1;
15582 /* Fall through. */
15583 case R_ARM_REL32:
15584 case R_ARM_REL32_NOI:
15585 case R_ARM_MOVW_PREL_NC:
15586 case R_ARM_MOVT_PREL:
15587 case R_ARM_THM_MOVW_PREL_NC:
15588 case R_ARM_THM_MOVT_PREL:
15590 /* Should the interworking branches be listed here? */
15591 if ((bfd_link_pic (info)
15592 || htab->fdpic_p)
15593 && (sec->flags & SEC_ALLOC) != 0)
15595 if (h == NULL
15596 && elf32_arm_howto_from_type (r_type)->pc_relative)
15598 /* In shared libraries and relocatable executables,
15599 we treat local relative references as calls;
15600 see the related SYMBOL_CALLS_LOCAL code in
15601 allocate_dynrelocs. */
15602 call_reloc_p = true;
15603 may_need_local_target_p = true;
15605 else
15606 /* We are creating a shared library or relocatable
15607 executable, and this is a reloc against a global symbol,
15608 or a non-PC-relative reloc against a local symbol.
15609 We may need to copy the reloc into the output. */
15610 may_become_dynamic_p = true;
15612 else
15613 may_need_local_target_p = true;
15614 break;
15616 /* This relocation describes the C++ object vtable hierarchy.
15617 Reconstruct it for later use during GC. */
15618 case R_ARM_GNU_VTINHERIT:
15619 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15620 return false;
15621 break;
15623 /* This relocation describes which C++ vtable entries are actually
15624 used. Record for later use during GC. */
15625 case R_ARM_GNU_VTENTRY:
15626 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15627 return false;
15628 break;
15631 if (h != NULL)
15633 if (call_reloc_p)
15634 /* We may need a .plt entry if the function this reloc
15635 refers to is in a different object, regardless of the
15636 symbol's type. We can't tell for sure yet, because
15637 something later might force the symbol local. */
15638 h->needs_plt = 1;
15639 else if (may_need_local_target_p)
15640 /* If this reloc is in a read-only section, we might
15641 need a copy reloc. We can't check reliably at this
15642 stage whether the section is read-only, as input
15643 sections have not yet been mapped to output sections.
15644 Tentatively set the flag for now, and correct in
15645 adjust_dynamic_symbol. */
15646 h->non_got_ref = 1;
15649 if (may_need_local_target_p
15650 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15652 union gotplt_union *root_plt;
15653 struct arm_plt_info *arm_plt;
15654 struct arm_local_iplt_info *local_iplt;
15656 if (h != NULL)
15658 root_plt = &h->plt;
15659 arm_plt = &eh->plt;
15661 else
15663 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15664 if (local_iplt == NULL)
15665 return false;
15666 root_plt = &local_iplt->root;
15667 arm_plt = &local_iplt->arm;
15670 /* If the symbol is a function that doesn't bind locally,
15671 this relocation will need a PLT entry. */
15672 if (root_plt->refcount != -1)
15673 root_plt->refcount += 1;
15675 if (!call_reloc_p)
15676 arm_plt->noncall_refcount++;
15678 /* It's too early to use htab->use_blx here, so we have to
15679 record possible blx references separately from
15680 relocs that definitely need a thumb stub. */
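/* In other words: an R_ARM_THM_CALL may be satisfiable with a BLX (counted
   in maybe_thumb_refcount), whereas R_ARM_THM_JUMP24 and R_ARM_THM_JUMP19
   have no state-changing equivalent and always count towards
   thumb_refcount.  */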
15682 if (r_type == R_ARM_THM_CALL)
15683 arm_plt->maybe_thumb_refcount += 1;
15685 if (r_type == R_ARM_THM_JUMP24
15686 || r_type == R_ARM_THM_JUMP19)
15687 arm_plt->thumb_refcount += 1;
15690 if (may_become_dynamic_p)
15692 struct elf_dyn_relocs *p, **head;
15694 /* Create a reloc section in dynobj. */
15695 if (sreloc == NULL)
15697 sreloc = _bfd_elf_make_dynamic_reloc_section
15698 (sec, dynobj, 2, abfd, ! htab->use_rel);
15700 if (sreloc == NULL)
15701 return false;
15704 /* If this is a global symbol, count the number of
15705 relocations we need for this symbol. */
15706 if (h != NULL)
15707 head = &h->dyn_relocs;
15708 else
15710 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15711 if (head == NULL)
15712 return false;
15715 p = *head;
15716 if (p == NULL || p->sec != sec)
15718 size_t amt = sizeof *p;
15720 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15721 if (p == NULL)
15722 return false;
15723 p->next = *head;
15724 *head = p;
15725 p->sec = sec;
15726 p->count = 0;
15727 p->pc_count = 0;
15730 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15731 p->pc_count += 1;
15732 p->count += 1;
15733 if (h == NULL && htab->fdpic_p && !bfd_link_pic (info)
15734 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI)
15736 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15737 becoming dynamic, because we assume that all such
15738 relocations will turn into rofixups. */
15739 _bfd_error_handler
15740 (_("FDPIC does not yet support %s relocation"
15741 " to become dynamic for executable"),
15742 elf32_arm_howto_table_1[r_type].name);
15743 abort ();
15748 return true;
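/* Rewrite the relocations attached to an SHT_ARM_EXIDX output section O so
   that they match the unwind-table edits (deleted entries and the optional
   trailing CANTUNWIND entry) recorded for its input sections.  */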
15751 static void
15752 elf32_arm_update_relocs (asection *o,
15753 struct bfd_elf_section_reloc_data *reldata)
15755 void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
15756 void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
15757 const struct elf_backend_data *bed;
15758 _arm_elf_section_data *eado;
15759 struct bfd_link_order *p;
15760 bfd_byte *erela_head, *erela;
15761 Elf_Internal_Rela *irela_head, *irela;
15762 Elf_Internal_Shdr *rel_hdr;
15763 bfd *abfd;
15764 unsigned int count;
15766 eado = get_arm_elf_section_data (o);
15768 if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
15769 return;
15771 abfd = o->owner;
15772 bed = get_elf_backend_data (abfd);
15773 rel_hdr = reldata->hdr;
15775 if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
15777 swap_in = bed->s->swap_reloc_in;
15778 swap_out = bed->s->swap_reloc_out;
15780 else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
15782 swap_in = bed->s->swap_reloca_in;
15783 swap_out = bed->s->swap_reloca_out;
15785 else
15786 abort ();
15788 erela_head = rel_hdr->contents;
15789 irela_head = (Elf_Internal_Rela *) bfd_zmalloc
15790 ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
15792 erela = erela_head;
15793 irela = irela_head;
15794 count = 0;
15796 for (p = o->map_head.link_order; p; p = p->next)
15798 if (p->type == bfd_section_reloc_link_order
15799 || p->type == bfd_symbol_reloc_link_order)
15801 (*swap_in) (abfd, erela, irela);
15802 erela += rel_hdr->sh_entsize;
15803 irela++;
15804 count++;
15806 else if (p->type == bfd_indirect_link_order)
15808 struct bfd_elf_section_reloc_data *input_reldata;
15809 arm_unwind_table_edit *edit_list, *edit_tail;
15810 _arm_elf_section_data *eadi;
15811 bfd_size_type j;
15812 bfd_vma offset;
15813 asection *i;
15815 i = p->u.indirect.section;
15817 eadi = get_arm_elf_section_data (i);
15818 edit_list = eadi->u.exidx.unwind_edit_list;
15819 edit_tail = eadi->u.exidx.unwind_edit_tail;
15820 offset = i->output_offset;
15822 if (eadi->elf.rel.hdr &&
15823 eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
15824 input_reldata = &eadi->elf.rel;
15825 else if (eadi->elf.rela.hdr &&
15826 eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
15827 input_reldata = &eadi->elf.rela;
15828 else
15829 abort ();
15831 if (edit_list)
15833 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15835 arm_unwind_table_edit *edit_node, *edit_next;
15836 bfd_vma bias;
15837 bfd_vma reloc_index;
15839 (*swap_in) (abfd, erela, irela);
15840 reloc_index = (irela->r_offset - offset) / 8;
15842 bias = 0;
15843 edit_node = edit_list;
15844 for (edit_next = edit_list;
15845 edit_next && edit_next->index <= reloc_index;
15846 edit_next = edit_node->next)
15848 bias++;
15849 edit_node = edit_next;
15852 if (edit_node->type != DELETE_EXIDX_ENTRY
15853 || edit_node->index != reloc_index)
15855 irela->r_offset -= bias * 8;
15856 irela++;
15857 count++;
15860 erela += rel_hdr->sh_entsize;
15863 if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
15865 /* New relocation entity. */
15866 asection *text_sec = edit_tail->linked_section;
15867 asection *text_out = text_sec->output_section;
15868 bfd_vma exidx_offset = offset + i->size - 8;
15870 irela->r_addend = 0;
15871 irela->r_offset = exidx_offset;
15872 irela->r_info = ELF32_R_INFO
15873 (text_out->target_index, R_ARM_PREL31);
15874 irela++;
15875 count++;
15878 else
15880 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15882 (*swap_in) (abfd, erela, irela);
15883 erela += rel_hdr->sh_entsize;
15884 irela++;
15887 count += NUM_SHDR_ENTRIES (input_reldata->hdr);
15892 reldata->count = count;
15893 rel_hdr->sh_size = count * rel_hdr->sh_entsize;
15895 erela = erela_head;
15896 irela = irela_head;
15897 while (count > 0)
15899 (*swap_out) (abfd, irela, erela);
15900 erela += rel_hdr->sh_entsize;
15901 irela++;
15902 count--;
15905 free (irela_head);
15907 /* Hashes are no longer valid. */
15908 free (reldata->hashes);
15909 reldata->hashes = NULL;
15912 /* Unwinding tables are not referenced directly. This pass marks them as
15913 required if the corresponding code section is marked. Similarly, ARMv8-M
15914 secure entry functions can only be referenced by SG veneers which are
15915 created after the GC process. They need to be marked in case they reside in
15916 their own section (as would be the case if code was compiled with
15917 -ffunction-sections). */
15919 static bool
15920 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
15921 elf_gc_mark_hook_fn gc_mark_hook)
15923 bfd *sub;
15924 Elf_Internal_Shdr **elf_shdrp;
15925 asection *cmse_sec;
15926 obj_attribute *out_attr;
15927 Elf_Internal_Shdr *symtab_hdr;
15928 unsigned i, sym_count, ext_start;
15929 const struct elf_backend_data *bed;
15930 struct elf_link_hash_entry **sym_hashes;
15931 struct elf32_arm_link_hash_entry *cmse_hash;
15932 bool again, is_v8m, first_bfd_browse = true;
15933 bool extra_marks_added = false;
15934 asection *isec;
15936 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
15938 out_attr = elf_known_obj_attributes_proc (info->output_bfd);
15939 is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
15940 && out_attr[Tag_CPU_arch_profile].i == 'M';
15942 /* Marking EH data may cause additional code sections to be marked,
15943 requiring multiple passes. */
15944 again = true;
15945 while (again)
15947 again = false;
15948 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
15950 asection *o;
15952 if (! is_arm_elf (sub))
15953 continue;
15955 elf_shdrp = elf_elfsections (sub);
15956 for (o = sub->sections; o != NULL; o = o->next)
15958 Elf_Internal_Shdr *hdr;
15960 hdr = &elf_section_data (o)->this_hdr;
15961 if (hdr->sh_type == SHT_ARM_EXIDX
15962 && hdr->sh_link
15963 && hdr->sh_link < elf_numsections (sub)
15964 && !o->gc_mark
15965 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
15967 again = true;
15968 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
15969 return false;
15973 /* Mark section holding ARMv8-M secure entry functions. We mark all
15974 of them so no need for a second browsing. */
15975 if (is_v8m && first_bfd_browse)
15977 bool debug_sec_need_to_be_marked = false;
15979 sym_hashes = elf_sym_hashes (sub);
15980 bed = get_elf_backend_data (sub);
15981 symtab_hdr = &elf_tdata (sub)->symtab_hdr;
15982 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
15983 ext_start = symtab_hdr->sh_info;
15985 /* Scan symbols. */
15986 for (i = ext_start; i < sym_count; i++)
15988 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
15989 if (cmse_hash == NULL)
15990 continue;
15992 /* Assume it is a special symbol. If not, cmse_scan will
15993 warn about it and the user can do something about it. */
15994 if (startswith (cmse_hash->root.root.root.string,
15995 CMSE_PREFIX))
15997 cmse_sec = cmse_hash->root.root.u.def.section;
15998 if (!cmse_sec->gc_mark
15999 && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
16000 return false;
16001 /* The debug sections related to these secure entry
16002 functions are marked once the flag below is set. */
16003 debug_sec_need_to_be_marked = true;
16007 if (debug_sec_need_to_be_marked)
16009 /* Loop over all the sections of the object file containing
16010 Armv8-M secure entry functions and mark all the debug
16011 sections. */
16012 for (isec = sub->sections; isec != NULL; isec = isec->next)
16014 /* If it is not a debug section, skip it. */
16015 if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
16017 isec->gc_mark = 1;
16018 extra_marks_added = true;
16021 debug_sec_need_to_be_marked = false;
16026 first_bfd_browse = false;
16029 /* PR 30354: If we have added extra marks then make sure that any
16030 dependencies of the newly marked sections are also marked. */
16031 if (extra_marks_added)
16032 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
16034 return true;
16037 /* Treat mapping symbols as special target symbols. */
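/* (These are the $a, $t and $d mapping symbols, among others, that gas
   emits to mark ARM code, Thumb code and data.)  */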
16039 static bool
16040 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
16042 return bfd_is_arm_special_symbol_name (sym->name,
16043 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
16046 /* If the ELF symbol SYM might be a function in SEC, return the
16047 function size and set *CODE_OFF to the function's entry point,
16048 otherwise return zero. */
16050 static bfd_size_type
16051 elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
16052 bfd_vma *code_off)
16054 bfd_size_type size;
16055 elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
16057 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
16058 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
16059 || sym->section != sec)
16060 return 0;
16062 size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
16064 if (!(sym->flags & BSF_SYNTHETIC))
16065 switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
16067 case STT_NOTYPE:
16068 /* Ignore symbols created by the annobin plugin for gcc and clang.
16069 These symbols are hidden, local, notype and have a size of 0. */
16070 if (size == 0
16071 && sym->flags & BSF_LOCAL
16072 && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
16073 return 0;
16074 /* Fall through. */
16075 case STT_FUNC:
16076 case STT_ARM_TFUNC:
16077 /* FIXME: Allow STT_GNU_IFUNC as well ? */
16078 break;
16079 default:
16080 return 0;
16083 if ((sym->flags & BSF_LOCAL)
16084 && bfd_is_arm_special_symbol_name (sym->name,
16085 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
16086 return 0;
16088 *code_off = sym->value;
16090 /* Do not return 0 for the function's size. */
16091 return size ? size : 1;
16095 static bool
16096 elf32_arm_find_inliner_info (bfd * abfd,
16097 const char ** filename_ptr,
16098 const char ** functionname_ptr,
16099 unsigned int * line_ptr)
16101 bool found;
16102 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
16103 functionname_ptr, line_ptr,
16104 & elf_tdata (abfd)->dwarf2_find_line_info);
16105 return found;
16108 /* Adjust a symbol defined by a dynamic object and referenced by a
16109 regular object. The current definition is in some section of the
16110 dynamic object, but we're not including those sections. We have to
16111 change the definition to something the rest of the link can
16112 understand. */
16114 static bool
16115 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
16116 struct elf_link_hash_entry * h)
16118 bfd * dynobj;
16119 asection *s, *srel;
16120 struct elf32_arm_link_hash_entry * eh;
16121 struct elf32_arm_link_hash_table *globals;
16123 globals = elf32_arm_hash_table (info);
16124 if (globals == NULL)
16125 return false;
16127 dynobj = elf_hash_table (info)->dynobj;
16129 /* Make sure we know what is going on here. */
16130 BFD_ASSERT (dynobj != NULL
16131 && (h->needs_plt
16132 || h->type == STT_GNU_IFUNC
16133 || h->is_weakalias
16134 || (h->def_dynamic
16135 && h->ref_regular
16136 && !h->def_regular)));
16138 eh = (struct elf32_arm_link_hash_entry *) h;
16140 /* If this is a function, put it in the procedure linkage table. We
16141 will fill in the contents of the procedure linkage table later,
16142 when we know the address of the .got section. */
16143 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
16145 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
16146 symbol binds locally. */
16147 if (h->plt.refcount <= 0
16148 || (h->type != STT_GNU_IFUNC
16149 && (SYMBOL_CALLS_LOCAL (info, h)
16150 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16151 && h->root.type == bfd_link_hash_undefweak))))
16153 /* This case can occur if we saw a PLT32 reloc in an input
16154 file, but the symbol was never referred to by a dynamic
16155 object, or if all references were garbage collected. In
16156 such a case, we don't actually need to build a procedure
16157 linkage table, and we can just do a PC24 reloc instead. */
16158 h->plt.offset = (bfd_vma) -1;
16159 eh->plt.thumb_refcount = 0;
16160 eh->plt.maybe_thumb_refcount = 0;
16161 eh->plt.noncall_refcount = 0;
16162 h->needs_plt = 0;
16165 return true;
16167 else
16169 /* It's possible that we incorrectly decided a .plt reloc was
16170 needed for an R_ARM_PC24 or similar reloc to a non-function sym
16171 in check_relocs. We can't decide accurately between function
16172 and non-function syms in check-relocs; Objects loaded later in
16173 the link may change h->type. So fix it now. */
16174 h->plt.offset = (bfd_vma) -1;
16175 eh->plt.thumb_refcount = 0;
16176 eh->plt.maybe_thumb_refcount = 0;
16177 eh->plt.noncall_refcount = 0;
16180 /* If this is a weak symbol, and there is a real definition, the
16181 processor independent code will have arranged for us to see the
16182 real definition first, and we can just use the same value. */
16183 if (h->is_weakalias)
16185 struct elf_link_hash_entry *def = weakdef (h);
16186 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
16187 h->root.u.def.section = def->root.u.def.section;
16188 h->root.u.def.value = def->root.u.def.value;
16189 return true;
16192 /* If there are no non-GOT references, we do not need a copy
16193 relocation. */
16194 if (!h->non_got_ref)
16195 return true;
16197 /* This is a reference to a symbol defined by a dynamic object which
16198 is not a function. */
16200 /* If we are creating a shared library, we must presume that the
16201 only references to the symbol are via the global offset table.
16202 For such cases we need not do anything here; the relocations will
16203 be handled correctly by relocate_section. */
16204 if (bfd_link_pic (info))
16205 return true;
16207 /* We must allocate the symbol in our .dynbss section, which will
16208 become part of the .bss section of the executable. There will be
16209 an entry for this symbol in the .dynsym section. The dynamic
16210 object will contain position independent code, so all references
16211 from the dynamic object to this symbol will go through the global
16212 offset table. The dynamic linker will use the .dynsym entry to
16213 determine the address it must put in the global offset table, so
16214 both the dynamic object and the regular object will refer to the
16215 same memory location for the variable. */
16216 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
16217 linker to copy the initial value out of the dynamic object and into
16218 the runtime process image. We need to remember the offset into the
16219 .rel(a).bss section we are going to use. */
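/* A read-only definition is copied into the linker-created
   read-only-after-relocation data section (sdynrelro) so that the copy can
   stay non-writable once relocations have been applied; writable
   definitions go into .dynbss.  */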
16220 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
16222 s = globals->root.sdynrelro;
16223 srel = globals->root.sreldynrelro;
16225 else
16227 s = globals->root.sdynbss;
16228 srel = globals->root.srelbss;
16230 if (info->nocopyreloc == 0
16231 && (h->root.u.def.section->flags & SEC_ALLOC) != 0
16232 && h->size != 0)
16234 elf32_arm_allocate_dynrelocs (info, srel, 1);
16235 h->needs_copy = 1;
16238 return _bfd_elf_adjust_dynamic_copy (info, h, s);
16241 /* Allocate space in .plt, .got and associated reloc sections for
16242 dynamic relocs. */
16244 static bool
16245 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16247 struct bfd_link_info *info;
16248 struct elf32_arm_link_hash_table *htab;
16249 struct elf32_arm_link_hash_entry *eh;
16250 struct elf_dyn_relocs *p;
16252 if (h->root.type == bfd_link_hash_indirect)
16253 return true;
16255 eh = (struct elf32_arm_link_hash_entry *) h;
16257 info = (struct bfd_link_info *) inf;
16258 htab = elf32_arm_hash_table (info);
16259 if (htab == NULL)
16260 return false;
16262 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16263 && h->plt.refcount > 0)
16265 /* Make sure this symbol is output as a dynamic symbol.
16266 Undefined weak syms won't yet be marked as dynamic. */
16267 if (h->dynindx == -1 && !h->forced_local
16268 && h->root.type == bfd_link_hash_undefweak)
16270 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16271 return false;
16274 /* If the call in the PLT entry binds locally, the associated
16275 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16276 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16277 than the .plt section. */
16278 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16280 eh->is_iplt = 1;
16281 if (eh->plt.noncall_refcount == 0
16282 && SYMBOL_REFERENCES_LOCAL (info, h))
16283 /* All non-call references can be resolved directly.
16284 This means that they can (and in some cases, must)
16285 resolve directly to the run-time target, rather than
16286 to the PLT. That in turn means that any .got entry
16287 would be equal to the .igot.plt entry, so there's
16288 no point having both. */
16289 h->got.refcount = 0;
16292 if (bfd_link_pic (info)
16293 || eh->is_iplt
16294 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16296 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16298 /* If this symbol is not defined in a regular file, and we are
16299 not generating a shared library, then set the symbol to this
16300 location in the .plt. This is required to make function
16301 pointers compare as equal between the normal executable and
16302 the shared library. */
16303 if (! bfd_link_pic (info)
16304 && !h->def_regular)
16306 h->root.u.def.section = htab->root.splt;
16307 h->root.u.def.value = h->plt.offset;
16309 /* Make sure the function is not marked as Thumb, in case
16310 it is the target of an ABS32 relocation, which will
16311 point to the PLT entry. */
16312 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16315 /* VxWorks executables have a second set of relocations for
16316 each PLT entry. They go in a separate relocation section,
16317 which is processed by the kernel loader. */
16318 if (htab->root.target_os == is_vxworks && !bfd_link_pic (info))
16320 /* There is a relocation for the initial PLT entry:
16321 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16322 if (h->plt.offset == htab->plt_header_size)
16323 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16325 /* There are two extra relocations for each subsequent
16326 PLT entry: an R_ARM_32 relocation for the GOT entry,
16327 and an R_ARM_32 relocation for the PLT entry. */
16328 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16331 else
16333 h->plt.offset = (bfd_vma) -1;
16334 h->needs_plt = 0;
16337 else
16339 h->plt.offset = (bfd_vma) -1;
16340 h->needs_plt = 0;
16343 eh = (struct elf32_arm_link_hash_entry *) h;
16344 eh->tlsdesc_got = (bfd_vma) -1;
16346 if (h->got.refcount > 0)
16348 asection *s;
16349 bool dyn;
16350 int tls_type = elf32_arm_hash_entry (h)->tls_type;
16351 int indx;
16353 /* Make sure this symbol is output as a dynamic symbol.
16354 Undefined weak syms won't yet be marked as dynamic. */
16355 if (htab->root.dynamic_sections_created
16356 && h->dynindx == -1
16357 && !h->forced_local
16358 && h->root.type == bfd_link_hash_undefweak)
16360 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16361 return false;
16364 s = htab->root.sgot;
16365 h->got.offset = s->size;
16367 if (tls_type == GOT_UNKNOWN)
16368 abort ();
16370 if (tls_type == GOT_NORMAL)
16371 /* Non-TLS symbols need one GOT slot. */
16372 s->size += 4;
16373 else
16375 if (tls_type & GOT_TLS_GDESC)
16377 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16378 eh->tlsdesc_got
16379 = (htab->root.sgotplt->size
16380 - elf32_arm_compute_jump_table_size (htab));
16381 htab->root.sgotplt->size += 8;
16382 h->got.offset = (bfd_vma) -2;
16383 /* plt.got_offset needs to know there's a TLS_DESC
16384 reloc in the middle of .got.plt. */
16385 htab->num_tls_desc++;
16388 if (tls_type & GOT_TLS_GD)
16390 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16391 consecutive GOT slots. If the symbol is both GD
16392 and GDESC, got.offset may have been
16393 overwritten. */
16394 h->got.offset = s->size;
16395 s->size += 8;
16398 if (tls_type & GOT_TLS_IE)
16399 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16400 slot. */
16401 s->size += 4;
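/* To summarise the slot sizes chosen above: a GOT_NORMAL entry takes 4
   bytes in .got, GOT_TLS_GD takes 8, GOT_TLS_IE takes 4, and GOT_TLS_GDESC
   takes 8 bytes in .got.plt instead.  */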
16404 dyn = htab->root.dynamic_sections_created;
16406 indx = 0;
16407 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
16408 && (!bfd_link_pic (info)
16409 || !SYMBOL_REFERENCES_LOCAL (info, h)))
16410 indx = h->dynindx;
16412 if (tls_type != GOT_NORMAL
16413 && (bfd_link_dll (info) || indx != 0)
16414 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16415 || h->root.type != bfd_link_hash_undefweak))
16417 if (tls_type & GOT_TLS_IE)
16418 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16420 if (tls_type & GOT_TLS_GD)
16421 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16423 if (tls_type & GOT_TLS_GDESC)
16425 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16426 /* GDESC needs a trampoline to jump to. */
16427 htab->tls_trampoline = -1;
16430 /* Only GD needs it. GDESC just emits one relocation per
16431 2 entries. */
16432 if ((tls_type & GOT_TLS_GD) && indx != 0)
16433 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16435 else if (((indx != -1) || htab->fdpic_p)
16436 && !SYMBOL_REFERENCES_LOCAL (info, h))
16438 if (htab->root.dynamic_sections_created)
16439 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16440 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16442 else if (h->type == STT_GNU_IFUNC
16443 && eh->plt.noncall_refcount == 0)
16444 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16445 they all resolve dynamically instead. Reserve room for the
16446 GOT entry's R_ARM_IRELATIVE relocation. */
16447 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16448 else if (bfd_link_pic (info)
16449 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16450 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16451 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16452 else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16453 /* Reserve room for rofixup for FDPIC executable. */
16454 /* TLS relocs do not need space since they are completely
16455 resolved. */
16456 htab->srofixup->size += 4;
16458 else
16459 h->got.offset = (bfd_vma) -1;
16461 /* FDPIC support. */
16462 if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16464 /* Symbol mustn't be exported. */
16465 if (h->dynindx != -1)
16466 abort ();
16468 /* We only allocate one function descriptor with its associated
16469 relocation. */
16470 if (eh->fdpic_cnts.funcdesc_offset == -1)
16472 asection *s = htab->root.sgot;
16474 eh->fdpic_cnts.funcdesc_offset = s->size;
16475 s->size += 8;
16476 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16477 if (bfd_link_pic (info))
16478 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16479 else
16480 htab->srofixup->size += 8;
16484 if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16486 asection *s = htab->root.sgot;
16488 if (htab->root.dynamic_sections_created && h->dynindx == -1
16489 && !h->forced_local)
16490 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16491 return false;
16493 if (h->dynindx == -1)
16495 /* We only allocate one function descriptor with its
16496 associated relocation. */
16497 if (eh->fdpic_cnts.funcdesc_offset == -1)
16500 eh->fdpic_cnts.funcdesc_offset = s->size;
16501 s->size += 8;
16502 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16503 rofixups. */
16504 if (bfd_link_pic (info))
16505 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16506 else
16507 htab->srofixup->size += 8;
16511 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16512 R_ARM_RELATIVE/rofixup relocation on it. */
16513 eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16514 s->size += 4;
16515 if (h->dynindx == -1 && !bfd_link_pic (info))
16516 htab->srofixup->size += 4;
16517 else
16518 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16521 if (eh->fdpic_cnts.funcdesc_cnt > 0)
16523 if (htab->root.dynamic_sections_created && h->dynindx == -1
16524 && !h->forced_local)
16525 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16526 return false;
16528 if (h->dynindx == -1)
16530 /* We only allocate one function descriptor with its
16531 associated relocation. */
16532 if (eh->fdpic_cnts.funcdesc_offset == -1)
16534 asection *s = htab->root.sgot;
16536 eh->fdpic_cnts.funcdesc_offset = s->size;
16537 s->size += 8;
16538 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16539 rofixups. */
16540 if (bfd_link_pic (info))
16541 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16542 else
16543 htab->srofixup->size += 8;
16546 if (h->dynindx == -1 && !bfd_link_pic (info))
16548 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16549 htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16551 else
16553 /* We will need one dynamic reloc per reference: either
16554 R_ARM_FUNCDESC, or R_ARM_RELATIVE for hidden symbols. */
16555 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16556 eh->fdpic_cnts.funcdesc_cnt);
16560 /* Allocate stubs for exported Thumb functions on v4t. */
16561 if (!htab->use_blx && h->dynindx != -1
16562 && h->def_regular
16563 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16564 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16566 struct elf_link_hash_entry * th;
16567 struct bfd_link_hash_entry * bh;
16568 struct elf_link_hash_entry * myh;
16569 char name[1024];
16570 asection *s;
16571 bh = NULL;
16572 /* Create a new symbol to register the real location of the function. */
16573 s = h->root.u.def.section;
16574 sprintf (name, "__real_%s", h->root.root.string);
16575 _bfd_generic_link_add_one_symbol (info, s->owner,
16576 name, BSF_GLOBAL, s,
16577 h->root.u.def.value,
16578 NULL, true, false, &bh);
16580 myh = (struct elf_link_hash_entry *) bh;
16581 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16582 myh->forced_local = 1;
16583 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16584 eh->export_glue = myh;
16585 th = record_arm_to_thumb_glue (info, h);
16586 /* Point the symbol at the stub. */
16587 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16588 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16589 h->root.u.def.section = th->root.u.def.section;
16590 h->root.u.def.value = th->root.u.def.value & ~1;
16593 if (h->dyn_relocs == NULL)
16594 return true;
16596 /* In the shared -Bsymbolic case, discard space allocated for
16597 dynamic pc-relative relocs against symbols which turn out to be
16598 defined in regular objects. For the normal shared case, discard
16599 space for pc-relative relocs that have become local due to symbol
16600 visibility changes. */
16602 if (bfd_link_pic (info)
16603 || htab->fdpic_p)
16605 /* Relocs that use pc_count are PC-relative forms, which will appear
16606 on something like ".long foo - ." or "movw REG, foo - .". We want
16607 calls to protected symbols to resolve directly to the function
16608 rather than going via the plt. If people want function pointer
16609 comparisons to work as expected then they should avoid writing
16610 assembly like ".long foo - .". */
16611 if (SYMBOL_CALLS_LOCAL (info, h))
16613 struct elf_dyn_relocs **pp;
16615 for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16617 p->count -= p->pc_count;
16618 p->pc_count = 0;
16619 if (p->count == 0)
16620 *pp = p->next;
16621 else
16622 pp = &p->next;
16626 if (htab->root.target_os == is_vxworks)
16628 struct elf_dyn_relocs **pp;
16630 for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16632 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16633 *pp = p->next;
16634 else
16635 pp = &p->next;
16639 /* Also discard relocs on undefined weak syms with non-default
16640 visibility. */
16641 if (h->dyn_relocs != NULL
16642 && h->root.type == bfd_link_hash_undefweak)
16644 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16645 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16646 h->dyn_relocs = NULL;
16648 /* Make sure undefined weak symbols are output as dynamic
16649 symbols in PIEs. */
16650 else if (htab->root.dynamic_sections_created && h->dynindx == -1
16651 && !h->forced_local)
16653 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16654 return false;
16658 else
16660 /* For the non-shared case, discard space for relocs against
16661 symbols which turn out to need copy relocs or are not
16662 dynamic. */
16664 if (!h->non_got_ref
16665 && ((h->def_dynamic
16666 && !h->def_regular)
16667 || (htab->root.dynamic_sections_created
16668 && (h->root.type == bfd_link_hash_undefweak
16669 || h->root.type == bfd_link_hash_undefined))))
16671 /* Make sure this symbol is output as a dynamic symbol.
16672 Undefined weak syms won't yet be marked as dynamic. */
16673 if (h->dynindx == -1 && !h->forced_local
16674 && h->root.type == bfd_link_hash_undefweak)
16676 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16677 return false;
16680 /* If that succeeded, we know we'll be keeping all the
16681 relocs. */
16682 if (h->dynindx != -1)
16683 goto keep;
16686 h->dyn_relocs = NULL;
16688 keep: ;
16691 /* Finally, allocate space. */
16692 for (p = h->dyn_relocs; p != NULL; p = p->next)
16694 asection *sreloc = elf_section_data (p->sec)->sreloc;
16696 if (h->type == STT_GNU_IFUNC
16697 && eh->plt.noncall_refcount == 0
16698 && SYMBOL_REFERENCES_LOCAL (info, h))
16699 elf32_arm_allocate_irelocs (info, sreloc, p->count);
16700 else if (h->dynindx != -1
16701 && (!bfd_link_pic (info) || !info->symbolic || !h->def_regular))
16702 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16703 else if (htab->fdpic_p && !bfd_link_pic (info))
16704 htab->srofixup->size += 4 * p->count;
16705 else
16706 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16709 return true;
16712 void
16713 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16714 int byteswap_code)
16716 struct elf32_arm_link_hash_table *globals;
16718 globals = elf32_arm_hash_table (info);
16719 if (globals == NULL)
16720 return;
16722 globals->byteswap_code = byteswap_code;
16725 /* Set the sizes of the dynamic sections. */
16727 static bool
16728 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16729 struct bfd_link_info * info)
16731 bfd * dynobj;
16732 asection * s;
16733 bool relocs;
16734 bfd *ibfd;
16735 struct elf32_arm_link_hash_table *htab;
16737 htab = elf32_arm_hash_table (info);
16738 if (htab == NULL)
16739 return false;
16741 dynobj = elf_hash_table (info)->dynobj;
16742 BFD_ASSERT (dynobj != NULL);
16743 check_use_blx (htab);
16745 if (elf_hash_table (info)->dynamic_sections_created)
16747 /* Set the contents of the .interp section to the interpreter. */
16748 if (bfd_link_executable (info) && !info->nointerp)
16750 s = bfd_get_linker_section (dynobj, ".interp");
16751 BFD_ASSERT (s != NULL);
16752 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16753 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16757 /* Set up .got offsets for local syms, and space for local dynamic
16758 relocs. */
16759 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16761 bfd_signed_vma *local_got;
16762 bfd_signed_vma *end_local_got;
16763 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16764 char *local_tls_type;
16765 bfd_vma *local_tlsdesc_gotent;
16766 bfd_size_type locsymcount;
16767 Elf_Internal_Shdr *symtab_hdr;
16768 asection *srel;
16769 unsigned int symndx;
16770 struct fdpic_local *local_fdpic_cnts;
16772 if (! is_arm_elf (ibfd))
16773 continue;
16775 for (s = ibfd->sections; s != NULL; s = s->next)
16777 struct elf_dyn_relocs *p;
16779 for (p = (struct elf_dyn_relocs *)
16780 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16782 if (!bfd_is_abs_section (p->sec)
16783 && bfd_is_abs_section (p->sec->output_section))
16785 /* Input section has been discarded, either because
16786 it is a copy of a linkonce section or due to
16787 linker script /DISCARD/, so we'll be discarding
16788 the relocs too. */
16790 else if (htab->root.target_os == is_vxworks
16791 && strcmp (p->sec->output_section->name,
16792 ".tls_vars") == 0)
16794 /* Relocations in vxworks .tls_vars sections are
16795 handled specially by the loader. */
16797 else if (p->count != 0)
16799 srel = elf_section_data (p->sec)->sreloc;
16800 if (htab->fdpic_p && !bfd_link_pic (info))
16801 htab->srofixup->size += 4 * p->count;
16802 else
16803 elf32_arm_allocate_dynrelocs (info, srel, p->count);
16804 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16805 info->flags |= DF_TEXTREL;
16810 local_got = elf_local_got_refcounts (ibfd);
16811 if (local_got == NULL)
16812 continue;
16814 symtab_hdr = & elf_symtab_hdr (ibfd);
16815 locsymcount = symtab_hdr->sh_info;
16816 end_local_got = local_got + locsymcount;
16817 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16818 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16819 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16820 local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16821 symndx = 0;
16822 s = htab->root.sgot;
16823 srel = htab->root.srelgot;
16824 for (; local_got < end_local_got;
16825 ++local_got, ++local_iplt_ptr, ++local_tls_type,
16826 ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16828 if (symndx >= elf32_arm_num_entries (ibfd))
16829 return false;
16831 *local_tlsdesc_gotent = (bfd_vma) -1;
16832 local_iplt = *local_iplt_ptr;
16834 /* FDPIC support. */
16835 if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16837 if (local_fdpic_cnts->funcdesc_offset == -1)
16839 local_fdpic_cnts->funcdesc_offset = s->size;
16840 s->size += 8;
16842 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16843 if (bfd_link_pic (info))
16844 elf32_arm_allocate_dynrelocs (info, srel, 1);
16845 else
16846 htab->srofixup->size += 8;
16850 if (local_fdpic_cnts->funcdesc_cnt > 0)
16852 if (local_fdpic_cnts->funcdesc_offset == -1)
16854 local_fdpic_cnts->funcdesc_offset = s->size;
16855 s->size += 8;
16857 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16858 if (bfd_link_pic (info))
16859 elf32_arm_allocate_dynrelocs (info, srel, 1);
16860 else
16861 htab->srofixup->size += 8;
16864 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16865 if (bfd_link_pic (info))
16866 elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16867 else
16868 htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16871 if (local_iplt != NULL)
16873 struct elf_dyn_relocs *p;
16875 if (local_iplt->root.refcount > 0)
16877 elf32_arm_allocate_plt_entry (info, true,
16878 &local_iplt->root,
16879 &local_iplt->arm);
16880 if (local_iplt->arm.noncall_refcount == 0)
16881 /* All references to the PLT are calls, so all
16882 non-call references can resolve directly to the
16883 run-time target. This means that the .got entry
16884 would be the same as the .igot.plt entry, so there's
16885 no point creating both. */
16886 *local_got = 0;
16888 else
16890 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
16891 local_iplt->root.offset = (bfd_vma) -1;
16894 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
16896 asection *psrel;
16898 psrel = elf_section_data (p->sec)->sreloc;
16899 if (local_iplt->arm.noncall_refcount == 0)
16900 elf32_arm_allocate_irelocs (info, psrel, p->count);
16901 else
16902 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
16905 if (*local_got > 0)
16907 Elf_Internal_Sym *isym;
16909 *local_got = s->size;
16910 if (*local_tls_type & GOT_TLS_GD)
16911 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16912 s->size += 8;
16913 if (*local_tls_type & GOT_TLS_GDESC)
16915 *local_tlsdesc_gotent = htab->root.sgotplt->size
16916 - elf32_arm_compute_jump_table_size (htab);
16917 htab->root.sgotplt->size += 8;
16918 *local_got = (bfd_vma) -2;
16919 /* plt.got_offset needs to know there's a TLS_DESC
16920 reloc in the middle of .got.plt. */
16921 htab->num_tls_desc++;
16923 if (*local_tls_type & GOT_TLS_IE)
16924 s->size += 4;
16926 if (*local_tls_type & GOT_NORMAL)
16928 /* If the symbol is both GD and GDESC, *local_got
16929 may have been overwritten. */
16930 *local_got = s->size;
16931 s->size += 4;
16934 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, ibfd,
16935 symndx);
16936 if (isym == NULL)
16937 return false;
16939 /* If all references to an STT_GNU_IFUNC PLT are calls,
16940 then all non-call references, including this GOT entry,
16941 resolve directly to the run-time target. */
16942 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
16943 && (local_iplt == NULL
16944 || local_iplt->arm.noncall_refcount == 0))
16945 elf32_arm_allocate_irelocs (info, srel, 1);
16946 else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
16948 if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
16949 elf32_arm_allocate_dynrelocs (info, srel, 1);
16950 else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
16951 htab->srofixup->size += 4;
16953 if ((bfd_link_pic (info) || htab->fdpic_p)
16954 && *local_tls_type & GOT_TLS_GDESC)
16956 elf32_arm_allocate_dynrelocs (info,
16957 htab->root.srelplt, 1);
16958 htab->tls_trampoline = -1;
16962 else
16963 *local_got = (bfd_vma) -1;
16967 if (htab->tls_ldm_got.refcount > 0)
16969 /* Allocate two GOT entries and one dynamic relocation (if necessary)
16970 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
16971 htab->tls_ldm_got.offset = htab->root.sgot->size;
16972 htab->root.sgot->size += 8;
16973 if (bfd_link_pic (info))
16974 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16976 else
16977 htab->tls_ldm_got.offset = -1;
16979 /* At the very end of the .rofixup section is a pointer to the GOT,
16980 reserve space for it. */
16981 if (htab->fdpic_p && htab->srofixup != NULL)
16982 htab->srofixup->size += 4;
16984 /* Allocate global sym .plt and .got entries, and space for global
16985 sym dynamic relocs. */
16986 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
16988 /* Here we rummage through the found bfds to collect glue information. */
16989 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16991 if (! is_arm_elf (ibfd))
16992 continue;
16994 /* Initialise mapping tables for code/data. */
16995 bfd_elf32_arm_init_maps (ibfd);
16997 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
16998 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
16999 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
17000 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
17003 /* Allocate space for the glue sections now that we've sized them. */
17004 bfd_elf32_arm_allocate_interworking_sections (info);
17006 /* For every jump slot reserved in the sgotplt, reloc_count is
17007 incremented. However, when we reserve space for TLS descriptors,
17008 it's not incremented, so in order to compute the space reserved
17009 for them, it suffices to multiply the reloc count by the jump
17010 slot size. */
17011 if (htab->root.srelplt)
17012 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);
17014 if (htab->tls_trampoline)
17016 if (htab->root.splt->size == 0)
17017 htab->root.splt->size += htab->plt_header_size;
17019 htab->tls_trampoline = htab->root.splt->size;
17020 htab->root.splt->size += htab->plt_entry_size;
17022 /* If we're not using lazy TLS relocations, don't generate the
17023 PLT and GOT entries they require. */
17024 if ((info->flags & DF_BIND_NOW))
17025 htab->root.tlsdesc_plt = 0;
17026 else
17028 htab->root.tlsdesc_got = htab->root.sgot->size;
17029 htab->root.sgot->size += 4;
17031 htab->root.tlsdesc_plt = htab->root.splt->size;
17032 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
17036 /* The check_relocs and adjust_dynamic_symbol entry points have
17037 determined the sizes of the various dynamic sections. Allocate
17038 memory for them. */
17039 relocs = false;
17040 for (s = dynobj->sections; s != NULL; s = s->next)
17042 const char * name;
17044 if ((s->flags & SEC_LINKER_CREATED) == 0)
17045 continue;
17047 /* It's OK to base decisions on the section name, because none
17048 of the dynobj section names depend upon the input files. */
17049 name = bfd_section_name (s);
17051 if (s == htab->root.splt)
17053 /* Remember whether there is a PLT. */
17056 else if (startswith (name, ".rel"))
17058 if (s->size != 0)
17060 /* Remember whether there are any reloc sections other
17061 than .rel(a).plt and .rela.plt.unloaded. */
17062 if (s != htab->root.srelplt && s != htab->srelplt2)
17063 relocs = true;
17065 /* We use the reloc_count field as a counter if we need
17066 to copy relocs into the output file. */
17067 s->reloc_count = 0;
17070 else if (s != htab->root.sgot
17071 && s != htab->root.sgotplt
17072 && s != htab->root.iplt
17073 && s != htab->root.igotplt
17074 && s != htab->root.sdynbss
17075 && s != htab->root.sdynrelro
17076 && s != htab->srofixup)
17078 /* It's not one of our sections, so don't allocate space. */
17079 continue;
17082 if (s->size == 0)
17084 /* If we don't need this section, strip it from the
17085 output file. This is mostly to handle .rel(a).bss and
17086 .rel(a).plt. We must create both sections in
17087 create_dynamic_sections, because they must be created
17088 before the linker maps input sections to output
17089 sections. The linker does that before
17090 adjust_dynamic_symbol is called, and it is that
17091 function which decides whether anything needs to go
17092 into these sections. */
17093 s->flags |= SEC_EXCLUDE;
17094 continue;
17097 if ((s->flags & SEC_HAS_CONTENTS) == 0)
17098 continue;
17100 /* Allocate memory for the section contents. */
17101 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
17102 if (s->contents == NULL)
17103 return false;
17106 return _bfd_elf_maybe_vxworks_add_dynamic_tags (output_bfd, info,
17107 relocs);
17110 /* Size sections even though they're not dynamic. We use this
17111 hook to set up _TLS_MODULE_BASE_, if needed. */
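/* (_TLS_MODULE_BASE_ is defined below at offset 0 of the output TLS
   segment and kept hidden; the TLS relocation handling elsewhere in this
   file refers to it.)  */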
17113 static bool
17114 elf32_arm_always_size_sections (bfd *output_bfd,
17115 struct bfd_link_info *info)
17117 asection *tls_sec;
17118 struct elf32_arm_link_hash_table *htab;
17120 htab = elf32_arm_hash_table (info);
17122 if (bfd_link_relocatable (info))
17123 return true;
17125 tls_sec = elf_hash_table (info)->tls_sec;
17127 if (tls_sec)
17129 struct elf_link_hash_entry *tlsbase;
17131 tlsbase = elf_link_hash_lookup
17132 (elf_hash_table (info), "_TLS_MODULE_BASE_", true, true, false);
17134 if (tlsbase)
17136 struct bfd_link_hash_entry *bh = NULL;
17137 const struct elf_backend_data *bed
17138 = get_elf_backend_data (output_bfd);
17140 if (!(_bfd_generic_link_add_one_symbol
17141 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
17142 tls_sec, 0, NULL, false,
17143 bed->collect, &bh)))
17144 return false;
17146 tlsbase->type = STT_TLS;
17147 tlsbase = (struct elf_link_hash_entry *)bh;
17148 tlsbase->def_regular = 1;
17149 tlsbase->other = STV_HIDDEN;
17150 (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
17154 if (htab->fdpic_p && !bfd_link_relocatable (info)
17155 && !bfd_elf_stack_segment_size (output_bfd, info,
17156 "__stacksize", DEFAULT_STACK_SIZE))
17157 return false;
17159 return true;
17162 /* Finish up dynamic symbol handling. We set the contents of various
17163 dynamic sections here. */
17165 static bool
17166 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
17167 struct bfd_link_info * info,
17168 struct elf_link_hash_entry * h,
17169 Elf_Internal_Sym * sym)
17171 struct elf32_arm_link_hash_table *htab;
17172 struct elf32_arm_link_hash_entry *eh;
17174 htab = elf32_arm_hash_table (info);
17176 eh = (struct elf32_arm_link_hash_entry *) h;
17178 if (h->plt.offset != (bfd_vma) -1)
17180 if (!eh->is_iplt)
17182 BFD_ASSERT (h->dynindx != -1);
17183 if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
17184 h->dynindx, 0))
17185 return false;
17188 if (!h->def_regular)
17190 /* Mark the symbol as undefined, rather than as defined in
17191 the .plt section. */
17192 sym->st_shndx = SHN_UNDEF;
17193 /* If the symbol is weak we need to clear the value.
17194 Otherwise, the PLT entry would provide a definition for
17195 the symbol even if the symbol wasn't defined anywhere,
17196 and so the symbol would never be NULL. Leave the value if
17197 there were any relocations where pointer equality matters
17198 (this is a clue for the dynamic linker, to make function
17199 pointer comparisons work between an application and shared
17200 library). */
17201 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
17202 sym->st_value = 0;
17204 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
17206 /* At least one non-call relocation references this .iplt entry,
17207 so the .iplt entry is the function's canonical address. */
17208 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
17209 ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
17210 sym->st_shndx = (_bfd_elf_section_from_bfd_section
17211 (output_bfd, htab->root.iplt->output_section));
17212 sym->st_value = (h->plt.offset
17213 + htab->root.iplt->output_section->vma
17214 + htab->root.iplt->output_offset);
17218 if (h->needs_copy)
17220 asection * s;
17221 Elf_Internal_Rela rel;
17223 /* This symbol needs a copy reloc. Set it up. */
17224 BFD_ASSERT (h->dynindx != -1
17225 && (h->root.type == bfd_link_hash_defined
17226 || h->root.type == bfd_link_hash_defweak));
17228 rel.r_addend = 0;
17229 rel.r_offset = (h->root.u.def.value
17230 + h->root.u.def.section->output_section->vma
17231 + h->root.u.def.section->output_offset);
17232 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
17233 if (h->root.u.def.section == htab->root.sdynrelro)
17234 s = htab->root.sreldynrelro;
17235 else
17236 s = htab->root.srelbss;
17237 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
17240 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17241 and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17242 it is relative to the ".got" section. */
17243 if (h == htab->root.hdynamic
17244 || (!htab->fdpic_p
17245 && htab->root.target_os != is_vxworks
17246 && h == htab->root.hgot))
17247 sym->st_shndx = SHN_ABS;
17249 return true;
17252 static void
17253 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17254 void *contents,
17255 const unsigned long *template, unsigned count)
17257 unsigned ix;
17259 for (ix = 0; ix != count; ix++)
17261 unsigned long insn = template[ix];
17263 /* Emit mov pc,rx if bx is not permitted. */
17264 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17265 insn = (insn & 0xf000000f) | 0x01a0f000;
17266 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
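 /* Worked example (editorial sketch): with fix_v4bx enabled, the template
    word 0xe12fff13 (BX r3) matches the test above, and the rewrite
	(0xe12fff13 & 0xf000000f) | 0x01a0f000 == 0xe1a0f003
    yields MOV pc, r3, the ARMv4-compatible equivalent.  */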
17270 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17271 other variants, NaCl needs this entry in a static executable's
17272 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17273 zero. For .iplt really only the last bundle is useful, and .iplt
17274 could have a shorter first entry, with each individual PLT entry's
17275 relative branch calculated differently so it targets the last
17276 bundle instead of the instruction before it (labelled .Lplt_tail
17277 above). But it's simpler to keep the size and layout of PLT0
17278 consistent with the dynamic case, at the cost of some dead code at
17279 the start of .iplt and the one dead store to the stack at the start
17280 of .Lplt_tail. */
17281 static void
17282 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17283 asection *plt, bfd_vma got_displacement)
17285 unsigned int i;
17287 put_arm_insn (htab, output_bfd,
17288 elf32_arm_nacl_plt0_entry[0]
17289 | arm_movw_immediate (got_displacement),
17290 plt->contents + 0);
17291 put_arm_insn (htab, output_bfd,
17292 elf32_arm_nacl_plt0_entry[1]
17293 | arm_movt_immediate (got_displacement),
17294 plt->contents + 4);
17296 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17297 put_arm_insn (htab, output_bfd,
17298 elf32_arm_nacl_plt0_entry[i],
17299 plt->contents + (i * 4));
17302 /* Finish up the dynamic sections. */
17304 static bool
17305 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
17307 bfd * dynobj;
17308 asection * sgot;
17309 asection * sdyn;
17310 struct elf32_arm_link_hash_table *htab;
17312 htab = elf32_arm_hash_table (info);
17313 if (htab == NULL)
17314 return false;
17316 dynobj = elf_hash_table (info)->dynobj;
17318 sgot = htab->root.sgotplt;
17319 /* A broken linker script might have discarded the dynamic sections.
17320 Catch this here so that we do not seg-fault later on. */
17321 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
17322 return false;
17323 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
17325 if (elf_hash_table (info)->dynamic_sections_created)
17327 asection *splt;
17328 Elf32_External_Dyn *dyncon, *dynconend;
17330 splt = htab->root.splt;
17331 BFD_ASSERT (splt != NULL && sdyn != NULL);
17332 BFD_ASSERT (sgot != NULL);
17334 dyncon = (Elf32_External_Dyn *) sdyn->contents;
17335 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
17337 for (; dyncon < dynconend; dyncon++)
17339 Elf_Internal_Dyn dyn;
17340 const char * name;
17341 asection * s;
17343 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
17345 switch (dyn.d_tag)
17347 default:
17348 if (htab->root.target_os == is_vxworks
17349 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
17350 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17351 break;
17353 case DT_HASH:
17354 case DT_STRTAB:
17355 case DT_SYMTAB:
17356 case DT_VERSYM:
17357 case DT_VERDEF:
17358 case DT_VERNEED:
17359 break;
17361 case DT_PLTGOT:
17362 name = ".got.plt";
17363 goto get_vma;
17364 case DT_JMPREL:
17365 name = RELOC_SECTION (htab, ".plt");
17366 get_vma:
17367 s = bfd_get_linker_section (dynobj, name);
17368 if (s == NULL)
17370 _bfd_error_handler
17371 (_("could not find section %s"), name);
17372 bfd_set_error (bfd_error_invalid_operation);
17373 return false;
17375 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
17376 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17377 break;
17379 case DT_PLTRELSZ:
17380 s = htab->root.srelplt;
17381 BFD_ASSERT (s != NULL);
17382 dyn.d_un.d_val = s->size;
17383 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17384 break;
17386 case DT_RELSZ:
17387 case DT_RELASZ:
17388 case DT_REL:
17389 case DT_RELA:
17390 break;
17392 case DT_TLSDESC_PLT:
17393 s = htab->root.splt;
17394 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17395 + htab->root.tlsdesc_plt);
17396 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17397 break;
17399 case DT_TLSDESC_GOT:
17400 s = htab->root.sgot;
17401 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17402 + htab->root.tlsdesc_got);
17403 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17404 break;
17406 /* Set the bottom bit of DT_INIT/FINI if the
17407 corresponding function is Thumb. */
17408 case DT_INIT:
17409 name = info->init_function;
17410 goto get_sym;
17411 case DT_FINI:
17412 name = info->fini_function;
17413 get_sym:
17414 /* If it wasn't set by elf_bfd_final_link
17415 then there is nothing to adjust. */
17416 if (dyn.d_un.d_val != 0)
17418 struct elf_link_hash_entry * eh;
17420 eh = elf_link_hash_lookup (elf_hash_table (info), name,
17421 false, false, true);
17422 if (eh != NULL
17423 && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
17424 == ST_BRANCH_TO_THUMB)
17426 dyn.d_un.d_val |= 1;
17427 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17430 break;
17434 /* Fill in the first entry in the procedure linkage table. */
17435 if (splt->size > 0 && htab->plt_header_size)
17437 const bfd_vma *plt0_entry;
17438 bfd_vma got_address, plt_address, got_displacement;
17440 /* Calculate the addresses of the GOT and PLT. */
17441 got_address = sgot->output_section->vma + sgot->output_offset;
17442 plt_address = splt->output_section->vma + splt->output_offset;
17444 if (htab->root.target_os == is_vxworks)
17446 /* The VxWorks GOT is relocated by the dynamic linker.
17447 Therefore, we must emit relocations rather than simply
17448 computing the values now. */
17449 Elf_Internal_Rela rel;
17451 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
17452 put_arm_insn (htab, output_bfd, plt0_entry[0],
17453 splt->contents + 0);
17454 put_arm_insn (htab, output_bfd, plt0_entry[1],
17455 splt->contents + 4);
17456 put_arm_insn (htab, output_bfd, plt0_entry[2],
17457 splt->contents + 8);
17458 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
17460 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17461 rel.r_offset = plt_address + 12;
17462 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17463 rel.r_addend = 0;
17464 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
17465 htab->srelplt2->contents);
17467 else if (htab->root.target_os == is_nacl)
17468 arm_nacl_put_plt0 (htab, output_bfd, splt,
17469 got_address + 8 - (plt_address + 16));
17470 else if (using_thumb_only (htab))
17472 got_displacement = got_address - (plt_address + 12);
17474 plt0_entry = elf32_thumb2_plt0_entry;
17475 put_arm_insn (htab, output_bfd, plt0_entry[0],
17476 splt->contents + 0);
17477 put_arm_insn (htab, output_bfd, plt0_entry[1],
17478 splt->contents + 4);
17479 put_arm_insn (htab, output_bfd, plt0_entry[2],
17480 splt->contents + 8);
17482 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
17484 else
17486 got_displacement = got_address - (plt_address + 16);
17488 plt0_entry = elf32_arm_plt0_entry;
17489 put_arm_insn (htab, output_bfd, plt0_entry[0],
17490 splt->contents + 0);
17491 put_arm_insn (htab, output_bfd, plt0_entry[1],
17492 splt->contents + 4);
17493 put_arm_insn (htab, output_bfd, plt0_entry[2],
17494 splt->contents + 8);
17495 put_arm_insn (htab, output_bfd, plt0_entry[3],
17496 splt->contents + 12);
17498 #ifdef FOUR_WORD_PLT
17499 /* The displacement value goes in the otherwise-unused
17500 last word of the second entry. */
17501 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
17502 #else
17503 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
17504 #endif
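	  /* Worked example (editorial, hypothetical addresses): if .plt is at
	     0x10300 and .got.plt at 0x11000, the word stored above is
	     0x11000 - (0x10300 + 16) = 0xcf0.  In the classic ARM PLT0 the
	     add instruction at offset 8 reads the PC as its own address
	     plus 8, i.e. .plt+16, so adding the stored displacement lands
	     exactly on the GOT.  */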
17508 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17509 really seem like the right value. */
17510 if (splt->output_section->owner == output_bfd)
17511 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
17513 if (htab->root.tlsdesc_plt)
17515 bfd_vma got_address
17516 = sgot->output_section->vma + sgot->output_offset;
17517 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
17518 + htab->root.sgot->output_offset);
17519 bfd_vma plt_address
17520 = splt->output_section->vma + splt->output_offset;
17522 arm_put_trampoline (htab, output_bfd,
17523 splt->contents + htab->root.tlsdesc_plt,
17524 dl_tlsdesc_lazy_trampoline, 6);
17526 bfd_put_32 (output_bfd,
17527 gotplt_address + htab->root.tlsdesc_got
17528 - (plt_address + htab->root.tlsdesc_plt)
17529 - dl_tlsdesc_lazy_trampoline[6],
17530 splt->contents + htab->root.tlsdesc_plt + 24);
17531 bfd_put_32 (output_bfd,
17532 got_address - (plt_address + htab->root.tlsdesc_plt)
17533 - dl_tlsdesc_lazy_trampoline[7],
17534 splt->contents + htab->root.tlsdesc_plt + 24 + 4);
17537 if (htab->tls_trampoline)
17539 arm_put_trampoline (htab, output_bfd,
17540 splt->contents + htab->tls_trampoline,
17541 tls_trampoline, 3);
17542 #ifdef FOUR_WORD_PLT
17543 bfd_put_32 (output_bfd, 0x00000000,
17544 splt->contents + htab->tls_trampoline + 12);
17545 #endif
17548 if (htab->root.target_os == is_vxworks
17549 && !bfd_link_pic (info)
17550 && htab->root.splt->size > 0)
17552 /* Correct the .rel(a).plt.unloaded relocations. They will have
17553 incorrect symbol indexes. */
17554 int num_plts;
17555 unsigned char *p;
17557 num_plts = ((htab->root.splt->size - htab->plt_header_size)
17558 / htab->plt_entry_size);
17559 p = htab->srelplt2->contents + RELOC_SIZE (htab);
17561 for (; num_plts; num_plts--)
17563 Elf_Internal_Rela rel;
17565 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17566 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17567 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17568 p += RELOC_SIZE (htab);
17570 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17571 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
17572 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17573 p += RELOC_SIZE (htab);
17578 if (htab->root.target_os == is_nacl
17579 && htab->root.iplt != NULL
17580 && htab->root.iplt->size > 0)
17581 /* NaCl uses a special first entry in .iplt too. */
17582 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
17584 /* Fill in the first three entries in the global offset table. */
17585 if (sgot)
17587 if (sgot->size > 0)
17589 if (sdyn == NULL)
17590 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
17591 else
17592 bfd_put_32 (output_bfd,
17593 sdyn->output_section->vma + sdyn->output_offset,
17594 sgot->contents);
17595 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
17596 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
17599 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
17602 /* At the very end of the .rofixup section is a pointer to the GOT. */
17603 if (htab->fdpic_p && htab->srofixup != NULL)
17605 struct elf_link_hash_entry *hgot = htab->root.hgot;
17607 bfd_vma got_value = hgot->root.u.def.value
17608 + hgot->root.u.def.section->output_section->vma
17609 + hgot->root.u.def.section->output_offset;
17611 arm_elf_add_rofixup (output_bfd, htab->srofixup, got_value);
17613 /* Make sure we allocated and generated the same number of fixups. */
17614 BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
17617 return true;
17620 static bool
17621 elf32_arm_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
17623 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17624 struct elf32_arm_link_hash_table *globals;
17625 struct elf_segment_map *m;
17627 if (!_bfd_elf_init_file_header (abfd, link_info))
17628 return false;
17630 i_ehdrp = elf_elfheader (abfd);
17632 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17633 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17634 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17636 if (link_info)
17638 globals = elf32_arm_hash_table (link_info);
17639 if (globals != NULL && globals->byteswap_code)
17640 i_ehdrp->e_flags |= EF_ARM_BE8;
17642 if (globals->fdpic_p)
17643 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17646 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17647 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17649 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17650 if (abi == AEABI_VFP_args_vfp)
17651 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17652 else
17653 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17656 /* Scan each segment and set its p_flags attribute to PF_X if it contains
17657 only sections with the SHF_ARM_PURECODE flag.  */
17658 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17660 unsigned int j;
17662 if (m->count == 0)
17663 continue;
17664 for (j = 0; j < m->count; j++)
17666 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17667 break;
17669 if (j == m->count)
17671 m->p_flags = PF_X;
17672 m->p_flags_valid = 1;
17675 return true;
17678 static enum elf_reloc_type_class
17679 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17680 const asection *rel_sec ATTRIBUTE_UNUSED,
17681 const Elf_Internal_Rela *rela)
17683 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
17685 if (htab->root.dynsym != NULL
17686 && htab->root.dynsym->contents != NULL)
17688 /* Check relocation against STT_GNU_IFUNC symbol if there are
17689 dynamic symbols. */
17690 bfd *abfd = info->output_bfd;
17691 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
17692 unsigned long r_symndx = ELF32_R_SYM (rela->r_info);
17693 if (r_symndx != STN_UNDEF)
17695 Elf_Internal_Sym sym;
17696 if (!bed->s->swap_symbol_in (abfd,
17697 (htab->root.dynsym->contents
17698 + r_symndx * bed->s->sizeof_sym),
17699 0, &sym))
17701 /* xgettext:c-format */
17702 _bfd_error_handler (_("%pB symbol number %lu references"
17703 " nonexistent SHT_SYMTAB_SHNDX section"),
17704 abfd, r_symndx);
17705 /* Ideally an error class should be returned here. */
17707 else if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
17708 return reloc_class_ifunc;
17712 switch ((int) ELF32_R_TYPE (rela->r_info))
17714 case R_ARM_RELATIVE:
17715 return reloc_class_relative;
17716 case R_ARM_JUMP_SLOT:
17717 return reloc_class_plt;
17718 case R_ARM_COPY:
17719 return reloc_class_copy;
17720 case R_ARM_IRELATIVE:
17721 return reloc_class_ifunc;
17722 default:
17723 return reloc_class_normal;
17727 static void
17728 arm_final_write_processing (bfd *abfd)
17730 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
17733 static bool
17734 elf32_arm_final_write_processing (bfd *abfd)
17736 arm_final_write_processing (abfd);
17737 return _bfd_elf_final_write_processing (abfd);
17740 /* Return TRUE if this is an unwinding table entry. */
17742 static bool
17743 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17745 return (startswith (name, ELF_STRING_ARM_unwind)
17746 || startswith (name, ELF_STRING_ARM_unwind_once));
17750 /* Set the type and flags for an ARM section. We do this by
17751 the section name, which is a hack, but ought to work. */
17753 static bool
17754 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17756 const char * name;
17758 name = bfd_section_name (sec);
17760 if (is_arm_elf_unwind_section_name (abfd, name))
17762 hdr->sh_type = SHT_ARM_EXIDX;
17763 hdr->sh_flags |= SHF_LINK_ORDER;
17766 if (sec->flags & SEC_ELF_PURECODE)
17767 hdr->sh_flags |= SHF_ARM_PURECODE;
17769 return true;
17772 /* Handle an ARM specific section when reading an object file. This is
17773 called when bfd_section_from_shdr finds a section with an unknown
17774 type. */
17776 static bool
17777 elf32_arm_section_from_shdr (bfd *abfd,
17778 Elf_Internal_Shdr * hdr,
17779 const char *name,
17780 int shindex)
17782 /* There ought to be a place to keep ELF backend specific flags, but
17783 at the moment there isn't one. We just keep track of the
17784 sections by their name, instead. Fortunately, the ABI gives
17785 names for all the ARM specific sections, so we will probably get
17786 away with this. */
17787 switch (hdr->sh_type)
17789 case SHT_ARM_EXIDX:
17790 case SHT_ARM_PREEMPTMAP:
17791 case SHT_ARM_ATTRIBUTES:
17792 break;
17794 default:
17795 return false;
17798 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17799 return false;
17801 return true;
17804 static _arm_elf_section_data *
17805 get_arm_elf_section_data (asection * sec)
17807 if (sec && sec->owner && is_arm_elf (sec->owner))
17808 return elf32_arm_section_data (sec);
17809 else
17810 return NULL;
17813 typedef struct
17815 void *flaginfo;
17816 struct bfd_link_info *info;
17817 asection *sec;
17818 int sec_shndx;
17819 int (*func) (void *, const char *, Elf_Internal_Sym *,
17820 asection *, struct elf_link_hash_entry *);
17821 } output_arch_syminfo;
17823 enum map_symbol_type
17825 ARM_MAP_ARM,
17826 ARM_MAP_THUMB,
17827 ARM_MAP_DATA
17831 /* Output a single mapping symbol. */
17833 static bool
17834 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17835 enum map_symbol_type type,
17836 bfd_vma offset)
17838 static const char *names[3] = {"$a", "$t", "$d"};
17839 Elf_Internal_Sym sym;
17841 sym.st_value = osi->sec->output_section->vma
17842 + osi->sec->output_offset
17843 + offset;
17844 sym.st_size = 0;
17845 sym.st_other = 0;
17846 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17847 sym.st_shndx = osi->sec_shndx;
17848 sym.st_target_internal = 0;
17849 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17850 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
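 /* Editorial example: for an ARM PLT entry at section offset 0x20 followed
    by a literal word, this function would typically be called twice,
    emitting "$a" at the code and "$d" at the data word; tools such as
    objdump use these mapping symbols to switch between instruction and
    data disassembly.  */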
17853 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17854 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
17856 static bool
17857 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
17858 bool is_iplt_entry_p,
17859 union gotplt_union *root_plt,
17860 struct arm_plt_info *arm_plt)
17862 struct elf32_arm_link_hash_table *htab;
17863 bfd_vma addr, plt_header_size;
17865 if (root_plt->offset == (bfd_vma) -1)
17866 return true;
17868 htab = elf32_arm_hash_table (osi->info);
17869 if (htab == NULL)
17870 return false;
17872 if (is_iplt_entry_p)
17874 osi->sec = htab->root.iplt;
17875 plt_header_size = 0;
17877 else
17879 osi->sec = htab->root.splt;
17880 plt_header_size = htab->plt_header_size;
17882 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
17883 (osi->info->output_bfd, osi->sec->output_section));
17885 addr = root_plt->offset & -2;
17886 if (htab->root.target_os == is_vxworks)
17888 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17889 return false;
17890 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
17891 return false;
17892 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
17893 return false;
17894 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
17895 return false;
17897 else if (htab->root.target_os == is_nacl)
17899 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17900 return false;
17902 else if (htab->fdpic_p)
17904 enum map_symbol_type type = using_thumb_only (htab)
17905 ? ARM_MAP_THUMB
17906 : ARM_MAP_ARM;
17908 if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
17909 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17910 return false;
17911 if (!elf32_arm_output_map_sym (osi, type, addr))
17912 return false;
17913 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
17914 return false;
17915 if (htab->plt_entry_size == 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry))
17916 if (!elf32_arm_output_map_sym (osi, type, addr + 24))
17917 return false;
17919 else if (using_thumb_only (htab))
17921 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
17922 return false;
17924 else
17926 bool thumb_stub_p;
17928 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
17929 if (thumb_stub_p)
17931 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17932 return false;
17934 #ifdef FOUR_WORD_PLT
17935 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17936 return false;
17937 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
17938 return false;
17939 #else
17940 /* A three-word PLT with no Thumb thunk contains only Arm code,
17941 so we only need to output a mapping symbol for the first PLT entry
17942 and for entries with Thumb thunks.  */
17943 if (thumb_stub_p || addr == plt_header_size)
17945 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17946 return false;
17948 #endif
17951 return true;
17954 /* Output mapping symbols for PLT entries associated with H. */
17956 static bool
17957 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
17959 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
17960 struct elf32_arm_link_hash_entry *eh;
17962 if (h->root.type == bfd_link_hash_indirect)
17963 return true;
17965 if (h->root.type == bfd_link_hash_warning)
17966 /* When warning symbols are created, they **replace** the "real"
17967 entry in the hash table, thus we never get to see the real
17968 symbol in a hash traversal. So look at it now. */
17969 h = (struct elf_link_hash_entry *) h->root.u.i.link;
17971 eh = (struct elf32_arm_link_hash_entry *) h;
17972 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
17973 &h->plt, &eh->plt);
17976 /* Bind a veneered symbol to its veneer, identified by its hash entry
17977 STUB_ENTRY.  The veneered location thus loses its symbol.  */
17979 static void
17980 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
17982 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
17984 BFD_ASSERT (hash);
17985 hash->root.root.u.def.section = stub_entry->stub_sec;
17986 hash->root.root.u.def.value = stub_entry->stub_offset;
17987 hash->root.size = stub_entry->stub_size;
17990 /* Output a single local symbol for a generated stub. */
17992 static bool
17993 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
17994 bfd_vma offset, bfd_vma size)
17996 Elf_Internal_Sym sym;
17998 sym.st_value = osi->sec->output_section->vma
17999 + osi->sec->output_offset
18000 + offset;
18001 sym.st_size = size;
18002 sym.st_other = 0;
18003 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
18004 sym.st_shndx = osi->sec_shndx;
18005 sym.st_target_internal = 0;
18006 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
18009 static bool
18010 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
18011 void * in_arg)
18013 struct elf32_arm_stub_hash_entry *stub_entry;
18014 asection *stub_sec;
18015 bfd_vma addr;
18016 char *stub_name;
18017 output_arch_syminfo *osi;
18018 const insn_sequence *template_sequence;
18019 enum stub_insn_type prev_type;
18020 int size;
18021 int i;
18022 enum map_symbol_type sym_type;
18024 /* Massage our args to the form they really have. */
18025 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18026 osi = (output_arch_syminfo *) in_arg;
18028 stub_sec = stub_entry->stub_sec;
18030 /* Ensure this stub is attached to the current section being
18031 processed. */
18032 if (stub_sec != osi->sec)
18033 return true;
18035 addr = (bfd_vma) stub_entry->stub_offset;
18036 template_sequence = stub_entry->stub_template;
18038 if (arm_stub_sym_claimed (stub_entry->stub_type))
18039 arm_stub_claim_sym (stub_entry);
18040 else
18042 stub_name = stub_entry->output_name;
18043 switch (template_sequence[0].type)
18045 case ARM_TYPE:
18046 if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
18047 stub_entry->stub_size))
18048 return false;
18049 break;
18050 case THUMB16_TYPE:
18051 case THUMB32_TYPE:
18052 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
18053 stub_entry->stub_size))
18054 return false;
18055 break;
18056 default:
18057 BFD_FAIL ();
18058 return false;
18062 prev_type = DATA_TYPE;
18063 size = 0;
18064 for (i = 0; i < stub_entry->stub_template_size; i++)
18066 switch (template_sequence[i].type)
18068 case ARM_TYPE:
18069 sym_type = ARM_MAP_ARM;
18070 break;
18072 case THUMB16_TYPE:
18073 case THUMB32_TYPE:
18074 sym_type = ARM_MAP_THUMB;
18075 break;
18077 case DATA_TYPE:
18078 sym_type = ARM_MAP_DATA;
18079 break;
18081 default:
18082 BFD_FAIL ();
18083 return false;
18086 if (template_sequence[i].type != prev_type)
18088 prev_type = template_sequence[i].type;
18089 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
18090 return false;
18093 switch (template_sequence[i].type)
18095 case ARM_TYPE:
18096 case THUMB32_TYPE:
18097 size += 4;
18098 break;
18100 case THUMB16_TYPE:
18101 size += 2;
18102 break;
18104 case DATA_TYPE:
18105 size += 4;
18106 break;
18108 default:
18109 BFD_FAIL ();
18110 return false;
18114 return true;
18117 /* Output mapping symbols for linker-generated sections,
18118 and for those data-only sections that do not have a
18119 $d mapping symbol.  */
18121 static bool
18122 elf32_arm_output_arch_local_syms (bfd *output_bfd,
18123 struct bfd_link_info *info,
18124 void *flaginfo,
18125 int (*func) (void *, const char *,
18126 Elf_Internal_Sym *,
18127 asection *,
18128 struct elf_link_hash_entry *))
18130 output_arch_syminfo osi;
18131 struct elf32_arm_link_hash_table *htab;
18132 bfd_vma offset;
18133 bfd_size_type size;
18134 bfd *input_bfd;
18136 if (info->strip == strip_all
18137 && !info->emitrelocations
18138 && !bfd_link_relocatable (info))
18139 return true;
18141 htab = elf32_arm_hash_table (info);
18142 if (htab == NULL)
18143 return false;
18145 check_use_blx (htab);
18147 osi.flaginfo = flaginfo;
18148 osi.info = info;
18149 osi.func = func;
18151 /* Add a $d mapping symbol to data-only sections that
18152 don't have any mapping symbol. This may result in (harmless) redundant
18153 mapping symbols. */
18154 for (input_bfd = info->input_bfds;
18155 input_bfd != NULL;
18156 input_bfd = input_bfd->link.next)
18158 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
18159 for (osi.sec = input_bfd->sections;
18160 osi.sec != NULL;
18161 osi.sec = osi.sec->next)
18163 if (osi.sec->output_section != NULL
18164 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
18165 != 0)
18166 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
18167 == SEC_HAS_CONTENTS
18168 && get_arm_elf_section_data (osi.sec) != NULL
18169 && get_arm_elf_section_data (osi.sec)->mapcount == 0
18170 && osi.sec->size > 0
18171 && (osi.sec->flags & SEC_EXCLUDE) == 0)
18173 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18174 (output_bfd, osi.sec->output_section);
18175 if (osi.sec_shndx != (int)SHN_BAD)
18176 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
18181 /* ARM->Thumb glue. */
18182 if (htab->arm_glue_size > 0)
18184 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18185 ARM2THUMB_GLUE_SECTION_NAME);
18187 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18188 (output_bfd, osi.sec->output_section);
18189 if (bfd_link_pic (info)
18190 || htab->pic_veneer)
18191 size = ARM2THUMB_PIC_GLUE_SIZE;
18192 else if (htab->use_blx)
18193 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
18194 else
18195 size = ARM2THUMB_STATIC_GLUE_SIZE;
18197 for (offset = 0; offset < htab->arm_glue_size; offset += size)
18199 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
18200 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
18204 /* Thumb->ARM glue. */
18205 if (htab->thumb_glue_size > 0)
18207 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18208 THUMB2ARM_GLUE_SECTION_NAME);
18210 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18211 (output_bfd, osi.sec->output_section);
18212 size = THUMB2ARM_GLUE_SIZE;
18214 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
18216 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
18217 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
18221 /* ARMv4 BX veneers. */
18222 if (htab->bx_glue_size > 0)
18224 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18225 ARM_BX_GLUE_SECTION_NAME);
18227 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18228 (output_bfd, osi.sec->output_section);
18230 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
18233 /* Long call stubs.  */
18234 if (htab->stub_bfd && htab->stub_bfd->sections)
18236 asection* stub_sec;
18238 for (stub_sec = htab->stub_bfd->sections;
18239 stub_sec != NULL;
18240 stub_sec = stub_sec->next)
18242 /* Ignore non-stub sections. */
18243 if (!strstr (stub_sec->name, STUB_SUFFIX))
18244 continue;
18246 osi.sec = stub_sec;
18248 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18249 (output_bfd, osi.sec->output_section);
18251 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
18255 /* Finally, output mapping symbols for the PLT. */
18256 if (htab->root.splt && htab->root.splt->size > 0)
18258 osi.sec = htab->root.splt;
18259 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18260 (output_bfd, osi.sec->output_section));
18262 /* Output mapping symbols for the plt header. */
18263 if (htab->root.target_os == is_vxworks)
18265 /* VxWorks shared libraries have no PLT header. */
18266 if (!bfd_link_pic (info))
18268 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18269 return false;
18270 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18271 return false;
18274 else if (htab->root.target_os == is_nacl)
18276 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18277 return false;
18279 else if (using_thumb_only (htab) && !htab->fdpic_p)
18281 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
18282 return false;
18283 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18284 return false;
18285 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
18286 return false;
18288 else if (!htab->fdpic_p)
18290 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18291 return false;
18292 #ifndef FOUR_WORD_PLT
18293 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
18294 return false;
18295 #endif
18298 if (htab->root.target_os == is_nacl
18299 && htab->root.iplt
18300 && htab->root.iplt->size > 0)
18302 /* NaCl uses a special first entry in .iplt too. */
18303 osi.sec = htab->root.iplt;
18304 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18305 (output_bfd, osi.sec->output_section));
18306 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18307 return false;
18309 if ((htab->root.splt && htab->root.splt->size > 0)
18310 || (htab->root.iplt && htab->root.iplt->size > 0))
18312 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
18313 for (input_bfd = info->input_bfds;
18314 input_bfd != NULL;
18315 input_bfd = input_bfd->link.next)
18317 struct arm_local_iplt_info **local_iplt;
18318 unsigned int i, num_syms;
18320 local_iplt = elf32_arm_local_iplt (input_bfd);
18321 if (local_iplt != NULL)
18323 num_syms = elf_symtab_hdr (input_bfd).sh_info;
18324 if (num_syms > elf32_arm_num_entries (input_bfd))
18326 _bfd_error_handler (_("\
18327 %pB: Number of symbols in input file has increased from %lu to %u\n"),
18328 input_bfd,
18329 (unsigned long) elf32_arm_num_entries (input_bfd),
18330 num_syms);
18331 return false;
18333 for (i = 0; i < num_syms; i++)
18334 if (local_iplt[i] != NULL
18335 && !elf32_arm_output_plt_map_1 (&osi, true,
18336 &local_iplt[i]->root,
18337 &local_iplt[i]->arm))
18338 return false;
18342 if (htab->root.tlsdesc_plt != 0)
18344 /* Mapping symbols for the lazy tls trampoline. */
18345 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM,
18346 htab->root.tlsdesc_plt))
18347 return false;
18349 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18350 htab->root.tlsdesc_plt + 24))
18351 return false;
18353 if (htab->tls_trampoline != 0)
18355 /* Mapping symbols for the tls trampoline. */
18356 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
18357 return false;
18358 #ifdef FOUR_WORD_PLT
18359 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18360 htab->tls_trampoline + 12))
18361 return false;
18362 #endif
18365 return true;
18368 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18369 the import library. All SYMCOUNT symbols of ABFD can be examined
18370 from their pointers in SYMS. Pointers of symbols to keep should be
18371 stored contiguously at the beginning of that array.
18373 Returns the number of symbols to keep. */
18375 static unsigned int
18376 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18377 struct bfd_link_info *info,
18378 asymbol **syms, long symcount)
18380 size_t maxnamelen;
18381 char *cmse_name;
18382 long src_count, dst_count = 0;
18383 struct elf32_arm_link_hash_table *htab;
18385 htab = elf32_arm_hash_table (info);
18386 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18387 symcount = 0;
18389 maxnamelen = 128;
18390 cmse_name = (char *) bfd_malloc (maxnamelen);
18391 BFD_ASSERT (cmse_name);
18393 for (src_count = 0; src_count < symcount; src_count++)
18395 struct elf32_arm_link_hash_entry *cmse_hash;
18396 asymbol *sym;
18397 flagword flags;
18398 char *name;
18399 size_t namelen;
18401 sym = syms[src_count];
18402 flags = sym->flags;
18403 name = (char *) bfd_asymbol_name (sym);
18405 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18406 continue;
18407 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18408 continue;
18410 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18411 if (namelen > maxnamelen)
18413 cmse_name = (char *)
18414 bfd_realloc (cmse_name, namelen);
18415 maxnamelen = namelen;
18417 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18418 cmse_hash = (struct elf32_arm_link_hash_entry *)
18419 elf_link_hash_lookup (&(htab)->root, cmse_name, false, false, true);
18421 if (!cmse_hash
18422 || (cmse_hash->root.root.type != bfd_link_hash_defined
18423 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18424 || cmse_hash->root.type != STT_FUNC)
18425 continue;
18427 syms[dst_count++] = sym;
18429 free (cmse_name);
18431 syms[dst_count] = NULL;
18433 return dst_count;
18436 /* Filter symbols of ABFD to include in the import library. All
18437 SYMCOUNT symbols of ABFD can be examined from their pointers in
18438 SYMS.  Pointers of symbols to keep should be stored contiguously at
18439 the beginning of that array.
18441 Returns the number of symbols to keep. */
18443 static unsigned int
18444 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18445 struct bfd_link_info *info,
18446 asymbol **syms, long symcount)
18448 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18450 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18451 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18452 library to be a relocatable object file. */
18453 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18454 if (globals->cmse_implib)
18455 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18456 else
18457 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18460 /* Allocate target specific section data. */
18462 static bool
18463 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18465 if (!sec->used_by_bfd)
18467 _arm_elf_section_data *sdata;
18468 size_t amt = sizeof (*sdata);
18470 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18471 if (sdata == NULL)
18472 return false;
18473 sec->used_by_bfd = sdata;
18476 return _bfd_elf_new_section_hook (abfd, sec);
18480 /* Used to order a list of mapping symbols by address. */
18482 static int
18483 elf32_arm_compare_mapping (const void * a, const void * b)
18485 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18486 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18488 if (amap->vma > bmap->vma)
18489 return 1;
18490 else if (amap->vma < bmap->vma)
18491 return -1;
18492 else if (amap->type > bmap->type)
18493 /* Ensure results do not depend on the host qsort for objects with
18494 multiple mapping symbols at the same address by sorting on type
18495 after vma. */
18496 return 1;
18497 else if (amap->type < bmap->type)
18498 return -1;
18499 else
18500 return 0;
18503 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18505 static unsigned long
18506 offset_prel31 (unsigned long addr, bfd_vma offset)
18508 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
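 /* Worked example (editorial): for addr = 0x80000004 (bit 31 set, prel31
    value 4) and offset = 8, the result is 0x8000000c: the low 31 bits
    advance to 12 while bit 31 is left untouched.  */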
18511 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18512 relocations. */
18514 static void
18515 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18517 unsigned long first_word = bfd_get_32 (output_bfd, from);
18518 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18520 /* High bit of first word is supposed to be zero. */
18521 if ((first_word & 0x80000000ul) == 0)
18522 first_word = offset_prel31 (first_word, offset);
18524 /* If the high bit of the second word is clear, and the word is not 0x1
18525 (EXIDX_CANTUNWIND), it is a prel31 offset to an .ARM.extab entry.  */
18526 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18527 second_word = offset_prel31 (second_word, offset);
18529 bfd_put_32 (output_bfd, first_word, to);
18530 bfd_put_32 (output_bfd, second_word, to + 4);
18533 /* Data for make_branch_to_a8_stub(). */
18535 struct a8_branch_to_stub_data
18537 asection *writing_section;
18538 bfd_byte *contents;
18542 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18543 places for a particular section. */
18545 static bool
18546 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
18547 void *in_arg)
18549 struct elf32_arm_stub_hash_entry *stub_entry;
18550 struct a8_branch_to_stub_data *data;
18551 bfd_byte *contents;
18552 unsigned long branch_insn;
18553 bfd_vma veneered_insn_loc, veneer_entry_loc;
18554 bfd_signed_vma branch_offset;
18555 bfd *abfd;
18556 unsigned int loc;
18558 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18559 data = (struct a8_branch_to_stub_data *) in_arg;
18561 if (stub_entry->target_section != data->writing_section
18562 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
18563 return true;
18565 contents = data->contents;
18567 /* We use target_section because Cortex-A8 erratum workaround stubs are
18568 only generated when both source and target are in the same section.  */
18569 veneered_insn_loc = stub_entry->target_section->output_section->vma
18570 + stub_entry->target_section->output_offset
18571 + stub_entry->source_value;
18573 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
18574 + stub_entry->stub_sec->output_offset
18575 + stub_entry->stub_offset;
18577 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
18578 veneered_insn_loc &= ~3u;
18580 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
18582 abfd = stub_entry->target_section->owner;
18583 loc = stub_entry->source_value;
18585 /* We attempt to avoid this condition by setting stubs_always_after_branch
18586 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18587 This check is just to be on the safe side... */
18588 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
18590 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18591 "allocated in unsafe location"), abfd);
18592 return false;
18595 switch (stub_entry->stub_type)
18597 case arm_stub_a8_veneer_b:
18598 case arm_stub_a8_veneer_b_cond:
18599 branch_insn = 0xf0009000;
18600 goto jump24;
18602 case arm_stub_a8_veneer_blx:
18603 branch_insn = 0xf000e800;
18604 goto jump24;
18606 case arm_stub_a8_veneer_bl:
18608 unsigned int i1, j1, i2, j2, s;
18610 branch_insn = 0xf000d000;
18612 jump24:
18613 if (branch_offset < -16777216 || branch_offset > 16777214)
18615 /* There's not much we can do apart from complain if this
18616 happens. */
18617 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18618 "of range (input file too large)"), abfd);
18619 return false;
18622 /* i1 = not(j1 eor s), so:
18623 not i1 = j1 eor s
18624 j1 = (not i1) eor s. */
18626 branch_insn |= (branch_offset >> 1) & 0x7ff;
18627 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
18628 i2 = (branch_offset >> 22) & 1;
18629 i1 = (branch_offset >> 23) & 1;
18630 s = (branch_offset >> 24) & 1;
18631 j1 = (!i1) ^ s;
18632 j2 = (!i2) ^ s;
18633 branch_insn |= j2 << 11;
18634 branch_insn |= j1 << 13;
18635 branch_insn |= s << 26;
18637 break;
18639 default:
18640 BFD_FAIL ();
18641 return false;
18644 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
18645 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
18647 return true;
18650 /* Beginning of stm32l4xx work-around. */
18652 /* Functions encoding instructions necessary for the emission of the
18653 fix-stm32l4xx-629360.
18654 Encoding is extracted from the
18655 ARM (C) Architecture Reference Manual
18656 ARMv7-A and ARMv7-R edition
18657 ARM DDI 0406C.b (ID072512). */
18659 static inline bfd_vma
18660 create_instruction_branch_absolute (int branch_offset)
18662 /* A8.8.18 B (A8-334)
18663 B target_address (Encoding T4). */
18664 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18665 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18666 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18668 int s = ((branch_offset & 0x1000000) >> 24);
18669 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18670 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18672 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18673 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18675 bfd_vma patched_inst = 0xf0009000
18676 | s << 26 /* S. */
18677 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18678 | j1 << 13 /* J1. */
18679 | j2 << 11 /* J2. */
18680 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18682 return patched_inst;
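 /* Worked example (editorial): branch_offset = -4 gives s = 1, i1 = i2 = 1,
    hence j1 = j2 = 1, imm10 = 0x3ff and imm11 = 0x7fe, producing
    0xf7ffbffe, i.e. "b.w ." (a branch to itself, since the Thumb PC reads
    as the instruction address plus 4).  */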
18685 static inline bfd_vma
18686 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18688 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18689 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18690 bfd_vma patched_inst = 0xe8900000
18691 | (/*W=*/wback << 21)
18692 | (base_reg << 16)
18693 | (reg_mask & 0x0000ffff);
18695 return patched_inst;
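 /* Worked example (editorial): create_instruction_ldmia (0, 1, 0x000e)
    returns 0xe8b0000e, the T2 encoding of "ldmia.w r0!, {r1, r2, r3}".  */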
18698 static inline bfd_vma
18699 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18701 /* A8.8.60 LDMDB/LDMEA (A8-402)
18702 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18703 bfd_vma patched_inst = 0xe9100000
18704 | (/*W=*/wback << 21)
18705 | (base_reg << 16)
18706 | (reg_mask & 0x0000ffff);
18708 return patched_inst;
18711 static inline bfd_vma
18712 create_instruction_mov (int target_reg, int source_reg)
18714 /* A8.8.103 MOV (register) (A8-486)
18715 MOV Rd, Rm (Encoding T1). */
18716 bfd_vma patched_inst = 0x4600
18717 | (target_reg & 0x7)
18718 | ((target_reg & 0x8) >> 3) << 7
18719 | (source_reg << 3);
18721 return patched_inst;
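 /* Worked example (editorial): create_instruction_mov (9, 1) returns
    0x4689, the T1 encoding of "mov r9, r1" (the top bit of the destination
    register goes to bit 7, so high registers are reachable).  */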
18724 static inline bfd_vma
18725 create_instruction_sub (int target_reg, int source_reg, int value)
18727 /* A8.8.221 SUB (immediate) (A8-708)
18728 SUB Rd, Rn, #value (Encoding T3). */
18729 bfd_vma patched_inst = 0xf1a00000
18730 | (target_reg << 8)
18731 | (source_reg << 16)
18732 | (/*S=*/0 << 20)
18733 | ((value & 0x800) >> 11) << 26
18734 | ((value & 0x700) >> 8) << 12
18735 | (value & 0x0ff);
18737 return patched_inst;
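 /* Worked example (editorial): create_instruction_sub (0, 5, 4) returns
    0xf1a50004, the T3 encoding of "sub.w r0, r5, #4" (i:imm3:imm8 encode
    the modified immediate, which for small values is the value itself).  */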
18740 static inline bfd_vma
18741 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18742 int first_reg)
18744 /* A8.8.332 VLDM (A8-922)
18745 VLDM{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
18746 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18747 | (/*W=*/wback << 21)
18748 | (base_reg << 16)
18749 | (num_words & 0x000000ff)
18750 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18751 | (first_reg & 0x00000001) << 22;
18753 return patched_inst;
18756 static inline bfd_vma
18757 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18758 int first_reg)
18760 /* A8.8.332 VLDM (A8-922)
18761 VLDM{MODE} Rn!, {} (Encoding T1 or T2).  */
18762 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18763 | (base_reg << 16)
18764 | (num_words & 0x000000ff)
18765 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18766 | (first_reg & 0x00000001) << 22;
18768 return patched_inst;
18771 static inline bfd_vma
18772 create_instruction_udf_w (int value)
18774 /* A8.8.247 UDF (A8-758)
18775 Undefined (Encoding T2). */
18776 bfd_vma patched_inst = 0xf7f0a000
18777 | (value & 0x00000fff)
18778 | (value & 0x000f0000) << 16;
18780 return patched_inst;
18783 static inline bfd_vma
18784 create_instruction_udf (int value)
18786 /* A8.8.247 UDF (A8-758)
18787 Undefined (Encoding T1). */
18788 bfd_vma patched_inst = 0xde00
18789 | (value & 0xff);
18791 return patched_inst;
18794 /* Functions writing an instruction in memory, returning the next
18795 memory position to write to. */
18797 static inline bfd_byte *
18798 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18799 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18801 put_thumb2_insn (htab, output_bfd, insn, pt);
18802 return pt + 4;
18805 static inline bfd_byte *
18806 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18807 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18809 put_thumb_insn (htab, output_bfd, insn, pt);
18810 return pt + 2;
18813 /* Fill a region of memory with T1 and T2 UDF instructions, taking
18814 care of alignment.  */
18816 static bfd_byte *
18817 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18818 bfd * output_bfd,
18819 const bfd_byte * const base_stub_contents,
18820 bfd_byte * const from_stub_contents,
18821 const bfd_byte * const end_stub_contents)
18823 bfd_byte *current_stub_contents = from_stub_contents;
18825 /* Fill the remainder of the stub with deterministic contents: UDF
18826 instructions.
18827 Check whether realignment to a 4-byte boundary is needed, emitting a
18828 T1 UDF first so that T2 UDFs can be used for the rest.  */
18829 if ((current_stub_contents < end_stub_contents)
18830 && !((current_stub_contents - base_stub_contents) % 2)
18831 && ((current_stub_contents - base_stub_contents) % 4))
18832 current_stub_contents =
18833 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18834 create_instruction_udf (0));
18836 for (; current_stub_contents < end_stub_contents;)
18837 current_stub_contents =
18838 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18839 create_instruction_udf_w (0));
18841 return current_stub_contents;
18844 /* Functions writing the stream of instructions equivalent to the
18845 derived sequence for ldmia, ldmdb, vldm respectively. */
18847 static void
18848 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
18849 bfd * output_bfd,
18850 const insn32 initial_insn,
18851 const bfd_byte *const initial_insn_addr,
18852 bfd_byte *const base_stub_contents)
18854 int wback = (initial_insn & 0x00200000) >> 21;
18855 int ri, rn = (initial_insn & 0x000F0000) >> 16;
18856 int insn_all_registers = initial_insn & 0x0000ffff;
18857 int insn_low_registers, insn_high_registers;
18858 int usable_register_mask;
18859 int nb_registers = elf32_arm_popcount (insn_all_registers);
18860 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18861 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18862 bfd_byte *current_stub_contents = base_stub_contents;
18864 BFD_ASSERT (is_thumb2_ldmia (initial_insn));
18866 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
18867 load sequences of 8 or fewer registers, which do not trigger the
18868 hardware issue.  */
18869 if (nb_registers <= 8)
18871 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
18872 current_stub_contents =
18873 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18874 initial_insn);
18876 /* B initial_insn_addr+4. */
18877 if (!restore_pc)
18878 current_stub_contents =
18879 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18880 create_instruction_branch_absolute
18881 (initial_insn_addr - current_stub_contents));
18883 /* Fill the remainder of the stub with deterministic contents.  */
18884 current_stub_contents =
18885 stm32l4xx_fill_stub_udf (htab, output_bfd,
18886 base_stub_contents, current_stub_contents,
18887 base_stub_contents +
18888 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18890 return;
18893 /* - reg_list[13] == 0. */
18894 BFD_ASSERT ((insn_all_registers & (1 << 13))==0);
18896 /* - reg_list[14] and reg_list[15] are not both set.  */
18897 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
18899 /* - if (wback==1) reg_list[rn] == 0. */
18900 BFD_ASSERT (!wback || !restore_rn);
18902 /* - nb_registers > 8. */
18903 BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
18905 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
18907 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18908 - One with the 7 lowest registers (register mask 0x007F)
18909 This LDM will finally contain between 2 and 7 registers
18910 - One with the 7 highest registers (register mask 0xDF80)
18911 This ldm will finally contain between 2 and 7 registers. */
18912 insn_low_registers = insn_all_registers & 0x007F;
18913 insn_high_registers = insn_all_registers & 0xDF80;
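 /* Worked example (editorial): for "ldmia r0!, {r1-r12, lr}" the register
    mask is 0x5ffe (13 registers); the split gives insn_low_registers =
    0x007e (r1-r6) and insn_high_registers = 0x5f80 (r7-r12, lr), so each
    replacement LDM transfers at most 7 registers.  */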
18915 /* A spare register may be needed during this veneer to temporarily
18916 handle the base register. This register will be restored with the
18917 last LDM operation.
18918 The usable register may be any general purpose register (that
18919 excludes PC, SP, LR : register mask is 0x1FFF). */
18920 usable_register_mask = 0x1FFF;
18922 /* Generate the stub function. */
18923 if (wback)
18925 /* LDMIA Rn!, {R-low-register-list} : (Encoding T2). */
18926 current_stub_contents =
18927 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18928 create_instruction_ldmia
18929 (rn, /*wback=*/1, insn_low_registers));
18931 /* LDMIA Rn!, {R-high-register-list} : (Encoding T2). */
18932 current_stub_contents =
18933 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18934 create_instruction_ldmia
18935 (rn, /*wback=*/1, insn_high_registers));
18936 if (!restore_pc)
18938 /* B initial_insn_addr+4. */
18939 current_stub_contents =
18940 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18941 create_instruction_branch_absolute
18942 (initial_insn_addr - current_stub_contents));
18945 else /* if (!wback). */
18947 ri = rn;
18949 /* If Rn is not part of the high-register-list, move it there. */
18950 if (!(insn_high_registers & (1 << rn)))
18952 /* Choose a Ri in the high-register-list that will be restored. */
18953 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
18955 /* MOV Ri, Rn. */
18956 current_stub_contents =
18957 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18958 create_instruction_mov (ri, rn));
18961 /* LDMIA Ri!, {R-low-register-list} : (Encoding T2). */
18962 current_stub_contents =
18963 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18964 create_instruction_ldmia
18965 (ri, /*wback=*/1, insn_low_registers));
18967 /* LDMIA Ri, {R-high-register-list} : (Encoding T2). */
18968 current_stub_contents =
18969 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18970 create_instruction_ldmia
18971 (ri, /*wback=*/0, insn_high_registers));
18973 if (!restore_pc)
18975 /* B initial_insn_addr+4. */
18976 current_stub_contents =
18977 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18978 create_instruction_branch_absolute
18979 (initial_insn_addr - current_stub_contents));
18983 /* Fill the remainder of the stub with deterministic contents.  */
18984 current_stub_contents =
18985 stm32l4xx_fill_stub_udf (htab, output_bfd,
18986 base_stub_contents, current_stub_contents,
18987 base_stub_contents +
18988 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18991 static void
18992 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
18993 bfd * output_bfd,
18994 const insn32 initial_insn,
18995 const bfd_byte *const initial_insn_addr,
18996 bfd_byte *const base_stub_contents)
18998 int wback = (initial_insn & 0x00200000) >> 21;
18999 int ri, rn = (initial_insn & 0x000f0000) >> 16;
19000 int insn_all_registers = initial_insn & 0x0000ffff;
19001 int insn_low_registers, insn_high_registers;
19002 int usable_register_mask;
19003 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
19004 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
19005 int nb_registers = elf32_arm_popcount (insn_all_registers);
19006 bfd_byte *current_stub_contents = base_stub_contents;
19008 BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
19010 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
19011 load sequences of 8 or fewer registers, which do not trigger the
19012 hardware issue.  */
19013 if (nb_registers <= 8)
19015 /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}.  */
19016 current_stub_contents =
19017 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19018 initial_insn);
19020 /* B initial_insn_addr+4. */
19021 current_stub_contents =
19022 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19023 create_instruction_branch_absolute
19024 (initial_insn_addr - current_stub_contents));
19026 /* Fill the remainder of the stub with deterministic contents. */
19027 current_stub_contents =
19028 stm32l4xx_fill_stub_udf (htab, output_bfd,
19029 base_stub_contents, current_stub_contents,
19030 base_stub_contents +
19031 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19033 return;
19036 /* - reg_list[13] == 0. */
19037 BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
19039 /* - reg_list[14] & reg_list[15] != 1. */
19040 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
19042 /* - if (wback==1) reg_list[rn] == 0. */
19043 BFD_ASSERT (!wback || !restore_rn);
19045 /* - nb_registers > 8. */
19046 BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
19048 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
19050 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
19051 - One with the 7 lowest registers (register mask 0x007F)
19052 This LDM will finally contain between 2 and 7 registers
19053 - One with the 7 highest registers (register mask 0xDF80)
19054 This LDM will finally contain between 2 and 7 registers. */
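/* Worked example (illustrative register values, not taken from a real
   object): for LDMDB r10!, {r0-r8, r12, lr} the register mask is 0x51FF,
   so insn_low_registers = 0x007F (r0-r6) and insn_high_registers = 0x5180
   (r7, r8, r12, lr).  Each replacement LDM then transfers at most seven
   registers, safely below the problematic list length.  */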
19055 insn_low_registers = insn_all_registers & 0x007F;
19056 insn_high_registers = insn_all_registers & 0xDF80;
19058 /* A spare register may be needed during this veneer to temporarily
19059 handle the base register. This register will be restored with
19060 the last LDM operation.
19061 The usable register may be any general-purpose register except
19062 PC, SP and LR (register mask 0x1FFF). */
19063 usable_register_mask = 0x1FFF;
19065 /* Generate the stub function. */
19066 if (!wback && !restore_pc && !restore_rn)
19068 /* Choose a Ri in the low-register-list that will be restored. */
19069 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19071 /* MOV Ri, Rn. */
19072 current_stub_contents =
19073 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19074 create_instruction_mov (ri, rn));
19076 /* LDMDB Ri!, {R-high-register-list}. */
19077 current_stub_contents =
19078 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19079 create_instruction_ldmdb
19080 (ri, /*wback=*/1, insn_high_registers));
19082 /* LDMDB Ri, {R-low-register-list}. */
19083 current_stub_contents =
19084 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19085 create_instruction_ldmdb
19086 (ri, /*wback=*/0, insn_low_registers));
19088 /* B initial_insn_addr+4. */
19089 current_stub_contents =
19090 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19091 create_instruction_branch_absolute
19092 (initial_insn_addr - current_stub_contents));
19094 else if (wback && !restore_pc && !restore_rn)
19096 /* LDMDB Rn!, {R-high-register-list}. */
19097 current_stub_contents =
19098 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19099 create_instruction_ldmdb
19100 (rn, /*wback=*/1, insn_high_registers));
19102 /* LDMDB Rn!, {R-low-register-list}. */
19103 current_stub_contents =
19104 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19105 create_instruction_ldmdb
19106 (rn, /*wback=*/1, insn_low_registers));
19108 /* B initial_insn_addr+4. */
19109 current_stub_contents =
19110 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19111 create_instruction_branch_absolute
19112 (initial_insn_addr - current_stub_contents));
19114 else if (!wback && restore_pc && !restore_rn)
19116 /* Choose a Ri in the high-register-list that will be restored. */
19117 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19119 /* SUB Ri, Rn, #(4*nb_registers). */
19120 current_stub_contents =
19121 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19122 create_instruction_sub (ri, rn, (4 * nb_registers)));
19124 /* LDMIA Ri!, {R-low-register-list}. */
19125 current_stub_contents =
19126 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19127 create_instruction_ldmia
19128 (ri, /*wback=*/1, insn_low_registers));
19130 /* LDMIA Ri, {R-high-register-list}. */
19131 current_stub_contents =
19132 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19133 create_instruction_ldmia
19134 (ri, /*wback=*/0, insn_high_registers));
19136 else if (wback && restore_pc && !restore_rn)
19138 /* Choose a Ri in the high-register-list that will be restored. */
19139 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19141 /* SUB Rn, Rn, #(4*nb_registers) */
19142 current_stub_contents =
19143 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19144 create_instruction_sub (rn, rn, (4 * nb_registers)));
19146 /* MOV Ri, Rn. */
19147 current_stub_contents =
19148 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19149 create_instruction_mov (ri, rn));
19151 /* LDMIA Ri!, {R-low-register-list}. */
19152 current_stub_contents =
19153 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19154 create_instruction_ldmia
19155 (ri, /*wback=*/1, insn_low_registers));
19157 /* LDMIA Ri, {R-high-register-list}. */
19158 current_stub_contents =
19159 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19160 create_instruction_ldmia
19161 (ri, /*wback=*/0, insn_high_registers));
19163 else if (!wback && !restore_pc && restore_rn)
19165 ri = rn;
19166 if (!(insn_low_registers & (1 << rn)))
19168 /* Choose a Ri in the low-register-list that will be restored. */
19169 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19171 /* MOV Ri, Rn. */
19172 current_stub_contents =
19173 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19174 create_instruction_mov (ri, rn));
19177 /* LDMDB Ri!, {R-high-register-list}. */
19178 current_stub_contents =
19179 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19180 create_instruction_ldmdb
19181 (ri, /*wback=*/1, insn_high_registers));
19183 /* LDMDB Ri, {R-low-register-list}. */
19184 current_stub_contents =
19185 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19186 create_instruction_ldmdb
19187 (ri, /*wback=*/0, insn_low_registers));
19189 /* B initial_insn_addr+4. */
19190 current_stub_contents =
19191 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19192 create_instruction_branch_absolute
19193 (initial_insn_addr - current_stub_contents));
19195 else if (!wback && restore_pc && restore_rn)
19197 ri = rn;
19198 if (!(insn_high_registers & (1 << rn)))
19200 /* Choose a Ri in the high-register-list that will be restored. */
19201 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19204 /* SUB Ri, Rn, #(4*nb_registers). */
19205 current_stub_contents =
19206 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19207 create_instruction_sub (ri, rn, (4 * nb_registers)));
19209 /* LDMIA Ri!, {R-low-register-list}. */
19210 current_stub_contents =
19211 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19212 create_instruction_ldmia
19213 (ri, /*wback=*/1, insn_low_registers));
19215 /* LDMIA Ri, {R-high-register-list}. */
19216 current_stub_contents =
19217 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19218 create_instruction_ldmia
19219 (ri, /*wback=*/0, insn_high_registers));
19221 else if (wback && restore_rn)
19223 /* The assembler should not have accepted this encoding. */
19224 BFD_ASSERT (0 && "Cannot patch an instruction that has an "
19225 "undefined behavior.\n");
19228 /* Fill the remainder of the stub with deterministic contents. */
19229 current_stub_contents =
19230 stm32l4xx_fill_stub_udf (htab, output_bfd,
19231 base_stub_contents, current_stub_contents,
19232 base_stub_contents +
19233 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19237 static void
19238 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
19239 bfd * output_bfd,
19240 const insn32 initial_insn,
19241 const bfd_byte *const initial_insn_addr,
19242 bfd_byte *const base_stub_contents)
19244 int num_words = initial_insn & 0xff;
19245 bfd_byte *current_stub_contents = base_stub_contents;
19247 BFD_ASSERT (is_thumb2_vldm (initial_insn));
19249 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
19250 load sequences of fewer than 8 words, which do not trigger
19251 the hardware issue. */
19252 if (num_words <= 8)
19254 /* Untouched instruction. */
19255 current_stub_contents =
19256 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19257 initial_insn);
19259 /* B initial_insn_addr+4. */
19260 current_stub_contents =
19261 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19262 create_instruction_branch_absolute
19263 (initial_insn_addr - current_stub_contents));
19265 else
19267 bool is_dp = /* DP encoding. */
19268 (initial_insn & 0xfe100f00) == 0xec100b00;
19269 bool is_ia_nobang = /* (IA without !). */
19270 (((initial_insn << 7) >> 28) & 0xd) == 0x4;
19271 bool is_ia_bang = /* (IA with !) - includes VPOP. */
19272 (((initial_insn << 7) >> 28) & 0xd) == 0x5;
19273 bool is_db_bang = /* (DB with !). */
19274 (((initial_insn << 7) >> 28) & 0xd) == 0x9;
19275 int base_reg = ((unsigned int) initial_insn << 12) >> 28;
19276 /* d = UInt (Vd:D). */
19277 int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
19278 | (((unsigned int)initial_insn << 9) >> 31);
19280 /* Compute the number of 8-word chunks needed for the split. */
19281 int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
19282 int chunk;
19284 /* The test coverage has been done under the hypothesis that
19285 exactly one of the is_ predicates above is
19286 true. */
19287 BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
19288 && !(is_ia_nobang & is_ia_bang & is_db_bang));
19290 /* We treat the splitting of the words in one pass for all
19291 cases, then we emit the adjustments:
19293 vldm rx, {...}
19294 -> vldm rx!, {8_words_or_less} for each needed 8_word
19295 -> sub rx, rx, #size (list)
19297 vldm rx!, {...}
19298 -> vldm rx!, {8_words_or_less} for each needed 8_word
19299 This also handles the VPOP instruction (when rx is sp)
19301 vldmdb rx!, {...}
19302 -> vldmdb rx!, {8_words_or_less} for each needed 8_word. */
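/* Worked example (illustrative): a VLDM of d0-d5 transfers num_words = 12
   words, so chunks = 2 and the veneer emits one 8-word VLDM followed by
   one 4-word VLDM, each short enough not to trigger the erratum.  */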
19303 for (chunk = 0; chunk < chunks; ++chunk)
19305 bfd_vma new_insn = 0;
19307 if (is_ia_nobang || is_ia_bang)
19309 new_insn = create_instruction_vldmia
19310 (base_reg,
19311 is_dp,
19312 /*wback=*/1,
19313 chunks - (chunk + 1) ?
19314 8 : num_words - chunk * 8,
19315 first_reg + chunk * 8);
19317 else if (is_db_bang)
19319 new_insn = create_instruction_vldmdb
19320 (base_reg,
19321 is_dp,
19322 chunks - (chunk + 1) ?
19323 8 : num_words - chunk * 8,
19324 first_reg + chunk * 8);
19327 if (new_insn)
19328 current_stub_contents =
19329 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19330 new_insn);
19333 /* Only this case requires the compensating subtract on the
19334 base register. */
19335 if (is_ia_nobang)
19337 current_stub_contents =
19338 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19339 create_instruction_sub
19340 (base_reg, base_reg, 4*num_words));
19343 /* B initial_insn_addr+4. */
19344 current_stub_contents =
19345 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19346 create_instruction_branch_absolute
19347 (initial_insn_addr - current_stub_contents));
19350 /* Fill the remainder of the stub with deterministic contents. */
19351 current_stub_contents =
19352 stm32l4xx_fill_stub_udf (htab, output_bfd,
19353 base_stub_contents, current_stub_contents,
19354 base_stub_contents +
19355 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
19358 static void
19359 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19360 bfd * output_bfd,
19361 const insn32 wrong_insn,
19362 const bfd_byte *const wrong_insn_addr,
19363 bfd_byte *const stub_contents)
19365 if (is_thumb2_ldmia (wrong_insn))
19366 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19367 wrong_insn, wrong_insn_addr,
19368 stub_contents);
19369 else if (is_thumb2_ldmdb (wrong_insn))
19370 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19371 wrong_insn, wrong_insn_addr,
19372 stub_contents);
19373 else if (is_thumb2_vldm (wrong_insn))
19374 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19375 wrong_insn, wrong_insn_addr,
19376 stub_contents);
19379 /* End of stm32l4xx work-around. */
19382 /* Do code byteswapping. Return FALSE afterwards so that the section is
19383 written out as normal. */
19385 static bool
19386 elf32_arm_write_section (bfd *output_bfd,
19387 struct bfd_link_info *link_info,
19388 asection *sec,
19389 bfd_byte *contents)
19391 unsigned int mapcount, errcount;
19392 _arm_elf_section_data *arm_data;
19393 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19394 elf32_arm_section_map *map;
19395 elf32_vfp11_erratum_list *errnode;
19396 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19397 bfd_vma ptr;
19398 bfd_vma end;
19399 bfd_vma offset = sec->output_section->vma + sec->output_offset;
19400 bfd_byte tmp;
19401 unsigned int i;
19403 if (globals == NULL)
19404 return false;
19406 /* If this section has not been allocated an _arm_elf_section_data
19407 structure then we cannot record anything. */
19408 arm_data = get_arm_elf_section_data (sec);
19409 if (arm_data == NULL)
19410 return false;
19412 mapcount = arm_data->mapcount;
19413 map = arm_data->map;
19414 errcount = arm_data->erratumcount;
19416 if (errcount != 0)
19418 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
19420 for (errnode = arm_data->erratumlist; errnode != 0;
19421 errnode = errnode->next)
19423 bfd_vma target = errnode->vma - offset;
19425 switch (errnode->type)
19427 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19429 bfd_vma branch_to_veneer;
19430 /* Original condition code of instruction, plus bit mask for
19431 ARM B instruction. */
19432 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19433 | 0x0a000000;
19435 /* The instruction is before the label. */
19436 target -= 4;
19438 /* Above offset included in -4 below. */
19439 branch_to_veneer = errnode->u.b.veneer->vma
19440 - errnode->vma - 4;
19442 if ((signed) branch_to_veneer < -(1 << 25)
19443 || (signed) branch_to_veneer >= (1 << 25))
19444 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19445 "range"), output_bfd);
19447 insn |= (branch_to_veneer >> 2) & 0xffffff;
19448 contents[endianflip ^ target] = insn & 0xff;
19449 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19450 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19451 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19453 break;
19455 case VFP11_ERRATUM_ARM_VENEER:
19457 bfd_vma branch_from_veneer;
19458 unsigned int insn;
19460 /* Take size of veneer into account. */
19461 branch_from_veneer = errnode->u.v.branch->vma
19462 - errnode->vma - 12;
19464 if ((signed) branch_from_veneer < -(1 << 25)
19465 || (signed) branch_from_veneer >= (1 << 25))
19466 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19467 "range"), output_bfd);
19469 /* Original instruction. */
19470 insn = errnode->u.v.branch->u.b.vfp_insn;
19471 contents[endianflip ^ target] = insn & 0xff;
19472 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19473 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19474 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19476 /* Branch back to insn after original insn. */
19477 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19478 contents[endianflip ^ (target + 4)] = insn & 0xff;
19479 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19480 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19481 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19483 break;
19485 default:
19486 abort ();
19491 if (arm_data->stm32l4xx_erratumcount != 0)
19493 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19494 stm32l4xx_errnode != 0;
19495 stm32l4xx_errnode = stm32l4xx_errnode->next)
19497 bfd_vma target = stm32l4xx_errnode->vma - offset;
19499 switch (stm32l4xx_errnode->type)
19501 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19503 unsigned int insn;
19504 bfd_vma branch_to_veneer =
19505 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
19507 if ((signed) branch_to_veneer < -(1 << 24)
19508 || (signed) branch_to_veneer >= (1 << 24))
19510 bfd_vma out_of_range =
19511 ((signed) branch_to_veneer < -(1 << 24)) ?
19512 - branch_to_veneer - (1 << 24) :
19513 ((signed) branch_to_veneer >= (1 << 24)) ?
19514 branch_to_veneer - (1 << 24) : 0;
19516 _bfd_error_handler
19517 (_("%pB(%#" PRIx64 "): error: "
19518 "cannot create STM32L4XX veneer; "
19519 "jump out of range by %" PRId64 " bytes; "
19520 "cannot encode branch instruction"),
19521 output_bfd,
19522 (uint64_t) (stm32l4xx_errnode->vma - 4),
19523 (int64_t) out_of_range);
19524 continue;
19527 insn = create_instruction_branch_absolute
19528 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19530 /* The instruction is before the label. */
19531 target -= 4;
19533 put_thumb2_insn (globals, output_bfd,
19534 (bfd_vma) insn, contents + target);
19536 break;
19538 case STM32L4XX_ERRATUM_VENEER:
19540 bfd_byte * veneer;
19541 bfd_byte * veneer_r;
19542 unsigned int insn;
19544 veneer = contents + target;
19545 veneer_r = veneer
19546 + stm32l4xx_errnode->u.b.veneer->vma
19547 - stm32l4xx_errnode->vma - 4;
19549 if ((signed) (veneer_r - veneer -
19550 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19551 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19552 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19553 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19554 || (signed) (veneer_r - veneer) >= (1 << 24))
19556 _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19557 "veneer"), output_bfd);
19558 continue;
19561 /* Original instruction. */
19562 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19564 stm32l4xx_create_replacing_stub
19565 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19567 break;
19569 default:
19570 abort ();
19575 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19577 arm_unwind_table_edit *edit_node
19578 = arm_data->u.exidx.unwind_edit_list;
19579 /* Now, sec->size is the size of the section we will write. The original
19580 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19581 markers) was sec->rawsize. (This isn't the case if we perform no
19582 edits: then rawsize will be zero and we should use size instead.) */
19583 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19584 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19585 unsigned int in_index, out_index;
19586 bfd_vma add_to_offsets = 0;
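/* Illustrative note on add_to_offsets: deleting one duplicate entry moves
   every later entry 8 bytes lower, so the bias grows by +8 and is applied
   when copying, keeping each copied prel31 offset pointing at the same
   text address; inserting an EXIDX_CANTUNWIND entry has the opposite
   effect (-8).  */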
19588 if (edited_contents == NULL)
19589 return false;
19590 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19592 if (edit_node)
19594 unsigned int edit_index = edit_node->index;
19596 if (in_index < edit_index && in_index * 8 < input_size)
19598 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19599 contents + in_index * 8, add_to_offsets);
19600 out_index++;
19601 in_index++;
19603 else if (in_index == edit_index
19604 || (in_index * 8 >= input_size
19605 && edit_index == UINT_MAX))
19607 switch (edit_node->type)
19609 case DELETE_EXIDX_ENTRY:
19610 in_index++;
19611 add_to_offsets += 8;
19612 break;
19614 case INSERT_EXIDX_CANTUNWIND_AT_END:
19616 asection *text_sec = edit_node->linked_section;
19617 bfd_vma text_offset = text_sec->output_section->vma
19618 + text_sec->output_offset
19619 + text_sec->size;
19620 bfd_vma exidx_offset = offset + out_index * 8;
19621 unsigned long prel31_offset;
19623 /* Note: this is meant to be equivalent to an
19624 R_ARM_PREL31 relocation. These synthetic
19625 EXIDX_CANTUNWIND markers are not relocated by the
19626 usual BFD method. */
19627 prel31_offset = (text_offset - exidx_offset)
19628 & 0x7ffffffful;
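/* Illustrative arithmetic (assumed addresses): with text_offset 0x8000
   and exidx_offset 0x10000, prel31_offset becomes
   (0x8000 - 0x10000) & 0x7fffffff = 0x7fff8000, i.e. a negative 31-bit
   offset from the EXIDX entry back to the end of the text section.  */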
19629 if (bfd_link_relocatable (link_info))
19631 /* Here a relocation for the new EXIDX_CANTUNWIND
19632 entry is created, so there is no need to
19633 adjust the offset by hand. */
19634 prel31_offset = text_sec->output_offset
19635 + text_sec->size;
19638 /* First address we can't unwind. */
19639 bfd_put_32 (output_bfd, prel31_offset,
19640 &edited_contents[out_index * 8]);
19642 /* Code for EXIDX_CANTUNWIND. */
19643 bfd_put_32 (output_bfd, 0x1,
19644 &edited_contents[out_index * 8 + 4]);
19646 out_index++;
19647 add_to_offsets -= 8;
19649 break;
19652 edit_node = edit_node->next;
19655 else
19657 /* No more edits, copy remaining entries verbatim. */
19658 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19659 contents + in_index * 8, add_to_offsets);
19660 out_index++;
19661 in_index++;
19665 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19666 bfd_set_section_contents (output_bfd, sec->output_section,
19667 edited_contents,
19668 (file_ptr) sec->output_offset, sec->size);
19670 return true;
19673 /* Fix code to point to Cortex-A8 erratum stubs. */
19674 if (globals->fix_cortex_a8)
19676 struct a8_branch_to_stub_data data;
19678 data.writing_section = sec;
19679 data.contents = contents;
19681 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19682 & data);
19685 if (mapcount == 0)
19686 return false;
19688 if (globals->byteswap_code)
19690 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
19692 ptr = map[0].vma;
19693 for (i = 0; i < mapcount; i++)
19695 if (i == mapcount - 1)
19696 end = sec->size;
19697 else
19698 end = map[i + 1].vma;
19700 switch (map[i].type)
19702 case 'a':
19703 /* Byte swap code words. */
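/* E.g. (illustrative) the code bytes 12 34 56 78 are rewritten in place
   as 78 56 34 12: each 32-bit word is byte-reversed.  */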
19704 while (ptr + 3 < end)
19706 tmp = contents[ptr];
19707 contents[ptr] = contents[ptr + 3];
19708 contents[ptr + 3] = tmp;
19709 tmp = contents[ptr + 1];
19710 contents[ptr + 1] = contents[ptr + 2];
19711 contents[ptr + 2] = tmp;
19712 ptr += 4;
19714 break;
19716 case 't':
19717 /* Byte swap code halfwords. */
19718 while (ptr + 1 < end)
19720 tmp = contents[ptr];
19721 contents[ptr] = contents[ptr + 1];
19722 contents[ptr + 1] = tmp;
19723 ptr += 2;
19725 break;
19727 case 'd':
19728 /* Leave data alone. */
19729 break;
19731 ptr = end;
19735 free (map);
19736 arm_data->mapcount = -1;
19737 arm_data->mapsize = 0;
19738 arm_data->map = NULL;
19740 return false;
19743 /* Mangle thumb function symbols as we read them in. */
19745 static bool
19746 elf32_arm_swap_symbol_in (bfd * abfd,
19747 const void *psrc,
19748 const void *pshn,
19749 Elf_Internal_Sym *dst)
19751 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19752 return false;
19753 dst->st_target_internal = 0;
19755 /* New EABI objects mark thumb function symbols by setting the low bit of
19756 the address. */
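/* For illustration (assumed values): a Thumb STT_FUNC symbol read in with
   st_value 0x8001 is stored as st_value 0x8000 with branch type
   ST_BRANCH_TO_THUMB, while an even st_value is kept as-is and marked
   ST_BRANCH_TO_ARM.  */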
19757 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19758 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19760 if (dst->st_value & 1)
19762 dst->st_value &= ~(bfd_vma) 1;
19763 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19764 ST_BRANCH_TO_THUMB);
19766 else
19767 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19769 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19771 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19772 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19774 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19775 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19776 else
19777 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19779 return true;
19783 /* Mangle thumb function symbols as we write them out. */
19785 static void
19786 elf32_arm_swap_symbol_out (bfd *abfd,
19787 const Elf_Internal_Sym *src,
19788 void *cdst,
19789 void *shndx)
19791 Elf_Internal_Sym newsym;
19793 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19794 of the address set, as per the new EABI. We do this unconditionally
19795 because objcopy does not set the elf header flags until after
19796 it writes out the symbol table. */
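/* For illustration (assumed value): a defined Thumb function whose
   internal st_value is 0x8000 is written out with st_value 0x8001 and
   type STT_FUNC.  */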
19797 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19799 newsym = *src;
19800 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19801 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19802 if (newsym.st_shndx != SHN_UNDEF)
19804 /* Do this only for defined symbols. At link time, the static
19805 linker will simulate the work of the dynamic linker by resolving
19806 symbols and will carry over the thumbness of found symbols to
19807 the output symbol table. It's not clear how it happens, but
19808 the thumbness of undefined symbols can well be different at
19809 runtime, and writing '1' for them will be confusing for users
19810 and possibly for the dynamic linker itself. */
19812 newsym.st_value |= 1;
19815 src = &newsym;
19817 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19820 /* Add the PT_ARM_EXIDX program header. */
19822 static bool
19823 elf32_arm_modify_segment_map (bfd *abfd,
19824 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19826 struct elf_segment_map *m;
19827 asection *sec;
19829 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19830 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19832 /* If there is already a PT_ARM_EXIDX header, then we do not
19833 want to add another one. This situation arises when running
19834 "strip"; the input binary already has the header. */
19835 m = elf_seg_map (abfd);
19836 while (m && m->p_type != PT_ARM_EXIDX)
19837 m = m->next;
19838 if (!m)
19840 m = (struct elf_segment_map *)
19841 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19842 if (m == NULL)
19843 return false;
19844 m->p_type = PT_ARM_EXIDX;
19845 m->count = 1;
19846 m->sections[0] = sec;
19848 m->next = elf_seg_map (abfd);
19849 elf_seg_map (abfd) = m;
19853 return true;
19856 /* We may add a PT_ARM_EXIDX program header. */
19858 static int
19859 elf32_arm_additional_program_headers (bfd *abfd,
19860 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19862 asection *sec;
19864 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19865 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19866 return 1;
19867 else
19868 return 0;
19871 /* Hook called by the linker routine which adds symbols from an object
19872 file. */
19874 static bool
19875 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19876 Elf_Internal_Sym *sym, const char **namep,
19877 flagword *flagsp, asection **secp, bfd_vma *valp)
19879 if (elf32_arm_hash_table (info) == NULL)
19880 return false;
19882 if (elf32_arm_hash_table (info)->root.target_os == is_vxworks
19883 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19884 flagsp, secp, valp))
19885 return false;
19887 return true;
19890 /* We use this to override swap_symbol_in and swap_symbol_out. */
19891 const struct elf_size_info elf32_arm_size_info =
19893 sizeof (Elf32_External_Ehdr),
19894 sizeof (Elf32_External_Phdr),
19895 sizeof (Elf32_External_Shdr),
19896 sizeof (Elf32_External_Rel),
19897 sizeof (Elf32_External_Rela),
19898 sizeof (Elf32_External_Sym),
19899 sizeof (Elf32_External_Dyn),
19900 sizeof (Elf_External_Note),
19903 32, 2,
19904 ELFCLASS32, EV_CURRENT,
19905 bfd_elf32_write_out_phdrs,
19906 bfd_elf32_write_shdrs_and_ehdr,
19907 bfd_elf32_checksum_contents,
19908 bfd_elf32_write_relocs,
19909 elf32_arm_swap_symbol_in,
19910 elf32_arm_swap_symbol_out,
19911 bfd_elf32_slurp_reloc_table,
19912 bfd_elf32_slurp_symbol_table,
19913 bfd_elf32_swap_dyn_in,
19914 bfd_elf32_swap_dyn_out,
19915 bfd_elf32_swap_reloc_in,
19916 bfd_elf32_swap_reloc_out,
19917 bfd_elf32_swap_reloca_in,
19918 bfd_elf32_swap_reloca_out
19921 static bfd_vma
19922 read_code32 (const bfd *abfd, const bfd_byte *addr)
19924 /* V7 BE8 code is always little endian. */
19925 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19926 return bfd_getl32 (addr);
19928 return bfd_get_32 (abfd, addr);
19931 static bfd_vma
19932 read_code16 (const bfd *abfd, const bfd_byte *addr)
19934 /* V7 BE8 code is always little endian. */
19935 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19936 return bfd_getl16 (addr);
19938 return bfd_get_16 (abfd, addr);
19941 /* Return the size of the PLT0 entry starting at ADDR,
19942 or (bfd_vma) -1 if the size cannot be determined. */
19944 static bfd_vma
19945 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr,
19946 bfd_size_type data_size)
19948 bfd_vma first_word;
19949 bfd_vma plt0_size;
19951 if (data_size < 4)
19952 return (bfd_vma) -1;
19954 first_word = read_code32 (abfd, addr);
19956 if (first_word == elf32_arm_plt0_entry[0])
19957 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
19958 else if (first_word == elf32_thumb2_plt0_entry[0])
19959 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
19960 else
19961 /* We don't yet handle this PLT format. */
19962 return (bfd_vma) -1;
19964 return plt0_size;
19967 /* Return the size of the PLT entry starting at offset OFFSET
19968 of the PLT section located at address START,
19969 or (bfd_vma) -1 if the size cannot be determined. */
19971 static bfd_vma
19972 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset,
19973 bfd_size_type data_size)
19975 bfd_vma first_insn;
19976 bfd_vma plt_size = 0;
19978 /* PLT entry size is fixed on Thumb-only platforms. */
19979 if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
19980 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
19982 /* Account for the Thumb stub if present. */
19983 if (offset + 2 > data_size)
19984 return (bfd_vma) -1;
19985 if (read_code16 (abfd, start + offset) == elf32_arm_plt_thumb_stub[0])
19987 plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
19990 /* Strip immediate from first add. */
19991 if (offset + plt_size + 4 > data_size)
19992 return (bfd_vma) -1;
19993 first_insn = read_code32 (abfd, start + offset + plt_size) & 0xffffff00;
19995 #ifdef FOUR_WORD_PLT
19996 if (first_insn == elf32_arm_plt_entry[0])
19997 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
19998 #else
19999 if (first_insn == elf32_arm_plt_entry_long[0])
20000 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
20001 else if (first_insn == elf32_arm_plt_entry_short[0])
20002 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
20003 #endif
20004 else
20005 /* We don't yet handle this PLT format. */
20006 return (bfd_vma) -1;
20008 return plt_size;
20011 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
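/* The synthetic symbols produced below name each PLT entry after the
   relocation's target plus an "@plt" suffix, e.g. (illustrative names)
   "memcpy@plt", or "foo+0x8@plt" when the relocation carries a non-zero
   addend.  */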
20013 static long
20014 elf32_arm_get_synthetic_symtab (bfd *abfd,
20015 long symcount ATTRIBUTE_UNUSED,
20016 asymbol **syms ATTRIBUTE_UNUSED,
20017 long dynsymcount,
20018 asymbol **dynsyms,
20019 asymbol **ret)
20021 asection *relplt;
20022 asymbol *s;
20023 arelent *p;
20024 long count, i, n;
20025 size_t size;
20026 Elf_Internal_Shdr *hdr;
20027 char *names;
20028 asection *plt;
20029 bfd_vma offset;
20030 bfd_byte *data;
20032 *ret = NULL;
20034 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
20035 return 0;
20037 if (dynsymcount <= 0)
20038 return 0;
20040 relplt = bfd_get_section_by_name (abfd, ".rel.plt");
20041 if (relplt == NULL)
20042 return 0;
20044 hdr = &elf_section_data (relplt)->this_hdr;
20045 if (hdr->sh_link != elf_dynsymtab (abfd)
20046 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
20047 return 0;
20049 plt = bfd_get_section_by_name (abfd, ".plt");
20050 if (plt == NULL)
20051 return 0;
20053 if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, true))
20054 return -1;
20056 data = NULL;
20057 if (!bfd_get_full_section_contents (abfd, plt, &data))
20058 return -1;
20060 count = NUM_SHDR_ENTRIES (hdr);
20061 size = count * sizeof (asymbol);
20062 p = relplt->relocation;
20063 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
20065 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
20066 if (p->addend != 0)
20067 size += sizeof ("+0x") - 1 + 8;
20070 offset = elf32_arm_plt0_size (abfd, data, plt->size);
20071 if (offset == (bfd_vma) -1
20072 || (s = *ret = (asymbol *) bfd_malloc (size)) == NULL)
20074 free (data);
20075 return -1;
20078 names = (char *) (s + count);
20079 p = relplt->relocation;
20080 n = 0;
20081 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
20083 size_t len;
20085 bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset, plt->size);
20086 if (plt_size == (bfd_vma) -1)
20087 break;
20089 *s = **p->sym_ptr_ptr;
20090 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
20091 we are defining a symbol, ensure one of them is set. */
20092 if ((s->flags & BSF_LOCAL) == 0)
20093 s->flags |= BSF_GLOBAL;
20094 s->flags |= BSF_SYNTHETIC;
20095 s->section = plt;
20096 s->value = offset;
20097 s->name = names;
20098 s->udata.p = NULL;
20099 len = strlen ((*p->sym_ptr_ptr)->name);
20100 memcpy (names, (*p->sym_ptr_ptr)->name, len);
20101 names += len;
20102 if (p->addend != 0)
20104 char buf[30], *a;
20106 memcpy (names, "+0x", sizeof ("+0x") - 1);
20107 names += sizeof ("+0x") - 1;
20108 bfd_sprintf_vma (abfd, buf, p->addend);
20109 for (a = buf; *a == '0'; ++a)
20111 len = strlen (a);
20112 memcpy (names, a, len);
20113 names += len;
20115 memcpy (names, "@plt", sizeof ("@plt"));
20116 names += sizeof ("@plt");
20117 ++s, ++n;
20118 offset += plt_size;
20121 free (data);
20122 return n;
20125 static bool
20126 elf32_arm_section_flags (const Elf_Internal_Shdr *hdr)
20128 if (hdr->sh_flags & SHF_ARM_PURECODE)
20129 hdr->bfd_section->flags |= SEC_ELF_PURECODE;
20130 return true;
20133 static flagword
20134 elf32_arm_lookup_section_flags (char *flag_name)
20136 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20137 return SHF_ARM_PURECODE;
20139 return SEC_NO_FLAGS;
20142 static unsigned int
20143 elf32_arm_count_additional_relocs (asection *sec)
20145 struct _arm_elf_section_data *arm_data;
20146 arm_data = get_arm_elf_section_data (sec);
20148 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20151 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
20152 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised,
20153 FALSE otherwise. ISECTION is the best-guess matching section from the
20154 input bfd IBFD, but it might be NULL. */
20156 static bool
20157 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
20158 bfd *obfd ATTRIBUTE_UNUSED,
20159 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
20160 Elf_Internal_Shdr *osection)
20162 switch (osection->sh_type)
20164 case SHT_ARM_EXIDX:
20166 Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
20167 Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
20168 unsigned i = 0;
20170 osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
20171 osection->sh_info = 0;
20173 /* The sh_link field must be set to the text section associated with
20174 this index section. Unfortunately the ARM EHABI does not specify
20175 exactly how to determine this association. Our caller does try
20176 to match up OSECTION with its corresponding input section, however,
20177 so that is a good first guess. */
20178 if (isection != NULL
20179 && osection->bfd_section != NULL
20180 && isection->bfd_section != NULL
20181 && isection->bfd_section->output_section != NULL
20182 && isection->bfd_section->output_section == osection->bfd_section
20183 && iheaders != NULL
20184 && isection->sh_link > 0
20185 && isection->sh_link < elf_numsections (ibfd)
20186 && iheaders[isection->sh_link]->bfd_section != NULL
20187 && iheaders[isection->sh_link]->bfd_section->output_section != NULL
20190 for (i = elf_numsections (obfd); i-- > 0;)
20191 if (oheaders[i]->bfd_section
20192 == iheaders[isection->sh_link]->bfd_section->output_section)
20193 break;
20196 if (i == 0)
20198 /* Failing that, we have to find a matching section ourselves. If
20199 we had the output section name available we could compare that
20200 with input section names. Unfortunately we don't. So instead
20201 we use a simple heuristic and look for the nearest executable
20202 section before this one. */
20203 for (i = elf_numsections (obfd); i-- > 0;)
20204 if (oheaders[i] == osection)
20205 break;
20206 if (i == 0)
20207 break;
20209 while (i-- > 0)
20210 if (oheaders[i]->sh_type == SHT_PROGBITS
20211 && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
20212 == (SHF_ALLOC | SHF_EXECINSTR))
20213 break;
20216 if (i)
20218 osection->sh_link = i;
20219 /* If the text section was part of a group
20220 then the index section should be too. */
20221 if (oheaders[i]->sh_flags & SHF_GROUP)
20222 osection->sh_flags |= SHF_GROUP;
20223 return true;
20226 break;
20228 case SHT_ARM_PREEMPTMAP:
20229 osection->sh_flags = SHF_ALLOC;
20230 break;
20232 case SHT_ARM_ATTRIBUTES:
20233 case SHT_ARM_DEBUGOVERLAY:
20234 case SHT_ARM_OVERLAYSECTION:
20235 default:
20236 break;
20239 return false;
20242 /* Returns TRUE if NAME is an ARM mapping symbol.
20243 Traditionally the symbols $a, $d and $t have been used.
20244 The ARM ELF standard also defines $x (for A64 code). It also allows a
20245 period-initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20246 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20247 not support them here. $t.x indicates the start of ThumbEE instructions. */
20249 static bool
20250 is_arm_mapping_symbol (const char * name)
20252 return name != NULL /* Paranoia. */
20253 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20254 the mapping symbols could have acquired a prefix.
20255 We do not support this here, since such symbols no
20256 longer conform to the ARM ELF ABI. */
20257 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20258 && (name[2] == 0 || name[2] == '.');
20259 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20260 any characters that follow the period are legal characters for the body
20261 of a symbol's name. For now we just assume that this is the case. */
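/* For illustration: "$a", "$t.x" and "$d.123" satisfy the test above,
   while "$b", "$tx" and "__$a" do not.  */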
20264 /* Make sure that mapping symbols in object files are not removed via the
20265 "strip --strip-unneeded" tool. These symbols are needed in order to
20266 correctly generate interworking veneers, and for byte swapping code
20267 regions. Once an object file has been linked, it is safe to remove the
20268 symbols as they will no longer be needed. */
20270 static void
20271 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20273 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20274 && sym->section != bfd_abs_section_ptr
20275 && is_arm_mapping_symbol (sym->name))
20276 sym->flags |= BSF_KEEP;
20279 #undef elf_backend_copy_special_section_fields
20280 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20282 #define ELF_ARCH bfd_arch_arm
20283 #define ELF_TARGET_ID ARM_ELF_DATA
20284 #define ELF_MACHINE_CODE EM_ARM
20285 #define ELF_MAXPAGESIZE 0x1000
20286 #define ELF_COMMONPAGESIZE 0x1000
20288 #define bfd_elf32_mkobject elf32_arm_mkobject
20290 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20291 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20292 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20293 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20294 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20295 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20296 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20297 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20298 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20299 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20300 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20301 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20303 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20304 #define elf_backend_maybe_function_sym elf32_arm_maybe_function_sym
20305 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20306 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20307 #define elf_backend_check_relocs elf32_arm_check_relocs
20308 #define elf_backend_update_relocs elf32_arm_update_relocs
20309 #define elf_backend_relocate_section elf32_arm_relocate_section
20310 #define elf_backend_write_section elf32_arm_write_section
20311 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20312 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20313 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20314 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20315 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20316 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20317 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20318 #define elf_backend_init_file_header elf32_arm_init_file_header
20319 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20320 #define elf_backend_object_p elf32_arm_object_p
20321 #define elf_backend_fake_sections elf32_arm_fake_sections
20322 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20323 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20324 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20325 #define elf_backend_size_info elf32_arm_size_info
20326 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20327 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20328 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20329 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20330 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20331 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20332 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20333 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20335 #define elf_backend_can_refcount 1
20336 #define elf_backend_can_gc_sections 1
20337 #define elf_backend_plt_readonly 1
20338 #define elf_backend_want_got_plt 1
20339 #define elf_backend_want_plt_sym 0
20340 #define elf_backend_want_dynrelro 1
20341 #define elf_backend_may_use_rel_p 1
20342 #define elf_backend_may_use_rela_p 0
20343 #define elf_backend_default_use_rela_p 0
20344 #define elf_backend_dtrel_excludes_plt 1
20346 #define elf_backend_got_header_size 12
20347 #define elf_backend_extern_protected_data 0
20349 #undef elf_backend_obj_attrs_vendor
20350 #define elf_backend_obj_attrs_vendor "aeabi"
20351 #undef elf_backend_obj_attrs_section
20352 #define elf_backend_obj_attrs_section ".ARM.attributes"
20353 #undef elf_backend_obj_attrs_arg_type
20354 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20355 #undef elf_backend_obj_attrs_section_type
20356 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20357 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20358 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20360 #undef elf_backend_section_flags
20361 #define elf_backend_section_flags elf32_arm_section_flags
20362 #undef elf_backend_lookup_section_flags_hook
20363 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20365 #define elf_backend_linux_prpsinfo32_ugid16 true
20367 #include "elf32-target.h"
20369 /* Native Client targets. */
20371 #undef TARGET_LITTLE_SYM
20372 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20373 #undef TARGET_LITTLE_NAME
20374 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20375 #undef TARGET_BIG_SYM
20376 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20377 #undef TARGET_BIG_NAME
20378 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20380 /* Like elf32_arm_link_hash_table_create -- but overrides
20381 appropriately for NaCl. */
20383 static struct bfd_link_hash_table *
20384 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20386 struct bfd_link_hash_table *ret;
20388 ret = elf32_arm_link_hash_table_create (abfd);
20389 if (ret)
20391 struct elf32_arm_link_hash_table *htab
20392 = (struct elf32_arm_link_hash_table *) ret;
20394 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20395 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20397 return ret;
20400 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20401 really need to use elf32_arm_modify_segment_map. But we do it
20402 anyway just to reduce gratuitous differences with the stock ARM backend. */
20404 static bool
20405 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20407 return (elf32_arm_modify_segment_map (abfd, info)
20408 && nacl_modify_segment_map (abfd, info));
20411 static bool
20412 elf32_arm_nacl_final_write_processing (bfd *abfd)
20414 arm_final_write_processing (abfd);
20415 return nacl_final_write_processing (abfd);
20418 static bfd_vma
20419 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20420 const arelent *rel ATTRIBUTE_UNUSED)
20422 return plt->vma
20423 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20424 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20427 #undef elf32_bed
20428 #define elf32_bed elf32_arm_nacl_bed
20429 #undef bfd_elf32_bfd_link_hash_table_create
20430 #define bfd_elf32_bfd_link_hash_table_create \
20431 elf32_arm_nacl_link_hash_table_create
20432 #undef elf_backend_plt_alignment
20433 #define elf_backend_plt_alignment 4
20434 #undef elf_backend_modify_segment_map
20435 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20436 #undef elf_backend_modify_headers
20437 #define elf_backend_modify_headers nacl_modify_headers
20438 #undef elf_backend_final_write_processing
20439 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20440 #undef bfd_elf32_get_synthetic_symtab
20441 #undef elf_backend_plt_sym_val
20442 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20443 #undef elf_backend_copy_special_section_fields
20445 #undef ELF_MINPAGESIZE
20446 #undef ELF_COMMONPAGESIZE
20448 #undef ELF_TARGET_OS
20449 #define ELF_TARGET_OS is_nacl
20451 #include "elf32-target.h"
20453 /* Reset to defaults. */
20454 #undef elf_backend_plt_alignment
20455 #undef elf_backend_modify_segment_map
20456 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20457 #undef elf_backend_modify_headers
20458 #undef elf_backend_final_write_processing
20459 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20460 #undef ELF_MINPAGESIZE
20461 #undef ELF_COMMONPAGESIZE
20462 #define ELF_COMMONPAGESIZE 0x1000
20465 /* FDPIC Targets. */
20467 #undef TARGET_LITTLE_SYM
20468 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20469 #undef TARGET_LITTLE_NAME
20470 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20471 #undef TARGET_BIG_SYM
20472 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20473 #undef TARGET_BIG_NAME
20474 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20475 #undef elf_match_priority
20476 #define elf_match_priority 128
20477 #undef ELF_OSABI
20478 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20480 /* Like elf32_arm_link_hash_table_create -- but overrides
20481 appropriately for FDPIC. */
20483 static struct bfd_link_hash_table *
20484 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20486 struct bfd_link_hash_table *ret;
20488 ret = elf32_arm_link_hash_table_create (abfd);
20489 if (ret)
20491 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20493 htab->fdpic_p = 1;
20495 return ret;
20498 /* We need dynamic symbols for every section, since segments can
20499 relocate independently. */
20500 static bool
20501 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20502 struct bfd_link_info *info
20503 ATTRIBUTE_UNUSED,
20504 asection *p ATTRIBUTE_UNUSED)
20506 switch (elf_section_data (p)->this_hdr.sh_type)
20508 case SHT_PROGBITS:
20509 case SHT_NOBITS:
20510 /* If sh_type is not yet decided, assume it could be
20511 SHT_PROGBITS/SHT_NOBITS. */
20512 case SHT_NULL:
20513 return false;
20515 /* There shouldn't be section relative relocations
20516 against any other section. */
20517 default:
20518 return true;
20522 #undef elf32_bed
20523 #define elf32_bed elf32_arm_fdpic_bed
20525 #undef bfd_elf32_bfd_link_hash_table_create
20526 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20528 #undef elf_backend_omit_section_dynsym
20529 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20531 #undef ELF_TARGET_OS
20533 #include "elf32-target.h"
20535 #undef elf_match_priority
20536 #undef ELF_OSABI
20537 #undef elf_backend_omit_section_dynsym
20539 /* VxWorks Targets. */
20541 #undef TARGET_LITTLE_SYM
20542 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20543 #undef TARGET_LITTLE_NAME
20544 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20545 #undef TARGET_BIG_SYM
20546 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20547 #undef TARGET_BIG_NAME
20548 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20550 /* Like elf32_arm_link_hash_table_create -- but overrides
20551 appropriately for VxWorks. */
20553 static struct bfd_link_hash_table *
20554 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20556 struct bfd_link_hash_table *ret;
20558 ret = elf32_arm_link_hash_table_create (abfd);
20559 if (ret)
20561 struct elf32_arm_link_hash_table *htab
20562 = (struct elf32_arm_link_hash_table *) ret;
20563 htab->use_rel = 0;
20565 return ret;
20568 static bool
20569 elf32_arm_vxworks_final_write_processing (bfd *abfd)
20571 arm_final_write_processing (abfd);
20572 return elf_vxworks_final_write_processing (abfd);
20575 #undef elf32_bed
20576 #define elf32_bed elf32_arm_vxworks_bed
20578 #undef bfd_elf32_bfd_link_hash_table_create
20579 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20580 #undef elf_backend_final_write_processing
20581 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20582 #undef elf_backend_emit_relocs
20583 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20585 #undef elf_backend_may_use_rel_p
20586 #define elf_backend_may_use_rel_p 0
20587 #undef elf_backend_may_use_rela_p
20588 #define elf_backend_may_use_rela_p 1
20589 #undef elf_backend_default_use_rela_p
20590 #define elf_backend_default_use_rela_p 1
20591 #undef elf_backend_want_plt_sym
20592 #define elf_backend_want_plt_sym 1
20593 #undef ELF_MAXPAGESIZE
20594 #define ELF_MAXPAGESIZE 0x1000
20595 #undef ELF_TARGET_OS
20596 #define ELF_TARGET_OS is_vxworks
20598 #include "elf32-target.h"
20601 /* Merge backend specific data from an object file to the output
20602 object file when linking. */
20604 static bool
20605 elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
20607 bfd *obfd = info->output_bfd;
20608 flagword out_flags;
20609 flagword in_flags;
20610 bool flags_compatible = true;
20611 asection *sec;
20613 /* Check if we have the same endianness. */
20614 if (! _bfd_generic_verify_endian_match (ibfd, info))
20615 return false;
20617 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
20618 return true;
20620 if (!elf32_arm_merge_eabi_attributes (ibfd, info))
20621 return false;
20623 /* The input BFD must have had its flags initialised. */
20624 /* The following seems bogus to me -- The flags are initialized in
20625 the assembler but I don't think an elf_flags_init field is
20626 written into the object. */
20627 /* BFD_ASSERT (elf_flags_init (ibfd)); */
20629 in_flags = elf_elfheader (ibfd)->e_flags;
20630 out_flags = elf_elfheader (obfd)->e_flags;
20632 /* In theory there is no reason why we couldn't handle this. However
20633 in practice it isn't even close to working and there is no real
20634 reason to want it. */
20635 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
20636 && !(ibfd->flags & DYNAMIC)
20637 && (in_flags & EF_ARM_BE8))
20639 _bfd_error_handler (_("error: %pB is already in final BE8 format"),
20640 ibfd);
20641 return false;
20644 if (!elf_flags_init (obfd))
20646 /* If the input has no flags set, then do not set the output flags.
20647 This will allow future bfds to determine the desired output flags.
20648 If no input bfds have any flags set, then neither will the output bfd.
20650 Note - we used to restrict this test to when the input architecture
20651 variant was the default variant, but this does not allow for
20652 linker scripts which override the default. See PR 28910 for an
20653 example. */
20654 if (in_flags == 0)
20655 return true;
20657 elf_flags_init (obfd) = true;
20658 elf_elfheader (obfd)->e_flags = in_flags;
20660 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
20661 && bfd_get_arch_info (obfd)->the_default)
20662 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
20664 return true;
20667 /* Determine what should happen if the input ARM architecture
20668 does not match the output ARM architecture. */
20669 if (! bfd_arm_merge_machines (ibfd, obfd))
20670 return false;
20672 /* Identical flags must be compatible. */
20673 if (in_flags == out_flags)
20674 return true;
20676 /* Check to see if the input BFD actually contains any sections. If
20677 not, its flags may not have been initialised either, but it
20678 cannot actually cause any incompatibility. Do not short-circuit
20679 dynamic objects; their section list may be emptied by
20680 elf_link_add_object_symbols.
20682 Also check to see if there are no code sections in the input.
20683 In this case there is no need to check for code-specific flags.
20684 XXX - do we need to worry about floating-point format compatibility
20685 in data sections? */
20686 if (!(ibfd->flags & DYNAMIC))
20688 bool null_input_bfd = true;
20689 bool only_data_sections = true;
20691 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
20693 /* Ignore synthetic glue sections. */
20694 if (strcmp (sec->name, ".glue_7")
20695 && strcmp (sec->name, ".glue_7t"))
20697 if ((bfd_section_flags (sec)
20698 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20699 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20700 only_data_sections = false;
20702 null_input_bfd = false;
20703 break;
20707 if (null_input_bfd || only_data_sections)
20708 return true;
20711 /* Complain about various flag mismatches. */
20712 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
20713 EF_ARM_EABI_VERSION (out_flags)))
20715 _bfd_error_handler
20716 (_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
20717 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
20718 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
20719 return false;
20722 /* Not sure what needs to be checked for EABI versions >= 1. */
20723 /* VxWorks libraries do not use these flags. */
20724 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
20725 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
20726 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
20728 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
20730 _bfd_error_handler
20731 (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
20732 ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
20733 obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
20734 flags_compatible = false;
20737 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
20739 if (in_flags & EF_ARM_APCS_FLOAT)
20740 _bfd_error_handler
20741 (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
20742 ibfd, obfd);
20743 else
20744 _bfd_error_handler
20745 (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
20746 ibfd, obfd);
20748 flags_compatible = false;
20751 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
20753 if (in_flags & EF_ARM_VFP_FLOAT)
20754 _bfd_error_handler
20755 (_("error: %pB uses %s instructions, whereas %pB does not"),
20756 ibfd, "VFP", obfd);
20757 else
20758 _bfd_error_handler
20759 (_("error: %pB uses %s instructions, whereas %pB does not"),
20760 ibfd, "FPA", obfd);
20762 flags_compatible = false;
20765 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
20767 if (in_flags & EF_ARM_MAVERICK_FLOAT)
20768 _bfd_error_handler
20769 (_("error: %pB uses %s instructions, whereas %pB does not"),
20770 ibfd, "Maverick", obfd);
20771 else
20772 _bfd_error_handler
20773 (_("error: %pB does not use %s instructions, whereas %pB does"),
20774 ibfd, "Maverick", obfd);
20776 flags_compatible = false;
20779 #ifdef EF_ARM_SOFT_FLOAT
20780 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
20782 /* We can allow interworking between code that uses the VFP format
20783 layout and either soft-float or integer registers for
20784 passing floating-point arguments and results. We already
20785 know that the APCS_FLOAT flags match; similarly for the VFP
20786 flags. */
20787 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
20788 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
20790 if (in_flags & EF_ARM_SOFT_FLOAT)
20791 _bfd_error_handler
20792 (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
20793 ibfd, obfd);
20794 else
20795 _bfd_error_handler
20796 (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
20797 ibfd, obfd);
20799 flags_compatible = false;
20802 #endif
20804 /* Interworking mismatch is only a warning. */
20805 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
20807 if (in_flags & EF_ARM_INTERWORK)
20809 _bfd_error_handler
20810 (_("warning: %pB supports interworking, whereas %pB does not"),
20811 ibfd, obfd);
20813 else
20815 _bfd_error_handler
20816 (_("warning: %pB does not support interworking, whereas %pB does"),
20817 ibfd, obfd);
20822 return flags_compatible;