/* AArch64-specific support for ELF.
   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */
#include "elfxx-aarch64.h"
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
/* A value with the low N bits set.  N must be below 32: the shift is
   performed on a 32-bit unsigned literal.  */
#define MASK(n) ((1u << (n)) - 1)
/* Extract the 26-bit immediate (imm26, bits [25:0]) of an
   unconditional branch instruction.  */
static inline uint32_t
decode_branch_ofs_26 (uint32_t insn)
{
  /* 0x3ffffff is the 26-bit mask.  */
  return insn & 0x3ffffffu;
}
/* Extract the 19-bit immediate (bits [23:5]) of a conditional branch
   or compare-and-branch instruction.  */
static inline uint32_t
decode_cond_branch_ofs_19 (uint32_t insn)
{
  uint32_t shifted = insn >> 5;
  return shifted & 0x7ffffu;
}
/* Extract the 19-bit literal offset (bits [23:5]) of a load-literal
   instruction.  */
static inline uint32_t
decode_ld_lit_ofs_19 (uint32_t insn)
{
  uint32_t shifted = insn >> 5;
  return shifted & 0x7ffffu;
}
/* Extract the 14-bit immediate (bits [18:5]) of a test-and-branch
   instruction.  */
static inline uint32_t
decode_tst_branch_ofs_14 (uint32_t insn)
{
  uint32_t shifted = insn >> 5;
  return shifted & 0x3fffu;
}
/* Extract the 16-bit immediate (bits [20:5]) of a move-wide
   (MOVZ/MOVN/MOVK) instruction.  */
static inline uint32_t
decode_movw_imm (uint32_t insn)
{
  uint32_t shifted = insn >> 5;
  return shifted & 0xffffu;
}
/* Extract the 12-bit immediate (bits [21:10]) of an add-immediate
   instruction.  */
static inline uint32_t
decode_add_imm (uint32_t insn)
{
  uint32_t shifted = insn >> 10;
  return shifted & 0xfffu;
}
/* Rewrite the 12-bit immediate field (bits [21:10]) of an
   add-immediate instruction with IMM, leaving all other bits of INSN
   untouched.  */
static inline uint32_t
reencode_add_imm (uint32_t insn, uint32_t imm)
{
  uint32_t field = (imm & 0xfffu) << 10;
  uint32_t cleared = insn & ~(0xfffu << 10);
  return cleared | field;
}
/* Rewrite the split 21-bit immediate of an ADR/ADRP instruction:
   immlo (IMM bits [1:0]) lands in bits [30:29], immhi (IMM bits
   [20:2]) lands in bits [23:5].  */
static inline uint32_t
reencode_adr_imm (uint32_t insn, uint32_t imm)
{
  uint32_t immlo = (imm & 0x3u) << 29;
  uint32_t immhi = (imm & (0x7ffffu << 2)) << 3;
  uint32_t cleared = insn & ~((0x3u << 29) | (0x7ffffu << 5));
  return cleared | immlo | immhi;
}
/* Rewrite the 12-bit scaled-offset field (bits [21:10]) of a
   load/store positive-immediate instruction with IMM.  */
static inline uint32_t
reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
{
  uint32_t field = (imm & 0xfffu) << 10;
  uint32_t cleared = insn & ~(0xfffu << 10);
  return cleared | field;
}
/* Rewrite the 26-bit offset field (bits [25:0]) of an unconditional
   branch instruction with OFS.  */
static inline uint32_t
reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
{
  uint32_t cleared = insn & ~0x3ffffffu;
  return cleared | (ofs & 0x3ffffffu);
}
/* Rewrite the 19-bit offset field (bits [23:5]) of a conditional
   branch or compare-and-branch instruction with OFS.  */
static inline uint32_t
reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
{
  uint32_t field = (ofs & 0x7ffffu) << 5;
  uint32_t cleared = insn & ~(0x7ffffu << 5);
  return cleared | field;
}
/* Encode the 19-bit offset of load literal.

   (The previous comment said "Decode", but this function writes the
   offset field — bits [23:5] — rather than reading it.)  */
static inline uint32_t
reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(0x7ffffu << 5)) | ((ofs & 0x7ffffu) << 5);
}
/* Rewrite the 14-bit offset field (bits [18:5]) of a test-and-branch
   instruction with OFS.  */
static inline uint32_t
reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
{
  uint32_t field = (ofs & 0x3fffu) << 5;
  uint32_t cleared = insn & ~(0x3fffu << 5);
  return cleared | field;
}
/* Rewrite the 16-bit immediate field (bits [20:5]) of a move-wide
   instruction with IMM.  */
static inline uint32_t
reencode_movw_imm (uint32_t insn, uint32_t imm)
{
  uint32_t field = (imm & 0xffffu) << 5;
  uint32_t cleared = insn & ~(0xffffu << 5);
  return cleared | field;
}
/* Turn a MOVZ/MOVN opcode into MOVZ by setting bit 30 (the opc bit
   distinguishing the two forms).  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1u << 30);
}
/* Turn a MOVZ/MOVN opcode into MOVN by clearing bit 30 (the opc bit
   distinguishing the two forms).  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1u << 30);
}
141 /* Return non-zero if the indicated VALUE has overflowed the maximum
142 range expressible by a unsigned number with the indicated number of
145 static bfd_reloc_status_type
146 aarch64_unsigned_overflow (bfd_vma value
, unsigned int bits
)
149 if (bits
>= sizeof (bfd_vma
) * 8)
151 lim
= (bfd_vma
) 1 << bits
;
153 return bfd_reloc_overflow
;
157 /* Return non-zero if the indicated VALUE has overflowed the maximum
158 range expressible by an signed number with the indicated number of
161 static bfd_reloc_status_type
162 aarch64_signed_overflow (bfd_vma value
, unsigned int bits
)
164 bfd_signed_vma svalue
= (bfd_signed_vma
) value
;
167 if (bits
>= sizeof (bfd_vma
) * 8)
169 lim
= (bfd_signed_vma
) 1 << (bits
- 1);
170 if (svalue
< -lim
|| svalue
>= lim
)
171 return bfd_reloc_overflow
;
175 /* Insert the addend/value into the instruction or data object being
177 bfd_reloc_status_type
178 _bfd_aarch64_elf_put_addend (bfd
*abfd
,
179 bfd_byte
*address
, bfd_reloc_code_real_type r_type
,
180 reloc_howto_type
*howto
, bfd_signed_vma addend
)
182 bfd_reloc_status_type status
= bfd_reloc_ok
;
183 bfd_signed_vma old_addend
= addend
;
187 size
= bfd_get_reloc_size (howto
);
193 contents
= bfd_get_16 (abfd
, address
);
196 if (howto
->src_mask
!= 0xffffffff)
197 /* Must be 32-bit instruction, always little-endian. */
198 contents
= bfd_getl32 (address
);
200 /* Must be 32-bit data (endianness dependent). */
201 contents
= bfd_get_32 (abfd
, address
);
204 contents
= bfd_get_64 (abfd
, address
);
210 switch (howto
->complain_on_overflow
)
212 case complain_overflow_dont
:
214 case complain_overflow_signed
:
215 status
= aarch64_signed_overflow (addend
,
216 howto
->bitsize
+ howto
->rightshift
);
218 case complain_overflow_unsigned
:
219 status
= aarch64_unsigned_overflow (addend
,
220 howto
->bitsize
+ howto
->rightshift
);
222 case complain_overflow_bitfield
:
227 addend
>>= howto
->rightshift
;
231 case BFD_RELOC_AARCH64_JUMP26
:
232 case BFD_RELOC_AARCH64_CALL26
:
233 contents
= reencode_branch_ofs_26 (contents
, addend
);
236 case BFD_RELOC_AARCH64_BRANCH19
:
237 contents
= reencode_cond_branch_ofs_19 (contents
, addend
);
240 case BFD_RELOC_AARCH64_TSTBR14
:
241 contents
= reencode_tst_branch_ofs_14 (contents
, addend
);
244 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19
:
245 case BFD_RELOC_AARCH64_LD_LO19_PCREL
:
246 case BFD_RELOC_AARCH64_GOT_LD_PREL19
:
247 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19
:
248 if (old_addend
& ((1 << howto
->rightshift
) - 1))
249 return bfd_reloc_overflow
;
250 contents
= reencode_ld_lit_ofs_19 (contents
, addend
);
253 case BFD_RELOC_AARCH64_TLSDESC_CALL
:
256 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21
:
257 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21
:
258 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21
:
259 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
260 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21
:
261 case BFD_RELOC_AARCH64_ADR_GOT_PAGE
:
262 case BFD_RELOC_AARCH64_ADR_LO21_PCREL
:
263 case BFD_RELOC_AARCH64_ADR_HI21_PCREL
:
264 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL
:
265 contents
= reencode_adr_imm (contents
, addend
);
268 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC
:
269 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12
:
270 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
:
271 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
272 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC
:
273 case BFD_RELOC_AARCH64_ADD_LO12
:
274 /* Corresponds to: add rd, rn, #uimm12 to provide the low order
275 12 bits of the page offset following
276 BFD_RELOC_AARCH64_ADR_HI21_PCREL which computes the
277 (pc-relative) page base. */
278 contents
= reencode_add_imm (contents
, addend
);
281 case BFD_RELOC_AARCH64_LDST8_LO12
:
282 case BFD_RELOC_AARCH64_LDST16_LO12
:
283 case BFD_RELOC_AARCH64_LDST32_LO12
:
284 case BFD_RELOC_AARCH64_LDST64_LO12
:
285 case BFD_RELOC_AARCH64_LDST128_LO12
:
286 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC
:
287 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
:
288 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
289 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
:
290 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC
:
291 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
:
292 if (old_addend
& ((1 << howto
->rightshift
) - 1))
293 return bfd_reloc_overflow
;
294 /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
295 12 bits of the page offset following BFD_RELOC_AARCH64_ADR_HI21_PCREL
296 which computes the (pc-relative) page base. */
297 contents
= reencode_ldst_pos_imm (contents
, addend
);
300 /* Group relocations to create high bits of a 16, 32, 48 or 64
301 bit signed data or abs address inline. Will change
302 instruction to MOVN or MOVZ depending on sign of calculated
305 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
:
306 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
:
307 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
308 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0
:
309 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
310 case BFD_RELOC_AARCH64_MOVW_G0_S
:
311 case BFD_RELOC_AARCH64_MOVW_G1_S
:
312 case BFD_RELOC_AARCH64_MOVW_G2_S
:
313 /* NOTE: We can only come here with movz or movn. */
316 /* Force use of MOVN. */
318 contents
= reencode_movzn_to_movn (contents
);
322 /* Force use of MOVZ. */
323 contents
= reencode_movzn_to_movz (contents
);
327 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
328 data or abs address inline. */
330 case BFD_RELOC_AARCH64_MOVW_G0
:
331 case BFD_RELOC_AARCH64_MOVW_G0_NC
:
332 case BFD_RELOC_AARCH64_MOVW_G1
:
333 case BFD_RELOC_AARCH64_MOVW_G1_NC
:
334 case BFD_RELOC_AARCH64_MOVW_G2
:
335 case BFD_RELOC_AARCH64_MOVW_G2_NC
:
336 case BFD_RELOC_AARCH64_MOVW_G3
:
337 contents
= reencode_movw_imm (contents
, addend
);
341 /* Repack simple data */
342 if (howto
->dst_mask
& (howto
->dst_mask
+ 1))
343 return bfd_reloc_notsupported
;
345 contents
= ((contents
& ~howto
->dst_mask
) | (addend
& howto
->dst_mask
));
352 bfd_put_16 (abfd
, contents
, address
);
355 if (howto
->dst_mask
!= 0xffffffff)
356 /* must be 32-bit instruction, always little-endian */
357 bfd_putl32 (contents
, address
);
359 /* must be 32-bit data (endianness dependent) */
360 bfd_put_32 (abfd
, contents
, address
);
363 bfd_put_64 (abfd
, contents
, address
);
373 _bfd_aarch64_elf_resolve_relocation (bfd_reloc_code_real_type r_type
,
374 bfd_vma place
, bfd_vma value
,
375 bfd_vma addend
, bfd_boolean weak_undef_p
)
379 case BFD_RELOC_AARCH64_TLSDESC_CALL
:
380 case BFD_RELOC_AARCH64_NONE
:
383 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21
:
384 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19
:
385 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21
:
386 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19
:
387 case BFD_RELOC_AARCH64_ADR_LO21_PCREL
:
388 case BFD_RELOC_AARCH64_BRANCH19
:
389 case BFD_RELOC_AARCH64_LD_LO19_PCREL
:
390 case BFD_RELOC_AARCH64_16_PCREL
:
391 case BFD_RELOC_AARCH64_32_PCREL
:
392 case BFD_RELOC_AARCH64_64_PCREL
:
393 case BFD_RELOC_AARCH64_TSTBR14
:
396 value
= value
+ addend
- place
;
399 case BFD_RELOC_AARCH64_CALL26
:
400 case BFD_RELOC_AARCH64_JUMP26
:
401 value
= value
+ addend
- place
;
404 case BFD_RELOC_AARCH64_16
:
405 case BFD_RELOC_AARCH64_32
:
406 case BFD_RELOC_AARCH64_MOVW_G0_S
:
407 case BFD_RELOC_AARCH64_MOVW_G1_S
:
408 case BFD_RELOC_AARCH64_MOVW_G2_S
:
409 case BFD_RELOC_AARCH64_MOVW_G0
:
410 case BFD_RELOC_AARCH64_MOVW_G0_NC
:
411 case BFD_RELOC_AARCH64_MOVW_G1
:
412 case BFD_RELOC_AARCH64_MOVW_G1_NC
:
413 case BFD_RELOC_AARCH64_MOVW_G2
:
414 case BFD_RELOC_AARCH64_MOVW_G2_NC
:
415 case BFD_RELOC_AARCH64_MOVW_G3
:
416 value
= value
+ addend
;
419 case BFD_RELOC_AARCH64_ADR_HI21_PCREL
:
420 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL
:
423 value
= PG (value
+ addend
) - PG (place
);
426 case BFD_RELOC_AARCH64_GOT_LD_PREL19
:
427 value
= value
+ addend
- place
;
430 case BFD_RELOC_AARCH64_ADR_GOT_PAGE
:
431 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21
:
432 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21
:
433 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
434 value
= PG (value
+ addend
) - PG (place
);
437 case BFD_RELOC_AARCH64_ADD_LO12
:
438 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC
:
439 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
:
440 case BFD_RELOC_AARCH64_LDST8_LO12
:
441 case BFD_RELOC_AARCH64_LDST16_LO12
:
442 case BFD_RELOC_AARCH64_LDST32_LO12
:
443 case BFD_RELOC_AARCH64_LDST64_LO12
:
444 case BFD_RELOC_AARCH64_LDST128_LO12
:
445 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC
:
446 case BFD_RELOC_AARCH64_TLSDESC_ADD
:
447 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC
:
448 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
:
449 case BFD_RELOC_AARCH64_TLSDESC_LDR
:
450 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC
:
451 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
452 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
:
453 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12
:
454 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
455 value
= PG_OFFSET (value
+ addend
);
458 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
:
459 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
460 value
= (value
+ addend
) & (bfd_vma
) 0xffff0000;
462 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
:
463 /* Mask off low 12bits, keep all other high bits, so that the later
464 generic code could check whehter there is overflow. */
465 value
= (value
+ addend
) & ~(bfd_vma
) 0xfff;
468 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0
:
469 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
470 value
= (value
+ addend
) & (bfd_vma
) 0xffff;
473 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
:
474 value
= (value
+ addend
) & ~(bfd_vma
) 0xffffffff;
475 value
-= place
& ~(bfd_vma
) 0xffffffff;
485 /* Hook called by the linker routine which adds symbols from an object
489 _bfd_aarch64_elf_add_symbol_hook (bfd
*abfd
, struct bfd_link_info
*info
,
490 Elf_Internal_Sym
*sym
,
491 const char **namep ATTRIBUTE_UNUSED
,
492 flagword
*flagsp ATTRIBUTE_UNUSED
,
493 asection
**secp ATTRIBUTE_UNUSED
,
494 bfd_vma
*valp ATTRIBUTE_UNUSED
)
496 if ((ELF_ST_TYPE (sym
->st_info
) == STT_GNU_IFUNC
497 || ELF_ST_BIND (sym
->st_info
) == STB_GNU_UNIQUE
)
498 && (abfd
->flags
& DYNAMIC
) == 0
499 && bfd_get_flavour (info
->output_bfd
) == bfd_target_elf_flavour
)
500 elf_tdata (info
->output_bfd
)->has_gnu_symbols
= TRUE
;
505 /* Support for core dump NOTE sections. */
508 _bfd_aarch64_elf_grok_prstatus (bfd
*abfd
, Elf_Internal_Note
*note
)
513 switch (note
->descsz
)
518 case 392: /* sizeof(struct elf_prstatus) on Linux/arm64. */
520 elf_tdata (abfd
)->core
->signal
521 = bfd_get_16 (abfd
, note
->descdata
+ 12);
524 elf_tdata (abfd
)->core
->lwpid
525 = bfd_get_32 (abfd
, note
->descdata
+ 32);
534 /* Make a ".reg/999" section. */
535 return _bfd_elfcore_make_pseudosection (abfd
, ".reg",
536 size
, note
->descpos
+ offset
);
540 _bfd_aarch64_elf_grok_psinfo (bfd
*abfd
, Elf_Internal_Note
*note
)
542 switch (note
->descsz
)
547 case 136: /* This is sizeof(struct elf_prpsinfo) on Linux/aarch64. */
548 elf_tdata (abfd
)->core
->pid
= bfd_get_32 (abfd
, note
->descdata
+ 24);
549 elf_tdata (abfd
)->core
->program
550 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 40, 16);
551 elf_tdata (abfd
)->core
->command
552 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 56, 80);
555 /* Note that for some reason, a spurious space is tacked
556 onto the end of the args in some (at least one anyway)
557 implementations, so strip it off if it exists. */
560 char *command
= elf_tdata (abfd
)->core
->command
;
561 int n
= strlen (command
);
563 if (0 < n
&& command
[n
- 1] == ' ')
564 command
[n
- 1] = '\0';
571 _bfd_aarch64_elf_write_core_note (bfd
*abfd
, char *buf
, int *bufsiz
, int note_type
,
584 va_start (ap
, note_type
);
585 memset (data
, 0, sizeof (data
));
586 strncpy (data
+ 40, va_arg (ap
, const char *), 16);
587 strncpy (data
+ 56, va_arg (ap
, const char *), 80);
590 return elfcore_write_note (abfd
, buf
, bufsiz
, "CORE",
591 note_type
, data
, sizeof (data
));
602 va_start (ap
, note_type
);
603 memset (data
, 0, sizeof (data
));
604 pid
= va_arg (ap
, long);
605 bfd_put_32 (abfd
, pid
, data
+ 32);
606 cursig
= va_arg (ap
, int);
607 bfd_put_16 (abfd
, cursig
, data
+ 12);
608 greg
= va_arg (ap
, const void *);
609 memcpy (data
+ 112, greg
, 272);
612 return elfcore_write_note (abfd
, buf
, bufsiz
, "CORE",
613 note_type
, data
, sizeof (data
));