/* Renesas RX specific support for 32-bit ELF.
   Copyright (C) 2008, 2009, 2010
   Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
#include "sysdep.h"
#include "bfd.h"
#include "elf-bfd.h"
#include "elf/rx.h"
#include "libiberty.h"

#define RX_OPCODE_BIG_ENDIAN 0

char * rx_get_reloc (long);
void rx_dump_symtab (bfd *, void *, void *);

#define RXREL(n,sz,bit,shift,complain,pcrel)                                  \
  HOWTO (R_RX_##n, shift, sz, bit, pcrel, 0, complain_overflow_ ## complain, \
         bfd_elf_generic_reloc, "R_RX_" #n, FALSE, 0, ~0, FALSE)
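/* For example, the table entry RXREL (DIR32, 2, 32, 0, signed, FALSE)
   below expands to

     HOWTO (R_RX_DIR32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
            bfd_elf_generic_reloc, "R_RX_DIR32", FALSE, 0, ~0, FALSE)

   i.e. a 32-bit (size code 2 = four bytes), non-PC-relative reloc with
   signed overflow checking, a zero src_mask and an all-ones dst_mask.  */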
/* Note that the relocations around 0x7f are internal to this file;
   feel free to move them as needed to avoid conflicts with published
   relocation numbers.  */
static reloc_howto_type rx_elf_howto_table [] =
{
  RXREL (NONE,         0,  0, 0, dont,     FALSE),
  RXREL (DIR32,        2, 32, 0, signed,   FALSE),
  RXREL (DIR24S,       2, 24, 0, signed,   FALSE),
  RXREL (DIR16,        1, 16, 0, dont,     FALSE),
  RXREL (DIR16U,       1, 16, 0, unsigned, FALSE),
  RXREL (DIR16S,       1, 16, 0, signed,   FALSE),
  RXREL (DIR8,         0,  8, 0, dont,     FALSE),
  RXREL (DIR8U,        0,  8, 0, unsigned, FALSE),
  RXREL (DIR8S,        0,  8, 0, signed,   FALSE),
  RXREL (DIR24S_PCREL, 2, 24, 0, signed,   TRUE),
  RXREL (DIR16S_PCREL, 1, 16, 0, signed,   TRUE),
  RXREL (DIR8S_PCREL,  0,  8, 0, signed,   TRUE),
  RXREL (DIR16UL,      1, 16, 2, unsigned, FALSE),
  RXREL (DIR16UW,      1, 16, 1, unsigned, FALSE),
  RXREL (DIR8UL,       0,  8, 2, unsigned, FALSE),
  RXREL (DIR8UW,       0,  8, 1, unsigned, FALSE),
  RXREL (DIR32_REV,    1, 16, 0, dont,     FALSE),
  RXREL (DIR16_REV,    1, 16, 0, dont,     FALSE),
  RXREL (DIR3U_PCREL,  0,  3, 0, dont,     TRUE),

  RXREL (RH_3_PCREL,   0,  3, 0, signed,   TRUE),
  RXREL (RH_16_OP,     1, 16, 0, signed,   FALSE),
  RXREL (RH_24_OP,     2, 24, 0, signed,   FALSE),
  RXREL (RH_32_OP,     2, 32, 0, signed,   FALSE),
  RXREL (RH_24_UNS,    2, 24, 0, unsigned, FALSE),
  RXREL (RH_8_NEG,     0,  8, 0, signed,   FALSE),
  RXREL (RH_16_NEG,    1, 16, 0, signed,   FALSE),
  RXREL (RH_24_NEG,    2, 24, 0, signed,   FALSE),
  RXREL (RH_32_NEG,    2, 32, 0, signed,   FALSE),
  RXREL (RH_DIFF,      2, 32, 0, signed,   FALSE),
  RXREL (RH_GPRELB,    1, 16, 0, unsigned, FALSE),
  RXREL (RH_GPRELW,    1, 16, 0, unsigned, FALSE),
  RXREL (RH_GPRELL,    1, 16, 0, unsigned, FALSE),
  RXREL (RH_RELAX,     0,  0, 0, dont,     FALSE),

  RXREL (ABS32,        2, 32, 0, dont,     FALSE),
  RXREL (ABS24S,       2, 24, 0, signed,   FALSE),
  RXREL (ABS16,        1, 16, 0, dont,     FALSE),
  RXREL (ABS16U,       1, 16, 0, unsigned, FALSE),
  RXREL (ABS16S,       1, 16, 0, signed,   FALSE),
  RXREL (ABS8,         0,  8, 0, dont,     FALSE),
  RXREL (ABS8U,        0,  8, 0, unsigned, FALSE),
  RXREL (ABS8S,        0,  8, 0, signed,   FALSE),
  RXREL (ABS24S_PCREL, 2, 24, 0, signed,   TRUE),
  RXREL (ABS16S_PCREL, 1, 16, 0, signed,   TRUE),
  RXREL (ABS8S_PCREL,  0,  8, 0, signed,   TRUE),
  RXREL (ABS16UL,      1, 16, 0, unsigned, FALSE),
  RXREL (ABS16UW,      1, 16, 0, unsigned, FALSE),
  RXREL (ABS8UL,       0,  8, 0, unsigned, FALSE),
  RXREL (ABS8UW,       0,  8, 0, unsigned, FALSE),
  RXREL (ABS32_REV,    2, 32, 0, dont,     FALSE),
  RXREL (ABS16_REV,    1, 16, 0, dont,     FALSE),

#define STACK_REL_P(x) ((x) <= R_RX_ABS16_REV && (x) >= R_RX_ABS32)
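  /* STACK_REL_P picks out the R_RX_ABS* relocs above, whose values are
     taken from the internal relocation stack (see RX_STACK_POP in the
     relocate code below) rather than computed directly from a symbol.  */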
  /* These are internal.  */
  /* A 5-bit unsigned displacement to a B/W/L address, at bit position 8/12.  */
  /* ---- ---- 4--- 3210.  */
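  /* (Illustration: with that layout, a displacement of 0x15 -- binary
     10101 -- is stored as (0x15 << 3) & 0x80 for bit 4 plus 0x15 & 0x0f
     for bits 3..0, i.e. the target byte is OR'd with 0x85; see the
     R_RX_RH_ABS5p8* cases in rx_elf_relocate_section below.)  */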
#define R_RX_RH_ABS5p8B 0x78
  RXREL (RH_ABS5p8B, 0, 0, 0, dont, FALSE),
#define R_RX_RH_ABS5p8W 0x79
  RXREL (RH_ABS5p8W, 0, 0, 0, dont, FALSE),
#define R_RX_RH_ABS5p8L 0x7a
  RXREL (RH_ABS5p8L, 0, 0, 0, dont, FALSE),
  /* A 5-bit unsigned displacement to a B/W/L address, at bit position 5/12.  */
  /* ---- -432 1--- 0---.  */
#define R_RX_RH_ABS5p5B 0x7b
  RXREL (RH_ABS5p5B, 0, 0, 0, dont, FALSE),
#define R_RX_RH_ABS5p5W 0x7c
  RXREL (RH_ABS5p5W, 0, 0, 0, dont, FALSE),
#define R_RX_RH_ABS5p5L 0x7d
  RXREL (RH_ABS5p5L, 0, 0, 0, dont, FALSE),
  /* A 4-bit unsigned immediate at bit position 8.  */
#define R_RX_RH_UIMM4p8 0x7e
  RXREL (RH_UIMM4p8, 0, 0, 0, dont, FALSE),
  /* A 4-bit negative unsigned immediate at bit position 8.  */
#define R_RX_RH_UNEG4p8 0x7f
  RXREL (RH_UNEG4p8, 0, 0, 0, dont, FALSE),
  /* End of internal relocs.  */

  RXREL (SYM,       2, 32, 0, dont, FALSE),
  RXREL (OPneg,     2, 32, 0, dont, FALSE),
  RXREL (OPadd,     2, 32, 0, dont, FALSE),
  RXREL (OPsub,     2, 32, 0, dont, FALSE),
  RXREL (OPmul,     2, 32, 0, dont, FALSE),
  RXREL (OPdiv,     2, 32, 0, dont, FALSE),
  RXREL (OPshla,    2, 32, 0, dont, FALSE),
  RXREL (OPshra,    2, 32, 0, dont, FALSE),
  RXREL (OPsctsize, 2, 32, 0, dont, FALSE),
  RXREL (OPscttop,  2, 32, 0, dont, FALSE),
  RXREL (OPand,     2, 32, 0, dont, FALSE),
  RXREL (OPor,      2, 32, 0, dont, FALSE),
  RXREL (OPxor,     2, 32, 0, dont, FALSE),
  RXREL (OPnot,     2, 32, 0, dont, FALSE),
  RXREL (OPmod,     2, 32, 0, dont, FALSE),
  RXREL (OPromtop,  2, 32, 0, dont, FALSE),
  RXREL (OPramtop,  2, 32, 0, dont, FALSE)
};
/* Map BFD reloc types to RX ELF reloc types.  */

struct rx_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;
  unsigned int             rx_reloc_val;
};

static const struct rx_reloc_map rx_reloc_map [] =
{
  { BFD_RELOC_NONE,             R_RX_NONE },
  { BFD_RELOC_8,                R_RX_DIR8S },
  { BFD_RELOC_16,               R_RX_DIR16S },
  { BFD_RELOC_24,               R_RX_DIR24S },
  { BFD_RELOC_32,               R_RX_DIR32 },
  { BFD_RELOC_RX_16_OP,         R_RX_DIR16 },
  { BFD_RELOC_RX_DIR3U_PCREL,   R_RX_DIR3U_PCREL },
  { BFD_RELOC_8_PCREL,          R_RX_DIR8S_PCREL },
  { BFD_RELOC_16_PCREL,         R_RX_DIR16S_PCREL },
  { BFD_RELOC_24_PCREL,         R_RX_DIR24S_PCREL },
  { BFD_RELOC_RX_8U,            R_RX_DIR8U },
  { BFD_RELOC_RX_16U,           R_RX_DIR16U },
  { BFD_RELOC_RX_24U,           R_RX_RH_24_UNS },
  { BFD_RELOC_RX_NEG8,          R_RX_RH_8_NEG },
  { BFD_RELOC_RX_NEG16,         R_RX_RH_16_NEG },
  { BFD_RELOC_RX_NEG24,         R_RX_RH_24_NEG },
  { BFD_RELOC_RX_NEG32,         R_RX_RH_32_NEG },
  { BFD_RELOC_RX_DIFF,          R_RX_RH_DIFF },
  { BFD_RELOC_RX_GPRELB,        R_RX_RH_GPRELB },
  { BFD_RELOC_RX_GPRELW,        R_RX_RH_GPRELW },
  { BFD_RELOC_RX_GPRELL,        R_RX_RH_GPRELL },
  { BFD_RELOC_RX_RELAX,         R_RX_RH_RELAX },
  { BFD_RELOC_RX_SYM,           R_RX_SYM },
  { BFD_RELOC_RX_OP_SUBTRACT,   R_RX_OPsub },
  { BFD_RELOC_RX_ABS8,          R_RX_ABS8 },
  { BFD_RELOC_RX_ABS16,         R_RX_ABS16 },
  { BFD_RELOC_RX_ABS32,         R_RX_ABS32 },
  { BFD_RELOC_RX_ABS16UL,       R_RX_ABS16UL },
  { BFD_RELOC_RX_ABS16UW,       R_RX_ABS16UW },
  { BFD_RELOC_RX_ABS16U,        R_RX_ABS16U }
};
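/* The map above is consumed by rx_reloc_type_lookup below: for example,
   a BFD_RELOC_32 request resolves to rx_elf_howto_table + R_RX_DIR32,
   and rx_reloc_name_lookup resolves the string "R_RX_DIR32" to the same
   howto entry by name.  */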
#define BIGE(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)

static reloc_howto_type *
rx_reloc_type_lookup (bfd * abfd ATTRIBUTE_UNUSED,
                      bfd_reloc_code_real_type code)
{
  unsigned int i;

  if (code == BFD_RELOC_RX_32_OP)
    return rx_elf_howto_table + R_RX_DIR32;

  for (i = ARRAY_SIZE (rx_reloc_map); --i;)
    if (rx_reloc_map [i].bfd_reloc_val == code)
      return rx_elf_howto_table + rx_reloc_map [i].rx_reloc_val;

  return NULL;
}

static reloc_howto_type *
rx_reloc_name_lookup (bfd * abfd ATTRIBUTE_UNUSED, const char * r_name)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (rx_elf_howto_table); i++)
    if (rx_elf_howto_table[i].name != NULL
        && strcasecmp (rx_elf_howto_table[i].name, r_name) == 0)
      return rx_elf_howto_table + i;

  return NULL;
}

/* Set the howto pointer for an RX ELF reloc.  */

static void
rx_info_to_howto_rela (bfd * abfd ATTRIBUTE_UNUSED,
                       arelent * cache_ptr,
                       Elf_Internal_Rela * dst)
{
  unsigned int r_type;

  r_type = ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < (unsigned int) R_RX_max);
  cache_ptr->howto = rx_elf_howto_table + r_type;
}
static bfd_vma
get_symbol_value (const char *            name,
                  bfd_reloc_status_type * status,
                  struct bfd_link_info *  info,
                  bfd *                   input_bfd,
                  asection *              input_section,
                  int                     offset)
{
  bfd_vma value = 0;
  struct bfd_link_hash_entry * h;

  h = bfd_link_hash_lookup (info->hash, name, FALSE, FALSE, TRUE);

  if (h == NULL
      || (h->type != bfd_link_hash_defined
          && h->type != bfd_link_hash_defweak))
    * status = info->callbacks->undefined_symbol
      (info, name, input_bfd, input_section, offset, TRUE);
  else
    value = (h->u.def.value
             + h->u.def.section->output_section->vma
             + h->u.def.section->output_offset);

  return value;
}

static bfd_vma
get_gp (bfd_reloc_status_type * status,
        struct bfd_link_info *  info,
        bfd *                   abfd,
        asection *              sec,
        int                     offset)
{
  static bfd_boolean cached = FALSE;
  static bfd_vma     cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("__gp", status, info, abfd, sec, offset);
      cached = TRUE;
    }
  return cached_value;
}

static bfd_vma
get_romstart (bfd_reloc_status_type * status,
              struct bfd_link_info *  info,
              bfd *                   abfd,
              asection *              sec,
              int                     offset)
{
  static bfd_boolean cached = FALSE;
  static bfd_vma     cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("_start", status, info, abfd, sec, offset);
      cached = TRUE;
    }
  return cached_value;
}

static bfd_vma
get_ramstart (bfd_reloc_status_type * status,
              struct bfd_link_info *  info,
              bfd *                   abfd,
              asection *              sec,
              int                     offset)
{
  static bfd_boolean cached = FALSE;
  static bfd_vma     cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("__datastart", status, info, abfd, sec, offset);
      cached = TRUE;
    }
  return cached_value;
}
#define NUM_STACK_ENTRIES 16
static int32_t rx_stack [ NUM_STACK_ENTRIES ];
static unsigned int rx_stack_top;

#define RX_STACK_PUSH(val)                      \
  do                                            \
    {                                           \
      if (rx_stack_top < NUM_STACK_ENTRIES)     \
        rx_stack [rx_stack_top ++] = (val);     \
      else                                      \
        r = bfd_reloc_dangerous;                \
    }                                           \
  while (0)

#define RX_STACK_POP(dest)                      \
  do                                            \
    {                                           \
      if (rx_stack_top > 0)                     \
        (dest) = rx_stack [-- rx_stack_top];    \
      else                                      \
        (dest) = 0, r = bfd_reloc_dangerous;    \
    }                                           \
  while (0)
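/* As an example of how this stack is used below: a compound operand
   (for instance the difference of two symbols) arrives as several
   relocs at the same offset -- R_RX_SYM entries push values, the
   R_RX_OP* relocs pop and combine them, and a final R_RX_ABS* reloc
   pops the result and stores it into the instruction.  The PUSH/POP
   macros flag stack over- or underflow as bfd_reloc_dangerous.  */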
/* Relocate an RX ELF section.
   There is some attempt to make this function usable for many architectures,
   both USE_REL and USE_RELA ['twould be nice if such a critter existed],
   if only to serve as a learning tool.

   The RELOCATE_SECTION function is called by the new ELF backend linker
   to handle the relocations for a section.

   The relocs are always passed as Rela structures; if the section
   actually uses Rel structures, the r_addend field will always be
   zero.

   This function is responsible for adjusting the section contents as
   necessary, and (if using Rela relocs and generating a relocatable
   output file) adjusting the reloc addend as necessary.

   This function does not have to worry about setting the reloc
   address or the reloc symbol index.

   LOCAL_SYMS is a pointer to the swapped in local symbols.

   LOCAL_SECTIONS is an array giving the section in the input file
   corresponding to the st_shndx field of each local symbol.

   The global hash table entry for the global symbols can be found
   via elf_sym_hashes (input_bfd).

   When generating relocatable output, this function must handle
   STB_LOCAL/STT_SECTION symbols specially.  The output symbol is
   going to be the section symbol corresponding to the output
   section, which means that the addend must be adjusted
   accordingly.  */
static bfd_boolean
rx_elf_relocate_section
    (bfd *                   output_bfd,
     struct bfd_link_info *  info,
     bfd *                   input_bfd,
     asection *              input_section,
     bfd_byte *              contents,
     Elf_Internal_Rela *     relocs,
     Elf_Internal_Sym *      local_syms,
     asection **             local_sections)
{
  Elf_Internal_Shdr *           symtab_hdr;
  struct elf_link_hash_entry ** sym_hashes;
  Elf_Internal_Rela *           rel;
  Elf_Internal_Rela *           relend;

  symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = elf_sym_hashes (input_bfd);
  relend     = relocs + input_section->reloc_count;
  for (rel = relocs; rel < relend; rel ++)
    {
      reloc_howto_type *           howto;
      unsigned long                r_symndx;
      Elf_Internal_Sym *           sym;
      asection *                   sec;
      struct elf_link_hash_entry * h;
      bfd_vma                      relocation;
      bfd_reloc_status_type        r;
      const char *                 name = NULL;
      bfd_boolean                  unresolved_reloc = TRUE;
      int                          r_type;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_symndx = ELF32_R_SYM (rel->r_info);

      howto  = rx_elf_howto_table + ELF32_R_TYPE (rel->r_info);
      h      = NULL;
      sym    = NULL;
      sec    = NULL;
      relocation = 0;

      if (r_symndx < symtab_hdr->sh_info)
        {
          sym = local_syms + r_symndx;
          sec = local_sections [r_symndx];
          relocation = _bfd_elf_rela_local_sym (output_bfd, sym, & sec, rel);

          name = bfd_elf_string_from_elf_section
            (input_bfd, symtab_hdr->sh_link, sym->st_name);
          name = (sym->st_name == 0) ? bfd_section_name (input_bfd, sec) : name;
        }
      else
        {
          bfd_boolean warned;

          RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
                                   r_symndx, symtab_hdr, sym_hashes, h,
                                   sec, relocation, unresolved_reloc,
                                   warned);

          name = h->root.root.string;
        }

      if (sec != NULL && elf_discarded_section (sec))
        RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
                                         rel, relend, howto, contents);

      if (info->relocatable)
        {
          /* This is a relocatable link.  We don't have to change
             anything, unless the reloc is against a section symbol,
             in which case we have to adjust according to where the
             section symbol winds up in the output section.  */
          if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
            rel->r_addend += sec->output_offset;
          continue;
        }

      if (h != NULL && h->root.type == bfd_link_hash_undefweak)
        /* If the symbol is undefined and weak
           then the relocation resolves to zero.  */
        relocation = 0;

      if (howto->pc_relative)
        {
          relocation -= (input_section->output_section->vma
                         + input_section->output_offset
                         + rel->r_offset);
          if (r_type != R_RX_RH_3_PCREL
              && r_type != R_RX_DIR3U_PCREL)
            relocation ++;
        }

      relocation += rel->r_addend;

      r = bfd_reloc_ok;
#define RANGE(a,b) if (a > (long) relocation || (long) relocation > b) r = bfd_reloc_overflow
#define ALIGN(m)   if (relocation & m) r = bfd_reloc_other;
#define OP(i)      (contents[rel->r_offset + (i)])
#define WARN_REDHAT(type) \
      _bfd_error_handler (_("%B:%A: Warning: deprecated Red Hat reloc " type " detected against: %s."), \
      input_bfd, input_section, name)
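      /* For instance, a 16-bit signed operand is handled below as
         RANGE (-32768, 32767) followed by byte-by-byte stores through
         OP (0) and OP (1), i.e. a bounds check on the computed value and
         then a store into the section contents at rel->r_offset.  */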
      /* Opcode relocs are always big endian.  Data relocs are bi-endian.  */
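      /* (So, for the data-capable relocs below, the store order is chosen
         at run time: BIGE (output_bfd) && !(input_section->flags & SEC_CODE)
         selects big-endian byte order, otherwise little-endian; the
         opcode-only relocs are compiled for one order via
         RX_OPCODE_BIG_ENDIAN.)  */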
      switch (r_type)
        {
        case R_RX_RH_3_PCREL:
          WARN_REDHAT ("RX_RH_3_PCREL");
          OP (0) |= relocation & 0x07;

        case R_RX_RH_8_NEG:
          WARN_REDHAT ("RX_RH_8_NEG");
          relocation = - relocation;
        case R_RX_DIR8S_PCREL:

        case R_RX_RH_16_NEG:
          WARN_REDHAT ("RX_RH_16_NEG");
          relocation = - relocation;
        case R_RX_DIR16S_PCREL:
          RANGE (-32768, 32767);
#if RX_OPCODE_BIG_ENDIAN

          OP (1) = relocation >> 8;

        case R_RX_RH_16_OP:
          WARN_REDHAT ("RX_RH_16_OP");
          RANGE (-32768, 32767);
#if RX_OPCODE_BIG_ENDIAN
          OP (0) = relocation >> 8;
#else
          OP (1) = relocation >> 8;
#endif

          RANGE (-32768, 65535);
          if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
            {
              OP (0) = relocation >> 8;
            }
          else
            {
              OP (1) = relocation >> 8;
            }

#if RX_OPCODE_BIG_ENDIAN
          OP (0) = relocation >> 8;
#else
          OP (1) = relocation >> 8;
#endif

          RANGE (-32768, 65536);
#if RX_OPCODE_BIG_ENDIAN
          OP (0) = relocation >> 8;
#else
          OP (1) = relocation >> 8;
#endif

          RANGE (-32768, 65536);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation >> 8;
#else
          OP (0) = relocation >> 8;
#endif

        case R_RX_DIR3U_PCREL:

          OP (0) |= relocation & 0x07;

        case R_RX_RH_24_NEG:
          WARN_REDHAT ("RX_RH_24_NEG");
          relocation = - relocation;
        case R_RX_DIR24S_PCREL:
          RANGE (-0x800000, 0x7fffff);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation >> 8;
          OP (0) = relocation >> 16;
#else
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
#endif

        case R_RX_RH_24_OP:
          WARN_REDHAT ("RX_RH_24_OP");
          RANGE (-0x800000, 0x7fffff);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation >> 8;
          OP (0) = relocation >> 16;
#else
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
#endif

          RANGE (-0x800000, 0x7fffff);
          if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
            {
              OP (1) = relocation >> 8;
              OP (0) = relocation >> 16;
            }
          else
            {
              OP (1) = relocation >> 8;
              OP (2) = relocation >> 16;
            }

        case R_RX_RH_24_UNS:
          WARN_REDHAT ("RX_RH_24_UNS");

#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation >> 8;
          OP (0) = relocation >> 16;
#else
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
#endif

        case R_RX_RH_32_NEG:
          WARN_REDHAT ("RX_RH_32_NEG");
          relocation = - relocation;
#if RX_OPCODE_BIG_ENDIAN
          OP (2) = relocation >> 8;
          OP (1) = relocation >> 16;
          OP (0) = relocation >> 24;
#else
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
          OP (3) = relocation >> 24;
#endif

        case R_RX_RH_32_OP:
          WARN_REDHAT ("RX_RH_32_OP");
#if RX_OPCODE_BIG_ENDIAN
          OP (2) = relocation >> 8;
          OP (1) = relocation >> 16;
          OP (0) = relocation >> 24;
#else
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
          OP (3) = relocation >> 24;
#endif

          if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
            {
              OP (2) = relocation >> 8;
              OP (1) = relocation >> 16;
              OP (0) = relocation >> 24;
            }
          else
            {
              OP (1) = relocation >> 8;
              OP (2) = relocation >> 16;
              OP (3) = relocation >> 24;
            }

          if (BIGE (output_bfd))
            {
              OP (1) = relocation >> 8;
              OP (2) = relocation >> 16;
              OP (3) = relocation >> 24;
            }
          else
            {
              OP (2) = relocation >> 8;
              OP (1) = relocation >> 16;
              OP (0) = relocation >> 24;
            }

        case R_RX_RH_DIFF:
          WARN_REDHAT ("RX_RH_DIFF");
          val = bfd_get_32 (output_bfd, & OP (0));

          bfd_put_32 (output_bfd, val, & OP (0));

        case R_RX_RH_GPRELB:
          WARN_REDHAT ("RX_RH_GPRELB");
          relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);

#if RX_OPCODE_BIG_ENDIAN
          OP (0) = relocation >> 8;
#else
          OP (1) = relocation >> 8;
#endif

        case R_RX_RH_GPRELW:
          WARN_REDHAT ("RX_RH_GPRELW");
          relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);

#if RX_OPCODE_BIG_ENDIAN
          OP (0) = relocation >> 8;
#else
          OP (1) = relocation >> 8;
#endif

        case R_RX_RH_GPRELL:
          WARN_REDHAT ("RX_RH_GPRELL");
          relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);

#if RX_OPCODE_BIG_ENDIAN
          OP (0) = relocation >> 8;
#else
          OP (1) = relocation >> 8;
#endif

          /* Internal relocations just for relaxation:  */
        case R_RX_RH_ABS5p5B:
          RX_STACK_POP (relocation);

          OP (0) |= relocation >> 2;

          OP (1) |= (relocation << 6) & 0x80;
          OP (1) |= (relocation << 3) & 0x08;

        case R_RX_RH_ABS5p5W:
          RX_STACK_POP (relocation);

          OP (0) |= relocation >> 2;

          OP (1) |= (relocation << 6) & 0x80;
          OP (1) |= (relocation << 3) & 0x08;

        case R_RX_RH_ABS5p5L:
          RX_STACK_POP (relocation);

          OP (0) |= relocation >> 2;

          OP (1) |= (relocation << 6) & 0x80;
          OP (1) |= (relocation << 3) & 0x08;

        case R_RX_RH_ABS5p8B:
          RX_STACK_POP (relocation);

          OP (0) |= (relocation << 3) & 0x80;
          OP (0) |= relocation & 0x0f;

        case R_RX_RH_ABS5p8W:
          RX_STACK_POP (relocation);

          OP (0) |= (relocation << 3) & 0x80;
          OP (0) |= relocation & 0x0f;

        case R_RX_RH_ABS5p8L:
          RX_STACK_POP (relocation);

          OP (0) |= (relocation << 3) & 0x80;
          OP (0) |= relocation & 0x0f;

        case R_RX_RH_UIMM4p8:

          OP (0) |= relocation << 4;

        case R_RX_RH_UNEG4p8:

          OP (0) |= (-relocation) << 4;

          /* Complex reloc handling:  */

          RX_STACK_POP (relocation);
#if RX_OPCODE_BIG_ENDIAN
          OP (2) = relocation >> 8;
          OP (1) = relocation >> 16;
          OP (0) = relocation >> 24;
#else
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
          OP (3) = relocation >> 24;
#endif

          RX_STACK_POP (relocation);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
          OP (3) = relocation >> 24;
#else
          OP (2) = relocation >> 8;
          OP (1) = relocation >> 16;
          OP (0) = relocation >> 24;
#endif

        case R_RX_ABS24S_PCREL:

          RX_STACK_POP (relocation);
          RANGE (-0x800000, 0x7fffff);
          if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
            {
              OP (1) = relocation >> 8;
              OP (0) = relocation >> 16;
            }
          else
            {
              OP (1) = relocation >> 8;
              OP (2) = relocation >> 16;
            }

          RX_STACK_POP (relocation);
          RANGE (-32768, 65535);
#if RX_OPCODE_BIG_ENDIAN
          OP (0) = relocation >> 8;
#else
          OP (1) = relocation >> 8;
#endif

          RX_STACK_POP (relocation);
          RANGE (-32768, 65535);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation >> 8;
#else
          OP (0) = relocation >> 8;
#endif

        case R_RX_ABS16S_PCREL:

          RX_STACK_POP (relocation);
          RANGE (-32768, 32767);
          if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
            {
              OP (0) = relocation >> 8;
            }
          else
            {
              OP (1) = relocation >> 8;
            }

          RX_STACK_POP (relocation);

#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif

          RX_STACK_POP (relocation);

#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif

          RX_STACK_POP (relocation);

#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif

          RX_STACK_POP (relocation);

          OP (0) = relocation;

          RX_STACK_POP (relocation);

          OP (0) = relocation;

          RX_STACK_POP (relocation);

          OP (0) = relocation;

          RX_STACK_POP (relocation);

          OP (0) = relocation;

        case R_RX_ABS8S_PCREL:

          RX_STACK_POP (relocation);

          OP (0) = relocation;

          if (r_symndx < symtab_hdr->sh_info)
            RX_STACK_PUSH (sec->output_section->vma
                           + sec->output_offset

              && (h->root.type == bfd_link_hash_defined
                  || h->root.type == bfd_link_hash_defweak))
            RX_STACK_PUSH (h->root.u.def.value
                           + sec->output_section->vma
                           + sec->output_offset);

            _bfd_error_handler (_("Warning: RX_SYM reloc with an unknown symbol"));

          RX_STACK_PUSH (tmp);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp2);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

        case R_RX_OPsctsize:
          RX_STACK_PUSH (input_section->size);

          RX_STACK_PUSH (input_section->output_section->vma);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_PUSH (tmp);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_PUSH (get_romstart (&r, info, input_bfd, input_section, rel->r_offset));

          RX_STACK_PUSH (get_ramstart (&r, info, input_bfd, input_section, rel->r_offset));

          r = bfd_reloc_notsupported;
        }

      if (r != bfd_reloc_ok)
        {
          const char * msg = NULL;

          case bfd_reloc_overflow:
            /* Catch the case of a missing function declaration
               and emit a more helpful error message.  */
            if (r_type == R_RX_DIR24S_PCREL)
              msg = _("%B(%A): error: call to undefined function '%s'");

              r = info->callbacks->reloc_overflow
                (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
                 input_bfd, input_section, rel->r_offset);

          case bfd_reloc_undefined:
            r = info->callbacks->undefined_symbol
              (info, name, input_bfd, input_section, rel->r_offset,

          case bfd_reloc_other:
            msg = _("%B(%A): warning: unaligned access to symbol '%s' in the small data area");

          case bfd_reloc_outofrange:
            msg = _("%B(%A): internal error: out of range error");

          case bfd_reloc_notsupported:
            msg = _("%B(%A): internal error: unsupported relocation error");

          case bfd_reloc_dangerous:
            msg = _("%B(%A): internal error: dangerous relocation");

            msg = _("%B(%A): internal error: unknown error");

            _bfd_error_handler (msg, input_bfd, input_section, name);
        }
    }

  return TRUE;
}
/* Relaxation Support.  */

/* Progression of relocations from largest operand size to smallest.  */

static int
next_smaller_reloc (int r)
{
  switch (r)
    {
    case R_RX_DIR32:          return R_RX_DIR24S;
    case R_RX_DIR24S:         return R_RX_DIR16S;
    case R_RX_DIR16S:         return R_RX_DIR8S;
    case R_RX_DIR8S:          return R_RX_NONE;

    case R_RX_DIR16:          return R_RX_DIR8;
    case R_RX_DIR8:           return R_RX_NONE;

    case R_RX_DIR16U:         return R_RX_DIR8U;
    case R_RX_DIR8U:          return R_RX_NONE;

    case R_RX_DIR24S_PCREL:   return R_RX_DIR16S_PCREL;
    case R_RX_DIR16S_PCREL:   return R_RX_DIR8S_PCREL;
    case R_RX_DIR8S_PCREL:    return R_RX_DIR3U_PCREL;

    case R_RX_DIR16UL:        return R_RX_DIR8UL;
    case R_RX_DIR8UL:         return R_RX_NONE;
    case R_RX_DIR16UW:        return R_RX_DIR8UW;
    case R_RX_DIR8UW:         return R_RX_NONE;

    case R_RX_RH_32_OP:       return R_RX_RH_24_OP;
    case R_RX_RH_24_OP:       return R_RX_RH_16_OP;
    case R_RX_RH_16_OP:       return R_RX_DIR8;

    case R_RX_ABS32:          return R_RX_ABS24S;
    case R_RX_ABS24S:         return R_RX_ABS16S;
    case R_RX_ABS16:          return R_RX_ABS8;
    case R_RX_ABS16U:         return R_RX_ABS8U;
    case R_RX_ABS16S:         return R_RX_ABS8S;
    case R_RX_ABS8:           return R_RX_NONE;
    case R_RX_ABS8U:          return R_RX_NONE;
    case R_RX_ABS8S:          return R_RX_NONE;
    case R_RX_ABS24S_PCREL:   return R_RX_ABS16S_PCREL;
    case R_RX_ABS16S_PCREL:   return R_RX_ABS8S_PCREL;
    case R_RX_ABS8S_PCREL:    return R_RX_NONE;
    case R_RX_ABS16UL:        return R_RX_ABS8UL;
    case R_RX_ABS16UW:        return R_RX_ABS8UW;
    case R_RX_ABS8UL:         return R_RX_NONE;
    case R_RX_ABS8UW:         return R_RX_NONE;
    }
  return r;
}
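/* So, for example, a 32-bit R_RX_DIR32 operand whose value is found to
   fit in 24 bits is narrowed to R_RX_DIR24S, and on later relax passes
   may shrink further to R_RX_DIR16S and R_RX_DIR8S.  */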
/* Delete some bytes from a section while relaxing.  */

static bfd_boolean
elf32_rx_relax_delete_bytes (bfd *abfd, asection *sec, bfd_vma addr, int count,
                             Elf_Internal_Rela *alignment_rel, int force_snip)
{
  Elf_Internal_Shdr * symtab_hdr;
  unsigned int        sec_shndx;
  bfd_byte *          contents;
  Elf_Internal_Rela * irel;
  Elf_Internal_Rela * irelend;
  Elf_Internal_Sym *  isym;
  Elf_Internal_Sym *  isymend;
  bfd_vma             toaddr;
  unsigned int        symcount;
  struct elf_link_hash_entry ** sym_hashes;
  struct elf_link_hash_entry ** end_hashes;

  sec_shndx = _bfd_elf_section_from_bfd_section (abfd, sec);

  contents = elf_section_data (sec)->this_hdr.contents;

  /* The deletion must stop at the next alignment boundary, if
     ALIGNMENT_REL is non-NULL.  */
  toaddr = sec->size;
  if (alignment_rel)
    toaddr = alignment_rel->r_offset;

  irel = elf_section_data (sec)->relocs;
  irelend = irel + sec->reloc_count;

  /* Actually delete the bytes.  */
  memmove (contents + addr, contents + addr + count,
           (size_t) (toaddr - addr - count));

  /* If we don't have an alignment marker to worry about, we can just
     shrink the section.  Otherwise, we have to fill in the newly
     created gap with NOP insns (0x03).  */
  if (force_snip)
    sec->size -= count;
  else
    memset (contents + toaddr - count, 0x03, count);

  /* Adjust all the relocs.  */
  for (irel = elf_section_data (sec)->relocs; irel < irelend; irel ++)
    {
      /* Get the new reloc address.  */
      if (irel->r_offset > addr
          && (irel->r_offset < toaddr
              || (force_snip && irel->r_offset == toaddr)))
        irel->r_offset -= count;

      /* If we see an ALIGN marker at the end of the gap, we move it
         to the beginning of the gap, since marking these gaps is what
         they are for.  */
      if (irel->r_offset == toaddr
          && ELF32_R_TYPE (irel->r_info) == R_RX_RH_RELAX
          && irel->r_addend & RX_RELAXA_ALIGN)
        irel->r_offset -= count;
    }

  /* Adjust the local symbols defined in this section.  */
  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
  isym = (Elf_Internal_Sym *) symtab_hdr->contents;
  isymend = isym + symtab_hdr->sh_info;

  for (; isym < isymend; isym ++)
    {
      /* If the symbol is in the range of memory we just moved, we
         have to adjust its value.  */
      if (isym->st_shndx == sec_shndx
          && isym->st_value > addr
          && isym->st_value < toaddr)
        isym->st_value -= count;

      /* If the symbol *spans* the bytes we just deleted (i.e. its
         *end* is in the moved bytes but its *start* isn't), then we
         must adjust its size.  */
      if (isym->st_shndx == sec_shndx
          && isym->st_value < addr
          && isym->st_value + isym->st_size > addr
          && isym->st_value + isym->st_size < toaddr)
        isym->st_size -= count;
    }

  /* Now adjust the global symbols defined in this section.  */
  symcount = (symtab_hdr->sh_size / sizeof (Elf32_External_Sym)
              - symtab_hdr->sh_info);
  sym_hashes = elf_sym_hashes (abfd);
  end_hashes = sym_hashes + symcount;

  for (; sym_hashes < end_hashes; sym_hashes ++)
    {
      struct elf_link_hash_entry *sym_hash = *sym_hashes;

      if ((sym_hash->root.type == bfd_link_hash_defined
           || sym_hash->root.type == bfd_link_hash_defweak)
          && sym_hash->root.u.def.section == sec)
        {
          /* As above, adjust the value if needed.  */
          if (sym_hash->root.u.def.value > addr
              && sym_hash->root.u.def.value < toaddr)
            sym_hash->root.u.def.value -= count;

          /* As above, adjust the size if needed.  */
          if (sym_hash->root.u.def.value < addr
              && sym_hash->root.u.def.value + sym_hash->size > addr
              && sym_hash->root.u.def.value + sym_hash->size < toaddr)
            sym_hash->size -= count;
        }
    }

  return TRUE;
}
/* Used to sort relocs by address.  If relocs have the same address,
   we maintain their relative order, except that R_RX_RH_RELAX
   alignment relocs must be the first reloc for any given address.  */

static void
reloc_bubblesort (Elf_Internal_Rela * r, int count)
{
  int i;
  bfd_boolean swappit;

  /* This is almost a classic bubblesort.  It's the slowest sort, but
     we're taking advantage of the fact that the relocations are
     mostly in order already (the assembler emits them that way) and
     we need relocs with the same address to remain in the same
     relative order.  */

  for (i = 0; i < count - 1; i ++)
    {
      if (r[i].r_offset > r[i + 1].r_offset)
        swappit = TRUE;
      else if (r[i].r_offset < r[i + 1].r_offset)
        swappit = FALSE;
      else if (ELF32_R_TYPE (r[i + 1].r_info) == R_RX_RH_RELAX
               && (r[i + 1].r_addend & RX_RELAXA_ALIGN))
        swappit = TRUE;
      else if (ELF32_R_TYPE (r[i + 1].r_info) == R_RX_RH_RELAX
               && (r[i + 1].r_addend & RX_RELAXA_ELIGN)
               && !(ELF32_R_TYPE (r[i].r_info) == R_RX_RH_RELAX
                    && (r[i].r_addend & RX_RELAXA_ALIGN)))
        swappit = TRUE;
      else
        swappit = FALSE;

      if (swappit)
        {
          Elf_Internal_Rela tmp;

          tmp = r[i];
          r[i] = r[i + 1];
          r[i + 1] = tmp;
          /* If we do move a reloc back, re-scan to see if it
             needs to be moved even further back.  This avoids
             most of the O(n^2) behavior for our cases.  */
          if (i > 0)
            i -= 2;
        }
    }
}
#define OFFSET_FOR_RELOC(rel, lrel, scale) \
  rx_offset_for_reloc (abfd, rel + 1, symtab_hdr, shndx_buf, intsyms, \
                       lrel, abfd, sec, link_info, scale)
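/* I.e. OFFSET_FOR_RELOC (srel, &srel, &scale) computes the target value
   for the operand reloc(s) that follow srel, leaves a pointer to the
   last reloc actually consumed in srel, and reports the addressing
   scale (1, 2 or 4) so displacement operands can be compared against
   their scaled limits.  */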
static bfd_vma
rx_offset_for_reloc (bfd *                    abfd,
                     Elf_Internal_Rela *      rel,
                     Elf_Internal_Shdr *      symtab_hdr,
                     Elf_External_Sym_Shndx * shndx_buf ATTRIBUTE_UNUSED,
                     Elf_Internal_Sym *       intsyms,
                     Elf_Internal_Rela **     lrel,
                     bfd *                    input_bfd,
                     asection *               input_section,
                     struct bfd_link_info *   info,
                     int *                    scale)
{
  bfd_vma symval;
  int32_t tmp1, tmp2;
  bfd_reloc_status_type r;

  /* REL is the first of 1..N relocations.  We compute the symbol
     value for each relocation, then combine them if needed.  LREL
     gets a pointer to the last relocation used.  */

      /* Get the value of the symbol referred to by the reloc.  */
      if (ELF32_R_SYM (rel->r_info) < symtab_hdr->sh_info)
        {
          /* A local symbol.  */
          Elf_Internal_Sym * isym;
          asection * ssec;

          isym = intsyms + ELF32_R_SYM (rel->r_info);

          if (isym->st_shndx == SHN_UNDEF)
            ssec = bfd_und_section_ptr;
          else if (isym->st_shndx == SHN_ABS)
            ssec = bfd_abs_section_ptr;
          else if (isym->st_shndx == SHN_COMMON)
            ssec = bfd_com_section_ptr;
          else
            ssec = bfd_section_from_elf_index (abfd,
                                               isym->st_shndx);

          /* Initial symbol value.  */
          symval = isym->st_value;

          /* GAS may have made this symbol relative to a section, in
             which case, we have to add the addend to find the
             symbol value.  */
          if (ELF_ST_TYPE (isym->st_info) == STT_SECTION)
            symval += rel->r_addend;

          if ((ssec->flags & SEC_MERGE)
              && ssec->sec_info_type == ELF_INFO_TYPE_MERGE)
            symval = _bfd_merged_section_offset (abfd, & ssec,
                                                 elf_section_data (ssec)->sec_info,
                                                 symval);

          /* Now make the offset relative to where the linker is putting it.  */
          symval +=
            ssec->output_section->vma + ssec->output_offset;

          symval += rel->r_addend;
        }
      else
        {
          unsigned long indx;
          struct elf_link_hash_entry * h;

          /* An external symbol.  */
          indx = ELF32_R_SYM (rel->r_info) - symtab_hdr->sh_info;
          h = elf_sym_hashes (abfd)[indx];
          BFD_ASSERT (h != NULL);

          if (h->root.type != bfd_link_hash_defined
              && h->root.type != bfd_link_hash_defweak)
            {
              /* This appears to be a reference to an undefined
                 symbol.  Just ignore it--it will be caught by the
                 regular reloc processing.  */
            }

          symval = (h->root.u.def.value
                    + h->root.u.def.section->output_section->vma
                    + h->root.u.def.section->output_offset);

          symval += rel->r_addend;
        }

      switch (ELF32_R_TYPE (rel->r_info))

          RX_STACK_PUSH (symval);

          RX_STACK_POP (tmp1);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp2);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

        case R_RX_OPsctsize:
          RX_STACK_PUSH (input_section->size);

          RX_STACK_PUSH (input_section->output_section->vma);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);

          RX_STACK_PUSH (tmp1);

          RX_STACK_POP (tmp1);
          RX_STACK_POP (tmp2);

          RX_STACK_PUSH (tmp1);

          RX_STACK_PUSH (get_romstart (&r, info, input_bfd, input_section, rel->r_offset));

          RX_STACK_PUSH (get_ramstart (&r, info, input_bfd, input_section, rel->r_offset));

      RX_STACK_POP (symval);

      RX_STACK_POP (symval);

      RX_STACK_POP (symval);

  return symval;
}
static void
move_reloc (Elf_Internal_Rela * irel, Elf_Internal_Rela * srel, int delta)
{
  bfd_vma old_offset = srel->r_offset;

  irel ++;
  while (irel <= srel)
    {
      if (irel->r_offset == old_offset)
        irel->r_offset += delta;
      irel ++;
    }
}
/* Relax one section.  */

static bfd_boolean
elf32_rx_relax_section (bfd *                  abfd,
                        asection *             sec,
                        struct bfd_link_info * link_info,
                        bfd_boolean *          again,
                        bfd_boolean            allow_pcrel3)
{
  Elf_Internal_Shdr *      symtab_hdr;
  Elf_Internal_Shdr *      shndx_hdr;
  Elf_Internal_Rela *      internal_relocs;
  Elf_Internal_Rela *      free_relocs = NULL;
  Elf_Internal_Rela *      irel;
  Elf_Internal_Rela *      srel;
  Elf_Internal_Rela *      irelend;
  Elf_Internal_Rela *      next_alignment;
  Elf_Internal_Rela *      prev_alignment;
  bfd_byte *               contents = NULL;
  bfd_byte *               free_contents = NULL;
  Elf_Internal_Sym *       intsyms = NULL;
  Elf_Internal_Sym *       free_intsyms = NULL;
  Elf_External_Sym_Shndx * shndx_buf = NULL;
  bfd_vma                  pc;
  bfd_vma                  sec_start;
  bfd_vma                  symval = 0;
  int                      pcrel = 0;
  int                      code = 0;
  int                      max_pcrel3;
  int section_alignment_glue;
  /* how much to scale the relocation by - 1, 2, or 4.  */
  int scale;

  /* Assume nothing changes.  */
  *again = FALSE;

  /* We don't have to do anything for a relocatable link, if
     this section does not have relocs, or if this is not a
     code section.  */
  if (link_info->relocatable
      || (sec->flags & SEC_RELOC) == 0
      || sec->reloc_count == 0
      || (sec->flags & SEC_CODE) == 0)
    return TRUE;

  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
  shndx_hdr  = &elf_tdata (abfd)->symtab_shndx_hdr;

  sec_start = sec->output_section->vma + sec->output_offset;

  /* Get the section contents.  */
  if (elf_section_data (sec)->this_hdr.contents != NULL)
    contents = elf_section_data (sec)->this_hdr.contents;
  /* Go get them off disk.  */
  else
    {
      if (! bfd_malloc_and_get_section (abfd, sec, &contents))
        goto error_return;
      elf_section_data (sec)->this_hdr.contents = contents;
    }

  /* Read this BFD's symbols.  */
  /* Get cached copy if it exists.  */
  if (symtab_hdr->contents != NULL)
    intsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
  else
    {
      intsyms = bfd_elf_get_elf_syms (abfd, symtab_hdr, symtab_hdr->sh_info, 0, NULL, NULL, NULL);
      symtab_hdr->contents = (bfd_byte *) intsyms;
    }

  if (shndx_hdr->sh_size != 0)
    {
      bfd_size_type amt;

      amt = symtab_hdr->sh_info;
      amt *= sizeof (Elf_External_Sym_Shndx);
      shndx_buf = (Elf_External_Sym_Shndx *) bfd_malloc (amt);
      if (shndx_buf == NULL)
        goto error_return;
      if (bfd_seek (abfd, shndx_hdr->sh_offset, SEEK_SET) != 0
          || bfd_bread ((PTR) shndx_buf, amt, abfd) != amt)
        goto error_return;
      shndx_hdr->contents = (bfd_byte *) shndx_buf;
    }

  /* Get a copy of the native relocations.  */
  internal_relocs = (_bfd_elf_link_read_relocs
                     (abfd, sec, (PTR) NULL, (Elf_Internal_Rela *) NULL,
                      link_info->keep_memory));
  if (internal_relocs == NULL)
    goto error_return;
  if (! link_info->keep_memory)
    free_relocs = internal_relocs;

  /* The RL_ relocs must be just before the operand relocs they go
     with, so we must sort them to guarantee this.  We use bubblesort
     instead of qsort so we can guarantee that relocs with the same
     address remain in the same relative order.  */
  reloc_bubblesort (internal_relocs, sec->reloc_count);

  /* Walk through them looking for relaxing opportunities.  */
  irelend = internal_relocs + sec->reloc_count;

  /* This will either be NULL or a pointer to the next alignment
     relocation.  */
  next_alignment = internal_relocs;
  /* This will be the previous alignment, although at first it points
     to the first real relocation.  */
  prev_alignment = internal_relocs;

  /* We calculate worst case shrinkage caused by alignment directives.
     Not fool-proof, but better than either ignoring the problem or
     doing heavy duty analysis of all the alignment markers in all
     input sections.  */
  section_alignment_glue = 0;
  for (irel = internal_relocs; irel < irelend; irel ++)
    if (ELF32_R_TYPE (irel->r_info) == R_RX_RH_RELAX
        && irel->r_addend & RX_RELAXA_ALIGN)
      {
        int this_glue = 1 << (irel->r_addend & RX_RELAXA_ANUM);

        if (section_alignment_glue < this_glue)
          section_alignment_glue = this_glue;
      }
  /* Worst case is all 0..N alignments, in order, causing 2*N-1 byte
     gaps.  */
  section_alignment_glue *= 2;

  for (irel = internal_relocs; irel < irelend; irel ++)
    {
      unsigned char *insn;
      int nrelocs;

      /* The insns we care about are all marked with one of these.  */
      if (ELF32_R_TYPE (irel->r_info) != R_RX_RH_RELAX)
        continue;

      if (irel->r_addend & RX_RELAXA_ALIGN
          || next_alignment == internal_relocs)
        {
          /* When we delete bytes, we need to maintain all the alignments
             indicated.  In addition, we need to be careful about relaxing
             jumps across alignment boundaries - these displacements
             *grow* when we delete bytes.  For now, don't shrink
             displacements across an alignment boundary, just in case.
             Note that this only affects relocations to the same
             section.  */
          prev_alignment = next_alignment;
          next_alignment += 2;
          while (next_alignment < irelend
                 && (ELF32_R_TYPE (next_alignment->r_info) != R_RX_RH_RELAX
                     || !(next_alignment->r_addend & RX_RELAXA_ELIGN)))
            next_alignment ++;
          if (next_alignment >= irelend || next_alignment->r_offset == 0)
            next_alignment = NULL;
        }

      /* When we hit alignment markers, see if we've shrunk enough
         before them to reduce the gap without violating the alignment
         requirements.  */
      if (irel->r_addend & RX_RELAXA_ALIGN)
        {
          /* At this point, the next relocation *should* be the ELIGN
             end marker.  */
          Elf_Internal_Rela *erel = irel + 1;
          unsigned int alignment, nbytes;

          if (ELF32_R_TYPE (erel->r_info) != R_RX_RH_RELAX)
            continue;
          if (!(erel->r_addend & RX_RELAXA_ELIGN))
            continue;

          alignment = 1 << (irel->r_addend & RX_RELAXA_ANUM);

          if (erel->r_offset - irel->r_offset < alignment)
            continue;

          nbytes = erel->r_offset - irel->r_offset;
          nbytes /= alignment;
          nbytes *= alignment;

          elf32_rx_relax_delete_bytes (abfd, sec, erel->r_offset-nbytes, nbytes, next_alignment,
                                       erel->r_offset == sec->size);
        }

      if (irel->r_addend & RX_RELAXA_ELIGN)
        continue;

      insn = contents + irel->r_offset;

      nrelocs = irel->r_addend & RX_RELAXA_RNUM;

      /* At this point, we have an insn that is a candidate for linker
         relaxation.  There are NRELOCS relocs following that may be
         relaxed, although each reloc may be made of more than one
         reloc entry (such as gp-rel symbols).  */

      /* Get the value of the symbol referred to by the reloc.  Just
         in case this is the last reloc in the list, use the RL's
         addend to choose between this reloc (no addend) or the next
         (yes addend, which means at least one following reloc).  */

      /* srel points to the "current" relocation for this insn -
         actually the last reloc for a given operand, which is the one
         we need to update.  We check the relaxations in the same
         order that the relocations happen, so we'll just push it
         along as we go.  */

      pc = sec->output_section->vma + sec->output_offset
        + irel->r_offset;
#define GET_RELOC \
      symval = OFFSET_FOR_RELOC (srel, &srel, &scale); \
      pcrel = symval - pc + srel->r_addend; \
      nrelocs --;

#define SNIPNR(offset, nbytes) \
      elf32_rx_relax_delete_bytes (abfd, sec, (insn - contents) + offset, nbytes, next_alignment, 0);
#define SNIP(offset, nbytes, newtype) \
      SNIPNR (offset, nbytes); \
      srel->r_info = ELF32_R_INFO (ELF32_R_SYM (srel->r_info), newtype)
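      /* So, for example, SNIP (3, 1, newrel) removes the single byte at
         insn + 3, shifts the rest of the section down, and retypes the
         current operand reloc to NEWREL so the now-narrower field is
         patched correctly on the final relocate pass.  */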
      /* The order of these bit tests must match the order that the
         relocs appear in.  Since we sorted those by offset, we can
         count on that here.  */

      /* Note that the numbers in, say, DSP6 are the bit offsets of
         the code fields that describe the operand.  Bits number 0 for
         the MSB of insn[0].  */

      if (irel->r_addend & RX_RELAXA_DSP6)

          if (code == 2 && symval / scale <= 255)

              unsigned int newrel = ELF32_R_TYPE (srel->r_info);

              newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
              if (newrel != ELF32_R_TYPE (srel->r_info))

                  SNIP (3, 1, newrel);

          else if (code == 1 && symval == 0)

              SNIP (2, 1, R_RX_NONE);

          /* Special case DSP:5 format: MOV.bwl dsp:5[Rsrc],Rdst.  */
          else if (code == 1 && symval / scale <= 31
                   /* Decodable bits.  */
                   && (insn[0] & 0xcc) == 0xcc

                   && (insn[0] & 0x30) != 3
                   /* Register MSBs.  */
                   && (insn[1] & 0x88) == 0x00)

              insn[0] = 0x88 | (insn[0] & 0x30);
              /* The register fields are in the right place already.  */

              /* We can't relax this new opcode.  */

              switch ((insn[0] & 0x30) >> 4)

                  newrel = R_RX_RH_ABS5p5B;

                  newrel = R_RX_RH_ABS5p5W;

                  newrel = R_RX_RH_ABS5p5L;

              move_reloc (irel, srel, -2);
              SNIP (2, 1, newrel);

          /* Special case DSP:5 format: MOVU.bw dsp:5[Rsrc],Rdst.  */
          else if (code == 1 && symval / scale <= 31
                   /* Decodable bits.  */
                   && (insn[0] & 0xf8) == 0x58
                   /* Register MSBs.  */
                   && (insn[1] & 0x88) == 0x00)

              insn[0] = 0xb0 | ((insn[0] & 0x04) << 1);
              /* The register fields are in the right place already.  */

              /* We can't relax this new opcode.  */

              switch ((insn[0] & 0x08) >> 3)

                  newrel = R_RX_RH_ABS5p5B;

                  newrel = R_RX_RH_ABS5p5W;

              move_reloc (irel, srel, -2);
              SNIP (2, 1, newrel);

      /* A DSP4 operand always follows a DSP6 operand, even if there's
         no relocation for it.  We have to read the code out of the
         opcode to calculate the offset of the operand.  */
      if (irel->r_addend & RX_RELAXA_DSP4)
        {
          int code6, offset = 0;

          code6 = insn[0] & 0x03;
          switch (code6)
            {
            case 0: offset = 2; break;
            case 1: offset = 3; break;
            case 2: offset = 4; break;
            case 3: offset = 2; break;
            }

          code = (insn[0] & 0x0c) >> 2;

          if (code == 2 && symval / scale <= 255)

              unsigned int newrel = ELF32_R_TYPE (srel->r_info);

              newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
              if (newrel != ELF32_R_TYPE (srel->r_info))

                  SNIP (offset+1, 1, newrel);

          else if (code == 1 && symval == 0)

              SNIP (offset, 1, R_RX_NONE);

          /* Special case DSP:5 format: MOV.bwl Rsrc,dsp:5[Rdst] */
          else if (code == 1 && symval / scale <= 31
                   /* Decodable bits.  */
                   && (insn[0] & 0xc3) == 0xc3

                   && (insn[0] & 0x30) != 3
                   /* Register MSBs.  */
                   && (insn[1] & 0x88) == 0x00)

              insn[0] = 0x80 | (insn[0] & 0x30);
              /* The register fields are in the right place already.  */

              /* We can't relax this new opcode.  */

              switch ((insn[0] & 0x30) >> 4)

                  newrel = R_RX_RH_ABS5p5B;

                  newrel = R_RX_RH_ABS5p5W;

                  newrel = R_RX_RH_ABS5p5L;

              move_reloc (irel, srel, -2);
              SNIP (2, 1, newrel);

      /* These always occur alone, but the offset depends on whether
         it's a MEMEX opcode (0x06) or not.  */
      if (irel->r_addend & RX_RELAXA_DSP14)

          if (insn[0] == 0x06)

          if (code == 2 && symval / scale <= 255)

              unsigned int newrel = ELF32_R_TYPE (srel->r_info);

              newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
              if (newrel != ELF32_R_TYPE (srel->r_info))

                  SNIP (offset, 1, newrel);

          else if (code == 1 && symval == 0)

              SNIP (offset, 1, R_RX_NONE);
      /* These always occur alone.  */
      if (irel->r_addend & RX_RELAXA_IMM6)

          /* These relocations sign-extend, so we must do signed compares.  */
          ssymval = (long) symval;

          code = insn[0] & 0x03;

          if (code == 0 && ssymval <= 8388607 && ssymval >= -8388608)

              unsigned int newrel = ELF32_R_TYPE (srel->r_info);

              newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
              if (newrel != ELF32_R_TYPE (srel->r_info))

                  SNIP (2, 1, newrel);

          else if (code == 3 && ssymval <= 32767 && ssymval >= -32768)

              unsigned int newrel = ELF32_R_TYPE (srel->r_info);

              newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
              if (newrel != ELF32_R_TYPE (srel->r_info))

                  SNIP (2, 1, newrel);

          /* Special case UIMM8 format: CMP #uimm8,Rdst.  */
          else if (code == 2 && ssymval <= 255 && ssymval >= 16
                   /* Decodable bits.  */
                   && (insn[0] & 0xfc) == 0x74
                   /* Decodable bits.  */
                   && ((insn[1] & 0xf0) == 0x00))

              insn[1] = 0x50 | (insn[1] & 0x0f);

              /* We can't relax this new opcode.  */

              if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
                newrel = R_RX_ABS8U;
              else
                newrel = R_RX_DIR8U;

              SNIP (2, 1, newrel);

          else if (code == 2 && ssymval <= 127 && ssymval >= -128)

              unsigned int newrel = ELF32_R_TYPE (srel->r_info);

              newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
              if (newrel != ELF32_R_TYPE (srel->r_info))

                  SNIP (2, 1, newrel);

          /* Special case UIMM4 format: CMP, MUL, AND, OR.  */
          else if (code == 1 && ssymval <= 15 && ssymval >= 0
                   /* Decodable bits and immediate type.  */

                   /* Decodable bits.  */
                   && (insn[1] & 0xc0) == 0x00)

              static const int newop[4] = { 1, 3, 4, 5 };

              insn[0] = 0x60 | newop[insn[1] >> 4];
              /* The register number doesn't move.  */

              /* We can't relax this new opcode.  */

              move_reloc (irel, srel, -1);

              SNIP (2, 1, R_RX_RH_UIMM4p8);

          /* Special case UIMM4 format: ADD -> ADD/SUB.  */
          else if (code == 1 && ssymval <= 15 && ssymval >= -15
                   /* Decodable bits and immediate type.  */

                   /* Same register for source and destination.  */
                   && ((insn[1] >> 4) == (insn[1] & 0x0f)))

              /* Note that we can't turn "add $0,Rs" into a NOP
                 because the flags need to be set right.  */

              if (ssymval < 0)
                {
                  insn[0] = 0x60; /* Subtract.  */
                  newrel = R_RX_RH_UNEG4p8;
                }
              else
                {
                  insn[0] = 0x62; /* Add.  */
                  newrel = R_RX_RH_UIMM4p8;
                }

              /* The register number is in the right place.  */

              /* We can't relax this new opcode.  */

              move_reloc (irel, srel, -1);

              SNIP (2, 1, newrel);
      /* These are either matched with a DSP6 (2-byte base) or an id24
         (3-byte base) opcode.  */
      if (irel->r_addend & RX_RELAXA_IMM12)
        {
          int dspcode, offset = 0;

          if ((insn[0] & 0xfc) == 0xfc)
            dspcode = 1; /* Just something with one byte operand.  */
          else
            dspcode = insn[0] & 3;
          switch (dspcode)
            {
            case 0: offset = 2; break;
            case 1: offset = 3; break;
            case 2: offset = 4; break;
            case 3: offset = 2; break;
            }

          /* These relocations sign-extend, so we must do signed compares.  */
          ssymval = (long) symval;

          code = (insn[1] >> 2) & 3;
          if (code == 0 && ssymval <= 8388607 && ssymval >= -8388608)

              unsigned int newrel = ELF32_R_TYPE (srel->r_info);

              newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
              if (newrel != ELF32_R_TYPE (srel->r_info))

                  SNIP (offset, 1, newrel);

          else if (code == 3 && ssymval <= 32767 && ssymval >= -32768)

              unsigned int newrel = ELF32_R_TYPE (srel->r_info);

              newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
              if (newrel != ELF32_R_TYPE (srel->r_info))

                  SNIP (offset, 1, newrel);

          /* Special case UIMM8 format: MOV #uimm8,Rdst.  */
          else if (code == 2 && ssymval <= 255 && ssymval >= 16
                   /* Decodable bits.  */

                   /* Decodable bits.  */
                   && ((insn[1] & 0x03) == 0x02))

              insn[1] = 0x40 | (insn[1] >> 4);

              /* We can't relax this new opcode.  */

              if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
                newrel = R_RX_ABS8U;
              else
                newrel = R_RX_DIR8U;

              SNIP (2, 1, newrel);

          else if (code == 2 && ssymval <= 127 && ssymval >= -128)

              unsigned int newrel = ELF32_R_TYPE (srel->r_info);

              newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
              if (newrel != ELF32_R_TYPE (srel->r_info))

                  SNIP (offset, 1, newrel);

          /* Special case UIMM4 format: MOV #uimm4,Rdst.  */
          else if (code == 1 && ssymval <= 15 && ssymval >= 0
                   /* Decodable bits.  */

                   /* Decodable bits.  */
                   && ((insn[1] & 0x03) == 0x02))

              insn[1] = insn[1] >> 4;

              /* We can't relax this new opcode.  */

              move_reloc (irel, srel, -1);

              SNIP (2, 1, R_RX_RH_UIMM4p8);
      if (irel->r_addend & RX_RELAXA_BRA)
        {
          unsigned int newrel = ELF32_R_TYPE (srel->r_info);

          int alignment_glue = 0;

          /* Branches over alignment chunks are problematic, as
             deleting bytes here makes the branch *further* away.  We
             can be aggressive with branches within this alignment
             block, but not branches outside it.  */
          if ((prev_alignment == NULL
               || symval < (bfd_vma)(sec_start + prev_alignment->r_offset))
              && (next_alignment == NULL
                  || symval > (bfd_vma)(sec_start + next_alignment->r_offset)))
            alignment_glue = section_alignment_glue;

          if (ELF32_R_TYPE (srel[1].r_info) == R_RX_RH_RELAX
              && srel[1].r_addend & RX_RELAXA_BRA
              && srel[1].r_offset < irel->r_offset + pcrel)

          newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));

          /* The values we compare PCREL with are not what you'd
             expect; they're off by a little to compensate for (1)
             where the reloc is relative to the insn, and (2) how much
             the insn is going to change when we relax it.  */

          /* These we have to decode.  */

            case 0x04: /* BRA pcdsp:24 */
              if (-32768 + alignment_glue <= pcrel
                  && pcrel <= 32765 - alignment_glue)

                  SNIP (3, 1, newrel);

            case 0x38: /* BRA pcdsp:16 */
              if (-128 + alignment_glue <= pcrel
                  && pcrel <= 127 - alignment_glue)

                  SNIP (2, 1, newrel);

            case 0x2e: /* BRA pcdsp:8 */
              /* Note that there's a risk here of shortening things so
                 much that we no longer fit this reloc; it *should*
                 only happen when you branch across a branch, and that
                 branch also devolves into BRA.S.  "Real" code should
                 be OK.  */
              if (max_pcrel3 + alignment_glue <= pcrel
                  && pcrel <= 10 - alignment_glue

                  SNIP (1, 1, newrel);
                  move_reloc (irel, srel, -1);

            case 0x05: /* BSR pcdsp:24 */
              if (-32768 + alignment_glue <= pcrel
                  && pcrel <= 32765 - alignment_glue)

                  SNIP (1, 1, newrel);

            case 0x3a: /* BEQ.W pcdsp:16 */
            case 0x3b: /* BNE.W pcdsp:16 */
              if (-128 + alignment_glue <= pcrel
                  && pcrel <= 127 - alignment_glue)

                  insn[0] = 0x20 | (insn[0] & 1);
                  SNIP (1, 1, newrel);

            case 0x20: /* BEQ.B pcdsp:8 */
            case 0x21: /* BNE.B pcdsp:8 */
              if (max_pcrel3 + alignment_glue <= pcrel
                  && pcrel - alignment_glue <= 10

                  insn[0] = 0x10 | ((insn[0] & 1) << 3);
                  SNIP (1, 1, newrel);
                  move_reloc (irel, srel, -1);

            case 0x16: /* synthetic BNE dsp24 */
            case 0x1e: /* synthetic BEQ dsp24 */
              if (-32767 + alignment_glue <= pcrel
                  && pcrel <= 32766 - alignment_glue

                  if (insn[0] == 0x16)

                  /* We snip out the bytes at the end else the reloc
                     will get moved too, and too much.  */
                  SNIP (3, 2, newrel);
                  move_reloc (irel, srel, -1);

          /* Special case - synthetic conditional branches, pcrel24.
             Note that EQ and NE have been handled above.  */
          if ((insn[0] & 0xf0) == 0x20

              && srel->r_offset != irel->r_offset + 1
              && -32767 + alignment_glue <= pcrel
              && pcrel <= 32766 - alignment_glue)

              SNIP (5, 1, newrel);

          /* Special case - synthetic conditional branches, pcrel16 */
          if ((insn[0] & 0xf0) == 0x20

              && srel->r_offset != irel->r_offset + 1
              && -127 + alignment_glue <= pcrel
              && pcrel <= 126 - alignment_glue)

              int cond = (insn[0] & 0x0f) ^ 0x01;

              insn[0] = 0x20 | cond;
              /* By moving the reloc first, we avoid having
                 delete_bytes move it also.  */
              move_reloc (irel, srel, -2);
              SNIP (2, 3, newrel);

      BFD_ASSERT (nrelocs == 0);
      /* Special case - check MOV.bwl #IMM, dsp[reg] and see if we can
         use MOV.bwl #uimm:8, dsp:5[r7] format.  This is tricky
         because it may have one or two relocations.  */
      if ((insn[0] & 0xfc) == 0xf8
          && (insn[1] & 0x80) == 0x00
          && (insn[0] & 0x03) != 0x03)
        {
          int dcode, icode, reg, ioff, dscale, ilen;
          bfd_vma disp_val = 0;
          long imm_val = 0;
          Elf_Internal_Rela * disp_rel = 0;
          Elf_Internal_Rela * imm_rel = 0;

          dcode = insn[0] & 0x03;
          icode = (insn[1] >> 2) & 0x03;
          reg = (insn[1] >> 4) & 0x0f;

          ioff = dcode == 1 ? 3 : dcode == 2 ? 4 : 2;

          /* Figure out what the displacement is.  */
          if (dcode == 1 || dcode == 2)
            {
              /* There's a displacement.  See if there's a reloc for it.  */
              if (srel[1].r_offset == irel->r_offset + 2)

#if RX_OPCODE_BIG_ENDIAN
                  disp_val = insn[2] * 256 + insn[3];
#else
                  disp_val = insn[2] + insn[3] * 256;
#endif
              switch (insn[1] & 3)

          /* Figure out what the immediate is.  */
          if (srel[1].r_offset == irel->r_offset + ioff)

              imm_val = (long) symval;

              unsigned char * ip = insn + ioff;

                  /* For byte writes, we don't sign extend.  Makes the math easier later.  */

                  imm_val = (char) ip[0];

#if RX_OPCODE_BIG_ENDIAN
                  imm_val = ((char) ip[0] << 8) | ip[1];
#else
                  imm_val = ((char) ip[1] << 8) | ip[0];
#endif

#if RX_OPCODE_BIG_ENDIAN
                  imm_val = ((char) ip[0] << 16) | (ip[1] << 8) | ip[2];
#else
                  imm_val = ((char) ip[2] << 16) | (ip[1] << 8) | ip[0];
#endif

#if RX_OPCODE_BIG_ENDIAN
                  imm_val = (ip[0] << 24) | (ip[1] << 16) | (ip[2] << 8) | ip[3];
#else
                  imm_val = (ip[3] << 24) | (ip[2] << 16) | (ip[1] << 8) | ip[0];
#endif
          /* The shortcut happens when the immediate is 0..255,
             register r0 to r7, and displacement (scaled) 0..31.  */
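          /* (For instance, assuming a long-word access where dscale is 4,
             a displacement of 8 gives disp_val / dscale == 2, which fits
             in the 5-bit field; the rewritten insn below then packs that
             value into insn[1] together with the register number.)  */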
2758 if (0 <= imm_val
&& imm_val
<= 255
2759 && 0 <= reg
&& reg
<= 7
2760 && disp_val
/ dscale
<= 31)
2762 insn
[0] = 0x3c | (insn
[1] & 0x03);
2763 insn
[1] = (((disp_val
/ dscale
) << 3) & 0x80) | (reg
<< 4) | ((disp_val
/dscale
) & 0x0f);
2768 int newrel
= R_RX_NONE
;
2773 newrel
= R_RX_RH_ABS5p8B
;
2776 newrel
= R_RX_RH_ABS5p8W
;
2779 newrel
= R_RX_RH_ABS5p8L
;
2782 disp_rel
->r_info
= ELF32_R_INFO (ELF32_R_SYM (disp_rel
->r_info
), newrel
);
2783 move_reloc (irel
, disp_rel
, -1);
2787 imm_rel
->r_info
= ELF32_R_INFO (ELF32_R_SYM (imm_rel
->r_info
), R_RX_DIR8U
);
2788 move_reloc (disp_rel
? disp_rel
: irel
,
2790 irel
->r_offset
- imm_rel
->r_offset
+ 2);
2793 SNIPNR (3, ilen
- 3);
2796 /* We can't relax this new opcode. */
2802 /* We can't reliably relax branches to DIR3U_PCREL unless we know
2803 whatever they're branching over won't shrink any more. If we're
2804 basically done here, do one more pass just for branches - but
2805 don't request a pass after that one! */
2806 if (!*again
&& !allow_pcrel3
)
2808 bfd_boolean ignored
;
2810 elf32_rx_relax_section (abfd
, sec
, link_info
, &ignored
, TRUE
);
2816 if (free_relocs
!= NULL
)
2819 if (free_contents
!= NULL
)
2820 free (free_contents
);
2822 if (shndx_buf
!= NULL
)
2824 shndx_hdr
->contents
= NULL
;
2828 if (free_intsyms
!= NULL
)
2829 free (free_intsyms
);
static bfd_boolean
elf32_rx_relax_section_wrapper (bfd * abfd,
                                asection * sec,
                                struct bfd_link_info * link_info,
                                bfd_boolean * again)
{
  return elf32_rx_relax_section (abfd, sec, link_info, again, FALSE);
}

/* Function to set the ELF flag bits.  */

static bfd_boolean
rx_elf_set_private_flags (bfd * abfd, flagword flags)
{
  elf_elfheader (abfd)->e_flags = flags;
  elf_flags_init (abfd) = TRUE;
  return TRUE;
}

static bfd_boolean no_warn_mismatch = FALSE;

void bfd_elf32_rx_set_target_flags (bfd_boolean);

void
bfd_elf32_rx_set_target_flags (bfd_boolean user_no_warn_mismatch)
{
  no_warn_mismatch = user_no_warn_mismatch;
}

/* Merge backend specific data from an object file to the output
   object file when linking.  */
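/* In outline: the first input BFD simply seeds the output e_flags;
   after that, a mismatch in the flag bits we care about
   (E_FLAG_RX_64BIT_DOUBLES and E_FLAG_RX_DSP) is either merged
   silently when mismatch warnings are suppressed or reported as an
   error.  */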
static bfd_boolean
rx_elf_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword old_flags;
  flagword new_flags;
  bfd_boolean error = FALSE;

  new_flags = elf_elfheader (ibfd)->e_flags;
  old_flags = elf_elfheader (obfd)->e_flags;

  if (!elf_flags_init (obfd))
    {
      /* First call, no flags set.  */
      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = new_flags;
    }
  else if (old_flags != new_flags)
    {
      flagword known_flags = E_FLAG_RX_64BIT_DOUBLES | E_FLAG_RX_DSP;

      if ((old_flags ^ new_flags) & known_flags)
        {
          /* Only complain if flag bits we care about do not match.
             Other bits may be set, since older binaries did use some
             deprecated flags.  */
          if (no_warn_mismatch)
            elf_elfheader (obfd)->e_flags = (new_flags | old_flags) & known_flags;
          else
            {
              (*_bfd_error_handler)
                ("ELF header flags mismatch: old_flags = 0x%.8lx, new_flags = 0x%.8lx, filename = %s",
                 old_flags, new_flags, bfd_get_filename (ibfd));
              error = TRUE;
            }
        }
      else
        elf_elfheader (obfd)->e_flags = new_flags & known_flags;
    }

  if (error)
    bfd_set_error (bfd_error_bad_value);

  return !error;
}

static bfd_boolean
rx_elf_print_private_bfd_data (bfd * abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  flagword flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  fprintf (file, _("private flags = 0x%lx:"), (long) flags);

  if (flags & E_FLAG_RX_64BIT_DOUBLES)
    fprintf (file, _(" [64-bit doubles]"));
  if (flags & E_FLAG_RX_DSP)
    fprintf (file, _(" [dsp]"));

  return TRUE;
}

/* Return the MACH for an e_flags value.  */

static int
elf32_rx_machine (bfd * abfd)
{
  if ((elf_elfheader (abfd)->e_flags & EF_RX_CPU_MASK) == EF_RX_CPU_RX)
    return bfd_mach_rx;

  return 0;
}

static bfd_boolean
rx_elf_object_p (bfd * abfd)
{
  bfd_default_set_arch_mach (abfd, bfd_arch_rx,
                             elf32_rx_machine (abfd));
  return TRUE;
}

void
rx_dump_symtab (bfd * abfd, void * internal_syms, void * external_syms)
{
  size_t locsymcount;
  Elf_Internal_Sym * isymbuf;
  Elf_Internal_Sym * isymend;
  Elf_Internal_Sym * isym;
  Elf_Internal_Shdr * symtab_hdr;
  bfd_boolean free_internal = FALSE, free_external = FALSE;
  char * st_info_str;
  char * st_info_stb_str;
  char * st_other_str;
  char * st_shndx_str;

  if (! internal_syms)
    {
      internal_syms = bfd_malloc (1000);
      free_internal = TRUE;
    }
  if (! external_syms)
    {
      external_syms = bfd_malloc (1000);
      free_external = TRUE;
    }

  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
  locsymcount = symtab_hdr->sh_size / get_elf_backend_data (abfd)->s->sizeof_sym;
  if (free_internal)
    isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
                                    symtab_hdr->sh_info, 0,
                                    internal_syms, external_syms, NULL);
  else
    isymbuf = internal_syms;
  isymend = isymbuf + locsymcount;

  for (isym = isymbuf; isym < isymend; isym++)
    {
      switch (ELF_ST_TYPE (isym->st_info))
        {
        case STT_FUNC:    st_info_str = "STT_FUNC";    break;
        case STT_SECTION: st_info_str = "STT_SECTION"; break;
        case STT_FILE:    st_info_str = "STT_FILE";    break;
        case STT_OBJECT:  st_info_str = "STT_OBJECT";  break;
        case STT_TLS:     st_info_str = "STT_TLS";     break;
        default:          st_info_str = "";            break;
        }

      switch (ELF_ST_BIND (isym->st_info))
        {
        case STB_LOCAL:  st_info_stb_str = "STB_LOCAL";  break;
        case STB_GLOBAL: st_info_stb_str = "STB_GLOBAL"; break;
        default:         st_info_stb_str = "";           break;
        }

      switch (ELF_ST_VISIBILITY (isym->st_other))
        {
        case STV_DEFAULT:   st_other_str = "STV_DEFAULT";   break;
        case STV_INTERNAL:  st_other_str = "STV_INTERNAL";  break;
        case STV_PROTECTED: st_other_str = "STV_PROTECTED"; break;
        default:            st_other_str = "";              break;
        }

      switch (isym->st_shndx)
        {
        case SHN_ABS:    st_shndx_str = "SHN_ABS";    break;
        case SHN_COMMON: st_shndx_str = "SHN_COMMON"; break;
        case SHN_UNDEF:  st_shndx_str = "SHN_UNDEF";  break;
        default:         st_shndx_str = "";           break;
        }

      printf ("isym = %p st_value = %lx st_size = %lx st_name = (%lu) %s "
              "st_info = (%d) %s %s st_other = (%d) %s st_shndx = (%d) %s\n",
              isym,
              (unsigned long) isym->st_value,
              (unsigned long) isym->st_size,
              (unsigned long) isym->st_name,
              bfd_elf_string_from_elf_section (abfd, symtab_hdr->sh_link,
                                               isym->st_name),
              isym->st_info, st_info_str, st_info_stb_str,
              isym->st_other, st_other_str,
              isym->st_shndx, st_shndx_str);
    }

  if (free_internal)
    free (internal_syms);
  if (free_external)
    free (external_syms);
}

char *
rx_get_reloc (long reloc)
{
  if (0 <= reloc && reloc < R_RX_max)
    return rx_elf_howto_table[reloc].name;
  return "";
}

/* We must take care to keep the on-disk copy of any code sections
   that are fully linked swapped if the target is big endian, to match
   the Renesas tools.  */

/* The rule is: big endian objects that are final-link executables
   have code sections stored with 32-bit words swapped relative to
   what you'd get by default.  */
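/* For example, instruction bytes laid out as 01 02 03 04 05 06 07 08 in
   the default (little-endian) image are stored on disk as
   04 03 02 01 08 07 06 05: each aligned 32-bit word is byte-reversed.
   The bfd_putb32 (bfd_getl32 (...), ...) pairs below perform exactly
   that reversal for whole words, with extra care taken at unaligned
   section edges.  */
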
static bfd_boolean
rx_get_section_contents (bfd *         abfd,
                         asection *    section,
                         void *        location,
                         file_ptr      offset,
                         bfd_size_type count)
{
  int exec = (abfd->flags & EXEC_P) ? 1 : 0;
  int s_code = (section->flags & SEC_CODE) ? 1 : 0;
  bfd_boolean rv;

  fprintf (stderr, "dj: get %ld %ld from %s %s e%d sc%d %08lx:%08lx\n",
           (long) offset, (long) count, section->name,
           bfd_big_endian (abfd) ? "be" : "le",
           exec, s_code, (long unsigned) section->filepos,
           (long unsigned) offset);
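  /* A sketch of the approach taken below, assuming reads may start or
     end in the middle of a 32-bit word: for the unaligned head and tail
     the whole containing word is fetched into a small buffer, swapped,
     and only the requested bytes are copied out; the aligned middle is
     fetched directly into the caller's buffer and swapped in place.
     Sections that are not big-endian, final-link code fall through to
     the generic fetch.  */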
  if (exec && s_code && bfd_big_endian (abfd))
    {
      char * cloc = (char *) location;
      bfd_size_type cnt, end_cnt;

      rv = TRUE;

      /* Fetch and swap unaligned bytes at the beginning.  */
      if (offset % 4)
        {
          char buf[4];

          rv = _bfd_generic_get_section_contents (abfd, section, buf,
                                                  offset - (offset % 4), 4);
          if (! rv)
            return FALSE;

          bfd_putb32 (bfd_getl32 (buf), buf);

          cnt = 4 - (offset % 4);
          if (cnt > count)
            cnt = count;

          memcpy (location, buf + (offset % 4), cnt);

          cloc += cnt;
          offset += cnt;
          count -= cnt;
        }

      end_cnt = count % 4;

      /* Fetch and swap the middle bytes.  */
      if (count >= 4)
        {
          rv = _bfd_generic_get_section_contents (abfd, section, cloc, offset,
                                                  count - end_cnt);
          if (! rv)
            return FALSE;

          for (cnt = count; cnt >= 4; cnt -= 4, cloc += 4)
            bfd_putb32 (bfd_getl32 (cloc), cloc);
        }

      /* Fetch and swap the end bytes.  */
      if (end_cnt > 0)
        {
          char buf[4];

          /* Fetch the end bytes.  */
          rv = _bfd_generic_get_section_contents (abfd, section, buf,
                                                  offset + count - end_cnt, 4);
          if (! rv)
            return FALSE;

          bfd_putb32 (bfd_getl32 (buf), buf);
          memcpy (cloc, buf, end_cnt);
        }
    }
  else
    rv = _bfd_generic_get_section_contents (abfd, section, location, offset, count);

  return rv;
}

static bfd_boolean
rx2_set_section_contents (bfd *         abfd,
                          asection *    section,
                          const void *  location,
                          file_ptr      offset,
                          bfd_size_type count)
{
  bfd_size_type i;

  fprintf (stderr, " set sec %s %08x loc %p offset %#x count %#x\n",
           section->name, (unsigned) section->vma, location, (int) offset, (int) count);

  for (i = 0; i < count; i++)
    {
      if (i % 16 == 0 && i > 0)
        fprintf (stderr, "\n");

      if (i % 16 && i % 4 == 0)
        fprintf (stderr, " ");

      if (i % 16 == 0)
        fprintf (stderr, " %08x:", (int) (section->vma + offset + i));

      fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
    }
  fprintf (stderr, "\n");

  return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
}

#define _bfd_elf_set_section_contents rx2_set_section_contents

static bfd_boolean
rx_set_section_contents (bfd *         abfd,
                         asection *    section,
                         const void *  location,
                         file_ptr      offset,
                         bfd_size_type count)
{
  bfd_boolean exec = (abfd->flags & EXEC_P) ? TRUE : FALSE;
  bfd_boolean s_code = (section->flags & SEC_CODE) ? TRUE : FALSE;
  bfd_boolean rv;
  char * swapped_data = NULL;
  bfd_size_type i;
  bfd_vma caddr = section->vma + offset;
  file_ptr faddr = 0;
  bfd_size_type scount;

  fprintf (stderr, "\ndj: set %ld %ld to %s %s e%d sc%d\n",
           (long) offset, (long) count, section->name,
           bfd_big_endian (abfd) ? "be" : "le",
           exec, s_code);

  for (i = 0; i < count; i++)
    {
      int a = section->vma + offset + i;

      if (a % 16 == 0 && a > 0)
        fprintf (stderr, "\n");

      if (a % 16 && a % 4 == 0)
        fprintf (stderr, " ");

      if (a % 16 == 0 || i == 0)
        fprintf (stderr, " %08x:", (int) (section->vma + offset + i));

      fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
    }
  fprintf (stderr, "\n");

  if (! exec || ! s_code || ! bfd_big_endian (abfd))
    return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
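  /* What follows is the write-side counterpart of the swapping in
     rx_get_section_contents.  Leading bytes up to the next 4-byte
     boundary are written one at a time to the file position they will
     occupy after the 32-bit swap (the offset +3/+1/-1/-3 mapping below
     simply reverses a byte's position within its aligned word).  The
     aligned middle is byte-swapped into a bounce buffer and written in
     one go, and any trailing bytes are handled like the leading ones.  */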
  while (count > 0 && caddr > 0 && caddr % 4)
    {
      switch (caddr % 4)
        {
        case 0: faddr = offset + 3; break;
        case 1: faddr = offset + 1; break;
        case 2: faddr = offset - 1; break;
        case 3: faddr = offset - 3; break;
        }

      rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
      if (! rv)
        return rv;

      location = (char *) location + 1;
      offset++;
      count--;
      caddr++;
    }

  scount = (int) (count / 4) * 4;
  if (scount > 0)
    {
      char * cloc = (char *) location;

      swapped_data = (char *) bfd_alloc (abfd, count);

      for (i = 0; i < count; i += 4)
        {
          bfd_vma v = bfd_getl32 (cloc + i);
          bfd_putb32 (v, swapped_data + i);
        }

      rv = _bfd_elf_set_section_contents (abfd, section, swapped_data, offset, scount);
      if (! rv)
        return rv;

      location = (char *) location + scount;
      offset += scount;
      count -= scount;
    }

  caddr = section->vma + offset;
  while (count > 0)
    {
      switch (caddr % 4)
        {
        case 0: faddr = offset + 3; break;
        case 1: faddr = offset + 1; break;
        case 2: faddr = offset - 1; break;
        case 3: faddr = offset - 3; break;
        }

      rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
      if (! rv)
        return rv;

      location = (char *) location + 1;
      offset++;
      count--;
      caddr++;
    }

  return TRUE;
}

static bfd_boolean
rx_final_link (bfd * abfd, struct bfd_link_info * info)
{
  asection * o;

  for (o = abfd->sections; o != NULL; o = o->next)
    {
      fprintf (stderr, "sec %s fl %x vma %lx lma %lx size %lx raw %lx\n",
               o->name, o->flags, o->vma, o->lma, o->size, o->rawsize);

      if (o->flags & SEC_CODE
          && bfd_big_endian (abfd)
          && (o->size % 4 || o->rawsize % 4))
        {
          fprintf (stderr, "adjusting...\n");

          o->size += 4 - (o->size % 4);
          o->rawsize += 4 - (o->rawsize % 4);
        }
    }

  return bfd_elf_final_link (abfd, info);
}
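/* A note on the adjustment above: padding big-endian code sections to a
   multiple of four bytes means the word-swapping in
   rx_set_section_contents and rx_get_section_contents always operates
   on whole 32-bit words, so no stray trailing bytes end up in the wrong
   order on disk.  */
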
static bfd_boolean
elf32_rx_modify_program_headers (bfd * abfd ATTRIBUTE_UNUSED,
                                 struct bfd_link_info * info ATTRIBUTE_UNUSED)
{
  const struct elf_backend_data * bed;
  struct elf_obj_tdata * tdata;
  Elf_Internal_Phdr * phdr;
  unsigned int count;
  unsigned int i;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;

  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
        /* The Renesas tools expect p_paddr to be zero.  However,
           there is no other way to store the writable data in ROM for
           startup initialization.  So, we let the linker *think*
           we're using paddr and vaddr the "usual" way, but at the
           last minute we move the paddr into the vaddr (which is what
           the simulator uses) and zero out paddr.  Note that this
           does not affect the section headers, just the program
           headers.  We hope.  */
        phdr[i].p_vaddr = phdr[i].p_paddr;
        /* If we zero out p_paddr, then the LMA in the section table
           becomes wrong.  */
        /*phdr[i].p_paddr = 0;*/
      }

  return TRUE;
}

#define ELF_ARCH		bfd_arch_rx
#define ELF_MACHINE_CODE	EM_RX
#define ELF_MAXPAGESIZE		0x1000

#define TARGET_BIG_SYM		bfd_elf32_rx_be_vec
#define TARGET_BIG_NAME		"elf32-rx-be"

#define TARGET_LITTLE_SYM	bfd_elf32_rx_le_vec
#define TARGET_LITTLE_NAME	"elf32-rx-le"

#define elf_info_to_howto_rel			NULL
#define elf_info_to_howto			rx_info_to_howto_rela
#define elf_backend_object_p			rx_elf_object_p
#define elf_backend_relocate_section		rx_elf_relocate_section
#define elf_symbol_leading_char			('_')
#define elf_backend_can_gc_sections		1
#define elf_backend_modify_program_headers	elf32_rx_modify_program_headers

#define bfd_elf32_bfd_reloc_type_lookup		rx_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		rx_reloc_name_lookup
#define bfd_elf32_bfd_set_private_flags		rx_elf_set_private_flags
#define bfd_elf32_bfd_merge_private_bfd_data	rx_elf_merge_private_bfd_data
#define bfd_elf32_bfd_print_private_bfd_data	rx_elf_print_private_bfd_data
#define bfd_elf32_get_section_contents		rx_get_section_contents
#define bfd_elf32_set_section_contents		rx_set_section_contents
#define bfd_elf32_bfd_final_link		rx_final_link
#define bfd_elf32_bfd_relax_section		elf32_rx_relax_section_wrapper

#include "elf32-target.h"