/* Renesas RX specific support for 32-bit ELF.
   Copyright (C) 2008, 2009, 2010
   Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
#include "libiberty.h"

#define RX_OPCODE_BIG_ENDIAN 0

char * rx_get_reloc (long);
void rx_dump_symtab (bfd *, void *, void *);
#define RXREL(n,sz,bit,shift,complain,pcrel)                                  \
  HOWTO (R_RX_##n, shift, sz, bit, pcrel, 0, complain_overflow_ ## complain, \
         bfd_elf_generic_reloc, "R_RX_" #n, FALSE, 0, ~0, FALSE)
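
/* For reference, an entry such as RXREL (DIR32, 2, 32, 0, signed, FALSE)
   expands to
     HOWTO (R_RX_DIR32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
            bfd_elf_generic_reloc, "R_RX_DIR32", FALSE, 0, ~0, FALSE)
   i.e. a 32-bit, non-pc-relative relocation with signed overflow checking.  */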
/* Note that the relocations around 0x7f are internal to this file;
   feel free to move them as needed to avoid conflicts with published
   relocation numbers.  */
static reloc_howto_type rx_elf_howto_table [] =
{
  RXREL (NONE,         0,  0, 0, dont,     FALSE),
  RXREL (DIR32,        2, 32, 0, signed,   FALSE),
  RXREL (DIR24S,       2, 24, 0, signed,   FALSE),
  RXREL (DIR16,        1, 16, 0, dont,     FALSE),
  RXREL (DIR16U,       1, 16, 0, unsigned, FALSE),
  RXREL (DIR16S,       1, 16, 0, signed,   FALSE),
  RXREL (DIR8,         0,  8, 0, dont,     FALSE),
  RXREL (DIR8U,        0,  8, 0, unsigned, FALSE),
  RXREL (DIR8S,        0,  8, 0, signed,   FALSE),
  RXREL (DIR24S_PCREL, 2, 24, 0, signed,   TRUE),
  RXREL (DIR16S_PCREL, 1, 16, 0, signed,   TRUE),
  RXREL (DIR8S_PCREL,  0,  8, 0, signed,   TRUE),
  RXREL (DIR16UL,      1, 16, 2, unsigned, FALSE),
  RXREL (DIR16UW,      1, 16, 1, unsigned, FALSE),
  RXREL (DIR8UL,       0,  8, 2, unsigned, FALSE),
  RXREL (DIR8UW,       0,  8, 1, unsigned, FALSE),
  RXREL (DIR32_REV,    1, 16, 0, dont,     FALSE),
  RXREL (DIR16_REV,    1, 16, 0, dont,     FALSE),
  RXREL (DIR3U_PCREL,  0,  3, 0, dont,     TRUE),

  RXREL (RH_3_PCREL,   0,  3, 0, signed,   TRUE),
  RXREL (RH_16_OP,     1, 16, 0, signed,   FALSE),
  RXREL (RH_24_OP,     2, 24, 0, signed,   FALSE),
  RXREL (RH_32_OP,     2, 32, 0, signed,   FALSE),
  RXREL (RH_24_UNS,    2, 24, 0, unsigned, FALSE),
  RXREL (RH_8_NEG,     0,  8, 0, signed,   FALSE),
  RXREL (RH_16_NEG,    1, 16, 0, signed,   FALSE),
  RXREL (RH_24_NEG,    2, 24, 0, signed,   FALSE),
  RXREL (RH_32_NEG,    2, 32, 0, signed,   FALSE),
  RXREL (RH_DIFF,      2, 32, 0, signed,   FALSE),
  RXREL (RH_GPRELB,    1, 16, 0, unsigned, FALSE),
  RXREL (RH_GPRELW,    1, 16, 0, unsigned, FALSE),
  RXREL (RH_GPRELL,    1, 16, 0, unsigned, FALSE),
  RXREL (RH_RELAX,     0,  0, 0, dont,     FALSE),

  RXREL (ABS32,        2, 32, 0, dont,     FALSE),
  RXREL (ABS24S,       2, 24, 0, signed,   FALSE),
  RXREL (ABS16,        1, 16, 0, dont,     FALSE),
  RXREL (ABS16U,       1, 16, 0, unsigned, FALSE),
  RXREL (ABS16S,       1, 16, 0, signed,   FALSE),
  RXREL (ABS8,         0,  8, 0, dont,     FALSE),
  RXREL (ABS8U,        0,  8, 0, unsigned, FALSE),
  RXREL (ABS8S,        0,  8, 0, signed,   FALSE),
  RXREL (ABS24S_PCREL, 2, 24, 0, signed,   TRUE),
  RXREL (ABS16S_PCREL, 1, 16, 0, signed,   TRUE),
  RXREL (ABS8S_PCREL,  0,  8, 0, signed,   TRUE),
  RXREL (ABS16UL,      1, 16, 0, unsigned, FALSE),
  RXREL (ABS16UW,      1, 16, 0, unsigned, FALSE),
  RXREL (ABS8UL,       0,  8, 0, unsigned, FALSE),
  RXREL (ABS8UW,       0,  8, 0, unsigned, FALSE),
  RXREL (ABS32_REV,    2, 32, 0, dont,     FALSE),
  RXREL (ABS16_REV,    1, 16, 0, dont,     FALSE),

#define STACK_REL_P(x) ((x) <= R_RX_ABS16_REV && (x) >= R_RX_ABS32)

  /* These are internal.  */
  /* A 5-bit unsigned displacement to a B/W/L address, at bit position 8/12.  */
  /* ---- ---- 4--- 3210.  */
#define R_RX_RH_ABS5p8B 0x78
  RXREL (RH_ABS5p8B,   0,  0, 0, dont,     FALSE),
#define R_RX_RH_ABS5p8W 0x79
  RXREL (RH_ABS5p8W,   0,  0, 0, dont,     FALSE),
#define R_RX_RH_ABS5p8L 0x7a
  RXREL (RH_ABS5p8L,   0,  0, 0, dont,     FALSE),
  /* A 5-bit unsigned displacement to a B/W/L address, at bit position 5/12.  */
  /* ---- -432 1--- 0---.  */
#define R_RX_RH_ABS5p5B 0x7b
  RXREL (RH_ABS5p5B,   0,  0, 0, dont,     FALSE),
#define R_RX_RH_ABS5p5W 0x7c
  RXREL (RH_ABS5p5W,   0,  0, 0, dont,     FALSE),
#define R_RX_RH_ABS5p5L 0x7d
  RXREL (RH_ABS5p5L,   0,  0, 0, dont,     FALSE),
  /* A 4-bit unsigned immediate at bit position 8.  */
#define R_RX_RH_UIMM4p8 0x7e
  RXREL (RH_UIMM4p8,   0,  0, 0, dont,     FALSE),
  /* A 4-bit negative unsigned immediate at bit position 8.  */
#define R_RX_RH_UNEG4p8 0x7f
  RXREL (RH_UNEG4p8,   0,  0, 0, dont,     FALSE),
  /* End of internal relocs.  */

  RXREL (SYM,       2, 32, 0, dont, FALSE),
  RXREL (OPneg,     2, 32, 0, dont, FALSE),
  RXREL (OPadd,     2, 32, 0, dont, FALSE),
  RXREL (OPsub,     2, 32, 0, dont, FALSE),
  RXREL (OPmul,     2, 32, 0, dont, FALSE),
  RXREL (OPdiv,     2, 32, 0, dont, FALSE),
  RXREL (OPshla,    2, 32, 0, dont, FALSE),
  RXREL (OPshra,    2, 32, 0, dont, FALSE),
  RXREL (OPsctsize, 2, 32, 0, dont, FALSE),
  RXREL (OPscttop,  2, 32, 0, dont, FALSE),
  RXREL (OPand,     2, 32, 0, dont, FALSE),
  RXREL (OPor,      2, 32, 0, dont, FALSE),
  RXREL (OPxor,     2, 32, 0, dont, FALSE),
  RXREL (OPnot,     2, 32, 0, dont, FALSE),
  RXREL (OPmod,     2, 32, 0, dont, FALSE),
  RXREL (OPromtop,  2, 32, 0, dont, FALSE),
  RXREL (OPramtop,  2, 32, 0, dont, FALSE)
};
/* Map BFD reloc types to RX ELF reloc types.  */

struct rx_reloc_map
{
  bfd_reloc_code_real_type  bfd_reloc_val;
  unsigned int              rx_reloc_val;
};

static const struct rx_reloc_map rx_reloc_map [] =
{
  { BFD_RELOC_NONE,             R_RX_NONE },
  { BFD_RELOC_8,                R_RX_DIR8S },
  { BFD_RELOC_16,               R_RX_DIR16S },
  { BFD_RELOC_24,               R_RX_DIR24S },
  { BFD_RELOC_32,               R_RX_DIR32 },
  { BFD_RELOC_RX_16_OP,         R_RX_DIR16 },
  { BFD_RELOC_RX_DIR3U_PCREL,   R_RX_DIR3U_PCREL },
  { BFD_RELOC_8_PCREL,          R_RX_DIR8S_PCREL },
  { BFD_RELOC_16_PCREL,         R_RX_DIR16S_PCREL },
  { BFD_RELOC_24_PCREL,         R_RX_DIR24S_PCREL },
  { BFD_RELOC_RX_8U,            R_RX_DIR8U },
  { BFD_RELOC_RX_16U,           R_RX_DIR16U },
  { BFD_RELOC_RX_24U,           R_RX_RH_24_UNS },
  { BFD_RELOC_RX_NEG8,          R_RX_RH_8_NEG },
  { BFD_RELOC_RX_NEG16,         R_RX_RH_16_NEG },
  { BFD_RELOC_RX_NEG24,         R_RX_RH_24_NEG },
  { BFD_RELOC_RX_NEG32,         R_RX_RH_32_NEG },
  { BFD_RELOC_RX_DIFF,          R_RX_RH_DIFF },
  { BFD_RELOC_RX_GPRELB,        R_RX_RH_GPRELB },
  { BFD_RELOC_RX_GPRELW,        R_RX_RH_GPRELW },
  { BFD_RELOC_RX_GPRELL,        R_RX_RH_GPRELL },
  { BFD_RELOC_RX_RELAX,         R_RX_RH_RELAX },
  { BFD_RELOC_RX_SYM,           R_RX_SYM },
  { BFD_RELOC_RX_OP_SUBTRACT,   R_RX_OPsub },
  { BFD_RELOC_RX_ABS8,          R_RX_ABS8 },
  { BFD_RELOC_RX_ABS16,         R_RX_ABS16 },
  { BFD_RELOC_RX_ABS16_REV,     R_RX_ABS16_REV },
  { BFD_RELOC_RX_ABS32,         R_RX_ABS32 },
  { BFD_RELOC_RX_ABS32_REV,     R_RX_ABS32_REV },
  { BFD_RELOC_RX_ABS16UL,       R_RX_ABS16UL },
  { BFD_RELOC_RX_ABS16UW,       R_RX_ABS16UW },
  { BFD_RELOC_RX_ABS16U,        R_RX_ABS16U }
};

#define BIGE(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)
static reloc_howto_type *
rx_reloc_type_lookup (bfd * abfd ATTRIBUTE_UNUSED,
                      bfd_reloc_code_real_type code)
{
  unsigned int i;

  if (code == BFD_RELOC_RX_32_OP)
    return rx_elf_howto_table + R_RX_DIR32;

  for (i = ARRAY_SIZE (rx_reloc_map); --i;)
    if (rx_reloc_map [i].bfd_reloc_val == code)
      return rx_elf_howto_table + rx_reloc_map [i].rx_reloc_val;

  return NULL;
}

static reloc_howto_type *
rx_reloc_name_lookup (bfd * abfd ATTRIBUTE_UNUSED, const char * r_name)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (rx_elf_howto_table); i++)
    if (rx_elf_howto_table[i].name != NULL
        && strcasecmp (rx_elf_howto_table[i].name, r_name) == 0)
      return rx_elf_howto_table + i;

  return NULL;
}
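
/* For example, rx_reloc_type_lookup (abfd, BFD_RELOC_32) returns the
   R_RX_DIR32 howto through the map above, and
   rx_reloc_name_lookup (abfd, "R_RX_DIR32") finds the same entry by its
   (case-insensitive) name.  */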
/* Set the howto pointer for an RX ELF reloc.  */

static void
rx_info_to_howto_rela (bfd *               abfd ATTRIBUTE_UNUSED,
                       arelent *           cache_ptr,
                       Elf_Internal_Rela * dst)
{
  unsigned int r_type;

  r_type = ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < (unsigned int) R_RX_max);
  cache_ptr->howto = rx_elf_howto_table + r_type;
}
static bfd_vma
get_symbol_value (const char *            name,
                  bfd_reloc_status_type * status,
                  struct bfd_link_info *  info,
                  bfd *                   input_bfd,
                  asection *              input_section,
                  int                     offset)
{
  bfd_vma value = 0;
  struct bfd_link_hash_entry * h;

  h = bfd_link_hash_lookup (info->hash, name, FALSE, FALSE, TRUE);

  if (h == NULL
      || (h->type != bfd_link_hash_defined
          && h->type != bfd_link_hash_defweak))
    * status = info->callbacks->undefined_symbol
      (info, name, input_bfd, input_section, offset, TRUE);
  else
    value = (h->u.def.value
             + h->u.def.section->output_section->vma
             + h->u.def.section->output_offset);

  return value;
}
static bfd_vma
get_gp (bfd_reloc_status_type * status,
        struct bfd_link_info *  info,
        bfd *                   abfd,
        asection *              sec,
        int                     offset)
{
  static bfd_boolean cached = FALSE;
  static bfd_vma     cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("__gp", status, info, abfd, sec, offset);
      cached = TRUE;
    }
  return cached_value;
}

static bfd_vma
get_romstart (bfd_reloc_status_type * status,
              struct bfd_link_info *  info,
              bfd *                   abfd,
              asection *              sec,
              int                     offset)
{
  static bfd_boolean cached = FALSE;
  static bfd_vma     cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("_start", status, info, abfd, sec, offset);
      cached = TRUE;
    }
  return cached_value;
}

static bfd_vma
get_ramstart (bfd_reloc_status_type * status,
              struct bfd_link_info *  info,
              bfd *                   abfd,
              asection *              sec,
              int                     offset)
{
  static bfd_boolean cached = FALSE;
  static bfd_vma     cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("__datastart", status, info, abfd, sec, offset);
      cached = TRUE;
    }
  return cached_value;
}
#define NUM_STACK_ENTRIES 16
static int32_t rx_stack [ NUM_STACK_ENTRIES ];
static unsigned int rx_stack_top;

#define RX_STACK_PUSH(val)                      \
  do                                            \
    {                                           \
      if (rx_stack_top < NUM_STACK_ENTRIES)     \
        rx_stack [rx_stack_top ++] = (val);     \
      else                                      \
        r = bfd_reloc_dangerous;                \
    }                                           \
  while (0)

#define RX_STACK_POP(dest)                      \
  do                                            \
    {                                           \
      if (rx_stack_top > 0)                     \
        (dest) = rx_stack [-- rx_stack_top];    \
      else                                      \
        (dest) = 0, r = bfd_reloc_dangerous;    \
    }                                           \
  while (0)
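
/* As an illustration (the symbol names here are hypothetical), a link-time
   expression such as "_end - _start" reaches us as a run of relocations at
   one offset:

     R_RX_SYM (_end)      push the value of _end
     R_RX_SYM (_start)    push the value of _start
     R_RX_OPsub           pop two values, push their difference
     R_RX_ABS32           pop the result into the 32-bit field

   rx_elf_relocate_section evaluates such runs with the small stack above;
   pushing onto a full stack or popping an empty one yields
   bfd_reloc_dangerous.  */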
/* Relocate an RX ELF section.
   There is some attempt to make this function usable for many architectures,
   both USE_REL and USE_RELA ['twould be nice if such a critter existed],
   if only to serve as a learning tool.

   The RELOCATE_SECTION function is called by the new ELF backend linker
   to handle the relocations for a section.

   The relocs are always passed as Rela structures; if the section
   actually uses Rel structures, the r_addend field will always be
   zero.

   This function is responsible for adjusting the section contents as
   necessary, and (if using Rela relocs and generating a relocatable
   output file) adjusting the reloc addend as necessary.

   This function does not have to worry about setting the reloc
   address or the reloc symbol index.

   LOCAL_SYMS is a pointer to the swapped in local symbols.

   LOCAL_SECTIONS is an array giving the section in the input file
   corresponding to the st_shndx field of each local symbol.

   The global hash table entry for the global symbols can be found
   via elf_sym_hashes (input_bfd).

   When generating relocatable output, this function must handle
   STB_LOCAL/STT_SECTION symbols specially.  The output symbol is
   going to be the section symbol corresponding to the output
   section, which means that the addend must be adjusted
   accordingly.  */
static bfd_boolean
rx_elf_relocate_section
    (bfd *                   output_bfd,
     struct bfd_link_info *  info,
     bfd *                   input_bfd,
     asection *              input_section,
     bfd_byte *              contents,
     Elf_Internal_Rela *     relocs,
     Elf_Internal_Sym *      local_syms,
     asection **             local_sections)
{
  Elf_Internal_Shdr *           symtab_hdr;
  struct elf_link_hash_entry ** sym_hashes;
  Elf_Internal_Rela *           rel;
  Elf_Internal_Rela *           relend;

  symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = elf_sym_hashes (input_bfd);
  relend     = relocs + input_section->reloc_count;
  for (rel = relocs; rel < relend; rel ++)
    {
      reloc_howto_type *           howto;
      unsigned long                r_symndx;
      Elf_Internal_Sym *           sym;
      asection *                   sec;
      struct elf_link_hash_entry * h;
      bfd_vma                      relocation;
      bfd_reloc_status_type        r;
      const char *                 name = NULL;
      bfd_boolean                  unresolved_reloc = TRUE;
      int                          r_type;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_symndx = ELF32_R_SYM (rel->r_info);

      howto = rx_elf_howto_table + ELF32_R_TYPE (rel->r_info);
484 if (r_symndx
< symtab_hdr
->sh_info
)
486 sym
= local_syms
+ r_symndx
;
487 sec
= local_sections
[r_symndx
];
488 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, & sec
, rel
);
490 name
= bfd_elf_string_from_elf_section
491 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
);
492 name
= (sym
->st_name
== 0) ? bfd_section_name (input_bfd
, sec
) : name
;
498 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
499 r_symndx
, symtab_hdr
, sym_hashes
, h
,
500 sec
, relocation
, unresolved_reloc
,
503 name
= h
->root
.root
.string
;
506 if (sec
!= NULL
&& elf_discarded_section (sec
))
507 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
508 rel
, relend
, howto
, contents
);
510 if (info
->relocatable
)
          /* This is a relocatable link.  We don't have to change
             anything, unless the reloc is against a section symbol,
             in which case we have to adjust according to where the
             section symbol winds up in the output section.  */
516 if (sym
!= NULL
&& ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
517 rel
->r_addend
+= sec
->output_offset
;
521 if (h
!= NULL
&& h
->root
.type
== bfd_link_hash_undefweak
)
        /* If the symbol is undefined and weak
           then the relocation resolves to zero.  */
527 if (howto
->pc_relative
)
529 relocation
-= (input_section
->output_section
->vma
530 + input_section
->output_offset
532 if (r_type
!= R_RX_RH_3_PCREL
533 && r_type
!= R_RX_DIR3U_PCREL
)
537 relocation
+= rel
->r_addend
;
#define RANGE(a,b) if (a > (long) relocation || (long) relocation > b) r = bfd_reloc_overflow
#define ALIGN(m)   if (relocation & m) r = bfd_reloc_other;
#define OP(i)      (contents[rel->r_offset + (i)])
#define WARN_REDHAT(type) \
      _bfd_error_handler (_("%B:%A: Warning: deprecated Red Hat reloc " type " detected against: %s."), \
      input_bfd, input_section, name)

      /* Opcode relocs are always big endian.  Data relocs are bi-endian.  */
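
      /* For instance, a 16-bit operand value V is stored either
         most-significant byte first (OP (0) = V >> 8; OP (1) = V;) or
         least-significant byte first (OP (0) = V; OP (1) = V >> 8;),
         depending on the RX_OPCODE_BIG_ENDIAN / BIGE tests below.  */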
558 case R_RX_RH_3_PCREL
:
559 WARN_REDHAT ("RX_RH_3_PCREL");
562 OP (0) |= relocation
& 0x07;
566 WARN_REDHAT ("RX_RH_8_NEG");
567 relocation
= - relocation
;
568 case R_RX_DIR8S_PCREL
:
584 WARN_REDHAT ("RX_RH_16_NEG");
585 relocation
= - relocation
;
586 case R_RX_DIR16S_PCREL
:
587 RANGE (-32768, 32767);
588 #if RX_OPCODE_BIG_ENDIAN
591 OP (1) = relocation
>> 8;
596 WARN_REDHAT ("RX_RH_16_OP");
597 RANGE (-32768, 32767);
598 #if RX_OPCODE_BIG_ENDIAN
600 OP (0) = relocation
>> 8;
603 OP (1) = relocation
>> 8;
608 RANGE (-32768, 65535);
609 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
612 OP (0) = relocation
>> 8;
617 OP (1) = relocation
>> 8;
623 #if RX_OPCODE_BIG_ENDIAN
625 OP (0) = relocation
>> 8;
628 OP (1) = relocation
>> 8;
633 RANGE (-32768, 65536);
634 #if RX_OPCODE_BIG_ENDIAN
636 OP (0) = relocation
>> 8;
639 OP (1) = relocation
>> 8;
644 RANGE (-32768, 65536);
645 #if RX_OPCODE_BIG_ENDIAN
647 OP (1) = relocation
>> 8;
650 OP (0) = relocation
>> 8;
654 case R_RX_DIR3U_PCREL
:
657 OP (0) |= relocation
& 0x07;
661 WARN_REDHAT ("RX_RH_24_NEG");
662 relocation
= - relocation
;
663 case R_RX_DIR24S_PCREL
:
664 RANGE (-0x800000, 0x7fffff);
665 #if RX_OPCODE_BIG_ENDIAN
667 OP (1) = relocation
>> 8;
668 OP (0) = relocation
>> 16;
671 OP (1) = relocation
>> 8;
672 OP (2) = relocation
>> 16;
677 WARN_REDHAT ("RX_RH_24_OP");
678 RANGE (-0x800000, 0x7fffff);
679 #if RX_OPCODE_BIG_ENDIAN
681 OP (1) = relocation
>> 8;
682 OP (0) = relocation
>> 16;
685 OP (1) = relocation
>> 8;
686 OP (2) = relocation
>> 16;
691 RANGE (-0x800000, 0x7fffff);
692 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
695 OP (1) = relocation
>> 8;
696 OP (0) = relocation
>> 16;
701 OP (1) = relocation
>> 8;
702 OP (2) = relocation
>> 16;
707 WARN_REDHAT ("RX_RH_24_UNS");
709 #if RX_OPCODE_BIG_ENDIAN
711 OP (1) = relocation
>> 8;
712 OP (0) = relocation
>> 16;
715 OP (1) = relocation
>> 8;
716 OP (2) = relocation
>> 16;
721 WARN_REDHAT ("RX_RH_32_NEG");
722 relocation
= - relocation
;
723 #if RX_OPCODE_BIG_ENDIAN
725 OP (2) = relocation
>> 8;
726 OP (1) = relocation
>> 16;
727 OP (0) = relocation
>> 24;
730 OP (1) = relocation
>> 8;
731 OP (2) = relocation
>> 16;
732 OP (3) = relocation
>> 24;
737 WARN_REDHAT ("RX_RH_32_OP");
738 #if RX_OPCODE_BIG_ENDIAN
740 OP (2) = relocation
>> 8;
741 OP (1) = relocation
>> 16;
742 OP (0) = relocation
>> 24;
745 OP (1) = relocation
>> 8;
746 OP (2) = relocation
>> 16;
747 OP (3) = relocation
>> 24;
752 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
755 OP (2) = relocation
>> 8;
756 OP (1) = relocation
>> 16;
757 OP (0) = relocation
>> 24;
762 OP (1) = relocation
>> 8;
763 OP (2) = relocation
>> 16;
764 OP (3) = relocation
>> 24;
769 if (BIGE (output_bfd
))
772 OP (1) = relocation
>> 8;
773 OP (2) = relocation
>> 16;
774 OP (3) = relocation
>> 24;
779 OP (2) = relocation
>> 8;
780 OP (1) = relocation
>> 16;
781 OP (0) = relocation
>> 24;
788 WARN_REDHAT ("RX_RH_DIFF");
789 val
= bfd_get_32 (output_bfd
, & OP (0));
791 bfd_put_32 (output_bfd
, val
, & OP (0));
796 WARN_REDHAT ("RX_RH_GPRELB");
797 relocation
-= get_gp (&r
, info
, input_bfd
, input_section
, rel
->r_offset
);
799 #if RX_OPCODE_BIG_ENDIAN
801 OP (0) = relocation
>> 8;
804 OP (1) = relocation
>> 8;
809 WARN_REDHAT ("RX_RH_GPRELW");
810 relocation
-= get_gp (&r
, info
, input_bfd
, input_section
, rel
->r_offset
);
814 #if RX_OPCODE_BIG_ENDIAN
816 OP (0) = relocation
>> 8;
819 OP (1) = relocation
>> 8;
824 WARN_REDHAT ("RX_RH_GPRELL");
825 relocation
-= get_gp (&r
, info
, input_bfd
, input_section
, rel
->r_offset
);
829 #if RX_OPCODE_BIG_ENDIAN
831 OP (0) = relocation
>> 8;
834 OP (1) = relocation
>> 8;
838 /* Internal relocations just for relaxation: */
839 case R_RX_RH_ABS5p5B
:
840 RX_STACK_POP (relocation
);
843 OP (0) |= relocation
>> 2;
845 OP (1) |= (relocation
<< 6) & 0x80;
846 OP (1) |= (relocation
<< 3) & 0x08;
849 case R_RX_RH_ABS5p5W
:
850 RX_STACK_POP (relocation
);
855 OP (0) |= relocation
>> 2;
857 OP (1) |= (relocation
<< 6) & 0x80;
858 OP (1) |= (relocation
<< 3) & 0x08;
861 case R_RX_RH_ABS5p5L
:
862 RX_STACK_POP (relocation
);
867 OP (0) |= relocation
>> 2;
869 OP (1) |= (relocation
<< 6) & 0x80;
870 OP (1) |= (relocation
<< 3) & 0x08;
873 case R_RX_RH_ABS5p8B
:
874 RX_STACK_POP (relocation
);
877 OP (0) |= (relocation
<< 3) & 0x80;
878 OP (0) |= relocation
& 0x0f;
881 case R_RX_RH_ABS5p8W
:
882 RX_STACK_POP (relocation
);
887 OP (0) |= (relocation
<< 3) & 0x80;
888 OP (0) |= relocation
& 0x0f;
891 case R_RX_RH_ABS5p8L
:
892 RX_STACK_POP (relocation
);
897 OP (0) |= (relocation
<< 3) & 0x80;
898 OP (0) |= relocation
& 0x0f;
901 case R_RX_RH_UIMM4p8
:
904 OP (0) |= relocation
<< 4;
907 case R_RX_RH_UNEG4p8
:
910 OP (0) |= (-relocation
) << 4;
913 /* Complex reloc handling: */
916 RX_STACK_POP (relocation
);
917 #if RX_OPCODE_BIG_ENDIAN
919 OP (2) = relocation
>> 8;
920 OP (1) = relocation
>> 16;
921 OP (0) = relocation
>> 24;
924 OP (1) = relocation
>> 8;
925 OP (2) = relocation
>> 16;
926 OP (3) = relocation
>> 24;
931 RX_STACK_POP (relocation
);
932 #if RX_OPCODE_BIG_ENDIAN
934 OP (1) = relocation
>> 8;
935 OP (2) = relocation
>> 16;
936 OP (3) = relocation
>> 24;
939 OP (2) = relocation
>> 8;
940 OP (1) = relocation
>> 16;
941 OP (0) = relocation
>> 24;
945 case R_RX_ABS24S_PCREL
:
947 RX_STACK_POP (relocation
);
948 RANGE (-0x800000, 0x7fffff);
949 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
952 OP (1) = relocation
>> 8;
953 OP (0) = relocation
>> 16;
958 OP (1) = relocation
>> 8;
959 OP (2) = relocation
>> 16;
964 RX_STACK_POP (relocation
);
965 RANGE (-32768, 65535);
966 #if RX_OPCODE_BIG_ENDIAN
968 OP (0) = relocation
>> 8;
971 OP (1) = relocation
>> 8;
976 RX_STACK_POP (relocation
);
977 RANGE (-32768, 65535);
978 #if RX_OPCODE_BIG_ENDIAN
980 OP (1) = relocation
>> 8;
983 OP (0) = relocation
>> 8;
987 case R_RX_ABS16S_PCREL
:
989 RX_STACK_POP (relocation
);
990 RANGE (-32768, 32767);
991 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
994 OP (0) = relocation
>> 8;
999 OP (1) = relocation
>> 8;
1004 RX_STACK_POP (relocation
);
1006 #if RX_OPCODE_BIG_ENDIAN
1007 OP (1) = relocation
;
1008 OP (0) = relocation
>> 8;
1010 OP (0) = relocation
;
1011 OP (1) = relocation
>> 8;
1016 RX_STACK_POP (relocation
);
1019 #if RX_OPCODE_BIG_ENDIAN
1020 OP (1) = relocation
;
1021 OP (0) = relocation
>> 8;
1023 OP (0) = relocation
;
1024 OP (1) = relocation
>> 8;
1029 RX_STACK_POP (relocation
);
1032 #if RX_OPCODE_BIG_ENDIAN
1033 OP (1) = relocation
;
1034 OP (0) = relocation
>> 8;
1036 OP (0) = relocation
;
1037 OP (1) = relocation
>> 8;
1042 RX_STACK_POP (relocation
);
1044 OP (0) = relocation
;
1048 RX_STACK_POP (relocation
);
1050 OP (0) = relocation
;
1054 RX_STACK_POP (relocation
);
1057 OP (0) = relocation
;
1061 RX_STACK_POP (relocation
);
1064 OP (0) = relocation
;
1067 case R_RX_ABS8S_PCREL
:
1069 RX_STACK_POP (relocation
);
1071 OP (0) = relocation
;
1075 if (r_symndx
< symtab_hdr
->sh_info
)
1076 RX_STACK_PUSH (sec
->output_section
->vma
1077 + sec
->output_offset
1082 && (h
->root
.type
== bfd_link_hash_defined
1083 || h
->root
.type
== bfd_link_hash_defweak
))
1084 RX_STACK_PUSH (h
->root
.u
.def
.value
1085 + sec
->output_section
->vma
1086 + sec
->output_offset
);
1088 _bfd_error_handler (_("Warning: RX_SYM reloc with an unknown symbol"));
1098 RX_STACK_PUSH (tmp
);
1106 RX_STACK_POP (tmp1
);
1107 RX_STACK_POP (tmp2
);
1109 RX_STACK_PUSH (tmp1
);
1117 RX_STACK_POP (tmp1
);
1118 RX_STACK_POP (tmp2
);
1120 RX_STACK_PUSH (tmp2
);
1128 RX_STACK_POP (tmp1
);
1129 RX_STACK_POP (tmp2
);
1131 RX_STACK_PUSH (tmp1
);
1139 RX_STACK_POP (tmp1
);
1140 RX_STACK_POP (tmp2
);
1142 RX_STACK_PUSH (tmp1
);
1150 RX_STACK_POP (tmp1
);
1151 RX_STACK_POP (tmp2
);
1153 RX_STACK_PUSH (tmp1
);
1161 RX_STACK_POP (tmp1
);
1162 RX_STACK_POP (tmp2
);
1164 RX_STACK_PUSH (tmp1
);
1168 case R_RX_OPsctsize
:
1169 RX_STACK_PUSH (input_section
->size
);
1173 RX_STACK_PUSH (input_section
->output_section
->vma
);
1180 RX_STACK_POP (tmp1
);
1181 RX_STACK_POP (tmp2
);
1183 RX_STACK_PUSH (tmp1
);
1191 RX_STACK_POP (tmp1
);
1192 RX_STACK_POP (tmp2
);
1194 RX_STACK_PUSH (tmp1
);
1202 RX_STACK_POP (tmp1
);
1203 RX_STACK_POP (tmp2
);
1205 RX_STACK_PUSH (tmp1
);
1215 RX_STACK_PUSH (tmp
);
1223 RX_STACK_POP (tmp1
);
1224 RX_STACK_POP (tmp2
);
1226 RX_STACK_PUSH (tmp1
);
1231 RX_STACK_PUSH (get_romstart (&r
, info
, input_bfd
, input_section
, rel
->r_offset
));
1235 RX_STACK_PUSH (get_ramstart (&r
, info
, input_bfd
, input_section
, rel
->r_offset
));
1239 r
= bfd_reloc_notsupported
;
1243 if (r
!= bfd_reloc_ok
)
1245 const char * msg
= NULL
;
1249 case bfd_reloc_overflow
:
            /* Catch the case of a missing function declaration
               and emit a more helpful error message.  */
1252 if (r_type
== R_RX_DIR24S_PCREL
)
1253 msg
= _("%B(%A): error: call to undefined function '%s'");
1255 r
= info
->callbacks
->reloc_overflow
1256 (info
, (h
? &h
->root
: NULL
), name
, howto
->name
, (bfd_vma
) 0,
1257 input_bfd
, input_section
, rel
->r_offset
);
1260 case bfd_reloc_undefined
:
1261 r
= info
->callbacks
->undefined_symbol
1262 (info
, name
, input_bfd
, input_section
, rel
->r_offset
,
1266 case bfd_reloc_other
:
1267 msg
= _("%B(%A): warning: unaligned access to symbol '%s' in the small data area");
1270 case bfd_reloc_outofrange
:
1271 msg
= _("%B(%A): internal error: out of range error");
1274 case bfd_reloc_notsupported
:
1275 msg
= _("%B(%A): internal error: unsupported relocation error");
1278 case bfd_reloc_dangerous
:
1279 msg
= _("%B(%A): internal error: dangerous relocation");
1283 msg
= _("%B(%A): internal error: unknown error");
1288 _bfd_error_handler (msg
, input_bfd
, input_section
, name
);
/* Relaxation Support.  */

/* Progression of relocations from largest operand size to smallest
   operand size.  */

static int
next_smaller_reloc (int r)
{
  switch (r)
    {
    case R_RX_DIR32:            return R_RX_DIR24S;
    case R_RX_DIR24S:           return R_RX_DIR16S;
    case R_RX_DIR16S:           return R_RX_DIR8S;
    case R_RX_DIR8S:            return R_RX_NONE;

    case R_RX_DIR16:            return R_RX_DIR8;
    case R_RX_DIR8:             return R_RX_NONE;

    case R_RX_DIR16U:           return R_RX_DIR8U;
    case R_RX_DIR8U:            return R_RX_NONE;

    case R_RX_DIR24S_PCREL:     return R_RX_DIR16S_PCREL;
    case R_RX_DIR16S_PCREL:     return R_RX_DIR8S_PCREL;
    case R_RX_DIR8S_PCREL:      return R_RX_DIR3U_PCREL;

    case R_RX_DIR16UL:          return R_RX_DIR8UL;
    case R_RX_DIR8UL:           return R_RX_NONE;
    case R_RX_DIR16UW:          return R_RX_DIR8UW;
    case R_RX_DIR8UW:           return R_RX_NONE;

    case R_RX_RH_32_OP:         return R_RX_RH_24_OP;
    case R_RX_RH_24_OP:         return R_RX_RH_16_OP;
    case R_RX_RH_16_OP:         return R_RX_DIR8;

    case R_RX_ABS32:            return R_RX_ABS24S;
    case R_RX_ABS24S:           return R_RX_ABS16S;
    case R_RX_ABS16:            return R_RX_ABS8;
    case R_RX_ABS16U:           return R_RX_ABS8U;
    case R_RX_ABS16S:           return R_RX_ABS8S;
    case R_RX_ABS8:             return R_RX_NONE;
    case R_RX_ABS8U:            return R_RX_NONE;
    case R_RX_ABS8S:            return R_RX_NONE;
    case R_RX_ABS24S_PCREL:     return R_RX_ABS16S_PCREL;
    case R_RX_ABS16S_PCREL:     return R_RX_ABS8S_PCREL;
    case R_RX_ABS8S_PCREL:      return R_RX_NONE;
    case R_RX_ABS16UL:          return R_RX_ABS8UL;
    case R_RX_ABS16UW:          return R_RX_ABS8UW;
    case R_RX_ABS8UL:           return R_RX_NONE;
    case R_RX_ABS8UW:           return R_RX_NONE;
    }
  return r;
}
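
/* For example, a relaxable 32-bit operand steps down the chain
   R_RX_DIR32 -> R_RX_DIR24S -> R_RX_DIR16S -> R_RX_DIR8S -> R_RX_NONE,
   one step for each byte the relaxation pass manages to remove.  */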
/* Delete some bytes from a section while relaxing.  */

static bfd_boolean
elf32_rx_relax_delete_bytes (bfd *abfd, asection *sec, bfd_vma addr, int count,
                             Elf_Internal_Rela *alignment_rel, int force_snip)
{
  Elf_Internal_Shdr * symtab_hdr;
  unsigned int sec_shndx;
  bfd_byte * contents;
  Elf_Internal_Rela * irel;
  Elf_Internal_Rela * irelend;
  Elf_Internal_Sym * isym;
  Elf_Internal_Sym * isymend;
  bfd_vma toaddr;
  unsigned int symcount;
  struct elf_link_hash_entry ** sym_hashes;
  struct elf_link_hash_entry ** end_hashes;

  sec_shndx = _bfd_elf_section_from_bfd_section (abfd, sec);

  contents = elf_section_data (sec)->this_hdr.contents;

  /* The deletion must stop at the next alignment boundary, if
     ALIGNMENT_REL is non-NULL.  */
  toaddr = sec->size;
  if (alignment_rel)
    toaddr = alignment_rel->r_offset;
1382 irel
= elf_section_data (sec
)->relocs
;
1383 irelend
= irel
+ sec
->reloc_count
;
1385 /* Actually delete the bytes. */
1386 memmove (contents
+ addr
, contents
+ addr
+ count
,
1387 (size_t) (toaddr
- addr
- count
));
  /* If we don't have an alignment marker to worry about, we can just
     shrink the section.  Otherwise, we have to fill in the newly
     created gap with NOP insns (0x03).  */
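
  /* For example, deleting one byte ahead of an alignment boundary shifts
     everything between ADDR and that boundary down by one byte and leaves
     a single 0x03 (NOP) just below the boundary, so the code that follows
     the boundary keeps its alignment.  */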
1395 memset (contents
+ toaddr
- count
, 0x03, count
);
1397 /* Adjust all the relocs. */
1398 for (irel
= elf_section_data (sec
)->relocs
; irel
< irelend
; irel
++)
1400 /* Get the new reloc address. */
1401 if (irel
->r_offset
> addr
1402 && (irel
->r_offset
< toaddr
1403 || (force_snip
&& irel
->r_offset
== toaddr
)))
1404 irel
->r_offset
-= count
;
      /* If we see an ALIGN marker at the end of the gap, we move it
         to the beginning of the gap, since marking these gaps is what
         they're for.  */
1409 if (irel
->r_offset
== toaddr
1410 && ELF32_R_TYPE (irel
->r_info
) == R_RX_RH_RELAX
1411 && irel
->r_addend
& RX_RELAXA_ALIGN
)
1412 irel
->r_offset
-= count
;
1415 /* Adjust the local symbols defined in this section. */
1416 symtab_hdr
= &elf_tdata (abfd
)->symtab_hdr
;
1417 isym
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
1418 isymend
= isym
+ symtab_hdr
->sh_info
;
1420 for (; isym
< isymend
; isym
++)
1422 /* If the symbol is in the range of memory we just moved, we
1423 have to adjust its value. */
1424 if (isym
->st_shndx
== sec_shndx
1425 && isym
->st_value
> addr
1426 && isym
->st_value
< toaddr
)
1427 isym
->st_value
-= count
;
      /* If the symbol *spans* the bytes we just deleted (i.e. its
         *end* is in the moved bytes but its *start* isn't), then we
         must adjust its size.  */
1432 if (isym
->st_shndx
== sec_shndx
1433 && isym
->st_value
< addr
1434 && isym
->st_value
+ isym
->st_size
> addr
1435 && isym
->st_value
+ isym
->st_size
< toaddr
)
1436 isym
->st_size
-= count
;
1439 /* Now adjust the global symbols defined in this section. */
1440 symcount
= (symtab_hdr
->sh_size
/ sizeof (Elf32_External_Sym
)
1441 - symtab_hdr
->sh_info
);
1442 sym_hashes
= elf_sym_hashes (abfd
);
1443 end_hashes
= sym_hashes
+ symcount
;
1445 for (; sym_hashes
< end_hashes
; sym_hashes
++)
1447 struct elf_link_hash_entry
*sym_hash
= *sym_hashes
;
1449 if ((sym_hash
->root
.type
== bfd_link_hash_defined
1450 || sym_hash
->root
.type
== bfd_link_hash_defweak
)
1451 && sym_hash
->root
.u
.def
.section
== sec
)
1453 /* As above, adjust the value if needed. */
1454 if (sym_hash
->root
.u
.def
.value
> addr
1455 && sym_hash
->root
.u
.def
.value
< toaddr
)
1456 sym_hash
->root
.u
.def
.value
-= count
;
1458 /* As above, adjust the size if needed. */
1459 if (sym_hash
->root
.u
.def
.value
< addr
1460 && sym_hash
->root
.u
.def
.value
+ sym_hash
->size
> addr
1461 && sym_hash
->root
.u
.def
.value
+ sym_hash
->size
< toaddr
)
1462 sym_hash
->size
-= count
;
/* Used to sort relocs by address.  If relocs have the same address,
   we maintain their relative order, except that R_RX_RH_RELAX
   alignment relocs must be the first reloc for any given address.  */

static void
reloc_bubblesort (Elf_Internal_Rela * r, int count)
{
  int i;
  bfd_boolean swappit;

  /* This is almost a classic bubblesort.  It's the slowest sort, but
     we're taking advantage of the fact that the relocations are
     mostly in order already (the assembler emits them that way) and
     we need relocs with the same address to remain in the same
     relative order.  */
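
  /* For example, if an R_RX_RH_RELAX alignment reloc and an operand reloc
     share the same r_offset, the loop below leaves the alignment reloc
     first, so the relaxation pass sees the alignment constraint before it
     touches the operand.  */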
1489 for (i
= 0; i
< count
- 1; i
++)
1491 if (r
[i
].r_offset
> r
[i
+ 1].r_offset
)
1493 else if (r
[i
].r_offset
< r
[i
+ 1].r_offset
)
1495 else if (ELF32_R_TYPE (r
[i
+ 1].r_info
) == R_RX_RH_RELAX
1496 && (r
[i
+ 1].r_addend
& RX_RELAXA_ALIGN
))
1498 else if (ELF32_R_TYPE (r
[i
+ 1].r_info
) == R_RX_RH_RELAX
1499 && (r
[i
+ 1].r_addend
& RX_RELAXA_ELIGN
)
1500 && !(ELF32_R_TYPE (r
[i
].r_info
) == R_RX_RH_RELAX
1501 && (r
[i
].r_addend
& RX_RELAXA_ALIGN
)))
1508 Elf_Internal_Rela tmp
;
              /* If we do move a reloc back, re-scan to see if it
                 needs to be moved even further back.  This avoids
                 most of the O(n^2) behavior for our cases.  */
#define OFFSET_FOR_RELOC(rel, lrel, scale) \
  rx_offset_for_reloc (abfd, rel + 1, symtab_hdr, shndx_buf, intsyms, \
                       lrel, abfd, sec, link_info, scale)
1530 rx_offset_for_reloc (bfd
* abfd
,
1531 Elf_Internal_Rela
* rel
,
1532 Elf_Internal_Shdr
* symtab_hdr
,
1533 Elf_External_Sym_Shndx
* shndx_buf ATTRIBUTE_UNUSED
,
1534 Elf_Internal_Sym
* intsyms
,
1535 Elf_Internal_Rela
** lrel
,
1537 asection
* input_section
,
1538 struct bfd_link_info
* info
,
1542 bfd_reloc_status_type r
;
  /* REL is the first of 1..N relocations.  We compute the symbol
     value for each relocation, then combine them if needed.  LREL
     gets a pointer to the last relocation used.  */
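
  /* For instance, a single operand may arrive as the sequence
     R_RX_SYM, R_RX_SYM, R_RX_OPadd, R_RX_ABS32: the two symbol values are
     pushed, the operator reloc folds them, and the final ABS-style reloc
     pops the combined value into SYMVAL, leaving *LREL pointing at that
     last reloc.  */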
1553 /* Get the value of the symbol referred to by the reloc. */
1554 if (ELF32_R_SYM (rel
->r_info
) < symtab_hdr
->sh_info
)
1556 /* A local symbol. */
1557 Elf_Internal_Sym
*isym
;
1560 isym
= intsyms
+ ELF32_R_SYM (rel
->r_info
);
1562 if (isym
->st_shndx
== SHN_UNDEF
)
1563 ssec
= bfd_und_section_ptr
;
1564 else if (isym
->st_shndx
== SHN_ABS
)
1565 ssec
= bfd_abs_section_ptr
;
1566 else if (isym
->st_shndx
== SHN_COMMON
)
1567 ssec
= bfd_com_section_ptr
;
1569 ssec
= bfd_section_from_elf_index (abfd
,
1572 /* Initial symbol value. */
1573 symval
= isym
->st_value
;
      /* GAS may have made this symbol relative to a section, in
         which case, we have to add the addend to find the
         symbol.  */
1578 if (ELF_ST_TYPE (isym
->st_info
) == STT_SECTION
)
1579 symval
+= rel
->r_addend
;
1583 if ((ssec
->flags
& SEC_MERGE
)
1584 && ssec
->sec_info_type
== ELF_INFO_TYPE_MERGE
)
1585 symval
= _bfd_merged_section_offset (abfd
, & ssec
,
1586 elf_section_data (ssec
)->sec_info
,
1590 /* Now make the offset relative to where the linker is putting it. */
1593 ssec
->output_section
->vma
+ ssec
->output_offset
;
1595 symval
+= rel
->r_addend
;
1600 struct elf_link_hash_entry
* h
;
1602 /* An external symbol. */
1603 indx
= ELF32_R_SYM (rel
->r_info
) - symtab_hdr
->sh_info
;
1604 h
= elf_sym_hashes (abfd
)[indx
];
1605 BFD_ASSERT (h
!= NULL
);
1607 if (h
->root
.type
!= bfd_link_hash_defined
1608 && h
->root
.type
!= bfd_link_hash_defweak
)
          /* This appears to be a reference to an undefined
             symbol.  Just ignore it--it will be caught by the
             regular reloc processing.  */
1618 symval
= (h
->root
.u
.def
.value
1619 + h
->root
.u
.def
.section
->output_section
->vma
1620 + h
->root
.u
.def
.section
->output_offset
);
1622 symval
+= rel
->r_addend
;
1625 switch (ELF32_R_TYPE (rel
->r_info
))
1628 RX_STACK_PUSH (symval
);
1632 RX_STACK_POP (tmp1
);
1634 RX_STACK_PUSH (tmp1
);
1638 RX_STACK_POP (tmp1
);
1639 RX_STACK_POP (tmp2
);
1641 RX_STACK_PUSH (tmp1
);
1645 RX_STACK_POP (tmp1
);
1646 RX_STACK_POP (tmp2
);
1648 RX_STACK_PUSH (tmp2
);
1652 RX_STACK_POP (tmp1
);
1653 RX_STACK_POP (tmp2
);
1655 RX_STACK_PUSH (tmp1
);
1659 RX_STACK_POP (tmp1
);
1660 RX_STACK_POP (tmp2
);
1662 RX_STACK_PUSH (tmp1
);
1666 RX_STACK_POP (tmp1
);
1667 RX_STACK_POP (tmp2
);
1669 RX_STACK_PUSH (tmp1
);
1673 RX_STACK_POP (tmp1
);
1674 RX_STACK_POP (tmp2
);
1676 RX_STACK_PUSH (tmp1
);
1679 case R_RX_OPsctsize
:
1680 RX_STACK_PUSH (input_section
->size
);
1684 RX_STACK_PUSH (input_section
->output_section
->vma
);
1688 RX_STACK_POP (tmp1
);
1689 RX_STACK_POP (tmp2
);
1691 RX_STACK_PUSH (tmp1
);
1695 RX_STACK_POP (tmp1
);
1696 RX_STACK_POP (tmp2
);
1698 RX_STACK_PUSH (tmp1
);
1702 RX_STACK_POP (tmp1
);
1703 RX_STACK_POP (tmp2
);
1705 RX_STACK_PUSH (tmp1
);
1709 RX_STACK_POP (tmp1
);
1711 RX_STACK_PUSH (tmp1
);
1715 RX_STACK_POP (tmp1
);
1716 RX_STACK_POP (tmp2
);
1718 RX_STACK_PUSH (tmp1
);
1722 RX_STACK_PUSH (get_romstart (&r
, info
, input_bfd
, input_section
, rel
->r_offset
));
1726 RX_STACK_PUSH (get_ramstart (&r
, info
, input_bfd
, input_section
, rel
->r_offset
));
1734 RX_STACK_POP (symval
);
1745 RX_STACK_POP (symval
);
1753 RX_STACK_POP (symval
);
1764 move_reloc (Elf_Internal_Rela
* irel
, Elf_Internal_Rela
* srel
, int delta
)
1766 bfd_vma old_offset
= srel
->r_offset
;
1769 while (irel
<= srel
)
1771 if (irel
->r_offset
== old_offset
)
1772 irel
->r_offset
+= delta
;
/* Relax one section.  */

static bfd_boolean
elf32_rx_relax_section (bfd *                  abfd,
                        asection *             sec,
                        struct bfd_link_info * link_info,
                        bfd_boolean *          again,
                        bfd_boolean            allow_pcrel3)
{
  Elf_Internal_Shdr *      symtab_hdr;
  Elf_Internal_Shdr *      shndx_hdr;
  Elf_Internal_Rela *      internal_relocs;
  Elf_Internal_Rela *      free_relocs = NULL;
  Elf_Internal_Rela *      irel;
  Elf_Internal_Rela *      srel;
  Elf_Internal_Rela *      irelend;
  Elf_Internal_Rela *      next_alignment;
  Elf_Internal_Rela *      prev_alignment;
  bfd_byte *               contents = NULL;
  bfd_byte *               free_contents = NULL;
  Elf_Internal_Sym *       intsyms = NULL;
  Elf_Internal_Sym *       free_intsyms = NULL;
  Elf_External_Sym_Shndx * shndx_buf = NULL;
  bfd_vma pc;
  bfd_vma sec_start;
  bfd_vma symval = 0;
  int pcrel = 0;
  int code;
  int section_alignment_glue;
  /* How much to scale the relocation by - 1, 2, or 4.  */
  int scale;

  /* Assume nothing changes.  */
  *again = FALSE;

  /* We don't have to do anything for a relocatable link, if
     this section does not have relocs, or if this is not a
     code section.  */
1815 if (link_info
->relocatable
1816 || (sec
->flags
& SEC_RELOC
) == 0
1817 || sec
->reloc_count
== 0
1818 || (sec
->flags
& SEC_CODE
) == 0)
1821 symtab_hdr
= &elf_tdata (abfd
)->symtab_hdr
;
1822 shndx_hdr
= &elf_tdata (abfd
)->symtab_shndx_hdr
;
1824 sec_start
= sec
->output_section
->vma
+ sec
->output_offset
;
1826 /* Get the section contents. */
1827 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
1828 contents
= elf_section_data (sec
)->this_hdr
.contents
;
1829 /* Go get them off disk. */
1832 if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
1834 elf_section_data (sec
)->this_hdr
.contents
= contents
;
1837 /* Read this BFD's symbols. */
1838 /* Get cached copy if it exists. */
1839 if (symtab_hdr
->contents
!= NULL
)
1840 intsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
1843 intsyms
= bfd_elf_get_elf_syms (abfd
, symtab_hdr
, symtab_hdr
->sh_info
, 0, NULL
, NULL
, NULL
);
1844 symtab_hdr
->contents
= (bfd_byte
*) intsyms
;
1847 if (shndx_hdr
->sh_size
!= 0)
1851 amt
= symtab_hdr
->sh_info
;
1852 amt
*= sizeof (Elf_External_Sym_Shndx
);
1853 shndx_buf
= (Elf_External_Sym_Shndx
*) bfd_malloc (amt
);
1854 if (shndx_buf
== NULL
)
1856 if (bfd_seek (abfd
, shndx_hdr
->sh_offset
, SEEK_SET
) != 0
1857 || bfd_bread ((PTR
) shndx_buf
, amt
, abfd
) != amt
)
1859 shndx_hdr
->contents
= (bfd_byte
*) shndx_buf
;
1862 /* Get a copy of the native relocations. */
1863 internal_relocs
= (_bfd_elf_link_read_relocs
1864 (abfd
, sec
, (PTR
) NULL
, (Elf_Internal_Rela
*) NULL
,
1865 link_info
->keep_memory
));
1866 if (internal_relocs
== NULL
)
1868 if (! link_info
->keep_memory
)
1869 free_relocs
= internal_relocs
;
  /* The RL_ relocs must be just before the operand relocs they go
     with, so we must sort them to guarantee this.  We use bubblesort
     instead of qsort so we can guarantee that relocs with the same
     address remain in the same relative order.  */
1875 reloc_bubblesort (internal_relocs
, sec
->reloc_count
);
1877 /* Walk through them looking for relaxing opportunities. */
1878 irelend
= internal_relocs
+ sec
->reloc_count
;
  /* This will either be NULL or a pointer to the next alignment
     relocation.  */
  next_alignment = internal_relocs;
  /* This will be the previous alignment, although at first it points
     to the first real relocation.  */
  prev_alignment = internal_relocs;

  /* We calculate the worst case shrinkage caused by alignment directives.
     Not fool-proof, but better than either ignoring the problem or
     doing heavy duty analysis of all the alignment markers in all
     input sections.  */
  section_alignment_glue = 0;
1892 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
1893 if (ELF32_R_TYPE (irel
->r_info
) == R_RX_RH_RELAX
1894 && irel
->r_addend
& RX_RELAXA_ALIGN
)
1896 int this_glue
= 1 << (irel
->r_addend
& RX_RELAXA_ANUM
);
1898 if (section_alignment_glue
< this_glue
)
1899 section_alignment_glue
= this_glue
;
  /* Worst case is all 0..N alignments, in order, causing 2*N-1 bytes
     of glue.  */
  section_alignment_glue *= 2;
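
  /* So if the largest single alignment gap possible in this section is
     4 bytes, SECTION_ALIGNMENT_GLUE ends up as 8, and that value is used
     below as a safety margin on branch displacements that cross an
     alignment reloc.  */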
1905 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
1907 unsigned char *insn
;
1910 /* The insns we care about are all marked with one of these. */
1911 if (ELF32_R_TYPE (irel
->r_info
) != R_RX_RH_RELAX
)
1914 if (irel
->r_addend
& RX_RELAXA_ALIGN
1915 || next_alignment
== internal_relocs
)
          /* When we delete bytes, we need to maintain all the alignments
             indicated.  In addition, we need to be careful about relaxing
             jumps across alignment boundaries - these displacements
             *grow* when we delete bytes.  For now, don't shrink
             displacements across an alignment boundary, just in case.
             Note that this only affects relocations to the same
             section.  */
1924 prev_alignment
= next_alignment
;
1925 next_alignment
+= 2;
1926 while (next_alignment
< irelend
1927 && (ELF32_R_TYPE (next_alignment
->r_info
) != R_RX_RH_RELAX
1928 || !(next_alignment
->r_addend
& RX_RELAXA_ELIGN
)))
1930 if (next_alignment
>= irelend
|| next_alignment
->r_offset
== 0)
1931 next_alignment
= NULL
;
      /* When we hit alignment markers, see if we've shrunk enough
         before them to reduce the gap without violating the alignment
         rules.  */
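
      /* For example, a 7-byte gap in front of a marker requiring 4-byte
         alignment can be shrunk by 4 bytes (7 / 4 * 4); the remaining
         3 bytes stay as padding so the marker's alignment still holds.  */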
1937 if (irel
->r_addend
& RX_RELAXA_ALIGN
)
1939 /* At this point, the next relocation *should* be the ELIGN
1941 Elf_Internal_Rela
*erel
= irel
+ 1;
1942 unsigned int alignment
, nbytes
;
1944 if (ELF32_R_TYPE (erel
->r_info
) != R_RX_RH_RELAX
)
1946 if (!(erel
->r_addend
& RX_RELAXA_ELIGN
))
1949 alignment
= 1 << (irel
->r_addend
& RX_RELAXA_ANUM
);
1951 if (erel
->r_offset
- irel
->r_offset
< alignment
)
1954 nbytes
= erel
->r_offset
- irel
->r_offset
;
1955 nbytes
/= alignment
;
1956 nbytes
*= alignment
;
1958 elf32_rx_relax_delete_bytes (abfd
, sec
, erel
->r_offset
-nbytes
, nbytes
, next_alignment
,
1959 erel
->r_offset
== sec
->size
);
1965 if (irel
->r_addend
& RX_RELAXA_ELIGN
)
1968 insn
= contents
+ irel
->r_offset
;
1970 nrelocs
= irel
->r_addend
& RX_RELAXA_RNUM
;
1972 /* At this point, we have an insn that is a candidate for linker
1973 relaxation. There are NRELOCS relocs following that may be
1974 relaxed, although each reloc may be made of more than one
1975 reloc entry (such as gp-rel symbols). */
1977 /* Get the value of the symbol referred to by the reloc. Just
1978 in case this is the last reloc in the list, use the RL's
1979 addend to choose between this reloc (no addend) or the next
1980 (yes addend, which means at least one following reloc). */
1982 /* srel points to the "current" reloction for this insn -
1983 actually the last reloc for a given operand, which is the one
1984 we need to update. We check the relaxations in the same
1985 order that the relocations happen, so we'll just push it
1989 pc
= sec
->output_section
->vma
+ sec
->output_offset
1993 symval = OFFSET_FOR_RELOC (srel, &srel, &scale); \
1994 pcrel = symval - pc + srel->r_addend; \
#define SNIPNR(offset, nbytes) \
  elf32_rx_relax_delete_bytes (abfd, sec, (insn - contents) + offset, nbytes, next_alignment, 0);
#define SNIP(offset, nbytes, newtype) \
  SNIPNR (offset, nbytes); \
  srel->r_info = ELF32_R_INFO (ELF32_R_SYM (srel->r_info), newtype)
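
/* So, for instance, SNIP (2, 1, newrel) removes one byte at offset 2 of the
   current insn and retypes the operand's reloc to NEWREL, while SNIPNR only
   deletes bytes and leaves the reloc type untouched.  */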
      /* The order of these bit tests must match the order that the
         relocs appear in.  Since we sorted those by offset, we can
         rely on that ordering here.  */

      /* Note that the numbers in, say, DSP6 are the bit offsets of
         the code fields that describe the operand.  Bits number 0 for
         the MSB of insn[0].  */
2016 if (irel
->r_addend
& RX_RELAXA_DSP6
)
2021 if (code
== 2 && symval
/scale
<= 255)
2023 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2026 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2027 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2029 SNIP (3, 1, newrel
);
2034 else if (code
== 1 && symval
== 0)
2037 SNIP (2, 1, R_RX_NONE
);
2041 /* Special case DSP:5 format: MOV.bwl dsp:5[Rsrc],Rdst. */
2042 else if (code
== 1 && symval
/scale
<= 31
2043 /* Decodable bits. */
2044 && (insn
[0] & 0xcc) == 0xcc
2046 && (insn
[0] & 0x30) != 3
2047 /* Register MSBs. */
2048 && (insn
[1] & 0x88) == 0x00)
2052 insn
[0] = 0x88 | (insn
[0] & 0x30);
2053 /* The register fields are in the right place already. */
2055 /* We can't relax this new opcode. */
2058 switch ((insn
[0] & 0x30) >> 4)
2061 newrel
= R_RX_RH_ABS5p5B
;
2064 newrel
= R_RX_RH_ABS5p5W
;
2067 newrel
= R_RX_RH_ABS5p5L
;
2071 move_reloc (irel
, srel
, -2);
2072 SNIP (2, 1, newrel
);
2075 /* Special case DSP:5 format: MOVU.bw dsp:5[Rsrc],Rdst. */
2076 else if (code
== 1 && symval
/scale
<= 31
2077 /* Decodable bits. */
2078 && (insn
[0] & 0xf8) == 0x58
2079 /* Register MSBs. */
2080 && (insn
[1] & 0x88) == 0x00)
2084 insn
[0] = 0xb0 | ((insn
[0] & 0x04) << 1);
2085 /* The register fields are in the right place already. */
2087 /* We can't relax this new opcode. */
2090 switch ((insn
[0] & 0x08) >> 3)
2093 newrel
= R_RX_RH_ABS5p5B
;
2096 newrel
= R_RX_RH_ABS5p5W
;
2100 move_reloc (irel
, srel
, -2);
2101 SNIP (2, 1, newrel
);
2105 /* A DSP4 operand always follows a DSP6 operand, even if there's
2106 no relocation for it. We have to read the code out of the
2107 opcode to calculate the offset of the operand. */
2108 if (irel
->r_addend
& RX_RELAXA_DSP4
)
2110 int code6
, offset
= 0;
2114 code6
= insn
[0] & 0x03;
2117 case 0: offset
= 2; break;
2118 case 1: offset
= 3; break;
2119 case 2: offset
= 4; break;
2120 case 3: offset
= 2; break;
2123 code
= (insn
[0] & 0x0c) >> 2;
2125 if (code
== 2 && symval
/ scale
<= 255)
2127 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2131 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2132 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2134 SNIP (offset
+1, 1, newrel
);
2139 else if (code
== 1 && symval
== 0)
2142 SNIP (offset
, 1, R_RX_NONE
);
2145 /* Special case DSP:5 format: MOV.bwl Rsrc,dsp:5[Rdst] */
2146 else if (code
== 1 && symval
/scale
<= 31
2147 /* Decodable bits. */
2148 && (insn
[0] & 0xc3) == 0xc3
2150 && (insn
[0] & 0x30) != 3
2151 /* Register MSBs. */
2152 && (insn
[1] & 0x88) == 0x00)
2156 insn
[0] = 0x80 | (insn
[0] & 0x30);
2157 /* The register fields are in the right place already. */
2159 /* We can't relax this new opcode. */
2162 switch ((insn
[0] & 0x30) >> 4)
2165 newrel
= R_RX_RH_ABS5p5B
;
2168 newrel
= R_RX_RH_ABS5p5W
;
2171 newrel
= R_RX_RH_ABS5p5L
;
2175 move_reloc (irel
, srel
, -2);
2176 SNIP (2, 1, newrel
);
2180 /* These always occur alone, but the offset depends on whether
2181 it's a MEMEX opcode (0x06) or not. */
2182 if (irel
->r_addend
& RX_RELAXA_DSP14
)
2187 if (insn
[0] == 0x06)
2194 if (code
== 2 && symval
/ scale
<= 255)
2196 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2200 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2201 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2203 SNIP (offset
, 1, newrel
);
2207 else if (code
== 1 && symval
== 0)
2210 SNIP (offset
, 1, R_RX_NONE
);
2221 /* These always occur alone. */
2222 if (irel
->r_addend
& RX_RELAXA_IMM6
)
2228 /* These relocations sign-extend, so we must do signed compares. */
2229 ssymval
= (long) symval
;
2231 code
= insn
[0] & 0x03;
2233 if (code
== 0 && ssymval
<= 8388607 && ssymval
>= -8388608)
2235 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2239 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2240 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2242 SNIP (2, 1, newrel
);
2247 else if (code
== 3 && ssymval
<= 32767 && ssymval
>= -32768)
2249 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2253 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2254 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2256 SNIP (2, 1, newrel
);
2261 /* Special case UIMM8 format: CMP #uimm8,Rdst. */
2262 else if (code
== 2 && ssymval
<= 255 && ssymval
>= 16
2263 /* Decodable bits. */
2264 && (insn
[0] & 0xfc) == 0x74
2265 /* Decodable bits. */
2266 && ((insn
[1] & 0xf0) == 0x00))
2271 insn
[1] = 0x50 | (insn
[1] & 0x0f);
2273 /* We can't relax this new opcode. */
2276 if (STACK_REL_P (ELF32_R_TYPE (srel
->r_info
)))
2277 newrel
= R_RX_ABS8U
;
2279 newrel
= R_RX_DIR8U
;
2281 SNIP (2, 1, newrel
);
2285 else if (code
== 2 && ssymval
<= 127 && ssymval
>= -128)
2287 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2291 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2292 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2294 SNIP (2, 1, newrel
);
2299 /* Special case UIMM4 format: CMP, MUL, AND, OR. */
2300 else if (code
== 1 && ssymval
<= 15 && ssymval
>= 0
2301 /* Decodable bits and immediate type. */
2303 /* Decodable bits. */
2304 && (insn
[1] & 0xc0) == 0x00)
2306 static const int newop
[4] = { 1, 3, 4, 5 };
2308 insn
[0] = 0x60 | newop
[insn
[1] >> 4];
2309 /* The register number doesn't move. */
2311 /* We can't relax this new opcode. */
2314 move_reloc (irel
, srel
, -1);
2316 SNIP (2, 1, R_RX_RH_UIMM4p8
);
2320 /* Special case UIMM4 format: ADD -> ADD/SUB. */
2321 else if (code
== 1 && ssymval
<= 15 && ssymval
>= -15
2322 /* Decodable bits and immediate type. */
2324 /* Same register for source and destination. */
2325 && ((insn
[1] >> 4) == (insn
[1] & 0x0f)))
2329 /* Note that we can't turn "add $0,Rs" into a NOP
2330 because the flags need to be set right. */
2334 insn
[0] = 0x60; /* Subtract. */
2335 newrel
= R_RX_RH_UNEG4p8
;
2339 insn
[0] = 0x62; /* Add. */
2340 newrel
= R_RX_RH_UIMM4p8
;
2343 /* The register number is in the right place. */
2345 /* We can't relax this new opcode. */
2348 move_reloc (irel
, srel
, -1);
2350 SNIP (2, 1, newrel
);
2355 /* These are either matched with a DSP6 (2-byte base) or an id24
2357 if (irel
->r_addend
& RX_RELAXA_IMM12
)
2359 int dspcode
, offset
= 0;
2364 if ((insn
[0] & 0xfc) == 0xfc)
2365 dspcode
= 1; /* Just something with one byte operand. */
2367 dspcode
= insn
[0] & 3;
2370 case 0: offset
= 2; break;
2371 case 1: offset
= 3; break;
2372 case 2: offset
= 4; break;
2373 case 3: offset
= 2; break;
2376 /* These relocations sign-extend, so we must do signed compares. */
2377 ssymval
= (long) symval
;
2379 code
= (insn
[1] >> 2) & 3;
2380 if (code
== 0 && ssymval
<= 8388607 && ssymval
>= -8388608)
2382 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2386 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2387 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2389 SNIP (offset
, 1, newrel
);
2394 else if (code
== 3 && ssymval
<= 32767 && ssymval
>= -32768)
2396 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2400 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2401 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2403 SNIP (offset
, 1, newrel
);
2408 /* Special case UIMM8 format: MOV #uimm8,Rdst. */
2409 else if (code
== 2 && ssymval
<= 255 && ssymval
>= 16
2410 /* Decodable bits. */
2412 /* Decodable bits. */
2413 && ((insn
[1] & 0x03) == 0x02))
2418 insn
[1] = 0x40 | (insn
[1] >> 4);
2420 /* We can't relax this new opcode. */
2423 if (STACK_REL_P (ELF32_R_TYPE (srel
->r_info
)))
2424 newrel
= R_RX_ABS8U
;
2426 newrel
= R_RX_DIR8U
;
2428 SNIP (2, 1, newrel
);
2432 else if (code
== 2 && ssymval
<= 127 && ssymval
>= -128)
2434 unsigned int newrel
= ELF32_R_TYPE(srel
->r_info
);
2438 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2439 if (newrel
!= ELF32_R_TYPE(srel
->r_info
))
2441 SNIP (offset
, 1, newrel
);
2446 /* Special case UIMM4 format: MOV #uimm4,Rdst. */
2447 else if (code
== 1 && ssymval
<= 15 && ssymval
>= 0
2448 /* Decodable bits. */
2450 /* Decodable bits. */
2451 && ((insn
[1] & 0x03) == 0x02))
2454 insn
[1] = insn
[1] >> 4;
2456 /* We can't relax this new opcode. */
2459 move_reloc (irel
, srel
, -1);
2461 SNIP (2, 1, R_RX_RH_UIMM4p8
);
2466 if (irel
->r_addend
& RX_RELAXA_BRA
)
2468 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2470 int alignment_glue
= 0;
          /* Branches over alignment chunks are problematic, as
             deleting bytes here makes the branch *further* away.  We
             can be aggressive with branches within this alignment
             block, but not with branches outside it.  */
2478 if ((prev_alignment
== NULL
2479 || symval
< (bfd_vma
)(sec_start
+ prev_alignment
->r_offset
))
2480 && (next_alignment
== NULL
2481 || symval
> (bfd_vma
)(sec_start
+ next_alignment
->r_offset
)))
2482 alignment_glue
= section_alignment_glue
;
2484 if (ELF32_R_TYPE(srel
[1].r_info
) == R_RX_RH_RELAX
2485 && srel
[1].r_addend
& RX_RELAXA_BRA
2486 && srel
[1].r_offset
< irel
->r_offset
+ pcrel
)
2489 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2491 /* The values we compare PCREL with are not what you'd
2492 expect; they're off by a little to compensate for (1)
2493 where the reloc is relative to the insn, and (2) how much
2494 the insn is going to change when we relax it. */
2496 /* These we have to decode. */
2499 case 0x04: /* BRA pcdsp:24 */
2500 if (-32768 + alignment_glue
<= pcrel
2501 && pcrel
<= 32765 - alignment_glue
)
2504 SNIP (3, 1, newrel
);
2509 case 0x38: /* BRA pcdsp:16 */
2510 if (-128 + alignment_glue
<= pcrel
2511 && pcrel
<= 127 - alignment_glue
)
2514 SNIP (2, 1, newrel
);
2519 case 0x2e: /* BRA pcdsp:8 */
2520 /* Note that there's a risk here of shortening things so
2521 much that we no longer fit this reloc; it *should*
2522 only happen when you branch across a branch, and that
2523 branch also devolves into BRA.S. "Real" code should
2525 if (max_pcrel3
+ alignment_glue
<= pcrel
2526 && pcrel
<= 10 - alignment_glue
2530 SNIP (1, 1, newrel
);
2531 move_reloc (irel
, srel
, -1);
2536 case 0x05: /* BSR pcdsp:24 */
2537 if (-32768 + alignment_glue
<= pcrel
2538 && pcrel
<= 32765 - alignment_glue
)
2541 SNIP (1, 1, newrel
);
2546 case 0x3a: /* BEQ.W pcdsp:16 */
2547 case 0x3b: /* BNE.W pcdsp:16 */
2548 if (-128 + alignment_glue
<= pcrel
2549 && pcrel
<= 127 - alignment_glue
)
2551 insn
[0] = 0x20 | (insn
[0] & 1);
2552 SNIP (1, 1, newrel
);
2557 case 0x20: /* BEQ.B pcdsp:8 */
2558 case 0x21: /* BNE.B pcdsp:8 */
2559 if (max_pcrel3
+ alignment_glue
<= pcrel
2560 && pcrel
- alignment_glue
<= 10
2563 insn
[0] = 0x10 | ((insn
[0] & 1) << 3);
2564 SNIP (1, 1, newrel
);
2565 move_reloc (irel
, srel
, -1);
2570 case 0x16: /* synthetic BNE dsp24 */
2571 case 0x1e: /* synthetic BEQ dsp24 */
2572 if (-32767 + alignment_glue
<= pcrel
2573 && pcrel
<= 32766 - alignment_glue
2576 if (insn
[0] == 0x16)
              /* We snip out the bytes at the end, else the reloc
                 will get moved too, and too much.  */
2582 SNIP (3, 2, newrel
);
2583 move_reloc (irel
, srel
, -1);
2589 /* Special case - synthetic conditional branches, pcrel24.
2590 Note that EQ and NE have been handled above. */
2591 if ((insn
[0] & 0xf0) == 0x20
2594 && srel
->r_offset
!= irel
->r_offset
+ 1
2595 && -32767 + alignment_glue
<= pcrel
2596 && pcrel
<= 32766 - alignment_glue
)
2600 SNIP (5, 1, newrel
);
2604 /* Special case - synthetic conditional branches, pcrel16 */
2605 if ((insn
[0] & 0xf0) == 0x20
2608 && srel
->r_offset
!= irel
->r_offset
+ 1
2609 && -127 + alignment_glue
<= pcrel
2610 && pcrel
<= 126 - alignment_glue
)
2612 int cond
= (insn
[0] & 0x0f) ^ 0x01;
2614 insn
[0] = 0x20 | cond
;
2615 /* By moving the reloc first, we avoid having
2616 delete_bytes move it also. */
2617 move_reloc (irel
, srel
, -2);
2618 SNIP (2, 3, newrel
);
2623 BFD_ASSERT (nrelocs
== 0);
2625 /* Special case - check MOV.bwl #IMM, dsp[reg] and see if we can
2626 use MOV.bwl #uimm:8, dsp:5[r7] format. This is tricky
2627 because it may have one or two relocations. */
2628 if ((insn
[0] & 0xfc) == 0xf8
2629 && (insn
[1] & 0x80) == 0x00
2630 && (insn
[0] & 0x03) != 0x03)
2632 int dcode
, icode
, reg
, ioff
, dscale
, ilen
;
2633 bfd_vma disp_val
= 0;
2635 Elf_Internal_Rela
* disp_rel
= 0;
2636 Elf_Internal_Rela
* imm_rel
= 0;
2641 dcode
= insn
[0] & 0x03;
2642 icode
= (insn
[1] >> 2) & 0x03;
2643 reg
= (insn
[1] >> 4) & 0x0f;
2645 ioff
= dcode
== 1 ? 3 : dcode
== 2 ? 4 : 2;
          /* Figure out what the displacement is.  */
2648 if (dcode
== 1 || dcode
== 2)
2650 /* There's a displacement. See if there's a reloc for it. */
2651 if (srel
[1].r_offset
== irel
->r_offset
+ 2)
2663 #if RX_OPCODE_BIG_ENDIAN
2664 disp_val
= insn
[2] * 256 + insn
[3];
2666 disp_val
= insn
[2] + insn
[3] * 256;
2669 switch (insn
[1] & 3)
2685 /* Figure out what the immediate is. */
2686 if (srel
[1].r_offset
== irel
->r_offset
+ ioff
)
2689 imm_val
= (long) symval
;
2694 unsigned char * ip
= insn
+ ioff
;
2699 /* For byte writes, we don't sign extend. Makes the math easier later. */
2703 imm_val
= (char) ip
[0];
2706 #if RX_OPCODE_BIG_ENDIAN
2707 imm_val
= ((char) ip
[0] << 8) | ip
[1];
2709 imm_val
= ((char) ip
[1] << 8) | ip
[0];
2713 #if RX_OPCODE_BIG_ENDIAN
2714 imm_val
= ((char) ip
[0] << 16) | (ip
[1] << 8) | ip
[2];
2716 imm_val
= ((char) ip
[2] << 16) | (ip
[1] << 8) | ip
[0];
2720 #if RX_OPCODE_BIG_ENDIAN
2721 imm_val
= (ip
[0] << 24) | (ip
[1] << 16) | (ip
[2] << 8) | ip
[3];
2723 imm_val
= (ip
[3] << 24) | (ip
[2] << 16) | (ip
[1] << 8) | ip
[0];
2757 /* The shortcut happens when the immediate is 0..255,
2758 register r0 to r7, and displacement (scaled) 0..31. */
2760 if (0 <= imm_val
&& imm_val
<= 255
2761 && 0 <= reg
&& reg
<= 7
2762 && disp_val
/ dscale
<= 31)
2764 insn
[0] = 0x3c | (insn
[1] & 0x03);
2765 insn
[1] = (((disp_val
/ dscale
) << 3) & 0x80) | (reg
<< 4) | ((disp_val
/dscale
) & 0x0f);
2770 int newrel
= R_RX_NONE
;
2775 newrel
= R_RX_RH_ABS5p8B
;
2778 newrel
= R_RX_RH_ABS5p8W
;
2781 newrel
= R_RX_RH_ABS5p8L
;
2784 disp_rel
->r_info
= ELF32_R_INFO (ELF32_R_SYM (disp_rel
->r_info
), newrel
);
2785 move_reloc (irel
, disp_rel
, -1);
2789 imm_rel
->r_info
= ELF32_R_INFO (ELF32_R_SYM (imm_rel
->r_info
), R_RX_DIR8U
);
2790 move_reloc (disp_rel
? disp_rel
: irel
,
2792 irel
->r_offset
- imm_rel
->r_offset
+ 2);
2795 SNIPNR (3, ilen
- 3);
2798 /* We can't relax this new opcode. */
  /* We can't reliably relax branches to DIR3U_PCREL unless we know
     whatever they're branching over won't shrink any more.  If we're
     basically done here, do one more pass just for branches - but
     don't request a pass after that one!  */
2808 if (!*again
&& !allow_pcrel3
)
2810 bfd_boolean ignored
;
2812 elf32_rx_relax_section (abfd
, sec
, link_info
, &ignored
, TRUE
);
  return TRUE;

 error_return:
  if (free_relocs != NULL)
    free (free_relocs);

  if (free_contents != NULL)
    free (free_contents);

  if (shndx_buf != NULL)
    {
      shndx_hdr->contents = NULL;
      free (shndx_buf);
    }

  if (free_intsyms != NULL)
    free (free_intsyms);

  return FALSE;
}
static bfd_boolean
elf32_rx_relax_section_wrapper (bfd * abfd,
                                asection * sec,
                                struct bfd_link_info * link_info,
                                bfd_boolean * again)
{
  return elf32_rx_relax_section (abfd, sec, link_info, again, FALSE);
}
/* Function to set the ELF flag bits.  */

static bfd_boolean
rx_elf_set_private_flags (bfd * abfd, flagword flags)
{
  elf_elfheader (abfd)->e_flags = flags;
  elf_flags_init (abfd) = TRUE;
  return TRUE;
}
static bfd_boolean no_warn_mismatch = FALSE;

void bfd_elf32_rx_set_target_flags (bfd_boolean);

void
bfd_elf32_rx_set_target_flags (bfd_boolean user_no_warn_mismatch)
{
  no_warn_mismatch = user_no_warn_mismatch;
}
/* Merge backend specific data from an object file to the output
   object file when linking.  */
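/* For example (an illustrative scenario, not an additional rule): if an
   input object was built with E_FLAG_RX_64BIT_DOUBLES but the output's
   header so far lacks that flag, the mismatch path below either reports
   the error or, when no_warn_mismatch has been requested through
   bfd_elf32_rx_set_target_flags, quietly ORs the known flag bits
   together.  */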
static bfd_boolean
rx_elf_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword old_flags;
  flagword new_flags;
  bfd_boolean error = FALSE;

  new_flags = elf_elfheader (ibfd)->e_flags;
  old_flags = elf_elfheader (obfd)->e_flags;

  if (!elf_flags_init (obfd))
    {
      /* First call, no flags set.  */
      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = new_flags;
    }
  else if (old_flags != new_flags)
    {
      flagword known_flags = E_FLAG_RX_64BIT_DOUBLES | E_FLAG_RX_DSP;

      if ((old_flags ^ new_flags) & known_flags)
        {
          /* Only complain if flag bits we care about do not match.
             Other bits may be set, since older binaries did use some
             deprecated flags.  */
          if (no_warn_mismatch)
            elf_elfheader (obfd)->e_flags = (new_flags | old_flags) & known_flags;
          else
            {
              (*_bfd_error_handler)
                ("ELF header flags mismatch: old_flags = 0x%.8lx, new_flags = 0x%.8lx, filename = %s",
                 old_flags, new_flags, bfd_get_filename (ibfd));
              error = TRUE;
            }
        }
      else
        elf_elfheader (obfd)->e_flags = new_flags & known_flags;
    }

  if (error)
    bfd_set_error (bfd_error_bad_value);

  return !error;
}
static bfd_boolean
rx_elf_print_private_bfd_data (bfd * abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  flagword flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  fprintf (file, _("private flags = 0x%lx:"), (long) flags);

  if (flags & E_FLAG_RX_64BIT_DOUBLES)
    fprintf (file, _(" [64-bit doubles]"));
  if (flags & E_FLAG_RX_DSP)
    fprintf (file, _(" [dsp]"));
  fprintf (file, "\n");

  return TRUE;
}
/* Return the MACH for an e_flags value.  */

static int
elf32_rx_machine (bfd * abfd)
{
  if ((elf_elfheader (abfd)->e_flags & EF_RX_CPU_MASK) == EF_RX_CPU_RX)
    return bfd_mach_rx;

  return 0;
}
static bfd_boolean
rx_elf_object_p (bfd * abfd)
{
  bfd_default_set_arch_mach (abfd, bfd_arch_rx,
                             elf32_rx_machine (abfd));
  return TRUE;
}
void
rx_dump_symtab (bfd * abfd, void * internal_syms, void * external_syms)
{
  size_t locsymcount;
  Elf_Internal_Sym * isymbuf;
  Elf_Internal_Sym * isymend;
  Elf_Internal_Sym * isym;
  Elf_Internal_Shdr * symtab_hdr;
  bfd_boolean free_internal = FALSE, free_external = FALSE;
  char * st_info_str;
  char * st_info_stb_str;
  char * st_other_str;
  char * st_shndx_str;

  if (! internal_syms)
    {
      internal_syms = bfd_malloc (1000);
      free_internal = TRUE;
    }
  if (! external_syms)
    {
      external_syms = bfd_malloc (1000);
      free_external = TRUE;
    }

  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
  locsymcount = symtab_hdr->sh_size / get_elf_backend_data (abfd)->s->sizeof_sym;
  if (free_internal)
    isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
                                    symtab_hdr->sh_info, 0,
                                    internal_syms, external_syms, NULL);
  else
    isymbuf = internal_syms;
  isymend = isymbuf + locsymcount;

  for (isym = isymbuf; isym < isymend; isym++)
    {
      switch (ELF_ST_TYPE (isym->st_info))
        {
        case STT_FUNC: st_info_str = "STT_FUNC";
        case STT_SECTION: st_info_str = "STT_SECTION";
        case STT_FILE: st_info_str = "STT_FILE";
        case STT_OBJECT: st_info_str = "STT_OBJECT";
        case STT_TLS: st_info_str = "STT_TLS";
        default: st_info_str = "";
        }
      switch (ELF_ST_BIND (isym->st_info))
        {
        case STB_LOCAL: st_info_stb_str = "STB_LOCAL";
        case STB_GLOBAL: st_info_stb_str = "STB_GLOBAL";
        default: st_info_stb_str = "";
        }
      switch (ELF_ST_VISIBILITY (isym->st_other))
        {
        case STV_DEFAULT: st_other_str = "STV_DEFAULT";
        case STV_INTERNAL: st_other_str = "STV_INTERNAL";
        case STV_PROTECTED: st_other_str = "STV_PROTECTED";
        default: st_other_str = "";
        }
      switch (isym->st_shndx)
        {
        case SHN_ABS: st_shndx_str = "SHN_ABS";
        case SHN_COMMON: st_shndx_str = "SHN_COMMON";
        case SHN_UNDEF: st_shndx_str = "SHN_UNDEF";
        default: st_shndx_str = "";
        }

      printf ("isym = %p st_value = %lx st_size = %lx st_name = (%lu) %s "
              "st_info = (%d) %s %s st_other = (%d) %s st_shndx = (%d) %s\n",
              isym,
              (unsigned long) isym->st_value,
              (unsigned long) isym->st_size,
              (unsigned long) isym->st_name,
              bfd_elf_string_from_elf_section (abfd, symtab_hdr->sh_link,
                                               isym->st_name),
              isym->st_info, st_info_str, st_info_stb_str,
              isym->st_other, st_other_str,
              isym->st_shndx, st_shndx_str);
    }

  if (free_internal)
    free (internal_syms);
  if (free_external)
    free (external_syms);
}
char *
rx_get_reloc (long reloc)
{
  if (0 <= reloc && reloc < R_RX_max)
    return rx_elf_howto_table[reloc].name;
  return "";
}
/* We must take care to keep the on-disk copy of any code sections
   that are fully linked swapped if the target is big endian, to match
   the Renesas tools.  */

/* The rule is: big endian objects that are final-link executables
   have code sections stored with 32-bit words swapped relative to
   what you'd get by default.  */
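/* Concretely: a code word that reads b0 b1 b2 b3 in the linker's view
   is stored in the file as b3 b2 b1 b0, one aligned 32-bit word at a
   time.  The sketch below is compiled out and is not part of this
   backend; it only illustrates the per-word reversal that the routines
   below perform with bfd_getl32/bfd_putb32.  */
#if 0
static void
rx_swap_code_words_sketch (bfd_byte * buf, bfd_size_type len)
{
  bfd_size_type i;

  /* Read each aligned word as little-endian and rewrite it big-endian,
     i.e. reverse its four bytes in place.  LEN is assumed to be a
     multiple of four.  */
  for (i = 0; i + 4 <= len; i += 4)
    bfd_putb32 (bfd_getl32 (buf + i), buf + i);
}
#endif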
static bfd_boolean
rx_get_section_contents (bfd *         abfd,
                         asection *    section,
                         void *        location,
                         file_ptr      offset,
                         bfd_size_type count)
{
  int exec = (abfd->flags & EXEC_P) ? 1 : 0;
  int s_code = (section->flags & SEC_CODE) ? 1 : 0;
  bfd_boolean rv;

#ifdef DJDEBUG
  fprintf (stderr, "dj: get %ld %ld from %s %s e%d sc%d %08lx:%08lx\n",
           (long) offset, (long) count, section->name,
           bfd_big_endian (abfd) ? "be" : "le",
           exec, s_code, (long unsigned) section->filepos,
           (long unsigned) offset);
#endif

  if (exec && s_code && bfd_big_endian (abfd))
    {
      char * cloc = (char *) location;
      bfd_size_type cnt, end_cnt;

      /* Fetch and swap unaligned bytes at the beginning.  */
      if (offset % 4)
        {
          char buf[4];

          rv = _bfd_generic_get_section_contents (abfd, section, buf,
                                                  offset - (offset % 4), 4);
          if (! rv)
            return FALSE;

          bfd_putb32 (bfd_getl32 (buf), buf);

          cnt = 4 - (offset % 4);

          memcpy (location, buf + (offset % 4), cnt);

          /* ... (advance past the bytes just copied) ... */
        }

      end_cnt = count % 4;

      /* Fetch and swap the middle bytes.  */
      if (count >= 4)
        {
          rv = _bfd_generic_get_section_contents (abfd, section, cloc, offset,
                                                  count - end_cnt);
          if (! rv)
            return FALSE;

          for (cnt = count; cnt >= 4; cnt -= 4, cloc += 4)
            bfd_putb32 (bfd_getl32 (cloc), cloc);
        }

      /* Fetch and swap the end bytes.  */
      if (end_cnt > 0)
        {
          char buf[4];

          /* Fetch the end bytes.  */
          rv = _bfd_generic_get_section_contents (abfd, section, buf,
                                                  offset + count - end_cnt, 4);
          if (! rv)
            return FALSE;

          bfd_putb32 (bfd_getl32 (buf), buf);
          memcpy (cloc, buf, end_cnt);
        }
    }
  else
    rv = _bfd_generic_get_section_contents (abfd, section, location, offset, count);

  return rv;
}
#ifdef DJDEBUG
static bfd_boolean
rx2_set_section_contents (bfd *         abfd,
                          asection *    section,
                          const void *  location,
                          file_ptr      offset,
                          bfd_size_type count)
{
  bfd_size_type i;

  fprintf (stderr, " set sec %s %08x loc %p offset %#x count %#x\n",
           section->name, (unsigned) section->vma, location, (int) offset, (int) count);

  for (i = 0; i < count; i++)
    {
      if (i % 16 == 0 && i > 0)
        fprintf (stderr, "\n");

      if (i % 16 && i % 4 == 0)
        fprintf (stderr, " ");

      if (i % 16 == 0)
        fprintf (stderr, " %08x:", (int) (section->vma + offset + i));

      fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
    }
  fprintf (stderr, "\n");

  return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
}
#define _bfd_elf_set_section_contents rx2_set_section_contents
#endif
static bfd_boolean
rx_set_section_contents (bfd *         abfd,
                         asection *    section,
                         const void *  location,
                         file_ptr      offset,
                         bfd_size_type count)
{
  bfd_boolean exec = (abfd->flags & EXEC_P) ? TRUE : FALSE;
  bfd_boolean s_code = (section->flags & SEC_CODE) ? TRUE : FALSE;
  bfd_boolean rv;
  char * swapped_data = NULL;
  bfd_size_type i;
  bfd_vma caddr = section->vma + offset;
  file_ptr faddr = 0;
  bfd_size_type scount;

#ifdef DJDEBUG
  fprintf (stderr, "\ndj: set %ld %ld to %s %s e%d sc%d\n",
           (long) offset, (long) count, section->name,
           bfd_big_endian (abfd) ? "be" : "le",
           exec, s_code);

  for (i = 0; i < count; i++)
    {
      int a = section->vma + offset + i;

      if (a % 16 == 0 && a > 0)
        fprintf (stderr, "\n");

      if (a % 16 && a % 4 == 0)
        fprintf (stderr, " ");

      if (a % 16 == 0 || i == 0)
        fprintf (stderr, " %08x:", (int) (section->vma + offset + i));

      fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
    }

  fprintf (stderr, "\n");
#endif

  if (! exec || ! s_code || ! bfd_big_endian (abfd))
    return _bfd_elf_set_section_contents (abfd, section, location, offset, count);

  while (count > 0 && caddr > 0 && caddr % 4)
    {
      switch (caddr % 4)
        {
        case 0: faddr = offset + 3; break;
        case 1: faddr = offset + 1; break;
        case 2: faddr = offset - 1; break;
        case 3: faddr = offset - 3; break;
        }

      rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
      if (! rv)
        return rv;

      location = (const char *) location + 1;
      offset ++;
      count --;
      caddr ++;
    }

  scount = (int)(count / 4) * 4;
  if (scount > 0)
    {
      char * cloc = (char *) location;

      swapped_data = (char *) bfd_alloc (abfd, count);

      for (i = 0; i < count; i += 4)
        {
          bfd_vma v = bfd_getl32 (cloc + i);
          bfd_putb32 (v, swapped_data + i);
        }

      rv = _bfd_elf_set_section_contents (abfd, section, swapped_data, offset, scount);
      if (! rv)
        return rv;
    }

  count -= scount;
  location = (const char *) location + scount;
  offset += scount;

  caddr = section->vma + offset;
  while (count > 0)
    {
      switch (caddr % 4)
        {
        case 0: faddr = offset + 3; break;
        case 1: faddr = offset + 1; break;
        case 2: faddr = offset - 1; break;
        case 3: faddr = offset - 3; break;
        }

      rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
      if (! rv)
        return rv;

      location = (const char *) location + 1;
      offset ++;
      count --;
      caddr ++;
    }

  return TRUE;
}
static bfd_boolean
rx_final_link (bfd * abfd, struct bfd_link_info * info)
{
  asection * o;

  for (o = abfd->sections; o != NULL; o = o->next)
    {
#ifdef DJDEBUG
      fprintf (stderr, "sec %s fl %x vma %lx lma %lx size %lx raw %lx\n",
               o->name, o->flags, o->vma, o->lma, o->size, o->rawsize);
#endif
      if (o->flags & SEC_CODE
          && bfd_big_endian (abfd)
          && (o->size % 4 || o->rawsize % 4))
        {
#ifdef DJDEBUG
          fprintf (stderr, "adjusting...\n");
#endif
          o->size += 4 - (o->size % 4);
          o->rawsize += 4 - (o->rawsize % 4);
        }
    }

  return bfd_elf_final_link (abfd, info);
}
static bfd_boolean
elf32_rx_modify_program_headers (bfd * abfd ATTRIBUTE_UNUSED,
                                 struct bfd_link_info * info ATTRIBUTE_UNUSED)
{
  const struct elf_backend_data * bed;
  struct elf_obj_tdata * tdata;
  Elf_Internal_Phdr * phdr;
  unsigned int count;
  unsigned int i;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;

  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
        /* The Renesas tools expect p_paddr to be zero.  However,
           there is no other way to store the writable data in ROM for
           startup initialization.  So, we let the linker *think*
           we're using paddr and vaddr the "usual" way, but at the
           last minute we move the paddr into the vaddr (which is what
           the simulator uses) and zero out paddr.  Note that this
           does not affect the section headers, just the program
           headers.  We hope.  */
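        /* Illustrative values only: a PT_LOAD created with
           p_vaddr = 0x00001000 (RAM image) and p_paddr = 0xfff80000
           (ROM load address) leaves this hook with
           p_vaddr == p_paddr == 0xfff80000, which is what the Renesas
           tools and the simulator expect to see.  */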
        phdr[i].p_vaddr = phdr[i].p_paddr;
        /* If we zero out p_paddr, then the LMA in the section table
           becomes wrong.  */
        /*phdr[i].p_paddr = 0;*/
      }

  return TRUE;
}
#define ELF_ARCH                bfd_arch_rx
#define ELF_MACHINE_CODE        EM_RX
#define ELF_MAXPAGESIZE         0x1000

#define TARGET_BIG_SYM          bfd_elf32_rx_be_vec
#define TARGET_BIG_NAME         "elf32-rx-be"

#define TARGET_LITTLE_SYM       bfd_elf32_rx_le_vec
#define TARGET_LITTLE_NAME      "elf32-rx-le"

#define elf_info_to_howto_rel                   NULL
#define elf_info_to_howto                       rx_info_to_howto_rela
#define elf_backend_object_p                    rx_elf_object_p
#define elf_backend_relocate_section            rx_elf_relocate_section
#define elf_symbol_leading_char                 ('_')
#define elf_backend_can_gc_sections             1
#define elf_backend_modify_program_headers      elf32_rx_modify_program_headers

#define bfd_elf32_bfd_reloc_type_lookup         rx_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup         rx_reloc_name_lookup
#define bfd_elf32_bfd_set_private_flags         rx_elf_set_private_flags
#define bfd_elf32_bfd_merge_private_bfd_data    rx_elf_merge_private_bfd_data
#define bfd_elf32_bfd_print_private_bfd_data    rx_elf_print_private_bfd_data
#define bfd_elf32_get_section_contents          rx_get_section_contents
#define bfd_elf32_set_section_contents          rx_set_section_contents
#define bfd_elf32_bfd_final_link                rx_final_link
#define bfd_elf32_bfd_relax_section             elf32_rx_relax_section_wrapper

#include "elf32-target.h"