/* Renesas RX specific support for 32-bit ELF.
   Copyright (C) 2008-2013 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
22 #include "bfd_stdint.h"
26 #include "libiberty.h"
#define RX_OPCODE_BIG_ENDIAN 0
/* This is a meta-target that's used only with objcopy, to avoid the
   endian-swap we would otherwise get.  We check for this in
   rx_set_section_contents ().  */
const bfd_target bfd_elf32_rx_be_ns_vec;
const bfd_target bfd_elf32_rx_be_vec;

char * rx_get_reloc (long);
void rx_dump_symtab (bfd *, void *, void *);
#define RXREL(n,sz,bit,shift,complain,pcrel) \
  HOWTO (R_RX_##n, shift, sz, bit, pcrel, 0, complain_overflow_ ## complain, \
	 bfd_elf_generic_reloc, "R_RX_" #n, FALSE, 0, ~0, FALSE)
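/* As an illustration (not part of the original table), a single entry such
   as RXREL (DIR32, 2, 32, 0, signed, FALSE) expands to:

     HOWTO (R_RX_DIR32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
	    bfd_elf_generic_reloc, "R_RX_DIR32", FALSE, 0, ~0, FALSE)

   i.e. a 32-bit (size code 2), non-PC-relative relocation with signed
   overflow checking, applied by the generic BFD reloc function.  */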
/* Note that the relocations around 0x7f are internal to this file;
   feel free to move them as needed to avoid conflicts with published
   relocation numbers.  */
static reloc_howto_type rx_elf_howto_table [] =
{
  RXREL (NONE,         0,  0, 0, dont,     FALSE),
  RXREL (DIR32,        2, 32, 0, signed,   FALSE),
  RXREL (DIR24S,       2, 24, 0, signed,   FALSE),
  RXREL (DIR16,        1, 16, 0, dont,     FALSE),
  RXREL (DIR16U,       1, 16, 0, unsigned, FALSE),
  RXREL (DIR16S,       1, 16, 0, signed,   FALSE),
  RXREL (DIR8,         0,  8, 0, dont,     FALSE),
  RXREL (DIR8U,        0,  8, 0, unsigned, FALSE),
  RXREL (DIR8S,        0,  8, 0, signed,   FALSE),
  RXREL (DIR24S_PCREL, 2, 24, 0, signed,   TRUE),
  RXREL (DIR16S_PCREL, 1, 16, 0, signed,   TRUE),
  RXREL (DIR8S_PCREL,  0,  8, 0, signed,   TRUE),
  RXREL (DIR16UL,      1, 16, 2, unsigned, FALSE),
  RXREL (DIR16UW,      1, 16, 1, unsigned, FALSE),
  RXREL (DIR8UL,       0,  8, 2, unsigned, FALSE),
  RXREL (DIR8UW,       0,  8, 1, unsigned, FALSE),
  RXREL (DIR32_REV,    1, 16, 0, dont,     FALSE),
  RXREL (DIR16_REV,    1, 16, 0, dont,     FALSE),
  RXREL (DIR3U_PCREL,  0,  3, 0, dont,     TRUE),

  RXREL (RH_3_PCREL,   0,  3, 0, signed,   TRUE),
  RXREL (RH_16_OP,     1, 16, 0, signed,   FALSE),
  RXREL (RH_24_OP,     2, 24, 0, signed,   FALSE),
  RXREL (RH_32_OP,     2, 32, 0, signed,   FALSE),
  RXREL (RH_24_UNS,    2, 24, 0, unsigned, FALSE),
  RXREL (RH_8_NEG,     0,  8, 0, signed,   FALSE),
  RXREL (RH_16_NEG,    1, 16, 0, signed,   FALSE),
  RXREL (RH_24_NEG,    2, 24, 0, signed,   FALSE),
  RXREL (RH_32_NEG,    2, 32, 0, signed,   FALSE),
  RXREL (RH_DIFF,      2, 32, 0, signed,   FALSE),
  RXREL (RH_GPRELB,    1, 16, 0, unsigned, FALSE),
  RXREL (RH_GPRELW,    1, 16, 0, unsigned, FALSE),
  RXREL (RH_GPRELL,    1, 16, 0, unsigned, FALSE),
  RXREL (RH_RELAX,     0,  0, 0, dont,     FALSE),

  RXREL (ABS32,        2, 32, 0, dont,     FALSE),
  RXREL (ABS24S,       2, 24, 0, signed,   FALSE),
  RXREL (ABS16,        1, 16, 0, dont,     FALSE),
  RXREL (ABS16U,       1, 16, 0, unsigned, FALSE),
  RXREL (ABS16S,       1, 16, 0, signed,   FALSE),
  RXREL (ABS8,         0,  8, 0, dont,     FALSE),
  RXREL (ABS8U,        0,  8, 0, unsigned, FALSE),
  RXREL (ABS8S,        0,  8, 0, signed,   FALSE),
  RXREL (ABS24S_PCREL, 2, 24, 0, signed,   TRUE),
  RXREL (ABS16S_PCREL, 1, 16, 0, signed,   TRUE),
  RXREL (ABS8S_PCREL,  0,  8, 0, signed,   TRUE),
  RXREL (ABS16UL,      1, 16, 0, unsigned, FALSE),
  RXREL (ABS16UW,      1, 16, 0, unsigned, FALSE),
  RXREL (ABS8UL,       0,  8, 0, unsigned, FALSE),
  RXREL (ABS8UW,       0,  8, 0, unsigned, FALSE),
  RXREL (ABS32_REV,    2, 32, 0, dont,     FALSE),
  RXREL (ABS16_REV,    1, 16, 0, dont,     FALSE),

#define STACK_REL_P(x) ((x) <= R_RX_ABS16_REV && (x) >= R_RX_ABS32)
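/* STACK_REL_P selects the R_RX_ABS* relocations: these take their operand
   from the internal expression stack (filled by R_RX_SYM and the R_RX_OP*
   relocations further below) rather than directly from the symbol value
   plus addend.  */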
  /* These are internal.  */
  /* A 5-bit unsigned displacement to a B/W/L address, at bit position 8/12.  */
  /* ---- ---- 4--- 3210.  */
#define R_RX_RH_ABS5p8B 0x78
  RXREL (RH_ABS5p8B,   0,  0, 0, dont,     FALSE),
#define R_RX_RH_ABS5p8W 0x79
  RXREL (RH_ABS5p8W,   0,  0, 0, dont,     FALSE),
#define R_RX_RH_ABS5p8L 0x7a
  RXREL (RH_ABS5p8L,   0,  0, 0, dont,     FALSE),
  /* A 5-bit unsigned displacement to a B/W/L address, at bit position 5/12.  */
  /* ---- -432 1--- 0---.  */
#define R_RX_RH_ABS5p5B 0x7b
  RXREL (RH_ABS5p5B,   0,  0, 0, dont,     FALSE),
#define R_RX_RH_ABS5p5W 0x7c
  RXREL (RH_ABS5p5W,   0,  0, 0, dont,     FALSE),
#define R_RX_RH_ABS5p5L 0x7d
  RXREL (RH_ABS5p5L,   0,  0, 0, dont,     FALSE),
  /* A 4-bit unsigned immediate at bit position 8.  */
#define R_RX_RH_UIMM4p8 0x7e
  RXREL (RH_UIMM4p8,   0,  0, 0, dont,     FALSE),
  /* A 4-bit negative unsigned immediate at bit position 8.  */
#define R_RX_RH_UNEG4p8 0x7f
  RXREL (RH_UNEG4p8,   0,  0, 0, dont,     FALSE),
  /* End of internal relocs.  */
  RXREL (SYM,       2, 32, 0, dont, FALSE),
  RXREL (OPneg,     2, 32, 0, dont, FALSE),
  RXREL (OPadd,     2, 32, 0, dont, FALSE),
  RXREL (OPsub,     2, 32, 0, dont, FALSE),
  RXREL (OPmul,     2, 32, 0, dont, FALSE),
  RXREL (OPdiv,     2, 32, 0, dont, FALSE),
  RXREL (OPshla,    2, 32, 0, dont, FALSE),
  RXREL (OPshra,    2, 32, 0, dont, FALSE),
  RXREL (OPsctsize, 2, 32, 0, dont, FALSE),
  RXREL (OPscttop,  2, 32, 0, dont, FALSE),
  RXREL (OPand,     2, 32, 0, dont, FALSE),
  RXREL (OPor,      2, 32, 0, dont, FALSE),
  RXREL (OPxor,     2, 32, 0, dont, FALSE),
  RXREL (OPnot,     2, 32, 0, dont, FALSE),
  RXREL (OPmod,     2, 32, 0, dont, FALSE),
  RXREL (OPromtop,  2, 32, 0, dont, FALSE),
  RXREL (OPramtop,  2, 32, 0, dont, FALSE)
};
/* Map BFD reloc types to RX ELF reloc types.  */

struct rx_reloc_map
{
  bfd_reloc_code_real_type  bfd_reloc_val;
  unsigned int              rx_reloc_val;
};

static const struct rx_reloc_map rx_reloc_map [] =
{
  { BFD_RELOC_NONE,             R_RX_NONE },
  { BFD_RELOC_8,                R_RX_DIR8S },
  { BFD_RELOC_16,               R_RX_DIR16S },
  { BFD_RELOC_24,               R_RX_DIR24S },
  { BFD_RELOC_32,               R_RX_DIR32 },
  { BFD_RELOC_RX_16_OP,         R_RX_DIR16 },
  { BFD_RELOC_RX_DIR3U_PCREL,   R_RX_DIR3U_PCREL },
  { BFD_RELOC_8_PCREL,          R_RX_DIR8S_PCREL },
  { BFD_RELOC_16_PCREL,         R_RX_DIR16S_PCREL },
  { BFD_RELOC_24_PCREL,         R_RX_DIR24S_PCREL },
  { BFD_RELOC_RX_8U,            R_RX_DIR8U },
  { BFD_RELOC_RX_16U,           R_RX_DIR16U },
  { BFD_RELOC_RX_24U,           R_RX_RH_24_UNS },
  { BFD_RELOC_RX_NEG8,          R_RX_RH_8_NEG },
  { BFD_RELOC_RX_NEG16,         R_RX_RH_16_NEG },
  { BFD_RELOC_RX_NEG24,         R_RX_RH_24_NEG },
  { BFD_RELOC_RX_NEG32,         R_RX_RH_32_NEG },
  { BFD_RELOC_RX_DIFF,          R_RX_RH_DIFF },
  { BFD_RELOC_RX_GPRELB,        R_RX_RH_GPRELB },
  { BFD_RELOC_RX_GPRELW,        R_RX_RH_GPRELW },
  { BFD_RELOC_RX_GPRELL,        R_RX_RH_GPRELL },
  { BFD_RELOC_RX_RELAX,         R_RX_RH_RELAX },
  { BFD_RELOC_RX_SYM,           R_RX_SYM },
  { BFD_RELOC_RX_OP_SUBTRACT,   R_RX_OPsub },
  { BFD_RELOC_RX_OP_NEG,        R_RX_OPneg },
  { BFD_RELOC_RX_ABS8,          R_RX_ABS8 },
  { BFD_RELOC_RX_ABS16,         R_RX_ABS16 },
  { BFD_RELOC_RX_ABS16_REV,     R_RX_ABS16_REV },
  { BFD_RELOC_RX_ABS32,         R_RX_ABS32 },
  { BFD_RELOC_RX_ABS32_REV,     R_RX_ABS32_REV },
  { BFD_RELOC_RX_ABS16UL,       R_RX_ABS16UL },
  { BFD_RELOC_RX_ABS16UW,       R_RX_ABS16UW },
  { BFD_RELOC_RX_ABS16U,        R_RX_ABS16U }
};
#define BIGE(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)
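/* BIGE() reports whether a BFD's target byte order is big-endian; the
   relocation code below uses it to choose the byte order in which data
   relocs are stored for non-code sections.  */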
static reloc_howto_type *
rx_reloc_type_lookup (bfd * abfd ATTRIBUTE_UNUSED,
		      bfd_reloc_code_real_type code)
{
  unsigned int i;

  if (code == BFD_RELOC_RX_32_OP)
    return rx_elf_howto_table + R_RX_DIR32;

  for (i = ARRAY_SIZE (rx_reloc_map); --i;)
    if (rx_reloc_map [i].bfd_reloc_val == code)
      return rx_elf_howto_table + rx_reloc_map [i].rx_reloc_val;

  return NULL;
}
static reloc_howto_type *
rx_reloc_name_lookup (bfd * abfd ATTRIBUTE_UNUSED, const char * r_name)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (rx_elf_howto_table); i++)
    if (rx_elf_howto_table[i].name != NULL
	&& strcasecmp (rx_elf_howto_table[i].name, r_name) == 0)
      return rx_elf_howto_table + i;

  return NULL;
}
/* Set the howto pointer for an RX ELF reloc.  */

static void
rx_info_to_howto_rela (bfd *               abfd ATTRIBUTE_UNUSED,
		       arelent *           cache_ptr,
		       Elf_Internal_Rela * dst)
{
  unsigned int r_type;

  r_type = ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < (unsigned int) R_RX_max);
  cache_ptr->howto = rx_elf_howto_table + r_type;
}
static bfd_vma
get_symbol_value (const char *            name,
		  bfd_reloc_status_type * status,
		  struct bfd_link_info *  info,
		  bfd *                   input_bfd,
		  asection *              input_section,
		  int                     offset)
{
  bfd_vma value = 0;
  struct bfd_link_hash_entry * h;

  h = bfd_link_hash_lookup (info->hash, name, FALSE, FALSE, TRUE);

  if (h == NULL
      || (h->type != bfd_link_hash_defined
	  && h->type != bfd_link_hash_defweak))
    * status = info->callbacks->undefined_symbol
      (info, name, input_bfd, input_section, offset, TRUE);
  else
    value = (h->u.def.value
	     + h->u.def.section->output_section->vma
	     + h->u.def.section->output_offset);

  return value;
}
static bfd_vma
get_gp (bfd_reloc_status_type * status,
	struct bfd_link_info *  info,
	bfd *                   abfd,
	asection *              sec,
	int                     offset)
{
  static bfd_boolean cached = FALSE;
  static bfd_vma     cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("__gp", status, info, abfd, sec, offset);
      cached = TRUE;
    }
  return cached_value;
}
static bfd_vma
get_romstart (bfd_reloc_status_type * status,
	      struct bfd_link_info *  info,
	      bfd *                   abfd,
	      asection *              sec,
	      int                     offset)
{
  static bfd_boolean cached = FALSE;
  static bfd_vma     cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("_start", status, info, abfd, sec, offset);
      cached = TRUE;
    }
  return cached_value;
}
static bfd_vma
get_ramstart (bfd_reloc_status_type * status,
	      struct bfd_link_info *  info,
	      bfd *                   abfd,
	      asection *              sec,
	      int                     offset)
{
  static bfd_boolean cached = FALSE;
  static bfd_vma     cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("__datastart", status, info, abfd, sec, offset);
      cached = TRUE;
    }
  return cached_value;
}
#define NUM_STACK_ENTRIES 16
static int32_t rx_stack [ NUM_STACK_ENTRIES ];
static unsigned int rx_stack_top;

#define RX_STACK_PUSH(val) \
  do \
    { \
      if (rx_stack_top < NUM_STACK_ENTRIES) \
	rx_stack [rx_stack_top ++] = (val); \
      else \
	r = bfd_reloc_dangerous; \
    } \
  while (0)

#define RX_STACK_POP(dest) \
  do \
    { \
      if (rx_stack_top > 0) \
	(dest) = rx_stack [-- rx_stack_top]; \
      else \
	(dest) = 0, r = bfd_reloc_dangerous; \
    } \
  while (0)
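/* The R_RX_SYM and R_RX_OP* relocations handled below use this small
   expression stack: R_RX_SYM pushes a symbol's address, each R_RX_OP* reloc
   pops its operands and pushes the result, and a final R_RX_ABS* reloc pops
   the value into the instruction.  For example, the sequence

     R_RX_SYM (a), R_RX_SYM (b), R_RX_OPsub, R_RX_ABS32

   stores (a - b) as a 32-bit value at the reloc's offset.  */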
/* Relocate an RX ELF section.
   There is some attempt to make this function usable for many architectures,
   both USE_REL and USE_RELA ['twould be nice if such a critter existed],
   if only to serve as a learning tool.

   The RELOCATE_SECTION function is called by the new ELF backend linker
   to handle the relocations for a section.

   The relocs are always passed as Rela structures; if the section
   actually uses Rel structures, the r_addend field will always be
   zero.

   This function is responsible for adjusting the section contents as
   necessary, and (if using Rela relocs and generating a relocatable
   output file) adjusting the reloc addend as necessary.

   This function does not have to worry about setting the reloc
   address or the reloc symbol index.

   LOCAL_SYMS is a pointer to the swapped in local symbols.

   LOCAL_SECTIONS is an array giving the section in the input file
   corresponding to the st_shndx field of each local symbol.

   The global hash table entry for the global symbols can be found
   via elf_sym_hashes (input_bfd).

   When generating relocatable output, this function must handle
   STB_LOCAL/STT_SECTION symbols specially.  The output symbol is
   going to be the section symbol corresponding to the output
   section, which means that the addend must be adjusted
   accordingly.  */
static bfd_boolean
rx_elf_relocate_section
    (bfd *                   output_bfd,
     struct bfd_link_info *  info,
     bfd *                   input_bfd,
     asection *              input_section,
     bfd_byte *              contents,
     Elf_Internal_Rela *     relocs,
     Elf_Internal_Sym *      local_syms,
     asection **             local_sections)
{
  Elf_Internal_Shdr *           symtab_hdr;
  struct elf_link_hash_entry ** sym_hashes;
  Elf_Internal_Rela *           rel;
  Elf_Internal_Rela *           relend;
  bfd_boolean                   pid_mode;
  bfd_boolean                   saw_subtract = FALSE;

  if (elf_elfheader (output_bfd)->e_flags & E_FLAG_RX_PID)
    pid_mode = TRUE;
  else
    pid_mode = FALSE;

  symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = elf_sym_hashes (input_bfd);
  relend     = relocs + input_section->reloc_count;
  for (rel = relocs; rel < relend; rel ++)
    {
      reloc_howto_type *           howto;
      unsigned long                r_symndx;
      Elf_Internal_Sym *           sym;
      asection *                   sec;
      struct elf_link_hash_entry * h;
      bfd_vma                      relocation;
      bfd_reloc_status_type        r;
      const char *                 name = NULL;
      bfd_boolean                  unresolved_reloc = TRUE;
      int                          r_type;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_symndx = ELF32_R_SYM (rel->r_info);

      howto  = rx_elf_howto_table + ELF32_R_TYPE (rel->r_info);
      h      = NULL;
      sym    = NULL;
      sec    = NULL;
      relocation = 0;

      if (rx_stack_top == 0)
	saw_subtract = FALSE;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections [r_symndx];
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, & sec, rel);

	  name = bfd_elf_string_from_elf_section
	    (input_bfd, symtab_hdr->sh_link, sym->st_name);
	  name = (sym->st_name == 0) ? bfd_section_name (input_bfd, sec) : name;
	}
      else
	{
	  bfd_boolean warned ATTRIBUTE_UNUSED;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes, h,
				   sec, relocation, unresolved_reloc,
				   warned);

	  name = h->root.root.string;
	}
      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (info->relocatable)
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    rel->r_addend += sec->output_offset;
	  continue;
	}

      if (h != NULL && h->root.type == bfd_link_hash_undefweak)
	/* If the symbol is undefined and weak
	   then the relocation resolves to zero.  */
	relocation = 0;

      if (howto->pc_relative)
	{
	  relocation -= (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset);
	  if (r_type != R_RX_RH_3_PCREL
	      && r_type != R_RX_DIR3U_PCREL)
	    relocation ++;
	}

      relocation += rel->r_addend;
#define RANGE(a,b) if (a > (long) relocation || (long) relocation > b) r = bfd_reloc_overflow
#define ALIGN(m)   if (relocation & m) r = bfd_reloc_other;
#define OP(i)      (contents[rel->r_offset + (i)])
#define WARN_REDHAT(type) \
  _bfd_error_handler (_("%B:%A: Warning: deprecated Red Hat reloc " type " detected against: %s."), \
		      input_bfd, input_section, name)
      /* Check for unsafe relocs in PID mode.  These are any relocs where
	 an absolute address is being computed.  There are special cases
	 for relocs against symbols that are known to be referenced in
	 crt0.o before the PID base address register has been initialised.  */
#define UNSAFE_FOR_PID \
  do \
    { \
      if (pid_mode \
	  && sec != NULL \
	  && sec->flags & SEC_READONLY \
	  && !(input_section->flags & SEC_DEBUGGING) \
	  && strcmp (name, "__pid_base") != 0 \
	  && strcmp (name, "__gp") != 0 \
	  && strcmp (name, "__romdatastart") != 0 \
	  && !saw_subtract) \
	_bfd_error_handler (_("%B(%A): unsafe PID relocation %s at 0x%08lx (against %s in %s)"), \
			    input_bfd, input_section, howto->name, \
			    input_section->output_section->vma + input_section->output_offset + rel->r_offset, \
			    name, sec->name); \
    } \
  while (0)
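/* As an illustration of the OP/RANGE helpers used throughout the switch
   below, a little-endian 16-bit data reloc is typically applied as

     RANGE (-32768, 65535);
     OP (0) = relocation;
     OP (1) = relocation >> 8;

   i.e. the range-checked value is stored byte by byte into the section
   contents at rel->r_offset.  */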
      /* Opcode relocs are always big endian.  Data relocs are bi-endian.  */
597 case R_RX_RH_3_PCREL
:
598 WARN_REDHAT ("RX_RH_3_PCREL");
601 OP (0) |= relocation
& 0x07;
605 WARN_REDHAT ("RX_RH_8_NEG");
606 relocation
= - relocation
;
607 case R_RX_DIR8S_PCREL
:
626 WARN_REDHAT ("RX_RH_16_NEG");
627 relocation
= - relocation
;
628 case R_RX_DIR16S_PCREL
:
630 RANGE (-32768, 32767);
631 #if RX_OPCODE_BIG_ENDIAN
634 OP (1) = relocation
>> 8;
639 WARN_REDHAT ("RX_RH_16_OP");
641 RANGE (-32768, 32767);
642 #if RX_OPCODE_BIG_ENDIAN
644 OP (0) = relocation
>> 8;
647 OP (1) = relocation
>> 8;
653 RANGE (-32768, 65535);
654 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
657 OP (0) = relocation
>> 8;
662 OP (1) = relocation
>> 8;
669 #if RX_OPCODE_BIG_ENDIAN
671 OP (0) = relocation
>> 8;
674 OP (1) = relocation
>> 8;
680 RANGE (-32768, 65536);
681 #if RX_OPCODE_BIG_ENDIAN
683 OP (0) = relocation
>> 8;
686 OP (1) = relocation
>> 8;
692 RANGE (-32768, 65536);
693 #if RX_OPCODE_BIG_ENDIAN
695 OP (1) = relocation
>> 8;
698 OP (0) = relocation
>> 8;
702 case R_RX_DIR3U_PCREL
:
705 OP (0) |= relocation
& 0x07;
710 WARN_REDHAT ("RX_RH_24_NEG");
711 relocation
= - relocation
;
712 case R_RX_DIR24S_PCREL
:
713 RANGE (-0x800000, 0x7fffff);
714 #if RX_OPCODE_BIG_ENDIAN
716 OP (1) = relocation
>> 8;
717 OP (0) = relocation
>> 16;
720 OP (1) = relocation
>> 8;
721 OP (2) = relocation
>> 16;
727 WARN_REDHAT ("RX_RH_24_OP");
728 RANGE (-0x800000, 0x7fffff);
729 #if RX_OPCODE_BIG_ENDIAN
731 OP (1) = relocation
>> 8;
732 OP (0) = relocation
>> 16;
735 OP (1) = relocation
>> 8;
736 OP (2) = relocation
>> 16;
742 RANGE (-0x800000, 0x7fffff);
743 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
746 OP (1) = relocation
>> 8;
747 OP (0) = relocation
>> 16;
752 OP (1) = relocation
>> 8;
753 OP (2) = relocation
>> 16;
759 WARN_REDHAT ("RX_RH_24_UNS");
761 #if RX_OPCODE_BIG_ENDIAN
763 OP (1) = relocation
>> 8;
764 OP (0) = relocation
>> 16;
767 OP (1) = relocation
>> 8;
768 OP (2) = relocation
>> 16;
774 WARN_REDHAT ("RX_RH_32_NEG");
775 relocation
= - relocation
;
776 #if RX_OPCODE_BIG_ENDIAN
778 OP (2) = relocation
>> 8;
779 OP (1) = relocation
>> 16;
780 OP (0) = relocation
>> 24;
783 OP (1) = relocation
>> 8;
784 OP (2) = relocation
>> 16;
785 OP (3) = relocation
>> 24;
791 WARN_REDHAT ("RX_RH_32_OP");
792 #if RX_OPCODE_BIG_ENDIAN
794 OP (2) = relocation
>> 8;
795 OP (1) = relocation
>> 16;
796 OP (0) = relocation
>> 24;
799 OP (1) = relocation
>> 8;
800 OP (2) = relocation
>> 16;
801 OP (3) = relocation
>> 24;
806 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
809 OP (2) = relocation
>> 8;
810 OP (1) = relocation
>> 16;
811 OP (0) = relocation
>> 24;
816 OP (1) = relocation
>> 8;
817 OP (2) = relocation
>> 16;
818 OP (3) = relocation
>> 24;
823 if (BIGE (output_bfd
))
826 OP (1) = relocation
>> 8;
827 OP (2) = relocation
>> 16;
828 OP (3) = relocation
>> 24;
833 OP (2) = relocation
>> 8;
834 OP (1) = relocation
>> 16;
835 OP (0) = relocation
>> 24;
842 WARN_REDHAT ("RX_RH_DIFF");
843 val
= bfd_get_32 (output_bfd
, & OP (0));
845 bfd_put_32 (output_bfd
, val
, & OP (0));
850 WARN_REDHAT ("RX_RH_GPRELB");
851 relocation
-= get_gp (&r
, info
, input_bfd
, input_section
, rel
->r_offset
);
853 #if RX_OPCODE_BIG_ENDIAN
855 OP (0) = relocation
>> 8;
858 OP (1) = relocation
>> 8;
863 WARN_REDHAT ("RX_RH_GPRELW");
864 relocation
-= get_gp (&r
, info
, input_bfd
, input_section
, rel
->r_offset
);
868 #if RX_OPCODE_BIG_ENDIAN
870 OP (0) = relocation
>> 8;
873 OP (1) = relocation
>> 8;
878 WARN_REDHAT ("RX_RH_GPRELL");
879 relocation
-= get_gp (&r
, info
, input_bfd
, input_section
, rel
->r_offset
);
883 #if RX_OPCODE_BIG_ENDIAN
885 OP (0) = relocation
>> 8;
888 OP (1) = relocation
>> 8;
892 /* Internal relocations just for relaxation: */
893 case R_RX_RH_ABS5p5B
:
894 RX_STACK_POP (relocation
);
897 OP (0) |= relocation
>> 2;
899 OP (1) |= (relocation
<< 6) & 0x80;
900 OP (1) |= (relocation
<< 3) & 0x08;
903 case R_RX_RH_ABS5p5W
:
904 RX_STACK_POP (relocation
);
909 OP (0) |= relocation
>> 2;
911 OP (1) |= (relocation
<< 6) & 0x80;
912 OP (1) |= (relocation
<< 3) & 0x08;
915 case R_RX_RH_ABS5p5L
:
916 RX_STACK_POP (relocation
);
921 OP (0) |= relocation
>> 2;
923 OP (1) |= (relocation
<< 6) & 0x80;
924 OP (1) |= (relocation
<< 3) & 0x08;
927 case R_RX_RH_ABS5p8B
:
928 RX_STACK_POP (relocation
);
931 OP (0) |= (relocation
<< 3) & 0x80;
932 OP (0) |= relocation
& 0x0f;
935 case R_RX_RH_ABS5p8W
:
936 RX_STACK_POP (relocation
);
941 OP (0) |= (relocation
<< 3) & 0x80;
942 OP (0) |= relocation
& 0x0f;
945 case R_RX_RH_ABS5p8L
:
946 RX_STACK_POP (relocation
);
951 OP (0) |= (relocation
<< 3) & 0x80;
952 OP (0) |= relocation
& 0x0f;
955 case R_RX_RH_UIMM4p8
:
958 OP (0) |= relocation
<< 4;
961 case R_RX_RH_UNEG4p8
:
964 OP (0) |= (-relocation
) << 4;
967 /* Complex reloc handling: */
971 RX_STACK_POP (relocation
);
972 #if RX_OPCODE_BIG_ENDIAN
974 OP (2) = relocation
>> 8;
975 OP (1) = relocation
>> 16;
976 OP (0) = relocation
>> 24;
979 OP (1) = relocation
>> 8;
980 OP (2) = relocation
>> 16;
981 OP (3) = relocation
>> 24;
987 RX_STACK_POP (relocation
);
988 #if RX_OPCODE_BIG_ENDIAN
990 OP (1) = relocation
>> 8;
991 OP (2) = relocation
>> 16;
992 OP (3) = relocation
>> 24;
995 OP (2) = relocation
>> 8;
996 OP (1) = relocation
>> 16;
997 OP (0) = relocation
>> 24;
1001 case R_RX_ABS24S_PCREL
:
1004 RX_STACK_POP (relocation
);
1005 RANGE (-0x800000, 0x7fffff);
1006 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
1008 OP (2) = relocation
;
1009 OP (1) = relocation
>> 8;
1010 OP (0) = relocation
>> 16;
1014 OP (0) = relocation
;
1015 OP (1) = relocation
>> 8;
1016 OP (2) = relocation
>> 16;
1022 RX_STACK_POP (relocation
);
1023 RANGE (-32768, 65535);
1024 #if RX_OPCODE_BIG_ENDIAN
1025 OP (1) = relocation
;
1026 OP (0) = relocation
>> 8;
1028 OP (0) = relocation
;
1029 OP (1) = relocation
>> 8;
1033 case R_RX_ABS16_REV
:
1035 RX_STACK_POP (relocation
);
1036 RANGE (-32768, 65535);
1037 #if RX_OPCODE_BIG_ENDIAN
1038 OP (0) = relocation
;
1039 OP (1) = relocation
>> 8;
1041 OP (1) = relocation
;
1042 OP (0) = relocation
>> 8;
1046 case R_RX_ABS16S_PCREL
:
1048 RX_STACK_POP (relocation
);
1049 RANGE (-32768, 32767);
1050 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
1052 OP (1) = relocation
;
1053 OP (0) = relocation
>> 8;
1057 OP (0) = relocation
;
1058 OP (1) = relocation
>> 8;
1064 RX_STACK_POP (relocation
);
1066 #if RX_OPCODE_BIG_ENDIAN
1067 OP (1) = relocation
;
1068 OP (0) = relocation
>> 8;
1070 OP (0) = relocation
;
1071 OP (1) = relocation
>> 8;
1077 RX_STACK_POP (relocation
);
1080 #if RX_OPCODE_BIG_ENDIAN
1081 OP (1) = relocation
;
1082 OP (0) = relocation
>> 8;
1084 OP (0) = relocation
;
1085 OP (1) = relocation
>> 8;
1091 RX_STACK_POP (relocation
);
1094 #if RX_OPCODE_BIG_ENDIAN
1095 OP (1) = relocation
;
1096 OP (0) = relocation
>> 8;
1098 OP (0) = relocation
;
1099 OP (1) = relocation
>> 8;
1105 RX_STACK_POP (relocation
);
1107 OP (0) = relocation
;
1112 RX_STACK_POP (relocation
);
1114 OP (0) = relocation
;
1119 RX_STACK_POP (relocation
);
1122 OP (0) = relocation
;
1127 RX_STACK_POP (relocation
);
1130 OP (0) = relocation
;
1135 case R_RX_ABS8S_PCREL
:
1136 RX_STACK_POP (relocation
);
1138 OP (0) = relocation
;
1142 if (r_symndx
< symtab_hdr
->sh_info
)
1143 RX_STACK_PUSH (sec
->output_section
->vma
1144 + sec
->output_offset
1150 && (h
->root
.type
== bfd_link_hash_defined
1151 || h
->root
.type
== bfd_link_hash_defweak
))
1152 RX_STACK_PUSH (h
->root
.u
.def
.value
1153 + sec
->output_section
->vma
1154 + sec
->output_offset
1157 _bfd_error_handler (_("Warning: RX_SYM reloc with an unknown symbol"));
1165 saw_subtract
= TRUE
;
1168 RX_STACK_PUSH (tmp
);
1176 RX_STACK_POP (tmp1
);
1177 RX_STACK_POP (tmp2
);
1179 RX_STACK_PUSH (tmp1
);
1187 saw_subtract
= TRUE
;
1188 RX_STACK_POP (tmp1
);
1189 RX_STACK_POP (tmp2
);
1191 RX_STACK_PUSH (tmp2
);
1199 RX_STACK_POP (tmp1
);
1200 RX_STACK_POP (tmp2
);
1202 RX_STACK_PUSH (tmp1
);
1210 RX_STACK_POP (tmp1
);
1211 RX_STACK_POP (tmp2
);
1213 RX_STACK_PUSH (tmp1
);
1221 RX_STACK_POP (tmp1
);
1222 RX_STACK_POP (tmp2
);
1224 RX_STACK_PUSH (tmp1
);
1232 RX_STACK_POP (tmp1
);
1233 RX_STACK_POP (tmp2
);
1235 RX_STACK_PUSH (tmp1
);
1239 case R_RX_OPsctsize
:
1240 RX_STACK_PUSH (input_section
->size
);
1244 RX_STACK_PUSH (input_section
->output_section
->vma
);
1251 RX_STACK_POP (tmp1
);
1252 RX_STACK_POP (tmp2
);
1254 RX_STACK_PUSH (tmp1
);
1262 RX_STACK_POP (tmp1
);
1263 RX_STACK_POP (tmp2
);
1265 RX_STACK_PUSH (tmp1
);
1273 RX_STACK_POP (tmp1
);
1274 RX_STACK_POP (tmp2
);
1276 RX_STACK_PUSH (tmp1
);
1286 RX_STACK_PUSH (tmp
);
1294 RX_STACK_POP (tmp1
);
1295 RX_STACK_POP (tmp2
);
1297 RX_STACK_PUSH (tmp1
);
1302 RX_STACK_PUSH (get_romstart (&r
, info
, input_bfd
, input_section
, rel
->r_offset
));
1306 RX_STACK_PUSH (get_ramstart (&r
, info
, input_bfd
, input_section
, rel
->r_offset
));
1310 r
= bfd_reloc_notsupported
;
      if (r != bfd_reloc_ok)
	{
	  const char * msg = NULL;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* Catch the case of a missing function declaration
		 and emit a more helpful error message.  */
	      if (r_type == R_RX_DIR24S_PCREL)
		msg = _("%B(%A): error: call to undefined function '%s'");
	      else
		r = info->callbacks->reloc_overflow
		  (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
		   input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      r = info->callbacks->undefined_symbol
		(info, name, input_bfd, input_section, rel->r_offset,
		 TRUE);
	      break;

	    case bfd_reloc_other:
	      msg = _("%B(%A): warning: unaligned access to symbol '%s' in the small data area");
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("%B(%A): internal error: out of range error");
	      break;

	    case bfd_reloc_notsupported:
	      msg = _("%B(%A): internal error: unsupported relocation error");
	      break;

	    case bfd_reloc_dangerous:
	      msg = _("%B(%A): internal error: dangerous relocation");
	      break;

	    default:
	      msg = _("%B(%A): internal error: unknown error");
	      break;
	    }

	  if (msg)
	    _bfd_error_handler (msg, input_bfd, input_section, name);
	}
    }

  return TRUE;
}
/* Relaxation Support.  */

/* Progression of relocations from largest operand size to smallest
   operand size.  */

static int
next_smaller_reloc (int r)
{
  switch (r)
    {
    case R_RX_DIR32:           return R_RX_DIR24S;
    case R_RX_DIR24S:          return R_RX_DIR16S;
    case R_RX_DIR16S:          return R_RX_DIR8S;
    case R_RX_DIR8S:           return R_RX_NONE;

    case R_RX_DIR16:           return R_RX_DIR8;
    case R_RX_DIR8:            return R_RX_NONE;

    case R_RX_DIR16U:          return R_RX_DIR8U;
    case R_RX_DIR8U:           return R_RX_NONE;

    case R_RX_DIR24S_PCREL:    return R_RX_DIR16S_PCREL;
    case R_RX_DIR16S_PCREL:    return R_RX_DIR8S_PCREL;
    case R_RX_DIR8S_PCREL:     return R_RX_DIR3U_PCREL;

    case R_RX_DIR16UL:         return R_RX_DIR8UL;
    case R_RX_DIR8UL:          return R_RX_NONE;
    case R_RX_DIR16UW:         return R_RX_DIR8UW;
    case R_RX_DIR8UW:          return R_RX_NONE;

    case R_RX_RH_32_OP:        return R_RX_RH_24_OP;
    case R_RX_RH_24_OP:        return R_RX_RH_16_OP;
    case R_RX_RH_16_OP:        return R_RX_DIR8;

    case R_RX_ABS32:           return R_RX_ABS24S;
    case R_RX_ABS24S:          return R_RX_ABS16S;
    case R_RX_ABS16:           return R_RX_ABS8;
    case R_RX_ABS16U:          return R_RX_ABS8U;
    case R_RX_ABS16S:          return R_RX_ABS8S;
    case R_RX_ABS8:            return R_RX_NONE;
    case R_RX_ABS8U:           return R_RX_NONE;
    case R_RX_ABS8S:           return R_RX_NONE;
    case R_RX_ABS24S_PCREL:    return R_RX_ABS16S_PCREL;
    case R_RX_ABS16S_PCREL:    return R_RX_ABS8S_PCREL;
    case R_RX_ABS8S_PCREL:     return R_RX_NONE;
    case R_RX_ABS16UL:         return R_RX_ABS8UL;
    case R_RX_ABS16UW:         return R_RX_ABS8UW;
    case R_RX_ABS8UL:          return R_RX_NONE;
    case R_RX_ABS8UW:          return R_RX_NONE;
    }
  return r;
}
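/* For example, starting from R_RX_DIR32 the relaxation pass can step the
   operand down through R_RX_DIR24S, R_RX_DIR16S and R_RX_DIR8S, saving one
   byte per step, until the value no longer fits in the smaller encoding;
   R_RX_NONE terminates each chain.  */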
/* Delete some bytes from a section while relaxing.  */

static bfd_boolean
elf32_rx_relax_delete_bytes (bfd *abfd, asection *sec, bfd_vma addr, int count,
			     Elf_Internal_Rela *alignment_rel, int force_snip)
{
1428 Elf_Internal_Shdr
* symtab_hdr
;
1429 unsigned int sec_shndx
;
1430 bfd_byte
* contents
;
1431 Elf_Internal_Rela
* irel
;
1432 Elf_Internal_Rela
* irelend
;
1433 Elf_Internal_Sym
* isym
;
1434 Elf_Internal_Sym
* isymend
;
1436 unsigned int symcount
;
1437 struct elf_link_hash_entry
** sym_hashes
;
1438 struct elf_link_hash_entry
** end_hashes
;
1443 sec_shndx
= _bfd_elf_section_from_bfd_section (abfd
, sec
);
1445 contents
= elf_section_data (sec
)->this_hdr
.contents
;
1447 /* The deletion must stop at the next alignment boundary, if
1448 ALIGNMENT_REL is non-NULL. */
1451 toaddr
= alignment_rel
->r_offset
;
1453 irel
= elf_section_data (sec
)->relocs
;
1454 irelend
= irel
+ sec
->reloc_count
;
1456 /* Actually delete the bytes. */
1457 memmove (contents
+ addr
, contents
+ addr
+ count
,
1458 (size_t) (toaddr
- addr
- count
));
1460 /* If we don't have an alignment marker to worry about, we can just
1461 shrink the section. Otherwise, we have to fill in the newly
1462 created gap with NOP insns (0x03). */
1466 memset (contents
+ toaddr
- count
, 0x03, count
);
1468 /* Adjust all the relocs. */
1469 for (irel
= elf_section_data (sec
)->relocs
; irel
< irelend
; irel
++)
1471 /* Get the new reloc address. */
1472 if (irel
->r_offset
> addr
1473 && (irel
->r_offset
< toaddr
1474 || (force_snip
&& irel
->r_offset
== toaddr
)))
1475 irel
->r_offset
-= count
;
1477 /* If we see an ALIGN marker at the end of the gap, we move it
1478 to the beginning of the gap, since marking these gaps is what
1480 if (irel
->r_offset
== toaddr
1481 && ELF32_R_TYPE (irel
->r_info
) == R_RX_RH_RELAX
1482 && irel
->r_addend
& RX_RELAXA_ALIGN
)
1483 irel
->r_offset
-= count
;
1486 /* Adjust the local symbols defined in this section. */
1487 symtab_hdr
= &elf_tdata (abfd
)->symtab_hdr
;
1488 isym
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
1489 isymend
= isym
+ symtab_hdr
->sh_info
;
1491 for (; isym
< isymend
; isym
++)
1493 /* If the symbol is in the range of memory we just moved, we
1494 have to adjust its value. */
1495 if (isym
->st_shndx
== sec_shndx
1496 && isym
->st_value
> addr
1497 && isym
->st_value
< toaddr
)
1498 isym
->st_value
-= count
;
1500 /* If the symbol *spans* the bytes we just deleted (i.e. it's
1501 *end* is in the moved bytes but it's *start* isn't), then we
1502 must adjust its size. */
1503 if (isym
->st_shndx
== sec_shndx
1504 && isym
->st_value
< addr
1505 && isym
->st_value
+ isym
->st_size
> addr
1506 && isym
->st_value
+ isym
->st_size
< toaddr
)
1507 isym
->st_size
-= count
;
1510 /* Now adjust the global symbols defined in this section. */
1511 symcount
= (symtab_hdr
->sh_size
/ sizeof (Elf32_External_Sym
)
1512 - symtab_hdr
->sh_info
);
1513 sym_hashes
= elf_sym_hashes (abfd
);
1514 end_hashes
= sym_hashes
+ symcount
;
1516 for (; sym_hashes
< end_hashes
; sym_hashes
++)
1518 struct elf_link_hash_entry
*sym_hash
= *sym_hashes
;
1520 if ((sym_hash
->root
.type
== bfd_link_hash_defined
1521 || sym_hash
->root
.type
== bfd_link_hash_defweak
)
1522 && sym_hash
->root
.u
.def
.section
== sec
)
1524 /* As above, adjust the value if needed. */
1525 if (sym_hash
->root
.u
.def
.value
> addr
1526 && sym_hash
->root
.u
.def
.value
< toaddr
)
1527 sym_hash
->root
.u
.def
.value
-= count
;
1529 /* As above, adjust the size if needed. */
1530 if (sym_hash
->root
.u
.def
.value
< addr
1531 && sym_hash
->root
.u
.def
.value
+ sym_hash
->size
> addr
1532 && sym_hash
->root
.u
.def
.value
+ sym_hash
->size
< toaddr
)
1533 sym_hash
->size
-= count
;
1540 /* Used to sort relocs by address. If relocs have the same address,
1541 we maintain their relative order, except that R_RX_RH_RELAX
1542 alignment relocs must be the first reloc for any given address. */
1545 reloc_bubblesort (Elf_Internal_Rela
* r
, int count
)
1549 bfd_boolean swappit
;
1551 /* This is almost a classic bubblesort. It's the slowest sort, but
1552 we're taking advantage of the fact that the relocations are
1553 mostly in order already (the assembler emits them that way) and
1554 we need relocs with the same address to remain in the same
1560 for (i
= 0; i
< count
- 1; i
++)
1562 if (r
[i
].r_offset
> r
[i
+ 1].r_offset
)
1564 else if (r
[i
].r_offset
< r
[i
+ 1].r_offset
)
1566 else if (ELF32_R_TYPE (r
[i
+ 1].r_info
) == R_RX_RH_RELAX
1567 && (r
[i
+ 1].r_addend
& RX_RELAXA_ALIGN
))
1569 else if (ELF32_R_TYPE (r
[i
+ 1].r_info
) == R_RX_RH_RELAX
1570 && (r
[i
+ 1].r_addend
& RX_RELAXA_ELIGN
)
1571 && !(ELF32_R_TYPE (r
[i
].r_info
) == R_RX_RH_RELAX
1572 && (r
[i
].r_addend
& RX_RELAXA_ALIGN
)))
1579 Elf_Internal_Rela tmp
;
1584 /* If we do move a reloc back, re-scan to see if it
1585 needs to be moved even further back. This avoids
1586 most of the O(n^2) behavior for our cases. */
#define OFFSET_FOR_RELOC(rel, lrel, scale) \
  rx_offset_for_reloc (abfd, rel + 1, symtab_hdr, shndx_buf, intsyms, \
		       lrel, abfd, sec, link_info, scale)
1601 rx_offset_for_reloc (bfd
* abfd
,
1602 Elf_Internal_Rela
* rel
,
1603 Elf_Internal_Shdr
* symtab_hdr
,
1604 Elf_External_Sym_Shndx
* shndx_buf ATTRIBUTE_UNUSED
,
1605 Elf_Internal_Sym
* intsyms
,
1606 Elf_Internal_Rela
** lrel
,
1608 asection
* input_section
,
1609 struct bfd_link_info
* info
,
1613 bfd_reloc_status_type r
;
1617 /* REL is the first of 1..N relocations. We compute the symbol
1618 value for each relocation, then combine them if needed. LREL
1619 gets a pointer to the last relocation used. */
1624 /* Get the value of the symbol referred to by the reloc. */
1625 if (ELF32_R_SYM (rel
->r_info
) < symtab_hdr
->sh_info
)
1627 /* A local symbol. */
1628 Elf_Internal_Sym
*isym
;
1631 isym
= intsyms
+ ELF32_R_SYM (rel
->r_info
);
1633 if (isym
->st_shndx
== SHN_UNDEF
)
1634 ssec
= bfd_und_section_ptr
;
1635 else if (isym
->st_shndx
== SHN_ABS
)
1636 ssec
= bfd_abs_section_ptr
;
1637 else if (isym
->st_shndx
== SHN_COMMON
)
1638 ssec
= bfd_com_section_ptr
;
1640 ssec
= bfd_section_from_elf_index (abfd
,
1643 /* Initial symbol value. */
1644 symval
= isym
->st_value
;
1646 /* GAS may have made this symbol relative to a section, in
1647 which case, we have to add the addend to find the
1649 if (ELF_ST_TYPE (isym
->st_info
) == STT_SECTION
)
1650 symval
+= rel
->r_addend
;
1654 if ((ssec
->flags
& SEC_MERGE
)
1655 && ssec
->sec_info_type
== SEC_INFO_TYPE_MERGE
)
1656 symval
= _bfd_merged_section_offset (abfd
, & ssec
,
1657 elf_section_data (ssec
)->sec_info
,
1661 /* Now make the offset relative to where the linker is putting it. */
1664 ssec
->output_section
->vma
+ ssec
->output_offset
;
1666 symval
+= rel
->r_addend
;
1671 struct elf_link_hash_entry
* h
;
1673 /* An external symbol. */
1674 indx
= ELF32_R_SYM (rel
->r_info
) - symtab_hdr
->sh_info
;
1675 h
= elf_sym_hashes (abfd
)[indx
];
1676 BFD_ASSERT (h
!= NULL
);
1678 if (h
->root
.type
!= bfd_link_hash_defined
1679 && h
->root
.type
!= bfd_link_hash_defweak
)
1681 /* This appears to be a reference to an undefined
1682 symbol. Just ignore it--it will be caught by the
1683 regular reloc processing. */
1689 symval
= (h
->root
.u
.def
.value
1690 + h
->root
.u
.def
.section
->output_section
->vma
1691 + h
->root
.u
.def
.section
->output_offset
);
1693 symval
+= rel
->r_addend
;
1696 switch (ELF32_R_TYPE (rel
->r_info
))
1699 RX_STACK_PUSH (symval
);
1703 RX_STACK_POP (tmp1
);
1705 RX_STACK_PUSH (tmp1
);
1709 RX_STACK_POP (tmp1
);
1710 RX_STACK_POP (tmp2
);
1712 RX_STACK_PUSH (tmp1
);
1716 RX_STACK_POP (tmp1
);
1717 RX_STACK_POP (tmp2
);
1719 RX_STACK_PUSH (tmp2
);
1723 RX_STACK_POP (tmp1
);
1724 RX_STACK_POP (tmp2
);
1726 RX_STACK_PUSH (tmp1
);
1730 RX_STACK_POP (tmp1
);
1731 RX_STACK_POP (tmp2
);
1733 RX_STACK_PUSH (tmp1
);
1737 RX_STACK_POP (tmp1
);
1738 RX_STACK_POP (tmp2
);
1740 RX_STACK_PUSH (tmp1
);
1744 RX_STACK_POP (tmp1
);
1745 RX_STACK_POP (tmp2
);
1747 RX_STACK_PUSH (tmp1
);
1750 case R_RX_OPsctsize
:
1751 RX_STACK_PUSH (input_section
->size
);
1755 RX_STACK_PUSH (input_section
->output_section
->vma
);
1759 RX_STACK_POP (tmp1
);
1760 RX_STACK_POP (tmp2
);
1762 RX_STACK_PUSH (tmp1
);
1766 RX_STACK_POP (tmp1
);
1767 RX_STACK_POP (tmp2
);
1769 RX_STACK_PUSH (tmp1
);
1773 RX_STACK_POP (tmp1
);
1774 RX_STACK_POP (tmp2
);
1776 RX_STACK_PUSH (tmp1
);
1780 RX_STACK_POP (tmp1
);
1782 RX_STACK_PUSH (tmp1
);
1786 RX_STACK_POP (tmp1
);
1787 RX_STACK_POP (tmp2
);
1789 RX_STACK_PUSH (tmp1
);
1793 RX_STACK_PUSH (get_romstart (&r
, info
, input_bfd
, input_section
, rel
->r_offset
));
1797 RX_STACK_PUSH (get_ramstart (&r
, info
, input_bfd
, input_section
, rel
->r_offset
));
1805 RX_STACK_POP (symval
);
1816 RX_STACK_POP (symval
);
1824 RX_STACK_POP (symval
);
static void
move_reloc (Elf_Internal_Rela * irel, Elf_Internal_Rela * srel, int delta)
{
  bfd_vma old_offset = srel->r_offset;

  irel ++;
  while (irel <= srel)
    {
      if (irel->r_offset == old_offset)
	irel->r_offset += delta;
      irel ++;
    }
}
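/* Note that move_reloc shifts not just SREL itself but every relocation
   between IREL and SREL that shares SREL's old offset: when opcode bytes are
   deleted ahead of an operand, all relocs pointing at that operand must move
   together.  */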
/* Relax one section.  */

static bfd_boolean
elf32_rx_relax_section (bfd *                  abfd,
			asection *             sec,
			struct bfd_link_info * link_info,
			bfd_boolean *          again,
			bfd_boolean            allow_pcrel3)
{
  Elf_Internal_Shdr *      symtab_hdr;
  Elf_Internal_Shdr *      shndx_hdr;
  Elf_Internal_Rela *      internal_relocs;
  Elf_Internal_Rela *      free_relocs = NULL;
  Elf_Internal_Rela *      irel;
  Elf_Internal_Rela *      srel;
  Elf_Internal_Rela *      irelend;
  Elf_Internal_Rela *      next_alignment;
  Elf_Internal_Rela *      prev_alignment;
  bfd_byte *               contents = NULL;
  bfd_byte *               free_contents = NULL;
  Elf_Internal_Sym *       intsyms = NULL;
  Elf_Internal_Sym *       free_intsyms = NULL;
  Elf_External_Sym_Shndx * shndx_buf = NULL;
  int section_alignment_glue;
  /* how much to scale the relocation by - 1, 2, or 4.  */
  int scale;

  /* Assume nothing changes.  */
  *again = FALSE;

  /* We don't have to do anything for a relocatable link, if
     this section does not have relocs, or if this is not a
     code section.  */
  if (link_info->relocatable
      || (sec->flags & SEC_RELOC) == 0
      || sec->reloc_count == 0
      || (sec->flags & SEC_CODE) == 0)
    return TRUE;
1892 symtab_hdr
= &elf_tdata (abfd
)->symtab_hdr
;
1893 shndx_hdr
= &elf_tdata (abfd
)->symtab_shndx_hdr
;
1895 sec_start
= sec
->output_section
->vma
+ sec
->output_offset
;
1897 /* Get the section contents. */
1898 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
1899 contents
= elf_section_data (sec
)->this_hdr
.contents
;
1900 /* Go get them off disk. */
1903 if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
1905 elf_section_data (sec
)->this_hdr
.contents
= contents
;
1908 /* Read this BFD's symbols. */
1909 /* Get cached copy if it exists. */
1910 if (symtab_hdr
->contents
!= NULL
)
1911 intsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
1914 intsyms
= bfd_elf_get_elf_syms (abfd
, symtab_hdr
, symtab_hdr
->sh_info
, 0, NULL
, NULL
, NULL
);
1915 symtab_hdr
->contents
= (bfd_byte
*) intsyms
;
1918 if (shndx_hdr
->sh_size
!= 0)
1922 amt
= symtab_hdr
->sh_info
;
1923 amt
*= sizeof (Elf_External_Sym_Shndx
);
1924 shndx_buf
= (Elf_External_Sym_Shndx
*) bfd_malloc (amt
);
1925 if (shndx_buf
== NULL
)
1927 if (bfd_seek (abfd
, shndx_hdr
->sh_offset
, SEEK_SET
) != 0
1928 || bfd_bread (shndx_buf
, amt
, abfd
) != amt
)
1930 shndx_hdr
->contents
= (bfd_byte
*) shndx_buf
;
1933 /* Get a copy of the native relocations. */
1934 internal_relocs
= (_bfd_elf_link_read_relocs
1935 (abfd
, sec
, NULL
, (Elf_Internal_Rela
*) NULL
,
1936 link_info
->keep_memory
));
1937 if (internal_relocs
== NULL
)
1939 if (! link_info
->keep_memory
)
1940 free_relocs
= internal_relocs
;
1942 /* The RL_ relocs must be just before the operand relocs they go
1943 with, so we must sort them to guarantee this. We use bubblesort
1944 instead of qsort so we can guarantee that relocs with the same
1945 address remain in the same relative order. */
1946 reloc_bubblesort (internal_relocs
, sec
->reloc_count
);
1948 /* Walk through them looking for relaxing opportunities. */
1949 irelend
= internal_relocs
+ sec
->reloc_count
;
1951 /* This will either be NULL or a pointer to the next alignment
1953 next_alignment
= internal_relocs
;
1954 /* This will be the previous alignment, although at first it points
1955 to the first real relocation. */
1956 prev_alignment
= internal_relocs
;
1958 /* We calculate worst case shrinkage caused by alignment directives.
1959 No fool-proof, but better than either ignoring the problem or
1960 doing heavy duty analysis of all the alignment markers in all
1962 section_alignment_glue
= 0;
1963 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
1964 if (ELF32_R_TYPE (irel
->r_info
) == R_RX_RH_RELAX
1965 && irel
->r_addend
& RX_RELAXA_ALIGN
)
1967 int this_glue
= 1 << (irel
->r_addend
& RX_RELAXA_ANUM
);
1969 if (section_alignment_glue
< this_glue
)
1970 section_alignment_glue
= this_glue
;
1972 /* Worst case is all 0..N alignments, in order, causing 2*N-1 byte
1974 section_alignment_glue
*= 2;
1976 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
1978 unsigned char *insn
;
1981 /* The insns we care about are all marked with one of these. */
1982 if (ELF32_R_TYPE (irel
->r_info
) != R_RX_RH_RELAX
)
1985 if (irel
->r_addend
& RX_RELAXA_ALIGN
1986 || next_alignment
== internal_relocs
)
1988 /* When we delete bytes, we need to maintain all the alignments
1989 indicated. In addition, we need to be careful about relaxing
1990 jumps across alignment boundaries - these displacements
1991 *grow* when we delete bytes. For now, don't shrink
1992 displacements across an alignment boundary, just in case.
1993 Note that this only affects relocations to the same
1995 prev_alignment
= next_alignment
;
1996 next_alignment
+= 2;
1997 while (next_alignment
< irelend
1998 && (ELF32_R_TYPE (next_alignment
->r_info
) != R_RX_RH_RELAX
1999 || !(next_alignment
->r_addend
& RX_RELAXA_ELIGN
)))
2001 if (next_alignment
>= irelend
|| next_alignment
->r_offset
== 0)
2002 next_alignment
= NULL
;
2005 /* When we hit alignment markers, see if we've shrunk enough
2006 before them to reduce the gap without violating the alignment
2008 if (irel
->r_addend
& RX_RELAXA_ALIGN
)
2010 /* At this point, the next relocation *should* be the ELIGN
2012 Elf_Internal_Rela
*erel
= irel
+ 1;
2013 unsigned int alignment
, nbytes
;
2015 if (ELF32_R_TYPE (erel
->r_info
) != R_RX_RH_RELAX
)
2017 if (!(erel
->r_addend
& RX_RELAXA_ELIGN
))
2020 alignment
= 1 << (irel
->r_addend
& RX_RELAXA_ANUM
);
2022 if (erel
->r_offset
- irel
->r_offset
< alignment
)
2025 nbytes
= erel
->r_offset
- irel
->r_offset
;
2026 nbytes
/= alignment
;
2027 nbytes
*= alignment
;
2029 elf32_rx_relax_delete_bytes (abfd
, sec
, erel
->r_offset
-nbytes
, nbytes
, next_alignment
,
2030 erel
->r_offset
== sec
->size
);
2036 if (irel
->r_addend
& RX_RELAXA_ELIGN
)
2039 insn
= contents
+ irel
->r_offset
;
2041 nrelocs
= irel
->r_addend
& RX_RELAXA_RNUM
;
2043 /* At this point, we have an insn that is a candidate for linker
2044 relaxation. There are NRELOCS relocs following that may be
2045 relaxed, although each reloc may be made of more than one
2046 reloc entry (such as gp-rel symbols). */
2048 /* Get the value of the symbol referred to by the reloc. Just
2049 in case this is the last reloc in the list, use the RL's
2050 addend to choose between this reloc (no addend) or the next
2051 (yes addend, which means at least one following reloc). */
2053 /* srel points to the "current" reloction for this insn -
2054 actually the last reloc for a given operand, which is the one
2055 we need to update. We check the relaxations in the same
2056 order that the relocations happen, so we'll just push it
2060 pc
= sec
->output_section
->vma
+ sec
->output_offset
2064 symval = OFFSET_FOR_RELOC (srel, &srel, &scale); \
2065 pcrel = symval - pc + srel->r_addend; \
2068 #define SNIPNR(offset, nbytes) \
2069 elf32_rx_relax_delete_bytes (abfd, sec, (insn - contents) + offset, nbytes, next_alignment, 0);
2070 #define SNIP(offset, nbytes, newtype) \
2071 SNIPNR (offset, nbytes); \
2072 srel->r_info = ELF32_R_INFO (ELF32_R_SYM (srel->r_info), newtype)
2074 /* The order of these bit tests must match the order that the
2075 relocs appear in. Since we sorted those by offset, we can
2078 /* Note that the numbers in, say, DSP6 are the bit offsets of
2079 the code fields that describe the operand. Bits number 0 for
2080 the MSB of insn[0]. */
2087 if (irel
->r_addend
& RX_RELAXA_DSP6
)
2092 if (code
== 2 && symval
/scale
<= 255)
2094 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2097 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2098 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2100 SNIP (3, 1, newrel
);
2105 else if (code
== 1 && symval
== 0)
2108 SNIP (2, 1, R_RX_NONE
);
2112 /* Special case DSP:5 format: MOV.bwl dsp:5[Rsrc],Rdst. */
2113 else if (code
== 1 && symval
/scale
<= 31
2114 /* Decodable bits. */
2115 && (insn
[0] & 0xcc) == 0xcc
2117 && (insn
[0] & 0x30) != 0x30
2118 /* Register MSBs. */
2119 && (insn
[1] & 0x88) == 0x00)
2123 insn
[0] = 0x88 | (insn
[0] & 0x30);
2124 /* The register fields are in the right place already. */
2126 /* We can't relax this new opcode. */
2129 switch ((insn
[0] & 0x30) >> 4)
2132 newrel
= R_RX_RH_ABS5p5B
;
2135 newrel
= R_RX_RH_ABS5p5W
;
2138 newrel
= R_RX_RH_ABS5p5L
;
2142 move_reloc (irel
, srel
, -2);
2143 SNIP (2, 1, newrel
);
2146 /* Special case DSP:5 format: MOVU.bw dsp:5[Rsrc],Rdst. */
2147 else if (code
== 1 && symval
/scale
<= 31
2148 /* Decodable bits. */
2149 && (insn
[0] & 0xf8) == 0x58
2150 /* Register MSBs. */
2151 && (insn
[1] & 0x88) == 0x00)
2155 insn
[0] = 0xb0 | ((insn
[0] & 0x04) << 1);
2156 /* The register fields are in the right place already. */
2158 /* We can't relax this new opcode. */
2161 switch ((insn
[0] & 0x08) >> 3)
2164 newrel
= R_RX_RH_ABS5p5B
;
2167 newrel
= R_RX_RH_ABS5p5W
;
2171 move_reloc (irel
, srel
, -2);
2172 SNIP (2, 1, newrel
);
2176 /* A DSP4 operand always follows a DSP6 operand, even if there's
2177 no relocation for it. We have to read the code out of the
2178 opcode to calculate the offset of the operand. */
2179 if (irel
->r_addend
& RX_RELAXA_DSP4
)
2181 int code6
, offset
= 0;
2185 code6
= insn
[0] & 0x03;
2188 case 0: offset
= 2; break;
2189 case 1: offset
= 3; break;
2190 case 2: offset
= 4; break;
2191 case 3: offset
= 2; break;
2194 code
= (insn
[0] & 0x0c) >> 2;
2196 if (code
== 2 && symval
/ scale
<= 255)
2198 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2202 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2203 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2205 SNIP (offset
+1, 1, newrel
);
2210 else if (code
== 1 && symval
== 0)
2213 SNIP (offset
, 1, R_RX_NONE
);
2216 /* Special case DSP:5 format: MOV.bwl Rsrc,dsp:5[Rdst] */
2217 else if (code
== 1 && symval
/scale
<= 31
2218 /* Decodable bits. */
2219 && (insn
[0] & 0xc3) == 0xc3
2221 && (insn
[0] & 0x30) != 0x30
2222 /* Register MSBs. */
2223 && (insn
[1] & 0x88) == 0x00)
2227 insn
[0] = 0x80 | (insn
[0] & 0x30);
2228 /* The register fields are in the right place already. */
2230 /* We can't relax this new opcode. */
2233 switch ((insn
[0] & 0x30) >> 4)
2236 newrel
= R_RX_RH_ABS5p5B
;
2239 newrel
= R_RX_RH_ABS5p5W
;
2242 newrel
= R_RX_RH_ABS5p5L
;
2246 move_reloc (irel
, srel
, -2);
2247 SNIP (2, 1, newrel
);
2251 /* These always occur alone, but the offset depends on whether
2252 it's a MEMEX opcode (0x06) or not. */
2253 if (irel
->r_addend
& RX_RELAXA_DSP14
)
2258 if (insn
[0] == 0x06)
2265 if (code
== 2 && symval
/ scale
<= 255)
2267 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2271 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2272 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2274 SNIP (offset
, 1, newrel
);
2278 else if (code
== 1 && symval
== 0)
2281 SNIP (offset
, 1, R_RX_NONE
);
2292 /* These always occur alone. */
2293 if (irel
->r_addend
& RX_RELAXA_IMM6
)
2299 /* These relocations sign-extend, so we must do signed compares. */
2300 ssymval
= (long) symval
;
2302 code
= insn
[0] & 0x03;
2304 if (code
== 0 && ssymval
<= 8388607 && ssymval
>= -8388608)
2306 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2310 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2311 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2313 SNIP (2, 1, newrel
);
2318 else if (code
== 3 && ssymval
<= 32767 && ssymval
>= -32768)
2320 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2324 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2325 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2327 SNIP (2, 1, newrel
);
2332 /* Special case UIMM8 format: CMP #uimm8,Rdst. */
2333 else if (code
== 2 && ssymval
<= 255 && ssymval
>= 16
2334 /* Decodable bits. */
2335 && (insn
[0] & 0xfc) == 0x74
2336 /* Decodable bits. */
2337 && ((insn
[1] & 0xf0) == 0x00))
2342 insn
[1] = 0x50 | (insn
[1] & 0x0f);
2344 /* We can't relax this new opcode. */
2347 if (STACK_REL_P (ELF32_R_TYPE (srel
->r_info
)))
2348 newrel
= R_RX_ABS8U
;
2350 newrel
= R_RX_DIR8U
;
2352 SNIP (2, 1, newrel
);
2356 else if (code
== 2 && ssymval
<= 127 && ssymval
>= -128)
2358 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2362 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2363 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2365 SNIP (2, 1, newrel
);
2370 /* Special case UIMM4 format: CMP, MUL, AND, OR. */
2371 else if (code
== 1 && ssymval
<= 15 && ssymval
>= 0
2372 /* Decodable bits and immediate type. */
2374 /* Decodable bits. */
2375 && (insn
[1] & 0xc0) == 0x00)
2377 static const int newop
[4] = { 1, 3, 4, 5 };
2379 insn
[0] = 0x60 | newop
[insn
[1] >> 4];
2380 /* The register number doesn't move. */
2382 /* We can't relax this new opcode. */
2385 move_reloc (irel
, srel
, -1);
2387 SNIP (2, 1, R_RX_RH_UIMM4p8
);
2391 /* Special case UIMM4 format: ADD -> ADD/SUB. */
2392 else if (code
== 1 && ssymval
<= 15 && ssymval
>= -15
2393 /* Decodable bits and immediate type. */
2395 /* Same register for source and destination. */
2396 && ((insn
[1] >> 4) == (insn
[1] & 0x0f)))
2400 /* Note that we can't turn "add $0,Rs" into a NOP
2401 because the flags need to be set right. */
2405 insn
[0] = 0x60; /* Subtract. */
2406 newrel
= R_RX_RH_UNEG4p8
;
2410 insn
[0] = 0x62; /* Add. */
2411 newrel
= R_RX_RH_UIMM4p8
;
2414 /* The register number is in the right place. */
2416 /* We can't relax this new opcode. */
2419 move_reloc (irel
, srel
, -1);
2421 SNIP (2, 1, newrel
);
2426 /* These are either matched with a DSP6 (2-byte base) or an id24
2428 if (irel
->r_addend
& RX_RELAXA_IMM12
)
2430 int dspcode
, offset
= 0;
2435 if ((insn
[0] & 0xfc) == 0xfc)
2436 dspcode
= 1; /* Just something with one byte operand. */
2438 dspcode
= insn
[0] & 3;
2441 case 0: offset
= 2; break;
2442 case 1: offset
= 3; break;
2443 case 2: offset
= 4; break;
2444 case 3: offset
= 2; break;
2447 /* These relocations sign-extend, so we must do signed compares. */
2448 ssymval
= (long) symval
;
2450 code
= (insn
[1] >> 2) & 3;
2451 if (code
== 0 && ssymval
<= 8388607 && ssymval
>= -8388608)
2453 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2457 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2458 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2460 SNIP (offset
, 1, newrel
);
2465 else if (code
== 3 && ssymval
<= 32767 && ssymval
>= -32768)
2467 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2471 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2472 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2474 SNIP (offset
, 1, newrel
);
2479 /* Special case UIMM8 format: MOV #uimm8,Rdst. */
2480 else if (code
== 2 && ssymval
<= 255 && ssymval
>= 16
2481 /* Decodable bits. */
2483 /* Decodable bits. */
2484 && ((insn
[1] & 0x03) == 0x02))
2489 insn
[1] = 0x40 | (insn
[1] >> 4);
2491 /* We can't relax this new opcode. */
2494 if (STACK_REL_P (ELF32_R_TYPE (srel
->r_info
)))
2495 newrel
= R_RX_ABS8U
;
2497 newrel
= R_RX_DIR8U
;
2499 SNIP (2, 1, newrel
);
2503 else if (code
== 2 && ssymval
<= 127 && ssymval
>= -128)
2505 unsigned int newrel
= ELF32_R_TYPE(srel
->r_info
);
2509 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2510 if (newrel
!= ELF32_R_TYPE(srel
->r_info
))
2512 SNIP (offset
, 1, newrel
);
2517 /* Special case UIMM4 format: MOV #uimm4,Rdst. */
2518 else if (code
== 1 && ssymval
<= 15 && ssymval
>= 0
2519 /* Decodable bits. */
2521 /* Decodable bits. */
2522 && ((insn
[1] & 0x03) == 0x02))
2525 insn
[1] = insn
[1] >> 4;
2527 /* We can't relax this new opcode. */
2530 move_reloc (irel
, srel
, -1);
2532 SNIP (2, 1, R_RX_RH_UIMM4p8
);
2537 if (irel
->r_addend
& RX_RELAXA_BRA
)
2539 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2541 int alignment_glue
= 0;
2545 /* Branches over alignment chunks are problematic, as
2546 deleting bytes here makes the branch *further* away. We
2547 can be agressive with branches within this alignment
2548 block, but not branches outside it. */
2549 if ((prev_alignment
== NULL
2550 || symval
< (bfd_vma
)(sec_start
+ prev_alignment
->r_offset
))
2551 && (next_alignment
== NULL
2552 || symval
> (bfd_vma
)(sec_start
+ next_alignment
->r_offset
)))
2553 alignment_glue
= section_alignment_glue
;
2555 if (ELF32_R_TYPE(srel
[1].r_info
) == R_RX_RH_RELAX
2556 && srel
[1].r_addend
& RX_RELAXA_BRA
2557 && srel
[1].r_offset
< irel
->r_offset
+ pcrel
)
2560 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2562 /* The values we compare PCREL with are not what you'd
2563 expect; they're off by a little to compensate for (1)
2564 where the reloc is relative to the insn, and (2) how much
2565 the insn is going to change when we relax it. */
2567 /* These we have to decode. */
2570 case 0x04: /* BRA pcdsp:24 */
2571 if (-32768 + alignment_glue
<= pcrel
2572 && pcrel
<= 32765 - alignment_glue
)
2575 SNIP (3, 1, newrel
);
2580 case 0x38: /* BRA pcdsp:16 */
2581 if (-128 + alignment_glue
<= pcrel
2582 && pcrel
<= 127 - alignment_glue
)
2585 SNIP (2, 1, newrel
);
2590 case 0x2e: /* BRA pcdsp:8 */
2591 /* Note that there's a risk here of shortening things so
2592 much that we no longer fit this reloc; it *should*
2593 only happen when you branch across a branch, and that
2594 branch also devolves into BRA.S. "Real" code should
2596 if (max_pcrel3
+ alignment_glue
<= pcrel
2597 && pcrel
<= 10 - alignment_glue
2601 SNIP (1, 1, newrel
);
2602 move_reloc (irel
, srel
, -1);
2607 case 0x05: /* BSR pcdsp:24 */
2608 if (-32768 + alignment_glue
<= pcrel
2609 && pcrel
<= 32765 - alignment_glue
)
2612 SNIP (1, 1, newrel
);
2617 case 0x3a: /* BEQ.W pcdsp:16 */
2618 case 0x3b: /* BNE.W pcdsp:16 */
2619 if (-128 + alignment_glue
<= pcrel
2620 && pcrel
<= 127 - alignment_glue
)
2622 insn
[0] = 0x20 | (insn
[0] & 1);
2623 SNIP (1, 1, newrel
);
2628 case 0x20: /* BEQ.B pcdsp:8 */
2629 case 0x21: /* BNE.B pcdsp:8 */
2630 if (max_pcrel3
+ alignment_glue
<= pcrel
2631 && pcrel
- alignment_glue
<= 10
2634 insn
[0] = 0x10 | ((insn
[0] & 1) << 3);
2635 SNIP (1, 1, newrel
);
2636 move_reloc (irel
, srel
, -1);
2641 case 0x16: /* synthetic BNE dsp24 */
2642 case 0x1e: /* synthetic BEQ dsp24 */
2643 if (-32767 + alignment_glue
<= pcrel
2644 && pcrel
<= 32766 - alignment_glue
2647 if (insn
[0] == 0x16)
2651 /* We snip out the bytes at the end else the reloc
2652 will get moved too, and too much. */
2653 SNIP (3, 2, newrel
);
2654 move_reloc (irel
, srel
, -1);
2660 /* Special case - synthetic conditional branches, pcrel24.
2661 Note that EQ and NE have been handled above. */
2662 if ((insn
[0] & 0xf0) == 0x20
2665 && srel
->r_offset
!= irel
->r_offset
+ 1
2666 && -32767 + alignment_glue
<= pcrel
2667 && pcrel
<= 32766 - alignment_glue
)
2671 SNIP (5, 1, newrel
);
2675 /* Special case - synthetic conditional branches, pcrel16 */
2676 if ((insn
[0] & 0xf0) == 0x20
2679 && srel
->r_offset
!= irel
->r_offset
+ 1
2680 && -127 + alignment_glue
<= pcrel
2681 && pcrel
<= 126 - alignment_glue
)
2683 int cond
= (insn
[0] & 0x0f) ^ 0x01;
2685 insn
[0] = 0x20 | cond
;
2686 /* By moving the reloc first, we avoid having
2687 delete_bytes move it also. */
2688 move_reloc (irel
, srel
, -2);
2689 SNIP (2, 3, newrel
);
2694 BFD_ASSERT (nrelocs
== 0);
2696 /* Special case - check MOV.bwl #IMM, dsp[reg] and see if we can
2697 use MOV.bwl #uimm:8, dsp:5[r7] format. This is tricky
2698 because it may have one or two relocations. */
2699 if ((insn
[0] & 0xfc) == 0xf8
2700 && (insn
[1] & 0x80) == 0x00
2701 && (insn
[0] & 0x03) != 0x03)
2703 int dcode
, icode
, reg
, ioff
, dscale
, ilen
;
2704 bfd_vma disp_val
= 0;
2706 Elf_Internal_Rela
* disp_rel
= 0;
2707 Elf_Internal_Rela
* imm_rel
= 0;
2712 dcode
= insn
[0] & 0x03;
2713 icode
= (insn
[1] >> 2) & 0x03;
2714 reg
= (insn
[1] >> 4) & 0x0f;
2716 ioff
= dcode
== 1 ? 3 : dcode
== 2 ? 4 : 2;
2718 /* Figure out what the dispacement is. */
2719 if (dcode
== 1 || dcode
== 2)
2721 /* There's a displacement. See if there's a reloc for it. */
2722 if (srel
[1].r_offset
== irel
->r_offset
+ 2)
2734 #if RX_OPCODE_BIG_ENDIAN
2735 disp_val
= insn
[2] * 256 + insn
[3];
2737 disp_val
= insn
[2] + insn
[3] * 256;
2740 switch (insn
[1] & 3)
2756 /* Figure out what the immediate is. */
2757 if (srel
[1].r_offset
== irel
->r_offset
+ ioff
)
2760 imm_val
= (long) symval
;
2765 unsigned char * ip
= insn
+ ioff
;
	  /* For byte writes, we don't sign extend.  Makes the math easier later.  */
	      imm_val = (char) ip[0];
#if RX_OPCODE_BIG_ENDIAN
	      imm_val = ((char) ip[0] << 8) | ip[1];
#else
	      imm_val = ((char) ip[1] << 8) | ip[0];
#endif
#if RX_OPCODE_BIG_ENDIAN
	      imm_val = ((char) ip[0] << 16) | (ip[1] << 8) | ip[2];
#else
	      imm_val = ((char) ip[2] << 16) | (ip[1] << 8) | ip[0];
#endif
#if RX_OPCODE_BIG_ENDIAN
	      imm_val = (ip[0] << 24) | (ip[1] << 16) | (ip[2] << 8) | ip[3];
#else
	      imm_val = (ip[3] << 24) | (ip[2] << 16) | (ip[1] << 8) | ip[0];
#endif
      /* The shortcut happens when the immediate is 0..255,
	 register r0 to r7, and displacement (scaled) 0..31.  */
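      /* A worked example (illustrative only; it assumes the usual RX
	 operand scaling of 1 for .B, 2 for .W and 4 for .L):
	 MOV.L #10, 16[r3] has a scaled displacement of 16 / 4 = 4,
	 which fits the 0..31 range, so the short form below applies;
	 MOV.L #10, 200[r3] scales to 50 and does not.  */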
      if (0 <= imm_val && imm_val <= 255
	  && 0 <= reg && reg <= 7
	  && disp_val / dscale <= 31)
	  insn[0] = 0x3c | (insn[1] & 0x03);
	  insn[1] = (((disp_val / dscale) << 3) & 0x80) | (reg << 4) | ((disp_val / dscale) & 0x0f);
	  int newrel = R_RX_NONE;

	      newrel = R_RX_RH_ABS5p8B;
	      newrel = R_RX_RH_ABS5p8W;
	      newrel = R_RX_RH_ABS5p8L;

	      disp_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (disp_rel->r_info), newrel);
	      move_reloc (irel, disp_rel, -1);

	      imm_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (imm_rel->r_info), R_RX_DIR8U);
	      move_reloc (disp_rel ? disp_rel : irel,
			  irel->r_offset - imm_rel->r_offset + 2);

	  SNIPNR (3, ilen - 3);
	  /* We can't relax this new opcode.  */

  /* We can't reliably relax branches to DIR3U_PCREL unless we know
     whatever they're branching over won't shrink any more.  If we're
     basically done here, do one more pass just for branches - but
     don't request a pass after that one!  */
  if (!*again && !allow_pcrel3)
      bfd_boolean ignored;

      elf32_rx_relax_section (abfd, sec, link_info, &ignored, TRUE);
  if (free_relocs != NULL)

  if (free_contents != NULL)
    free (free_contents);

  if (shndx_buf != NULL)
      shndx_hdr->contents = NULL;

  if (free_intsyms != NULL)
    free (free_intsyms);
elf32_rx_relax_section_wrapper (bfd * abfd,
				struct bfd_link_info * link_info,
				bfd_boolean * again)
  return elf32_rx_relax_section (abfd, sec, link_info, again, FALSE);
/* Function to set the ELF flag bits.  */

rx_elf_set_private_flags (bfd * abfd, flagword flags)
  elf_elfheader (abfd)->e_flags = flags;
  elf_flags_init (abfd) = TRUE;
static bfd_boolean no_warn_mismatch = FALSE;
static bfd_boolean ignore_lma = TRUE;

void bfd_elf32_rx_set_target_flags (bfd_boolean, bfd_boolean);

bfd_elf32_rx_set_target_flags (bfd_boolean user_no_warn_mismatch,
			       bfd_boolean user_ignore_lma)
  no_warn_mismatch = user_no_warn_mismatch;
  ignore_lma = user_ignore_lma;
/* Converts FLAGS into a descriptive string.
   Returns a static pointer.  */
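/* For instance, a flags value with none of the E_FLAG_RX_* bits set
   is described below as "32-bit doubles, no dsp, no pid, GCC ABI"
   (illustrative example based on the strcat calls that follow).  */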
describe_flags (flagword flags)
  static char buf[128];

  if (flags & E_FLAG_RX_64BIT_DOUBLES)
    strcat (buf, "64-bit doubles");
  else
    strcat (buf, "32-bit doubles");

  if (flags & E_FLAG_RX_DSP)
    strcat (buf, ", dsp");
  else
    strcat (buf, ", no dsp");

  if (flags & E_FLAG_RX_PID)
    strcat (buf, ", pid");
  else
    strcat (buf, ", no pid");

  if (flags & E_FLAG_RX_ABI)
    strcat (buf, ", RX ABI");
  else
    strcat (buf, ", GCC ABI");
/* Merge backend specific data from an object file to the output
   object file when linking.  */

rx_elf_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
  bfd_boolean error = FALSE;

  new_flags = elf_elfheader (ibfd)->e_flags;
  old_flags = elf_elfheader (obfd)->e_flags;

  if (!elf_flags_init (obfd))
      /* First call, no flags set.  */
      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = new_flags;
  else if (old_flags != new_flags)
      flagword known_flags;

      known_flags = E_FLAG_RX_ABI | E_FLAG_RX_64BIT_DOUBLES
	| E_FLAG_RX_DSP | E_FLAG_RX_PID;

      if ((old_flags ^ new_flags) & known_flags)
	  /* Only complain if flag bits we care about do not match.
	     Other bits may be set, since older binaries did use some
	     deprecated flags.  */
	  if (no_warn_mismatch)
	      elf_elfheader (obfd)->e_flags = (new_flags | old_flags) & known_flags;
	      _bfd_error_handler ("There is a conflict merging the ELF header flags from %s",
				  bfd_get_filename (ibfd));
	      _bfd_error_handler (" the input file's flags: %s",
				  describe_flags (new_flags));
	      _bfd_error_handler (" the output file's flags: %s",
				  describe_flags (old_flags));
	  elf_elfheader (obfd)->e_flags = new_flags & known_flags;

  bfd_set_error (bfd_error_bad_value);
rx_elf_print_private_bfd_data (bfd * abfd, void * ptr)
  FILE * file = (FILE *) ptr;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  fprintf (file, _("private flags = 0x%lx:"), (long) flags);

  fprintf (file, "%s", describe_flags (flags));
/* Return the MACH for an e_flags value.  */

elf32_rx_machine (bfd * abfd ATTRIBUTE_UNUSED)
#if 0 /* FIXME: EF_RX_CPU_MASK collides with E_FLAG_RX_...
	 Need to sort out how these flag bits are used.
	 For now we assume that the flags are OK.  */
  if ((elf_elfheader (abfd)->e_flags & EF_RX_CPU_MASK) == EF_RX_CPU_RX)
rx_elf_object_p (bfd * abfd)
  Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
  int nphdrs = elf_elfheader (abfd)->e_phnum;
  static int saw_be = FALSE;

  /* We never want to automatically choose the non-swapping big-endian
     target.  The user can only get that explicitly, such as with -I
  if (abfd->xvec == &bfd_elf32_rx_be_ns_vec
      && abfd->target_defaulted)

  /* BFD->target_defaulted is not set to TRUE when a target is chosen
     as a fallback, so we check for "scanning" to know when to stop
     using the non-swapping target.  */
  if (abfd->xvec == &bfd_elf32_rx_be_ns_vec

  if (abfd->xvec == &bfd_elf32_rx_be_vec)

  bfd_default_set_arch_mach (abfd, bfd_arch_rx,
			     elf32_rx_machine (abfd));
  /* For each PHDR in the object, we must find some section that
     corresponds (based on matching file offsets) and use its VMA
     information to reconstruct the p_vaddr field we clobbered when we
  for (i = 0; i < nphdrs; i++)
      for (u = 0; u < elf_tdata (abfd)->num_elf_sections; u++)
	  Elf_Internal_Shdr *sec = elf_tdata (abfd)->elf_sect_ptr[u];

	  if (phdr[i].p_filesz
	      && phdr[i].p_offset <= (bfd_vma) sec->sh_offset
	      && (bfd_vma) sec->sh_offset <= phdr[i].p_offset + (phdr[i].p_filesz - 1))
	      /* Found one!  The difference between the two addresses,
		 plus the difference between the two file offsets, is
		 enough information to reconstruct the lma.  */

	      /* Example where they aren't:
		 PHDR[1] = lma fffc0100 offset 00002010 size 00000100
		 SEC[6]  = vma 00000050 offset 00002050 size 00000040

		 The correct LMA for the section is
		 fffc0100 + (2050 - 2010) = fffc0140.  */

	      phdr[i].p_vaddr = sec->sh_addr + (sec->sh_offset - phdr[i].p_offset);
	      /* We must update the bfd sections as well, so we don't stop
	      bsec = abfd->sections;

		  if (phdr[i].p_filesz
		      && phdr[i].p_vaddr <= bsec->vma
		      && bsec->vma <= phdr[i].p_vaddr + (phdr[i].p_filesz - 1))
		      bsec->lma = phdr[i].p_paddr + (bsec->vma - phdr[i].p_vaddr);
rx_dump_symtab (bfd * abfd, void * internal_syms, void * external_syms)
  Elf_Internal_Sym * isymbuf;
  Elf_Internal_Sym * isymend;
  Elf_Internal_Sym * isym;
  Elf_Internal_Shdr * symtab_hdr;
  bfd_boolean free_internal = FALSE, free_external = FALSE;
  char * st_info_stb_str;
  char * st_other_str;
  char * st_shndx_str;

  if (! internal_syms)
      internal_syms = bfd_malloc (1000);

  if (! external_syms)
      external_syms = bfd_malloc (1000);

  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
  locsymcount = symtab_hdr->sh_size / get_elf_backend_data (abfd)->s->sizeof_sym;

  isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
				  symtab_hdr->sh_info, 0,
				  internal_syms, external_syms, NULL);

  isymbuf = internal_syms;
  isymend = isymbuf + locsymcount;

  for (isym = isymbuf; isym < isymend; isym++)
      switch (ELF_ST_TYPE (isym->st_info))
	case STT_FUNC: st_info_str = "STT_FUNC"; break;
	case STT_SECTION: st_info_str = "STT_SECTION"; break;
	case STT_FILE: st_info_str = "STT_FILE"; break;
	case STT_OBJECT: st_info_str = "STT_OBJECT"; break;
	case STT_TLS: st_info_str = "STT_TLS"; break;
	default: st_info_str = "";

      switch (ELF_ST_BIND (isym->st_info))
	case STB_LOCAL: st_info_stb_str = "STB_LOCAL"; break;
	case STB_GLOBAL: st_info_stb_str = "STB_GLOBAL"; break;
	default: st_info_stb_str = "";

      switch (ELF_ST_VISIBILITY (isym->st_other))
	case STV_DEFAULT: st_other_str = "STV_DEFAULT"; break;
	case STV_INTERNAL: st_other_str = "STV_INTERNAL"; break;
	case STV_PROTECTED: st_other_str = "STV_PROTECTED"; break;
	default: st_other_str = "";

      switch (isym->st_shndx)
	case SHN_ABS: st_shndx_str = "SHN_ABS"; break;
	case SHN_COMMON: st_shndx_str = "SHN_COMMON"; break;
	case SHN_UNDEF: st_shndx_str = "SHN_UNDEF"; break;
	default: st_shndx_str = "";
      printf ("isym = %p st_value = %lx st_size = %lx st_name = (%lu) %s "
	      "st_info = (%d) %s %s st_other = (%d) %s st_shndx = (%d) %s\n",
	      (unsigned long) isym->st_value,
	      (unsigned long) isym->st_size,
	      bfd_elf_string_from_elf_section (abfd, symtab_hdr->sh_link,
	      isym->st_info, st_info_str, st_info_stb_str,
	      isym->st_other, st_other_str,
	      isym->st_shndx, st_shndx_str);

    free (internal_syms);

    free (external_syms);
rx_get_reloc (long reloc)
  if (0 <= reloc && reloc < R_RX_max)
    return rx_elf_howto_table[reloc].name;
/* We must take care to keep the on-disk copy of any code sections
   that are fully linked swapped if the target is big endian, to match
   the Renesas tools.  */

/* The rule is: big-endian objects that are final-link executables
   have code sections stored with 32-bit words swapped relative to
   what you'd get by default.  */
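/* For example (illustrative only): the four code bytes that would
   normally be written to disk as 00 11 22 33 are stored as 33 22 11 00;
   each aligned 32-bit word is byte-reversed, which is what the
   bfd_putb32 (bfd_getl32 (...)) pairs below implement.  */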
rx_get_section_contents (bfd * abfd,
			 bfd_size_type count)
  int exec = (abfd->flags & EXEC_P) ? 1 : 0;
  int s_code = (section->flags & SEC_CODE) ? 1 : 0;

  fprintf (stderr, "dj: get %ld %ld from %s %s e%d sc%d %08lx:%08lx\n",
	   (long) offset, (long) count, section->name,
	   bfd_big_endian (abfd) ? "be" : "le",
	   exec, s_code, (long unsigned) section->filepos,
	   (long unsigned) offset);

  if (exec && s_code && bfd_big_endian (abfd))
      char * cloc = (char *) location;
      bfd_size_type cnt, end_cnt;
      /* Fetch and swap unaligned bytes at the beginning.  */
	  rv = _bfd_generic_get_section_contents (abfd, section, buf,

	  bfd_putb32 (bfd_getl32 (buf), buf);

	  cnt = 4 - (offset % 4);

	  memcpy (location, buf + (offset % 4), cnt);

      end_cnt = count % 4;

      /* Fetch and swap the middle bytes.  */
	  rv = _bfd_generic_get_section_contents (abfd, section, cloc, offset,

	  for (cnt = count; cnt >= 4; cnt -= 4, cloc += 4)
	    bfd_putb32 (bfd_getl32 (cloc), cloc);

      /* Fetch and swap the end bytes.  */
	  /* Fetch the end bytes.  */
	  rv = _bfd_generic_get_section_contents (abfd, section, buf,
						  offset + count - end_cnt, 4);

	  bfd_putb32 (bfd_getl32 (buf), buf);
	  memcpy (cloc, buf, end_cnt);

    rv = _bfd_generic_get_section_contents (abfd, section, location, offset, count);
rx2_set_section_contents (bfd * abfd,
			  const void * location,
			  bfd_size_type count)
  fprintf (stderr, " set sec %s %08x loc %p offset %#x count %#x\n",
	   section->name, (unsigned) section->vma, location, (int) offset, (int) count);

  for (i = 0; i < count; i++)
      if (i % 16 == 0 && i > 0)
	fprintf (stderr, "\n");

      if (i % 16 && i % 4 == 0)
	fprintf (stderr, " ");

	fprintf (stderr, " %08x:", (int) (section->vma + offset + i));

      fprintf (stderr, " %02x", ((unsigned char *) location)[i]);

  fprintf (stderr, "\n");

  return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
#define _bfd_elf_set_section_contents rx2_set_section_contents
rx_set_section_contents (bfd * abfd,
			 const void * location,
			 bfd_size_type count)
  bfd_boolean exec = (abfd->flags & EXEC_P) ? TRUE : FALSE;
  bfd_boolean s_code = (section->flags & SEC_CODE) ? TRUE : FALSE;

  char * swapped_data = NULL;

  bfd_vma caddr = section->vma + offset;

  bfd_size_type scount;
  fprintf (stderr, "\ndj: set %ld %ld to %s %s e%d sc%d\n",
	   (long) offset, (long) count, section->name,
	   bfd_big_endian (abfd) ? "be" : "le",

  for (i = 0; i < count; i++)
      int a = section->vma + offset + i;

      if (a % 16 == 0 && a > 0)
	fprintf (stderr, "\n");

      if (a % 16 && a % 4 == 0)
	fprintf (stderr, " ");

      if (a % 16 == 0 || i == 0)
	fprintf (stderr, " %08x:", (int) (section->vma + offset + i));

      fprintf (stderr, " %02x", ((unsigned char *) location)[i]);

  fprintf (stderr, "\n");
  if (! exec || ! s_code || ! bfd_big_endian (abfd))
    return _bfd_elf_set_section_contents (abfd, section, location, offset, count);

  while (count > 0 && caddr > 0 && caddr % 4)
	case 0: faddr = offset + 3; break;
	case 1: faddr = offset + 1; break;
	case 2: faddr = offset - 1; break;
	case 3: faddr = offset - 3; break;

      rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);

  scount = (int) (count / 4) * 4;
      char * cloc = (char *) location;

      swapped_data = (char *) bfd_alloc (abfd, count);

      for (i = 0; i < count; i += 4)
	  bfd_vma v = bfd_getl32 (cloc + i);
	  bfd_putb32 (v, swapped_data + i);

      rv = _bfd_elf_set_section_contents (abfd, section, swapped_data, offset, scount);

  caddr = section->vma + offset;

	case 0: faddr = offset + 3; break;
	case 1: faddr = offset + 1; break;
	case 2: faddr = offset - 1; break;
	case 3: faddr = offset - 3; break;

      rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
rx_final_link (bfd * abfd, struct bfd_link_info * info)
  for (o = abfd->sections; o != NULL; o = o->next)
      fprintf (stderr, "sec %s fl %x vma %lx lma %lx size %lx raw %lx\n",
	       o->name, o->flags, o->vma, o->lma, o->size, o->rawsize);

      if (o->flags & SEC_CODE
	  && bfd_big_endian (abfd)

	  fprintf (stderr, "adjusting...\n");

	  o->size += 4 - (o->size % 4);

  return bfd_elf_final_link (abfd, info);
elf32_rx_modify_program_headers (bfd * abfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info ATTRIBUTE_UNUSED)
  const struct elf_backend_data * bed;
  struct elf_obj_tdata * tdata;
  Elf_Internal_Phdr * phdr;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);

  count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;

  for (i = count; i-- != 0;)
    if (phdr[i].p_type == PT_LOAD)
	/* The Renesas tools expect p_paddr to be zero.  However,
	   there is no other way to store the writable data in ROM for
	   startup initialization.  So, we let the linker *think*
	   we're using paddr and vaddr the "usual" way, but at the
	   last minute we move the paddr into the vaddr (which is what
	   the simulator uses) and zero out paddr.  Note that this
	   does not affect the section headers, just the program
	   headers.  We hope.  */
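	/* For example (hypothetical numbers): a PT_LOAD segment the
	   linker produced with p_vaddr 0x00001000 (the RAM copy) and
	   p_paddr 0xfff00000 (the ROM image) leaves this hook with
	   p_vaddr == p_paddr == 0xfff00000.  */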
	phdr[i].p_vaddr = phdr[i].p_paddr;
#if 0	/* If we zero out p_paddr, then the LMA in the section table
	phdr[i].p_paddr = 0;
/* The default literal sections should always be marked as "code" (i.e.,
   SHF_EXECINSTR).  This is particularly important for big-endian mode
   when we do not want their contents byte reversed.  */
static const struct bfd_elf_special_section elf32_rx_special_sections[] =
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC + SHF_EXECINSTR },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC + SHF_EXECINSTR },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC + SHF_EXECINSTR },
  { NULL, 0, 0, 0, 0 }
#define ELF_ARCH		bfd_arch_rx
#define ELF_MACHINE_CODE	EM_RX
#define ELF_MAXPAGESIZE		0x1000

#define TARGET_BIG_SYM		bfd_elf32_rx_be_vec
#define TARGET_BIG_NAME		"elf32-rx-be"

#define TARGET_LITTLE_SYM	bfd_elf32_rx_le_vec
#define TARGET_LITTLE_NAME	"elf32-rx-le"

#define elf_info_to_howto_rel			NULL
#define elf_info_to_howto			rx_info_to_howto_rela
#define elf_backend_object_p			rx_elf_object_p
#define elf_backend_relocate_section		rx_elf_relocate_section
#define elf_symbol_leading_char			('_')
#define elf_backend_can_gc_sections		1
#define elf_backend_modify_program_headers	elf32_rx_modify_program_headers

#define bfd_elf32_bfd_reloc_type_lookup		rx_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		rx_reloc_name_lookup
#define bfd_elf32_bfd_set_private_flags		rx_elf_set_private_flags
#define bfd_elf32_bfd_merge_private_bfd_data	rx_elf_merge_private_bfd_data
#define bfd_elf32_bfd_print_private_bfd_data	rx_elf_print_private_bfd_data
#define bfd_elf32_get_section_contents		rx_get_section_contents
#define bfd_elf32_set_section_contents		rx_set_section_contents
#define bfd_elf32_bfd_final_link		rx_final_link
#define bfd_elf32_bfd_relax_section		elf32_rx_relax_section_wrapper
#define elf_backend_special_sections		elf32_rx_special_sections

#include "elf32-target.h"
/* We define a second big-endian target that doesn't have the custom
   section get/set hooks, for times when we want to preserve the
   pre-swapped .text sections (like objcopy).  */

#undef  TARGET_BIG_SYM
#define TARGET_BIG_SYM		bfd_elf32_rx_be_ns_vec
#undef  TARGET_BIG_NAME
#define TARGET_BIG_NAME		"elf32-rx-be-ns"
#undef  TARGET_LITTLE_SYM

#undef  bfd_elf32_get_section_contents
#undef  bfd_elf32_set_section_contents

#define elf32_bed		elf32_rx_be_ns_bed

#include "elf32-target.h"