/* BFD back-end for Renesas Super-H COFF binaries.
   Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
   2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Cygnus Support.
   Written by Steve Chamberlain, <sac@cygnus.com>.
   Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
#include "libiberty.h"
#include "coff/internal.h"
#ifndef COFF_IMAGE_WITH_PE
static bfd_boolean sh_align_load_span
  PARAMS ((bfd *, asection *, bfd_byte *,
	   bfd_boolean (*) (bfd *, asection *, PTR, bfd_byte *, bfd_vma),
	   PTR, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *));

#define _bfd_sh_align_load_span sh_align_load_span
/* Internal functions.  */
static bfd_reloc_status_type sh_reloc
  PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
static long get_symbol_value
  PARAMS ((asymbol *));
static bfd_boolean sh_relax_section
  PARAMS ((bfd *, asection *, struct bfd_link_info *, bfd_boolean *));
static bfd_boolean sh_relax_delete_bytes
  PARAMS ((bfd *, asection *, bfd_vma, int));
#ifndef COFF_IMAGE_WITH_PE
static const struct sh_opcode *sh_insn_info
  PARAMS ((unsigned int));
static bfd_boolean sh_align_loads
  PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *,
	   bfd_boolean *));
static bfd_boolean sh_swap_insns
  PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
static bfd_boolean sh_relocate_section
  PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
	   struct internal_reloc *, struct internal_syment *, asection **));
static bfd_byte *sh_coff_get_relocated_section_contents
  PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
	   bfd_byte *, bfd_boolean, asymbol **));
static reloc_howto_type * sh_coff_reloc_type_lookup
  PARAMS ((bfd *, bfd_reloc_code_real_type));
#ifdef COFF_WITH_PE
/* Can't build import tables with 2**4 alignment.  */
#define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	2
#else
/* Default section alignment to 2**4.  */
#define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	4
#endif

#ifdef COFF_IMAGE_WITH_PE
/* Align PE executables.  */
#define COFF_PAGE_SIZE 0x1000
#endif

/* Generate long file names.  */
#define COFF_LONG_FILENAMES
#ifdef COFF_WITH_PE
static bfd_boolean in_reloc_p PARAMS ((bfd *, reloc_howto_type *));

/* Return TRUE if this relocation should
   appear in the output .reloc section.  */

static bfd_boolean
in_reloc_p (abfd, howto)
     bfd * abfd ATTRIBUTE_UNUSED;
     reloc_howto_type * howto;
{
  return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
}
#endif
/* The supported relocations.  There are a lot of relocations defined
   in coff/internal.h which we do not expect to ever see.  */

static reloc_howto_type sh_coff_howtos[] =
{
107 HOWTO (R_SH_IMM32CE
, /* type */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
111 FALSE
, /* pc_relative */
113 complain_overflow_bitfield
, /* complain_on_overflow */
114 sh_reloc
, /* special_function */
115 "r_imm32ce", /* name */
116 TRUE
, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE
), /* pcrel_offset */
123 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
124 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
125 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
126 EMPTY_HOWTO (6), /* R_SH_IMM24 */
127 EMPTY_HOWTO (7), /* R_SH_LOW16 */
129 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
131 HOWTO (R_SH_PCDISP8BY2
, /* type */
133 1, /* size (0 = byte, 1 = short, 2 = long) */
135 TRUE
, /* pc_relative */
137 complain_overflow_signed
, /* complain_on_overflow */
138 sh_reloc
, /* special_function */
139 "r_pcdisp8by2", /* name */
140 TRUE
, /* partial_inplace */
143 TRUE
), /* pcrel_offset */
145 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
147 HOWTO (R_SH_PCDISP
, /* type */
149 1, /* size (0 = byte, 1 = short, 2 = long) */
151 TRUE
, /* pc_relative */
153 complain_overflow_signed
, /* complain_on_overflow */
154 sh_reloc
, /* special_function */
155 "r_pcdisp12by2", /* name */
156 TRUE
, /* partial_inplace */
157 0xfff, /* src_mask */
158 0xfff, /* dst_mask */
159 TRUE
), /* pcrel_offset */
163 HOWTO (R_SH_IMM32
, /* type */
165 2, /* size (0 = byte, 1 = short, 2 = long) */
167 FALSE
, /* pc_relative */
169 complain_overflow_bitfield
, /* complain_on_overflow */
170 sh_reloc
, /* special_function */
171 "r_imm32", /* name */
172 TRUE
, /* partial_inplace */
173 0xffffffff, /* src_mask */
174 0xffffffff, /* dst_mask */
175 FALSE
), /* pcrel_offset */
179 HOWTO (R_SH_IMAGEBASE
, /* type */
181 2, /* size (0 = byte, 1 = short, 2 = long) */
183 FALSE
, /* pc_relative */
185 complain_overflow_bitfield
, /* complain_on_overflow */
186 sh_reloc
, /* special_function */
188 TRUE
, /* partial_inplace */
189 0xffffffff, /* src_mask */
190 0xffffffff, /* dst_mask */
191 FALSE
), /* pcrel_offset */
193 EMPTY_HOWTO (16), /* R_SH_IMM8 */
195 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
196 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
197 EMPTY_HOWTO (19), /* R_SH_IMM4 */
198 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
199 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
201 HOWTO (R_SH_PCRELIMM8BY2
, /* type */
203 1, /* size (0 = byte, 1 = short, 2 = long) */
205 TRUE
, /* pc_relative */
207 complain_overflow_unsigned
, /* complain_on_overflow */
208 sh_reloc
, /* special_function */
209 "r_pcrelimm8by2", /* name */
210 TRUE
, /* partial_inplace */
213 TRUE
), /* pcrel_offset */
215 HOWTO (R_SH_PCRELIMM8BY4
, /* type */
217 1, /* size (0 = byte, 1 = short, 2 = long) */
219 TRUE
, /* pc_relative */
221 complain_overflow_unsigned
, /* complain_on_overflow */
222 sh_reloc
, /* special_function */
223 "r_pcrelimm8by4", /* name */
224 TRUE
, /* partial_inplace */
227 TRUE
), /* pcrel_offset */
229 HOWTO (R_SH_IMM16
, /* type */
231 1, /* size (0 = byte, 1 = short, 2 = long) */
233 FALSE
, /* pc_relative */
235 complain_overflow_bitfield
, /* complain_on_overflow */
236 sh_reloc
, /* special_function */
237 "r_imm16", /* name */
238 TRUE
, /* partial_inplace */
239 0xffff, /* src_mask */
240 0xffff, /* dst_mask */
241 FALSE
), /* pcrel_offset */
243 HOWTO (R_SH_SWITCH16
, /* type */
245 1, /* size (0 = byte, 1 = short, 2 = long) */
247 FALSE
, /* pc_relative */
249 complain_overflow_bitfield
, /* complain_on_overflow */
250 sh_reloc
, /* special_function */
251 "r_switch16", /* name */
252 TRUE
, /* partial_inplace */
253 0xffff, /* src_mask */
254 0xffff, /* dst_mask */
255 FALSE
), /* pcrel_offset */
257 HOWTO (R_SH_SWITCH32
, /* type */
259 2, /* size (0 = byte, 1 = short, 2 = long) */
261 FALSE
, /* pc_relative */
263 complain_overflow_bitfield
, /* complain_on_overflow */
264 sh_reloc
, /* special_function */
265 "r_switch32", /* name */
266 TRUE
, /* partial_inplace */
267 0xffffffff, /* src_mask */
268 0xffffffff, /* dst_mask */
269 FALSE
), /* pcrel_offset */
271 HOWTO (R_SH_USES
, /* type */
273 1, /* size (0 = byte, 1 = short, 2 = long) */
275 FALSE
, /* pc_relative */
277 complain_overflow_bitfield
, /* complain_on_overflow */
278 sh_reloc
, /* special_function */
280 TRUE
, /* partial_inplace */
281 0xffff, /* src_mask */
282 0xffff, /* dst_mask */
283 FALSE
), /* pcrel_offset */
285 HOWTO (R_SH_COUNT
, /* type */
287 2, /* size (0 = byte, 1 = short, 2 = long) */
289 FALSE
, /* pc_relative */
291 complain_overflow_bitfield
, /* complain_on_overflow */
292 sh_reloc
, /* special_function */
293 "r_count", /* name */
294 TRUE
, /* partial_inplace */
295 0xffffffff, /* src_mask */
296 0xffffffff, /* dst_mask */
297 FALSE
), /* pcrel_offset */
299 HOWTO (R_SH_ALIGN
, /* type */
301 2, /* size (0 = byte, 1 = short, 2 = long) */
303 FALSE
, /* pc_relative */
305 complain_overflow_bitfield
, /* complain_on_overflow */
306 sh_reloc
, /* special_function */
307 "r_align", /* name */
308 TRUE
, /* partial_inplace */
309 0xffffffff, /* src_mask */
310 0xffffffff, /* dst_mask */
311 FALSE
), /* pcrel_offset */
313 HOWTO (R_SH_CODE
, /* type */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
317 FALSE
, /* pc_relative */
319 complain_overflow_bitfield
, /* complain_on_overflow */
320 sh_reloc
, /* special_function */
322 TRUE
, /* partial_inplace */
323 0xffffffff, /* src_mask */
324 0xffffffff, /* dst_mask */
325 FALSE
), /* pcrel_offset */
327 HOWTO (R_SH_DATA
, /* type */
329 2, /* size (0 = byte, 1 = short, 2 = long) */
331 FALSE
, /* pc_relative */
333 complain_overflow_bitfield
, /* complain_on_overflow */
334 sh_reloc
, /* special_function */
336 TRUE
, /* partial_inplace */
337 0xffffffff, /* src_mask */
338 0xffffffff, /* dst_mask */
339 FALSE
), /* pcrel_offset */
341 HOWTO (R_SH_LABEL
, /* type */
343 2, /* size (0 = byte, 1 = short, 2 = long) */
345 FALSE
, /* pc_relative */
347 complain_overflow_bitfield
, /* complain_on_overflow */
348 sh_reloc
, /* special_function */
349 "r_label", /* name */
350 TRUE
, /* partial_inplace */
351 0xffffffff, /* src_mask */
352 0xffffffff, /* dst_mask */
353 FALSE
), /* pcrel_offset */
355 HOWTO (R_SH_SWITCH8
, /* type */
357 0, /* size (0 = byte, 1 = short, 2 = long) */
359 FALSE
, /* pc_relative */
361 complain_overflow_bitfield
, /* complain_on_overflow */
362 sh_reloc
, /* special_function */
363 "r_switch8", /* name */
364 TRUE
, /* partial_inplace */
367 FALSE
) /* pcrel_offset */
};

#define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])

/* Check for a bad magic number.  */
#define BADMAG(x) SHBADMAG(x)

/* Customize coffcode.h (this is not currently used).  */

/* FIXME: This should not be set here.  */
#define __A_MAGIC_SET__

/* Swap the r_offset field in and out.  */
#define SWAP_IN_RELOC_OFFSET  H_GET_32
#define SWAP_OUT_RELOC_OFFSET H_PUT_32

/* Swap out extra information in the reloc structure.  */
#define SWAP_OUT_RELOC_EXTRA(abfd, src, dst)	\
  do						\
    {						\
      dst->r_stuff[0] = 'S';			\
      dst->r_stuff[1] = 'C';			\
    }						\
  while (0)
/* Get the value of a symbol, when performing a relocation.  */

static long
get_symbol_value (symbol)
     asymbol *symbol;
{
  bfd_vma relocation;

  if (bfd_is_com_section (symbol->section))
    relocation = 0;
  else
    relocation = (symbol->value +
		  symbol->section->output_section->vma +
		  symbol->section->output_offset);

  return relocation;
}
#ifdef COFF_WITH_PE

/* Convert an rtype to howto for the COFF backend linker.
   Copied from coff-i386.  */

#define coff_rtype_to_howto coff_sh_rtype_to_howto
static reloc_howto_type * coff_sh_rtype_to_howto
  PARAMS ((bfd *, asection *, struct internal_reloc *,
	   struct coff_link_hash_entry *, struct internal_syment *,
	   bfd_vma *));

static reloc_howto_type *
coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
     bfd * abfd ATTRIBUTE_UNUSED;
     asection * sec;
     struct internal_reloc * rel;
     struct coff_link_hash_entry * h;
     struct internal_syment * sym;
     bfd_vma * addendp;
{
  reloc_howto_type * howto;

  howto = sh_coff_howtos + rel->r_type;

  *addendp = 0;

  if (howto->pc_relative)
    *addendp += sec->vma;

  if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
    {
      /* This is a common symbol.  The section contents include the
	 size (sym->n_value) as an addend.  The relocate_section
	 function will be adding in the final value of the symbol.  We
	 need to subtract out the current size in order to get the
	 correct result.  */
      BFD_ASSERT (h != NULL);
    }

  if (howto->pc_relative)
    {
      /* If the symbol is defined, then the generic code is going to
	 add back the symbol value in order to cancel out an
	 adjustment it made to the addend.  However, we set the addend
	 to 0 at the start of this function.  We need to adjust here,
	 to avoid the adjustment the generic code will make.  FIXME:
	 This is getting a bit hackish.  */
      if (sym != NULL && sym->n_scnum != 0)
	*addendp -= sym->n_value;
    }

  if (rel->r_type == R_SH_IMAGEBASE)
    *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;

  return howto;
}

#endif /* COFF_WITH_PE */
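
/* Illustrative sketch (not part of the original source): under
   COFF_WITH_PE, R_SH_IMAGEBASE relocations are resolved to an RVA by
   subtracting pe_data (...)->pe_opthdr.ImageBase, as done above and
   again in sh_reloc below.  The helper shows the same arithmetic on
   plain integers; its name and parameters are hypothetical.  */

static unsigned long
sh_example_va_to_rva (unsigned long va, unsigned long image_base)
{
  /* An RVA is just the virtual address with the image base removed.  */
  return va - image_base;
}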
/* This structure is used to map BFD reloc codes to SH PE relocs.  */
struct shcoff_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;
  unsigned char shcoff_reloc_val;
};

#ifdef COFF_WITH_PE
/* An array mapping BFD reloc codes to SH PE relocs.  */
static const struct shcoff_reloc_map sh_reloc_map[] =
{
  { BFD_RELOC_32, R_SH_IMM32CE },
  { BFD_RELOC_RVA, R_SH_IMAGEBASE },
  { BFD_RELOC_CTOR, R_SH_IMM32CE },
};
#else
/* An array mapping BFD reloc codes to SH PE relocs.  */
static const struct shcoff_reloc_map sh_reloc_map[] =
{
  { BFD_RELOC_32, R_SH_IMM32 },
  { BFD_RELOC_CTOR, R_SH_IMM32 },
};
#endif

/* Given a BFD reloc code, return the howto structure for the
   corresponding SH PE reloc.  */
#define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
static reloc_howto_type *
sh_coff_reloc_type_lookup (abfd, code)
     bfd * abfd ATTRIBUTE_UNUSED;
     bfd_reloc_code_real_type code;
{
  unsigned int i;

  for (i = ARRAY_SIZE (sh_reloc_map); i--;)
    if (sh_reloc_map[i].bfd_reloc_val == code)
      return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];

  fprintf (stderr, "SH Error: unknown reloc type %d\n", code);
  return NULL;
}

/* This macro is used in coffcode.h to get the howto corresponding to
   an internal reloc.  */

#define RTYPE2HOWTO(relent, internal)		\
  ((relent)->howto =				\
   ((internal)->r_type < SH_COFF_HOWTO_COUNT	\
    ? &sh_coff_howtos[(internal)->r_type]	\
    : (reloc_howto_type *) NULL))
/* This is the same as the macro in coffcode.h, except that it copies
   r_offset into reloc_entry->addend for some relocs.  */
#define CALC_ADDEND(abfd, ptr, reloc, cache_ptr)                \
  {                                                             \
    coff_symbol_type *coffsym = (coff_symbol_type *) NULL;      \
    if (ptr && bfd_asymbol_bfd (ptr) != abfd)                   \
      coffsym = (obj_symbols (abfd)                             \
                 + (cache_ptr->sym_ptr_ptr - symbols));         \
    else if (ptr)                                               \
      coffsym = coff_symbol_from (abfd, ptr);                   \
    if (coffsym != (coff_symbol_type *) NULL                    \
        && coffsym->native->u.syment.n_scnum == 0)              \
      cache_ptr->addend = 0;                                    \
    else if (ptr && bfd_asymbol_bfd (ptr) == abfd               \
             && ptr->section != (asection *) NULL)              \
      cache_ptr->addend = - (ptr->section->vma + ptr->value);   \
    else                                                        \
      cache_ptr->addend = 0;                                    \
    if ((reloc).r_type == R_SH_SWITCH8                          \
        || (reloc).r_type == R_SH_SWITCH16                      \
        || (reloc).r_type == R_SH_SWITCH32                      \
        || (reloc).r_type == R_SH_USES                          \
        || (reloc).r_type == R_SH_COUNT                         \
        || (reloc).r_type == R_SH_ALIGN)                        \
      cache_ptr->addend = (reloc).r_offset;                     \
  }
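
/* Illustrative sketch (not part of the original source): CALC_ADDEND
   above gives the relaxation-control relocs an addend taken straight
   from r_offset rather than from a symbol.  The hypothetical predicate
   below simply restates which reloc types get that treatment.  */

static int
sh_example_addend_is_r_offset (unsigned short r_type)
{
  return (r_type == R_SH_SWITCH8
	  || r_type == R_SH_SWITCH16
	  || r_type == R_SH_SWITCH32
	  || r_type == R_SH_USES
	  || r_type == R_SH_COUNT
	  || r_type == R_SH_ALIGN);
}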
/* This is the howto function for the SH relocations.  */

static bfd_reloc_status_type
sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
	  error_message)
     bfd *abfd;
     arelent *reloc_entry;
     asymbol *symbol_in;
     PTR data;
     asection *input_section;
     bfd *output_bfd;
     char **error_message ATTRIBUTE_UNUSED;
{
  unsigned long insn;
  bfd_vma sym_value;
  unsigned short r_type;
  bfd_vma addr = reloc_entry->address;
  bfd_byte *hit_data = addr + (bfd_byte *) data;

  r_type = reloc_entry->howto->type;

  if (output_bfd != NULL)
    {
      /* Partial linking--do nothing.  */
      reloc_entry->address += input_section->output_offset;
      return bfd_reloc_ok;
    }

  /* Almost all relocs have to do with relaxing.  If any work must be
     done for them, it has been done in sh_relax_section.  */
  if (r_type != R_SH_IMM32
#ifdef COFF_WITH_PE
      && r_type != R_SH_IMM32CE
      && r_type != R_SH_IMAGEBASE
#endif
      && (r_type != R_SH_PCDISP
	  || (symbol_in->flags & BSF_LOCAL) != 0))
    return bfd_reloc_ok;

  if (symbol_in != NULL
      && bfd_is_und_section (symbol_in->section))
    return bfd_reloc_undefined;

  sym_value = get_symbol_value (symbol_in);

  switch (r_type)
    {
    case R_SH_IMM32:
#ifdef COFF_WITH_PE
    case R_SH_IMM32CE:
#endif
      insn = bfd_get_32 (abfd, hit_data);
      insn += sym_value + reloc_entry->addend;
      bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
      break;
#ifdef COFF_WITH_PE
    case R_SH_IMAGEBASE:
      insn = bfd_get_32 (abfd, hit_data);
      insn += sym_value + reloc_entry->addend;
      insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
      bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
      break;
#endif
    case R_SH_PCDISP:
      insn = bfd_get_16 (abfd, hit_data);
      sym_value += reloc_entry->addend;
      sym_value -= (input_section->output_section->vma
		    + input_section->output_offset
		    + addr
		    + 4);
      sym_value += (insn & 0xfff) << 1;
      insn = (insn & 0xf000) | (sym_value & 0xfff);
      bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
      if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
	return bfd_reloc_overflow;
      break;
    }

  return bfd_reloc_ok;
}
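
/* Illustrative sketch (not part of the original source): the
   PC-relative relocations above measure displacements from four bytes
   past the instruction being patched, because the SH program counter
   reads as the instruction address plus four.  The helper below, whose
   name is hypothetical, shows that displacement computation in
   isolation.  */

static long
sh_example_pcrel_displacement (unsigned long target, unsigned long insn_addr)
{
  /* Displacement is measured from four bytes past the instruction.  */
  return (long) (target - (insn_addr + 4));
}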
635 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
637 /* We can do relaxing. */
638 #define coff_bfd_relax_section sh_relax_section
640 /* We use the special COFF backend linker. */
641 #define coff_relocate_section sh_relocate_section
643 /* When relaxing, we need to use special code to get the relocated
645 #define coff_bfd_get_relocated_section_contents \
646 sh_coff_get_relocated_section_contents
648 #include "coffcode.h"
/* This function handles relaxing on the SH.

   Function calls on the SH look like this:

       movl  L1,r0
       ...
       jsr   @r0
       ...
     L1:
       .long function

661 The compiler and assembler will cooperate to create R_SH_USES
662 relocs on the jsr instructions. The r_offset field of the
663 R_SH_USES reloc is the PC relative offset to the instruction which
664 loads the register (the r_offset field is computed as though it
665 were a jump instruction, so the offset value is actually from four
666 bytes past the instruction). The linker can use this reloc to
667 determine just which function is being called, and thus decide
668 whether it is possible to replace the jsr with a bsr.
670 If multiple function calls are all based on a single register load
671 (i.e., the same function is called multiple times), the compiler
672 guarantees that each function call will have an R_SH_USES reloc.
673 Therefore, if the linker is able to convert each R_SH_USES reloc
   which refers to that address, it can safely eliminate the register
   load.
677 When the assembler creates an R_SH_USES reloc, it examines it to
678 determine which address is being loaded (L1 in the above example).
679 It then counts the number of references to that address, and
680 creates an R_SH_COUNT reloc at that address. The r_offset field of
681 the R_SH_COUNT reloc will be the number of references. If the
682 linker is able to eliminate a register load, it can use the
   R_SH_COUNT reloc to see whether it can also eliminate the function
   address.
686 SH relaxing also handles another, unrelated, matter. On the SH, if
687 a load or store instruction is not aligned on a four byte boundary,
688 the memory cycle interferes with the 32 bit instruction fetch,
689 causing a one cycle bubble in the pipeline. Therefore, we try to
690 align load and store instructions on four byte boundaries if we
691 can, by swapping them with one of the adjacent instructions. */
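
/* Illustrative sketch (not part of the original source): the
   relaxation described above replaces "jsr @rN" with "bsr label" when
   the callee is close enough.  The helper below shows the core
   decision and the encoding used further down in sh_relax_section
   (opcode 0xb000 with a 12 bit displacement counted in two byte units
   from four bytes past the branch); the function name and parameters
   are hypothetical.  */

static int
sh_example_make_bsr (unsigned long callee, unsigned long jsr_addr,
		     unsigned short *insn_out)
{
  long foff = (long) (callee - (jsr_addr + 4));

  /* A bsr can only reach +/- 4096 bytes; otherwise the jsr must stay.  */
  if (foff < -0x1000 || foff >= 0x1000)
    return 0;

  *insn_out = (unsigned short) (0xb000 | ((foff >> 1) & 0xfff));
  return 1;
}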
694 sh_relax_section (abfd
, sec
, link_info
, again
)
697 struct bfd_link_info
*link_info
;
700 struct internal_reloc
*internal_relocs
;
701 bfd_boolean have_code
;
702 struct internal_reloc
*irel
, *irelend
;
703 bfd_byte
*contents
= NULL
;
707 if (link_info
->relocatable
708 || (sec
->flags
& SEC_RELOC
) == 0
709 || sec
->reloc_count
== 0)
712 if (coff_section_data (abfd
, sec
) == NULL
)
714 bfd_size_type amt
= sizeof (struct coff_section_tdata
);
715 sec
->used_by_bfd
= (PTR
) bfd_zalloc (abfd
, amt
);
716 if (sec
->used_by_bfd
== NULL
)
720 internal_relocs
= (_bfd_coff_read_internal_relocs
721 (abfd
, sec
, link_info
->keep_memory
,
722 (bfd_byte
*) NULL
, FALSE
,
723 (struct internal_reloc
*) NULL
));
724 if (internal_relocs
== NULL
)
729 irelend
= internal_relocs
+ sec
->reloc_count
;
730 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
732 bfd_vma laddr
, paddr
, symval
;
734 struct internal_reloc
*irelfn
, *irelscan
, *irelcount
;
735 struct internal_syment sym
;
738 if (irel
->r_type
== R_SH_CODE
)
741 if (irel
->r_type
!= R_SH_USES
)
744 /* Get the section contents. */
745 if (contents
== NULL
)
747 if (coff_section_data (abfd
, sec
)->contents
!= NULL
)
748 contents
= coff_section_data (abfd
, sec
)->contents
;
751 if (!bfd_malloc_and_get_section (abfd
, sec
, &contents
))
756 /* The r_offset field of the R_SH_USES reloc will point us to
757 the register load. The 4 is because the r_offset field is
758 computed as though it were a jump offset, which are based
759 from 4 bytes after the jump instruction. */
760 laddr
= irel
->r_vaddr
- sec
->vma
+ 4;
761 /* Careful to sign extend the 32-bit offset. */
762 laddr
+= ((irel
->r_offset
& 0xffffffff) ^ 0x80000000) - 0x80000000;
763 if (laddr
>= sec
->size
)
765 (*_bfd_error_handler
) ("%B: 0x%lx: warning: bad R_SH_USES offset",
766 abfd
, (unsigned long) irel
->r_vaddr
);
769 insn
= bfd_get_16 (abfd
, contents
+ laddr
);
771 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
772 if ((insn
& 0xf000) != 0xd000)
774 ((*_bfd_error_handler
)
775 ("%B: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
776 abfd
, (unsigned long) irel
->r_vaddr
, insn
));
780 /* Get the address from which the register is being loaded. The
781 displacement in the mov.l instruction is quadrupled. It is a
782 displacement from four bytes after the movl instruction, but,
783 before adding in the PC address, two least significant bits
784 of the PC are cleared. We assume that the section is aligned
785 on a four byte boundary. */
788 paddr
+= (laddr
+ 4) &~ (bfd_vma
) 3;
789 if (paddr
>= sec
->size
)
791 ((*_bfd_error_handler
)
792 ("%B: 0x%lx: warning: bad R_SH_USES load offset",
793 abfd
, (unsigned long) irel
->r_vaddr
));
797 /* Get the reloc for the address from which the register is
798 being loaded. This reloc will tell us which function is
799 actually being called. */
801 for (irelfn
= internal_relocs
; irelfn
< irelend
; irelfn
++)
802 if (irelfn
->r_vaddr
== paddr
804 && (irelfn
->r_type
== R_SH_IMM32
805 || irelfn
->r_type
== R_SH_IMM32CE
806 || irelfn
->r_type
== R_SH_IMAGEBASE
)
809 && irelfn
->r_type
== R_SH_IMM32
813 if (irelfn
>= irelend
)
815 ((*_bfd_error_handler
)
816 ("%B: 0x%lx: warning: could not find expected reloc",
817 abfd
, (unsigned long) paddr
));
821 /* Get the value of the symbol referred to by the reloc. */
822 if (! _bfd_coff_get_external_symbols (abfd
))
824 bfd_coff_swap_sym_in (abfd
,
825 ((bfd_byte
*) obj_coff_external_syms (abfd
)
827 * bfd_coff_symesz (abfd
))),
829 if (sym
.n_scnum
!= 0 && sym
.n_scnum
!= sec
->target_index
)
831 ((*_bfd_error_handler
)
832 ("%B: 0x%lx: warning: symbol in unexpected section",
833 abfd
, (unsigned long) paddr
));
837 if (sym
.n_sclass
!= C_EXT
)
839 symval
= (sym
.n_value
841 + sec
->output_section
->vma
842 + sec
->output_offset
);
846 struct coff_link_hash_entry
*h
;
848 h
= obj_coff_sym_hashes (abfd
)[irelfn
->r_symndx
];
849 BFD_ASSERT (h
!= NULL
);
850 if (h
->root
.type
!= bfd_link_hash_defined
851 && h
->root
.type
!= bfd_link_hash_defweak
)
853 /* This appears to be a reference to an undefined
854 symbol. Just ignore it--it will be caught by the
855 regular reloc processing. */
859 symval
= (h
->root
.u
.def
.value
860 + h
->root
.u
.def
.section
->output_section
->vma
861 + h
->root
.u
.def
.section
->output_offset
);
864 symval
+= bfd_get_32 (abfd
, contents
+ paddr
- sec
->vma
);
866 /* See if this function call can be shortened. */
870 + sec
->output_section
->vma
873 if (foff
< -0x1000 || foff
>= 0x1000)
875 /* After all that work, we can't shorten this function call. */
879 /* Shorten the function call. */
881 /* For simplicity of coding, we are going to modify the section
882 contents, the section relocs, and the BFD symbol table. We
883 must tell the rest of the code not to free up this
884 information. It would be possible to instead create a table
885 of changes which have to be made, as is done in coff-mips.c;
886 that would be more work, but would require less memory when
887 the linker is run. */
889 coff_section_data (abfd
, sec
)->relocs
= internal_relocs
;
890 coff_section_data (abfd
, sec
)->keep_relocs
= TRUE
;
892 coff_section_data (abfd
, sec
)->contents
= contents
;
893 coff_section_data (abfd
, sec
)->keep_contents
= TRUE
;
895 obj_coff_keep_syms (abfd
) = TRUE
;
897 /* Replace the jsr with a bsr. */
899 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
900 replace the jsr with a bsr. */
901 irel
->r_type
= R_SH_PCDISP
;
902 irel
->r_symndx
= irelfn
->r_symndx
;
903 if (sym
.n_sclass
!= C_EXT
)
	  /* If this needs to be changed because of future relaxing,
	     it will be handled here like other internal PCDISP
	     relocs.  */
	  bfd_put_16 (abfd,
		      (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
		      contents + irel->r_vaddr - sec->vma);
914 /* We can't fully resolve this yet, because the external
915 symbol value may be changed by future relaxing. We let
916 the final link phase handle it. */
917 bfd_put_16 (abfd
, (bfd_vma
) 0xb000,
918 contents
+ irel
->r_vaddr
- sec
->vma
);
921 /* See if there is another R_SH_USES reloc referring to the same
923 for (irelscan
= internal_relocs
; irelscan
< irelend
; irelscan
++)
924 if (irelscan
->r_type
== R_SH_USES
925 && laddr
== irelscan
->r_vaddr
- sec
->vma
+ 4 + irelscan
->r_offset
)
927 if (irelscan
< irelend
)
929 /* Some other function call depends upon this register load,
930 and we have not yet converted that function call.
931 Indeed, we may never be able to convert it. There is
932 nothing else we can do at this point. */
936 /* Look for a R_SH_COUNT reloc on the location where the
937 function address is stored. Do this before deleting any
938 bytes, to avoid confusion about the address. */
939 for (irelcount
= internal_relocs
; irelcount
< irelend
; irelcount
++)
940 if (irelcount
->r_vaddr
== paddr
941 && irelcount
->r_type
== R_SH_COUNT
)
944 /* Delete the register load. */
945 if (! sh_relax_delete_bytes (abfd
, sec
, laddr
, 2))
948 /* That will change things, so, just in case it permits some
949 other function call to come within range, we should relax
950 again. Note that this is not required, and it may be slow. */
953 /* Now check whether we got a COUNT reloc. */
954 if (irelcount
>= irelend
)
956 ((*_bfd_error_handler
)
957 ("%B: 0x%lx: warning: could not find expected COUNT reloc",
958 abfd
, (unsigned long) paddr
));
      /* The number of uses is stored in the r_offset field.  We've
	 just eliminated one use, so the count can be decremented.  */
964 if (irelcount
->r_offset
== 0)
966 ((*_bfd_error_handler
) ("%B: 0x%lx: warning: bad count",
967 abfd
, (unsigned long) paddr
));
971 --irelcount
->r_offset
;
973 /* If there are no more uses, we can delete the address. Reload
974 the address from irelfn, in case it was changed by the
975 previous call to sh_relax_delete_bytes. */
976 if (irelcount
->r_offset
== 0)
978 if (! sh_relax_delete_bytes (abfd
, sec
,
979 irelfn
->r_vaddr
- sec
->vma
, 4))
983 /* We've done all we can with that function call. */
986 /* Look for load and store instructions that we can align on four
992 /* Get the section contents. */
993 if (contents
== NULL
)
995 if (coff_section_data (abfd
, sec
)->contents
!= NULL
)
996 contents
= coff_section_data (abfd
, sec
)->contents
;
999 if (!bfd_malloc_and_get_section (abfd
, sec
, &contents
))
1004 if (! sh_align_loads (abfd
, sec
, internal_relocs
, contents
, &swapped
))
1009 coff_section_data (abfd
, sec
)->relocs
= internal_relocs
;
1010 coff_section_data (abfd
, sec
)->keep_relocs
= TRUE
;
1012 coff_section_data (abfd
, sec
)->contents
= contents
;
1013 coff_section_data (abfd
, sec
)->keep_contents
= TRUE
;
1015 obj_coff_keep_syms (abfd
) = TRUE
;
1019 if (internal_relocs
!= NULL
1020 && internal_relocs
!= coff_section_data (abfd
, sec
)->relocs
)
1022 if (! link_info
->keep_memory
)
1023 free (internal_relocs
);
1025 coff_section_data (abfd
, sec
)->relocs
= internal_relocs
;
1028 if (contents
!= NULL
&& contents
!= coff_section_data (abfd
, sec
)->contents
)
1030 if (! link_info
->keep_memory
)
1033 /* Cache the section contents for coff_link_input_bfd. */
1034 coff_section_data (abfd
, sec
)->contents
= contents
;
1040 if (internal_relocs
!= NULL
1041 && internal_relocs
!= coff_section_data (abfd
, sec
)->relocs
)
1042 free (internal_relocs
);
1043 if (contents
!= NULL
&& contents
!= coff_section_data (abfd
, sec
)->contents
)
1048 /* Delete some bytes from a section while relaxing. */
1051 sh_relax_delete_bytes (abfd
, sec
, addr
, count
)
1058 struct internal_reloc
*irel
, *irelend
;
1059 struct internal_reloc
*irelalign
;
1061 bfd_byte
*esym
, *esymend
;
1062 bfd_size_type symesz
;
1063 struct coff_link_hash_entry
**sym_hash
;
1066 contents
= coff_section_data (abfd
, sec
)->contents
;
  /* The deletion must stop at the next ALIGN reloc for an alignment
     power larger than the number of bytes we are deleting.  */
1074 irel
= coff_section_data (abfd
, sec
)->relocs
;
1075 irelend
= irel
+ sec
->reloc_count
;
1076 for (; irel
< irelend
; irel
++)
1078 if (irel
->r_type
== R_SH_ALIGN
1079 && irel
->r_vaddr
- sec
->vma
> addr
1080 && count
< (1 << irel
->r_offset
))
1083 toaddr
= irel
->r_vaddr
- sec
->vma
;
1088 /* Actually delete the bytes. */
1089 memmove (contents
+ addr
, contents
+ addr
+ count
,
1090 (size_t) (toaddr
- addr
- count
));
1091 if (irelalign
== NULL
)
1097 #define NOP_OPCODE (0x0009)
1099 BFD_ASSERT ((count
& 1) == 0);
1100 for (i
= 0; i
< count
; i
+= 2)
1101 bfd_put_16 (abfd
, (bfd_vma
) NOP_OPCODE
, contents
+ toaddr
- count
+ i
);
1104 /* Adjust all the relocs. */
1105 for (irel
= coff_section_data (abfd
, sec
)->relocs
; irel
< irelend
; irel
++)
1107 bfd_vma nraddr
, stop
;
1110 struct internal_syment sym
;
1111 int off
, adjust
, oinsn
;
1112 bfd_signed_vma voff
= 0;
1113 bfd_boolean overflow
;
1115 /* Get the new reloc address. */
1116 nraddr
= irel
->r_vaddr
- sec
->vma
;
1117 if ((irel
->r_vaddr
- sec
->vma
> addr
1118 && irel
->r_vaddr
- sec
->vma
< toaddr
)
1119 || (irel
->r_type
== R_SH_ALIGN
1120 && irel
->r_vaddr
- sec
->vma
== toaddr
))
1123 /* See if this reloc was for the bytes we have deleted, in which
1124 case we no longer care about it. Don't delete relocs which
1125 represent addresses, though. */
1126 if (irel
->r_vaddr
- sec
->vma
>= addr
1127 && irel
->r_vaddr
- sec
->vma
< addr
+ count
1128 && irel
->r_type
!= R_SH_ALIGN
1129 && irel
->r_type
!= R_SH_CODE
1130 && irel
->r_type
!= R_SH_DATA
1131 && irel
->r_type
!= R_SH_LABEL
)
1132 irel
->r_type
= R_SH_UNUSED
;
1134 /* If this is a PC relative reloc, see if the range it covers
1135 includes the bytes we have deleted. */
1136 switch (irel
->r_type
)
1141 case R_SH_PCDISP8BY2
:
1143 case R_SH_PCRELIMM8BY2
:
1144 case R_SH_PCRELIMM8BY4
:
1145 start
= irel
->r_vaddr
- sec
->vma
;
1146 insn
= bfd_get_16 (abfd
, contents
+ nraddr
);
1150 switch (irel
->r_type
)
1153 start
= stop
= addr
;
1159 case R_SH_IMAGEBASE
:
1161 /* If this reloc is against a symbol defined in this
1162 section, and the symbol will not be adjusted below, we
1163 must check the addend to see it will put the value in
1164 range to be adjusted, and hence must be changed. */
1165 bfd_coff_swap_sym_in (abfd
,
1166 ((bfd_byte
*) obj_coff_external_syms (abfd
)
1168 * bfd_coff_symesz (abfd
))),
1170 if (sym
.n_sclass
!= C_EXT
1171 && sym
.n_scnum
== sec
->target_index
1172 && ((bfd_vma
) sym
.n_value
<= addr
1173 || (bfd_vma
) sym
.n_value
>= toaddr
))
1177 val
= bfd_get_32 (abfd
, contents
+ nraddr
);
1179 if (val
> addr
&& val
< toaddr
)
1180 bfd_put_32 (abfd
, val
- count
, contents
+ nraddr
);
1182 start
= stop
= addr
;
1185 case R_SH_PCDISP8BY2
:
1189 stop
= (bfd_vma
) ((bfd_signed_vma
) start
+ 4 + off
* 2);
1193 bfd_coff_swap_sym_in (abfd
,
1194 ((bfd_byte
*) obj_coff_external_syms (abfd
)
1196 * bfd_coff_symesz (abfd
))),
1198 if (sym
.n_sclass
== C_EXT
)
1199 start
= stop
= addr
;
1205 stop
= (bfd_vma
) ((bfd_signed_vma
) start
+ 4 + off
* 2);
1209 case R_SH_PCRELIMM8BY2
:
1211 stop
= start
+ 4 + off
* 2;
1214 case R_SH_PCRELIMM8BY4
:
1216 stop
= (start
&~ (bfd_vma
) 3) + 4 + off
* 4;
	  /* These reloc types represent
	       .word L2-L1
	     The r_offset field holds the difference between the reloc
	     address and L1.  That is the start of the reloc, and
	     adding in the contents gives us the top.  We must adjust
	     both the r_offset field and the section contents.  */
1229 start
= irel
->r_vaddr
- sec
->vma
;
1230 stop
= (bfd_vma
) ((bfd_signed_vma
) start
- (long) irel
->r_offset
);
1234 && (stop
<= addr
|| stop
>= toaddr
))
1235 irel
->r_offset
+= count
;
1236 else if (stop
> addr
1238 && (start
<= addr
|| start
>= toaddr
))
1239 irel
->r_offset
-= count
;
1243 if (irel
->r_type
== R_SH_SWITCH16
)
1244 voff
= bfd_get_signed_16 (abfd
, contents
+ nraddr
);
1245 else if (irel
->r_type
== R_SH_SWITCH8
)
1246 voff
= bfd_get_8 (abfd
, contents
+ nraddr
);
1248 voff
= bfd_get_signed_32 (abfd
, contents
+ nraddr
);
1249 stop
= (bfd_vma
) ((bfd_signed_vma
) start
+ voff
);
1254 start
= irel
->r_vaddr
- sec
->vma
;
1255 stop
= (bfd_vma
) ((bfd_signed_vma
) start
1256 + (long) irel
->r_offset
1263 && (stop
<= addr
|| stop
>= toaddr
))
1265 else if (stop
> addr
1267 && (start
<= addr
|| start
>= toaddr
))
1276 switch (irel
->r_type
)
1282 case R_SH_PCDISP8BY2
:
1283 case R_SH_PCRELIMM8BY2
:
1285 if ((oinsn
& 0xff00) != (insn
& 0xff00))
1287 bfd_put_16 (abfd
, (bfd_vma
) insn
, contents
+ nraddr
);
1292 if ((oinsn
& 0xf000) != (insn
& 0xf000))
1294 bfd_put_16 (abfd
, (bfd_vma
) insn
, contents
+ nraddr
);
1297 case R_SH_PCRELIMM8BY4
:
1298 BFD_ASSERT (adjust
== count
|| count
>= 4);
1303 if ((irel
->r_vaddr
& 3) == 0)
1306 if ((oinsn
& 0xff00) != (insn
& 0xff00))
1308 bfd_put_16 (abfd
, (bfd_vma
) insn
, contents
+ nraddr
);
1313 if (voff
< 0 || voff
>= 0xff)
1315 bfd_put_8 (abfd
, (bfd_vma
) voff
, contents
+ nraddr
);
1320 if (voff
< - 0x8000 || voff
>= 0x8000)
1322 bfd_put_signed_16 (abfd
, (bfd_vma
) voff
, contents
+ nraddr
);
1327 bfd_put_signed_32 (abfd
, (bfd_vma
) voff
, contents
+ nraddr
);
1331 irel
->r_offset
+= adjust
;
1337 ((*_bfd_error_handler
)
1338 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
1339 abfd
, (unsigned long) irel
->r_vaddr
));
1340 bfd_set_error (bfd_error_bad_value
);
1345 irel
->r_vaddr
= nraddr
+ sec
->vma
;
  /* Look through all the other sections.  If they contain any IMM32
     relocs against internal symbols which we are not going to adjust
     below, we may need to adjust the addends.  */
1351 for (o
= abfd
->sections
; o
!= NULL
; o
= o
->next
)
1353 struct internal_reloc
*internal_relocs
;
1354 struct internal_reloc
*irelscan
, *irelscanend
;
1355 bfd_byte
*ocontents
;
1358 || (o
->flags
& SEC_RELOC
) == 0
1359 || o
->reloc_count
== 0)
1362 /* We always cache the relocs. Perhaps, if info->keep_memory is
1363 FALSE, we should free them, if we are permitted to, when we
1364 leave sh_coff_relax_section. */
1365 internal_relocs
= (_bfd_coff_read_internal_relocs
1366 (abfd
, o
, TRUE
, (bfd_byte
*) NULL
, FALSE
,
1367 (struct internal_reloc
*) NULL
));
1368 if (internal_relocs
== NULL
)
1372 irelscanend
= internal_relocs
+ o
->reloc_count
;
1373 for (irelscan
= internal_relocs
; irelscan
< irelscanend
; irelscan
++)
1375 struct internal_syment sym
;
1378 if (irelscan
->r_type
!= R_SH_IMM32
1379 && irelscan
->r_type
!= R_SH_IMAGEBASE
1380 && irelscan
->r_type
!= R_SH_IMM32CE
)
1382 if (irelscan
->r_type
!= R_SH_IMM32
)
1386 bfd_coff_swap_sym_in (abfd
,
1387 ((bfd_byte
*) obj_coff_external_syms (abfd
)
1388 + (irelscan
->r_symndx
1389 * bfd_coff_symesz (abfd
))),
1391 if (sym
.n_sclass
!= C_EXT
1392 && sym
.n_scnum
== sec
->target_index
1393 && ((bfd_vma
) sym
.n_value
<= addr
1394 || (bfd_vma
) sym
.n_value
>= toaddr
))
1398 if (ocontents
== NULL
)
1400 if (coff_section_data (abfd
, o
)->contents
!= NULL
)
1401 ocontents
= coff_section_data (abfd
, o
)->contents
;
1404 if (!bfd_malloc_and_get_section (abfd
, o
, &ocontents
))
1406 /* We always cache the section contents.
1407 Perhaps, if info->keep_memory is FALSE, we
1408 should free them, if we are permitted to,
1409 when we leave sh_coff_relax_section. */
1410 coff_section_data (abfd
, o
)->contents
= ocontents
;
1414 val
= bfd_get_32 (abfd
, ocontents
+ irelscan
->r_vaddr
- o
->vma
);
1416 if (val
> addr
&& val
< toaddr
)
1417 bfd_put_32 (abfd
, val
- count
,
1418 ocontents
+ irelscan
->r_vaddr
- o
->vma
);
1420 coff_section_data (abfd
, o
)->keep_contents
= TRUE
;
1425 /* Adjusting the internal symbols will not work if something has
1426 already retrieved the generic symbols. It would be possible to
1427 make this work by adjusting the generic symbols at the same time.
1428 However, this case should not arise in normal usage. */
1429 if (obj_symbols (abfd
) != NULL
1430 || obj_raw_syments (abfd
) != NULL
)
1432 ((*_bfd_error_handler
)
1433 ("%B: fatal: generic symbols retrieved before relaxing", abfd
));
1434 bfd_set_error (bfd_error_invalid_operation
);
1438 /* Adjust all the symbols. */
1439 sym_hash
= obj_coff_sym_hashes (abfd
);
1440 symesz
= bfd_coff_symesz (abfd
);
1441 esym
= (bfd_byte
*) obj_coff_external_syms (abfd
);
1442 esymend
= esym
+ obj_raw_syment_count (abfd
) * symesz
;
1443 while (esym
< esymend
)
1445 struct internal_syment isym
;
1447 bfd_coff_swap_sym_in (abfd
, (PTR
) esym
, (PTR
) &isym
);
1449 if (isym
.n_scnum
== sec
->target_index
1450 && (bfd_vma
) isym
.n_value
> addr
1451 && (bfd_vma
) isym
.n_value
< toaddr
)
1453 isym
.n_value
-= count
;
1455 bfd_coff_swap_sym_out (abfd
, (PTR
) &isym
, (PTR
) esym
);
1457 if (*sym_hash
!= NULL
)
1459 BFD_ASSERT ((*sym_hash
)->root
.type
== bfd_link_hash_defined
1460 || (*sym_hash
)->root
.type
== bfd_link_hash_defweak
);
1461 BFD_ASSERT ((*sym_hash
)->root
.u
.def
.value
>= addr
1462 && (*sym_hash
)->root
.u
.def
.value
< toaddr
);
1463 (*sym_hash
)->root
.u
.def
.value
-= count
;
1467 esym
+= (isym
.n_numaux
+ 1) * symesz
;
1468 sym_hash
+= isym
.n_numaux
+ 1;
1471 /* See if we can move the ALIGN reloc forward. We have adjusted
1472 r_vaddr for it already. */
1473 if (irelalign
!= NULL
)
1475 bfd_vma alignto
, alignaddr
;
1477 alignto
= BFD_ALIGN (toaddr
, 1 << irelalign
->r_offset
);
1478 alignaddr
= BFD_ALIGN (irelalign
->r_vaddr
- sec
->vma
,
1479 1 << irelalign
->r_offset
);
1480 if (alignto
!= alignaddr
)
1482 /* Tail recursion. */
1483 return sh_relax_delete_bytes (abfd
, sec
, alignaddr
,
1484 (int) (alignto
- alignaddr
));
1491 /* This is yet another version of the SH opcode table, used to rapidly
1492 get information about a particular instruction. */
1494 /* The opcode map is represented by an array of these structures. The
1495 array is indexed by the high order four bits in the instruction. */
1497 struct sh_major_opcode
1499 /* A pointer to the instruction list. This is an array which
1500 contains all the instructions with this major opcode. */
1501 const struct sh_minor_opcode
*minor_opcodes
;
1502 /* The number of elements in minor_opcodes. */
1503 unsigned short count
;
/* This structure holds information for a set of SH opcodes.  The
   instruction code is anded with the mask value, and the resulting
   value is used to search the ordered opcode list.  */
1510 struct sh_minor_opcode
1512 /* The sorted opcode list. */
1513 const struct sh_opcode
*opcodes
;
1514 /* The number of elements in opcodes. */
1515 unsigned short count
;
1516 /* The mask value to use when searching the opcode list. */
1517 unsigned short mask
;
1520 /* This structure holds information for an SH instruction. An array
1521 of these structures is sorted in order by opcode. */
1525 /* The code for this instruction, after it has been anded with the
1526 mask value in the sh_major_opcode structure. */
1527 unsigned short opcode
;
1528 /* Flags for this instruction. */
1529 unsigned long flags
;
/* Flags which appear in the sh_opcode structure.  */
1534 /* This instruction loads a value from memory. */
1537 /* This instruction stores a value to memory. */
1540 /* This instruction is a branch. */
1541 #define BRANCH (0x4)
1543 /* This instruction has a delay slot. */
1546 /* This instruction uses the value in the register in the field at
1547 mask 0x0f00 of the instruction. */
1548 #define USES1 (0x10)
1549 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1551 /* This instruction uses the value in the register in the field at
1552 mask 0x00f0 of the instruction. */
1553 #define USES2 (0x20)
1554 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1556 /* This instruction uses the value in register 0. */
1557 #define USESR0 (0x40)
1559 /* This instruction sets the value in the register in the field at
1560 mask 0x0f00 of the instruction. */
1561 #define SETS1 (0x80)
1562 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1564 /* This instruction sets the value in the register in the field at
1565 mask 0x00f0 of the instruction. */
1566 #define SETS2 (0x100)
1567 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1569 /* This instruction sets register 0. */
1570 #define SETSR0 (0x200)
1572 /* This instruction sets a special register. */
1573 #define SETSSP (0x400)
1575 /* This instruction uses a special register. */
1576 #define USESSP (0x800)
1578 /* This instruction uses the floating point register in the field at
1579 mask 0x0f00 of the instruction. */
1580 #define USESF1 (0x1000)
1581 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1583 /* This instruction uses the floating point register in the field at
1584 mask 0x00f0 of the instruction. */
1585 #define USESF2 (0x2000)
1586 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1588 /* This instruction uses floating point register 0. */
1589 #define USESF0 (0x4000)
1591 /* This instruction sets the floating point register in the field at
1592 mask 0x0f00 of the instruction. */
1593 #define SETSF1 (0x8000)
1594 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1596 #define USESAS (0x10000)
1597 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1598 #define USESR8 (0x20000)
1599 #define SETSAS (0x40000)
1600 #define SETSAS_REG(x) USESAS_REG (x)
1602 #define MAP(a) a, sizeof a / sizeof a[0]
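
/* Illustrative sketch (not part of the original source): the flag bits
   above describe which operands an instruction reads and writes, and
   the *_REG macros extract register numbers from the usual nibble
   positions.  For example, "mov.l rm,@rn" (opcode 0x2002) uses rn in
   bits 8-11 and rm in bits 4-7.  The helper below, whose name is
   hypothetical, shows how USES1_REG and USES2_REG pick those fields
   apart.  */

static void
sh_example_operand_regs (unsigned int insn,
			 unsigned int *rn, unsigned int *rm)
{
  *rn = USES1_REG (insn);	/* Field at mask 0x0f00.  */
  *rm = USES2_REG (insn);	/* Field at mask 0x00f0.  */
}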
#ifndef COFF_IMAGE_WITH_PE
static bfd_boolean sh_insn_uses_reg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_sets_reg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_uses_or_sets_reg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_uses_freg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_sets_freg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_uses_or_sets_freg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insns_conflict
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
	   const struct sh_opcode *));
static bfd_boolean sh_load_use
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
	   const struct sh_opcode *));

/* The opcode maps.  */
1626 static const struct sh_opcode sh_opcode00
[] =
1628 { 0x0008, SETSSP
}, /* clrt */
1629 { 0x0009, 0 }, /* nop */
1630 { 0x000b, BRANCH
| DELAY
| USESSP
}, /* rts */
1631 { 0x0018, SETSSP
}, /* sett */
1632 { 0x0019, SETSSP
}, /* div0u */
1633 { 0x001b, 0 }, /* sleep */
1634 { 0x0028, SETSSP
}, /* clrmac */
1635 { 0x002b, BRANCH
| DELAY
| SETSSP
}, /* rte */
1636 { 0x0038, USESSP
| SETSSP
}, /* ldtlb */
1637 { 0x0048, SETSSP
}, /* clrs */
1638 { 0x0058, SETSSP
} /* sets */
1641 static const struct sh_opcode sh_opcode01
[] =
1643 { 0x0003, BRANCH
| DELAY
| USES1
| SETSSP
}, /* bsrf rn */
1644 { 0x000a, SETS1
| USESSP
}, /* sts mach,rn */
1645 { 0x001a, SETS1
| USESSP
}, /* sts macl,rn */
1646 { 0x0023, BRANCH
| DELAY
| USES1
}, /* braf rn */
1647 { 0x0029, SETS1
| USESSP
}, /* movt rn */
1648 { 0x002a, SETS1
| USESSP
}, /* sts pr,rn */
1649 { 0x005a, SETS1
| USESSP
}, /* sts fpul,rn */
1650 { 0x006a, SETS1
| USESSP
}, /* sts fpscr,rn / sts dsr,rn */
1651 { 0x0083, LOAD
| USES1
}, /* pref @rn */
1652 { 0x007a, SETS1
| USESSP
}, /* sts a0,rn */
1653 { 0x008a, SETS1
| USESSP
}, /* sts x0,rn */
1654 { 0x009a, SETS1
| USESSP
}, /* sts x1,rn */
1655 { 0x00aa, SETS1
| USESSP
}, /* sts y0,rn */
1656 { 0x00ba, SETS1
| USESSP
} /* sts y1,rn */
1659 static const struct sh_opcode sh_opcode02
[] =
1661 { 0x0002, SETS1
| USESSP
}, /* stc <special_reg>,rn */
1662 { 0x0004, STORE
| USES1
| USES2
| USESR0
}, /* mov.b rm,@(r0,rn) */
1663 { 0x0005, STORE
| USES1
| USES2
| USESR0
}, /* mov.w rm,@(r0,rn) */
1664 { 0x0006, STORE
| USES1
| USES2
| USESR0
}, /* mov.l rm,@(r0,rn) */
1665 { 0x0007, SETSSP
| USES1
| USES2
}, /* mul.l rm,rn */
1666 { 0x000c, LOAD
| SETS1
| USES2
| USESR0
}, /* mov.b @(r0,rm),rn */
1667 { 0x000d, LOAD
| SETS1
| USES2
| USESR0
}, /* mov.w @(r0,rm),rn */
1668 { 0x000e, LOAD
| SETS1
| USES2
| USESR0
}, /* mov.l @(r0,rm),rn */
1669 { 0x000f, LOAD
|SETS1
|SETS2
|SETSSP
|USES1
|USES2
|USESSP
}, /* mac.l @rm+,@rn+ */
1672 static const struct sh_minor_opcode sh_opcode0
[] =
1674 { MAP (sh_opcode00
), 0xffff },
1675 { MAP (sh_opcode01
), 0xf0ff },
1676 { MAP (sh_opcode02
), 0xf00f }
1679 static const struct sh_opcode sh_opcode10
[] =
1681 { 0x1000, STORE
| USES1
| USES2
} /* mov.l rm,@(disp,rn) */
1684 static const struct sh_minor_opcode sh_opcode1
[] =
1686 { MAP (sh_opcode10
), 0xf000 }
1689 static const struct sh_opcode sh_opcode20
[] =
1691 { 0x2000, STORE
| USES1
| USES2
}, /* mov.b rm,@rn */
1692 { 0x2001, STORE
| USES1
| USES2
}, /* mov.w rm,@rn */
1693 { 0x2002, STORE
| USES1
| USES2
}, /* mov.l rm,@rn */
1694 { 0x2004, STORE
| SETS1
| USES1
| USES2
}, /* mov.b rm,@-rn */
1695 { 0x2005, STORE
| SETS1
| USES1
| USES2
}, /* mov.w rm,@-rn */
1696 { 0x2006, STORE
| SETS1
| USES1
| USES2
}, /* mov.l rm,@-rn */
1697 { 0x2007, SETSSP
| USES1
| USES2
| USESSP
}, /* div0s */
1698 { 0x2008, SETSSP
| USES1
| USES2
}, /* tst rm,rn */
1699 { 0x2009, SETS1
| USES1
| USES2
}, /* and rm,rn */
1700 { 0x200a, SETS1
| USES1
| USES2
}, /* xor rm,rn */
1701 { 0x200b, SETS1
| USES1
| USES2
}, /* or rm,rn */
1702 { 0x200c, SETSSP
| USES1
| USES2
}, /* cmp/str rm,rn */
1703 { 0x200d, SETS1
| USES1
| USES2
}, /* xtrct rm,rn */
1704 { 0x200e, SETSSP
| USES1
| USES2
}, /* mulu.w rm,rn */
1705 { 0x200f, SETSSP
| USES1
| USES2
} /* muls.w rm,rn */
1708 static const struct sh_minor_opcode sh_opcode2
[] =
1710 { MAP (sh_opcode20
), 0xf00f }
1713 static const struct sh_opcode sh_opcode30
[] =
1715 { 0x3000, SETSSP
| USES1
| USES2
}, /* cmp/eq rm,rn */
1716 { 0x3002, SETSSP
| USES1
| USES2
}, /* cmp/hs rm,rn */
1717 { 0x3003, SETSSP
| USES1
| USES2
}, /* cmp/ge rm,rn */
1718 { 0x3004, SETSSP
| USESSP
| USES1
| USES2
}, /* div1 rm,rn */
1719 { 0x3005, SETSSP
| USES1
| USES2
}, /* dmulu.l rm,rn */
1720 { 0x3006, SETSSP
| USES1
| USES2
}, /* cmp/hi rm,rn */
1721 { 0x3007, SETSSP
| USES1
| USES2
}, /* cmp/gt rm,rn */
1722 { 0x3008, SETS1
| USES1
| USES2
}, /* sub rm,rn */
1723 { 0x300a, SETS1
| SETSSP
| USES1
| USES2
| USESSP
}, /* subc rm,rn */
1724 { 0x300b, SETS1
| SETSSP
| USES1
| USES2
}, /* subv rm,rn */
1725 { 0x300c, SETS1
| USES1
| USES2
}, /* add rm,rn */
1726 { 0x300d, SETSSP
| USES1
| USES2
}, /* dmuls.l rm,rn */
1727 { 0x300e, SETS1
| SETSSP
| USES1
| USES2
| USESSP
}, /* addc rm,rn */
1728 { 0x300f, SETS1
| SETSSP
| USES1
| USES2
} /* addv rm,rn */
1731 static const struct sh_minor_opcode sh_opcode3
[] =
1733 { MAP (sh_opcode30
), 0xf00f }
1736 static const struct sh_opcode sh_opcode40
[] =
1738 { 0x4000, SETS1
| SETSSP
| USES1
}, /* shll rn */
1739 { 0x4001, SETS1
| SETSSP
| USES1
}, /* shlr rn */
1740 { 0x4002, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l mach,@-rn */
1741 { 0x4004, SETS1
| SETSSP
| USES1
}, /* rotl rn */
1742 { 0x4005, SETS1
| SETSSP
| USES1
}, /* rotr rn */
1743 { 0x4006, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,mach */
1744 { 0x4008, SETS1
| USES1
}, /* shll2 rn */
1745 { 0x4009, SETS1
| USES1
}, /* shlr2 rn */
1746 { 0x400a, SETSSP
| USES1
}, /* lds rm,mach */
1747 { 0x400b, BRANCH
| DELAY
| USES1
}, /* jsr @rn */
1748 { 0x4010, SETS1
| SETSSP
| USES1
}, /* dt rn */
1749 { 0x4011, SETSSP
| USES1
}, /* cmp/pz rn */
1750 { 0x4012, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l macl,@-rn */
1751 { 0x4014, SETSSP
| USES1
}, /* setrc rm */
1752 { 0x4015, SETSSP
| USES1
}, /* cmp/pl rn */
1753 { 0x4016, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,macl */
1754 { 0x4018, SETS1
| USES1
}, /* shll8 rn */
1755 { 0x4019, SETS1
| USES1
}, /* shlr8 rn */
1756 { 0x401a, SETSSP
| USES1
}, /* lds rm,macl */
1757 { 0x401b, LOAD
| SETSSP
| USES1
}, /* tas.b @rn */
1758 { 0x4020, SETS1
| SETSSP
| USES1
}, /* shal rn */
1759 { 0x4021, SETS1
| SETSSP
| USES1
}, /* shar rn */
1760 { 0x4022, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l pr,@-rn */
1761 { 0x4024, SETS1
| SETSSP
| USES1
| USESSP
}, /* rotcl rn */
1762 { 0x4025, SETS1
| SETSSP
| USES1
| USESSP
}, /* rotcr rn */
1763 { 0x4026, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,pr */
1764 { 0x4028, SETS1
| USES1
}, /* shll16 rn */
1765 { 0x4029, SETS1
| USES1
}, /* shlr16 rn */
1766 { 0x402a, SETSSP
| USES1
}, /* lds rm,pr */
1767 { 0x402b, BRANCH
| DELAY
| USES1
}, /* jmp @rn */
1768 { 0x4052, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l fpul,@-rn */
1769 { 0x4056, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,fpul */
1770 { 0x405a, SETSSP
| USES1
}, /* lds.l rm,fpul */
1771 { 0x4062, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l fpscr / dsr,@-rn */
1772 { 0x4066, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,fpscr / dsr */
1773 { 0x406a, SETSSP
| USES1
}, /* lds rm,fpscr / lds rm,dsr */
1774 { 0x4072, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l a0,@-rn */
1775 { 0x4076, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,a0 */
1776 { 0x407a, SETSSP
| USES1
}, /* lds.l rm,a0 */
1777 { 0x4082, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l x0,@-rn */
1778 { 0x4086, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,x0 */
1779 { 0x408a, SETSSP
| USES1
}, /* lds.l rm,x0 */
1780 { 0x4092, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l x1,@-rn */
1781 { 0x4096, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,x1 */
1782 { 0x409a, SETSSP
| USES1
}, /* lds.l rm,x1 */
1783 { 0x40a2, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l y0,@-rn */
1784 { 0x40a6, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,y0 */
1785 { 0x40aa, SETSSP
| USES1
}, /* lds.l rm,y0 */
1786 { 0x40b2, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l y1,@-rn */
1787 { 0x40b6, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,y1 */
1788 { 0x40ba, SETSSP
| USES1
} /* lds.l rm,y1 */
1791 static const struct sh_opcode sh_opcode41
[] =
1793 { 0x4003, STORE
| SETS1
| USES1
| USESSP
}, /* stc.l <special_reg>,@-rn */
1794 { 0x4007, LOAD
| SETS1
| SETSSP
| USES1
}, /* ldc.l @rm+,<special_reg> */
1795 { 0x400c, SETS1
| USES1
| USES2
}, /* shad rm,rn */
1796 { 0x400d, SETS1
| USES1
| USES2
}, /* shld rm,rn */
1797 { 0x400e, SETSSP
| USES1
}, /* ldc rm,<special_reg> */
1798 { 0x400f, LOAD
|SETS1
|SETS2
|SETSSP
|USES1
|USES2
|USESSP
}, /* mac.w @rm+,@rn+ */
1801 static const struct sh_minor_opcode sh_opcode4
[] =
1803 { MAP (sh_opcode40
), 0xf0ff },
1804 { MAP (sh_opcode41
), 0xf00f }
1807 static const struct sh_opcode sh_opcode50
[] =
1809 { 0x5000, LOAD
| SETS1
| USES2
} /* mov.l @(disp,rm),rn */
1812 static const struct sh_minor_opcode sh_opcode5
[] =
1814 { MAP (sh_opcode50
), 0xf000 }
1817 static const struct sh_opcode sh_opcode60
[] =
1819 { 0x6000, LOAD
| SETS1
| USES2
}, /* mov.b @rm,rn */
1820 { 0x6001, LOAD
| SETS1
| USES2
}, /* mov.w @rm,rn */
1821 { 0x6002, LOAD
| SETS1
| USES2
}, /* mov.l @rm,rn */
1822 { 0x6003, SETS1
| USES2
}, /* mov rm,rn */
1823 { 0x6004, LOAD
| SETS1
| SETS2
| USES2
}, /* mov.b @rm+,rn */
1824 { 0x6005, LOAD
| SETS1
| SETS2
| USES2
}, /* mov.w @rm+,rn */
1825 { 0x6006, LOAD
| SETS1
| SETS2
| USES2
}, /* mov.l @rm+,rn */
1826 { 0x6007, SETS1
| USES2
}, /* not rm,rn */
1827 { 0x6008, SETS1
| USES2
}, /* swap.b rm,rn */
1828 { 0x6009, SETS1
| USES2
}, /* swap.w rm,rn */
1829 { 0x600a, SETS1
| SETSSP
| USES2
| USESSP
}, /* negc rm,rn */
1830 { 0x600b, SETS1
| USES2
}, /* neg rm,rn */
1831 { 0x600c, SETS1
| USES2
}, /* extu.b rm,rn */
1832 { 0x600d, SETS1
| USES2
}, /* extu.w rm,rn */
1833 { 0x600e, SETS1
| USES2
}, /* exts.b rm,rn */
1834 { 0x600f, SETS1
| USES2
} /* exts.w rm,rn */
1837 static const struct sh_minor_opcode sh_opcode6
[] =
1839 { MAP (sh_opcode60
), 0xf00f }
1842 static const struct sh_opcode sh_opcode70
[] =
1844 { 0x7000, SETS1
| USES1
} /* add #imm,rn */
1847 static const struct sh_minor_opcode sh_opcode7
[] =
1849 { MAP (sh_opcode70
), 0xf000 }
1852 static const struct sh_opcode sh_opcode80
[] =
1854 { 0x8000, STORE
| USES2
| USESR0
}, /* mov.b r0,@(disp,rn) */
1855 { 0x8100, STORE
| USES2
| USESR0
}, /* mov.w r0,@(disp,rn) */
1856 { 0x8200, SETSSP
}, /* setrc #imm */
1857 { 0x8400, LOAD
| SETSR0
| USES2
}, /* mov.b @(disp,rm),r0 */
1858 { 0x8500, LOAD
| SETSR0
| USES2
}, /* mov.w @(disp,rn),r0 */
1859 { 0x8800, SETSSP
| USESR0
}, /* cmp/eq #imm,r0 */
1860 { 0x8900, BRANCH
| USESSP
}, /* bt label */
1861 { 0x8b00, BRANCH
| USESSP
}, /* bf label */
1862 { 0x8c00, SETSSP
}, /* ldrs @(disp,pc) */
1863 { 0x8d00, BRANCH
| DELAY
| USESSP
}, /* bt/s label */
1864 { 0x8e00, SETSSP
}, /* ldre @(disp,pc) */
1865 { 0x8f00, BRANCH
| DELAY
| USESSP
} /* bf/s label */
1868 static const struct sh_minor_opcode sh_opcode8
[] =
1870 { MAP (sh_opcode80
), 0xff00 }
static const struct sh_opcode sh_opcode90[] =
{
  { 0x9000, LOAD | SETS1 }                      /* mov.w @(disp,pc),rn */
};

static const struct sh_minor_opcode sh_opcode9[] =
{
  { MAP (sh_opcode90), 0xf000 }
};

static const struct sh_opcode sh_opcodea0[] =
{
  { 0xa000, BRANCH | DELAY }                    /* bra label */
};

static const struct sh_minor_opcode sh_opcodea[] =
{
  { MAP (sh_opcodea0), 0xf000 }
};

static const struct sh_opcode sh_opcodeb0[] =
{
  { 0xb000, BRANCH | DELAY }                    /* bsr label */
};

static const struct sh_minor_opcode sh_opcodeb[] =
{
  { MAP (sh_opcodeb0), 0xf000 }
};

static const struct sh_opcode sh_opcodec0[] =
{
  { 0xc000, STORE | USESR0 | USESSP },          /* mov.b r0,@(disp,gbr) */
  { 0xc100, STORE | USESR0 | USESSP },          /* mov.w r0,@(disp,gbr) */
  { 0xc200, STORE | USESR0 | USESSP },          /* mov.l r0,@(disp,gbr) */
  { 0xc300, BRANCH | USESSP },                  /* trapa #imm */
  { 0xc400, LOAD | SETSR0 | USESSP },           /* mov.b @(disp,gbr),r0 */
  { 0xc500, LOAD | SETSR0 | USESSP },           /* mov.w @(disp,gbr),r0 */
  { 0xc600, LOAD | SETSR0 | USESSP },           /* mov.l @(disp,gbr),r0 */
  { 0xc700, SETSR0 },                           /* mova @(disp,pc),r0 */
  { 0xc800, SETSSP | USESR0 },                  /* tst #imm,r0 */
  { 0xc900, SETSR0 | USESR0 },                  /* and #imm,r0 */
  { 0xca00, SETSR0 | USESR0 },                  /* xor #imm,r0 */
  { 0xcb00, SETSR0 | USESR0 },                  /* or #imm,r0 */
  { 0xcc00, LOAD | SETSSP | USESR0 | USESSP },  /* tst.b #imm,@(r0,gbr) */
  { 0xcd00, LOAD | STORE | USESR0 | USESSP },   /* and.b #imm,@(r0,gbr) */
  { 0xce00, LOAD | STORE | USESR0 | USESSP },   /* xor.b #imm,@(r0,gbr) */
  { 0xcf00, LOAD | STORE | USESR0 | USESSP }    /* or.b #imm,@(r0,gbr) */
};

static const struct sh_minor_opcode sh_opcodec[] =
{
  { MAP (sh_opcodec0), 0xff00 }
};

static const struct sh_opcode sh_opcoded0[] =
{
  { 0xd000, LOAD | SETS1 }                      /* mov.l @(disp,pc),rn */
};

static const struct sh_minor_opcode sh_opcoded[] =
{
  { MAP (sh_opcoded0), 0xf000 }
};

static const struct sh_opcode sh_opcodee0[] =
{
  { 0xe000, SETS1 }                             /* mov #imm,rn */
};

static const struct sh_minor_opcode sh_opcodee[] =
{
  { MAP (sh_opcodee0), 0xf000 }
};

static const struct sh_opcode sh_opcodef0[] =
{
  { 0xf000, SETSF1 | USESF1 | USESF2 },         /* fadd fm,fn */
  { 0xf001, SETSF1 | USESF1 | USESF2 },         /* fsub fm,fn */
  { 0xf002, SETSF1 | USESF1 | USESF2 },         /* fmul fm,fn */
  { 0xf003, SETSF1 | USESF1 | USESF2 },         /* fdiv fm,fn */
  { 0xf004, SETSSP | USESF1 | USESF2 },         /* fcmp/eq fm,fn */
  { 0xf005, SETSSP | USESF1 | USESF2 },         /* fcmp/gt fm,fn */
  { 0xf006, LOAD | SETSF1 | USES2 | USESR0 },   /* fmov.s @(r0,rm),fn */
  { 0xf007, STORE | USES1 | USESF2 | USESR0 },  /* fmov.s fm,@(r0,rn) */
  { 0xf008, LOAD | SETSF1 | USES2 },            /* fmov.s @rm,fn */
  { 0xf009, LOAD | SETS2 | SETSF1 | USES2 },    /* fmov.s @rm+,fn */
  { 0xf00a, STORE | USES1 | USESF2 },           /* fmov.s fm,@rn */
  { 0xf00b, STORE | SETS1 | USES1 | USESF2 },   /* fmov.s fm,@-rn */
  { 0xf00c, SETSF1 | USESF2 },                  /* fmov fm,fn */
  { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
};

static const struct sh_opcode sh_opcodef1[] =
{
  { 0xf00d, SETSF1 | USESSP },                  /* fsts fpul,fn */
  { 0xf01d, SETSSP | USESF1 },                  /* flds fn,fpul */
  { 0xf02d, SETSF1 | USESSP },                  /* float fpul,fn */
  { 0xf03d, SETSSP | USESF1 },                  /* ftrc fn,fpul */
  { 0xf04d, SETSF1 | USESF1 },                  /* fneg fn */
  { 0xf05d, SETSF1 | USESF1 },                  /* fabs fn */
  { 0xf06d, SETSF1 | USESF1 },                  /* fsqrt fn */
  { 0xf07d, SETSSP | USESF1 },                  /* ftst/nan fn */
  { 0xf08d, SETSF1 },                           /* fldi0 fn */
  { 0xf09d, SETSF1 }                            /* fldi1 fn */
};

static const struct sh_minor_opcode sh_opcodef[] =
{
  { MAP (sh_opcodef0), 0xf00f },
  { MAP (sh_opcodef1), 0xf0ff }
};

static struct sh_major_opcode sh_opcodes[] =
{
  { MAP (sh_opcode0) },
  { MAP (sh_opcode1) },
  { MAP (sh_opcode2) },
  { MAP (sh_opcode3) },
  { MAP (sh_opcode4) },
  { MAP (sh_opcode5) },
  { MAP (sh_opcode6) },
  { MAP (sh_opcode7) },
  { MAP (sh_opcode8) },
  { MAP (sh_opcode9) },
  { MAP (sh_opcodea) },
  { MAP (sh_opcodeb) },
  { MAP (sh_opcodec) },
  { MAP (sh_opcoded) },
  { MAP (sh_opcodee) },
  { MAP (sh_opcodef) }
};
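
/* A worked example of how the tables above are consulted (the value
   is chosen purely for illustration): for the instruction word
   0xc704, the major index is (0xc704 & 0xf000) >> 12 == 0xc, which
   selects sh_opcodec; its single minor entry masks the word with
   0xff00, giving 0xc700, which matches the "mova @(disp,pc),r0"
   entry and so yields the flags SETSR0.  */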
/* The double data transfer / parallel processing insns are not
   described here.  This will cause sh_align_load_span to leave them alone.  */

static const struct sh_opcode sh_dsp_opcodef0[] =
{
  { 0xf400, USESAS | SETSAS | LOAD | SETSSP },          /* movs.x @-as,ds */
  { 0xf401, USESAS | SETSAS | STORE | USESSP },         /* movs.x ds,@-as */
  { 0xf404, USESAS | LOAD | SETSSP },                   /* movs.x @as,ds */
  { 0xf405, USESAS | STORE | USESSP },                  /* movs.x ds,@as */
  { 0xf408, USESAS | SETSAS | LOAD | SETSSP },          /* movs.x @as+,ds */
  { 0xf409, USESAS | SETSAS | STORE | USESSP },         /* movs.x ds,@as+ */
  { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 }, /* movs.x @as+r8,ds */
  { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 } /* movs.x ds,@as+r8 */
};

static const struct sh_minor_opcode sh_dsp_opcodef[] =
{
  { MAP (sh_dsp_opcodef0), 0xfc0d }
};
/* Given an instruction, return a pointer to the corresponding
   sh_opcode structure.  Return NULL if the instruction is not
   recognized.  */

static const struct sh_opcode *
sh_insn_info (insn)
     unsigned int insn;
{
  const struct sh_major_opcode *maj;
  const struct sh_minor_opcode *min, *minend;

  maj = &sh_opcodes[(insn & 0xf000) >> 12];
  min = maj->minor_opcodes;
  minend = min + maj->count;
  for (; min < minend; min++)
    {
      unsigned int l;
      const struct sh_opcode *op, *opend;

      l = insn & min->mask;
      op = min->opcodes;
      opend = op + min->count;

      /* Since the opcodes tables are sorted, we could use a binary
         search here if the count were above some cutoff value.  */
      for (; op < opend; op++)
        if (op->opcode == l)
          return op;
    }

  return NULL;
}
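
/* A minimal usage sketch of the lookup above (illustrative only; the
   helper name is made up and the block is deliberately not compiled
   into the backend).  It classifies a raw instruction word by asking
   sh_insn_info for its flags.  */
#if 0
static bfd_boolean
example_insn_is_memory_op (insn)
     unsigned int insn;
{
  const struct sh_opcode *op;

  /* Unrecognized instructions are conservatively treated as not
     being memory operations.  */
  op = sh_insn_info (insn);
  if (op == NULL)
    return FALSE;
  return (op->flags & (LOAD | STORE)) != 0;
}
#endif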
/* See whether an instruction uses or sets a general purpose register.  */

static bfd_boolean
sh_insn_uses_or_sets_reg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  if (sh_insn_uses_reg (insn, op, reg))
    return TRUE;

  return sh_insn_sets_reg (insn, op, reg);
}
/* See whether an instruction uses a general purpose register.  */

static bfd_boolean
sh_insn_uses_reg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  unsigned int f;

  f = op->flags;

  if ((f & USES1) != 0
      && USES1_REG (insn) == reg)
    return TRUE;
  if ((f & USES2) != 0
      && USES2_REG (insn) == reg)
    return TRUE;
  if ((f & USESR0) != 0
      && reg == 0)
    return TRUE;
  if ((f & USESAS) && reg == USESAS_REG (insn))
    return TRUE;
  if ((f & USESR8) && reg == 8)
    return TRUE;

  return FALSE;
}
/* See whether an instruction sets a general purpose register.  */

static bfd_boolean
sh_insn_sets_reg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  unsigned int f;

  f = op->flags;

  if ((f & SETS1) != 0
      && SETS1_REG (insn) == reg)
    return TRUE;
  if ((f & SETS2) != 0
      && SETS2_REG (insn) == reg)
    return TRUE;
  if ((f & SETSR0) != 0
      && reg == 0)
    return TRUE;
  if ((f & SETSAS) && reg == SETSAS_REG (insn))
    return TRUE;

  return FALSE;
}
/* See whether an instruction uses or sets a floating point register.  */

static bfd_boolean
sh_insn_uses_or_sets_freg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  if (sh_insn_uses_freg (insn, op, reg))
    return TRUE;

  return sh_insn_sets_freg (insn, op, reg);
}
/* See whether an instruction uses a floating point register.  */

static bfd_boolean
sh_insn_uses_freg (insn, op, freg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int freg;
{
  unsigned int f;

  f = op->flags;

  /* We can't tell if this is a double-precision insn, so just play safe
     and assume that it might be.  That means we can't simply test FREG
     against itself: an even FREG must also be checked against FREG+1
     (in case the using insn only touches the low part of a double
     precision value), and an odd FREG against FREG-1 (in case the
     setting insn only sets the low part).  What this boils down to is
     that we have to ignore the lowest bit of the register number.  */

  if ((f & USESF1) != 0
      && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
    return TRUE;
  if ((f & USESF2) != 0
      && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
    return TRUE;
  if ((f & USESF0) != 0
      && freg == 0)
    return TRUE;

  return FALSE;
}
/* See whether an instruction sets a floating point register.  */

static bfd_boolean
sh_insn_sets_freg (insn, op, freg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int freg;
{
  unsigned int f;

  f = op->flags;

  /* As in sh_insn_uses_freg, we can't tell whether this is a
     double-precision insn, so we play safe and ignore the lowest bit
     of the register number.  */

  if ((f & SETSF1) != 0
      && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
    return TRUE;

  return FALSE;
}
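
/* A worked example of the low-bit masking above (illustrative
   numbers): an insn recorded as using FR7 gives USESF1_REG == 7,
   which masks to 6, so it is treated as touching the FR6/FR7 pair;
   a query about FR6 (freg == 6) therefore also matches, which is
   exactly the conservative behaviour wanted for possible
   double-precision operands.  */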
/* See whether instructions I1 and I2 conflict, assuming I1 comes
   before I2.  OP1 and OP2 are the corresponding sh_opcode structures.
   This should return TRUE if there is a conflict, or FALSE if the
   instructions can be swapped safely.  */

static bfd_boolean
sh_insns_conflict (i1, op1, i2, op2)
     unsigned int i1;
     const struct sh_opcode *op1;
     unsigned int i2;
     const struct sh_opcode *op2;
{
  unsigned int f1, f2;

  f1 = op1->flags;
  f2 = op2->flags;

  /* Load of fpscr conflicts with floating point operations.
     FIXME: shouldn't test raw opcodes here.  */
  if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
      || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
    return TRUE;

  if ((f1 & (BRANCH | DELAY)) != 0
      || (f2 & (BRANCH | DELAY)) != 0)
    return TRUE;

  if (((f1 | f2) & SETSSP)
      && (f1 & (SETSSP | USESSP))
      && (f2 & (SETSSP | USESSP)))
    return TRUE;

  if ((f1 & SETS1) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
    return TRUE;
  if ((f1 & SETS2) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
    return TRUE;
  if ((f1 & SETSR0) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, 0))
    return TRUE;
  if ((f1 & SETSAS)
      && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
    return TRUE;
  if ((f1 & SETSF1) != 0
      && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
    return TRUE;

  if ((f2 & SETS1) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
    return TRUE;
  if ((f2 & SETS2) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
    return TRUE;
  if ((f2 & SETSR0) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, 0))
    return TRUE;
  if ((f2 & SETSAS)
      && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
    return TRUE;
  if ((f2 & SETSF1) != 0
      && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
    return TRUE;

  /* The instructions do not conflict.  */
  return FALSE;
}
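
/* For instance (encodings omitted, purely illustrative): "mov r1,r2"
   sets r2, and "add r2,r3" uses r2, so sh_insn_uses_or_sets_reg
   reports a hit and the pair is left in order; "mov r1,r2" followed
   by "add r4,r5" touches disjoint registers and may be swapped.
   Branches and insns with delay slots are always treated as
   conflicting, as above.  */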
/* I1 is a load instruction, and I2 is some other instruction.  Return
   TRUE if I1 loads a register which I2 uses.  */

static bfd_boolean
sh_load_use (i1, op1, i2, op2)
     unsigned int i1;
     const struct sh_opcode *op1;
     unsigned int i2;
     const struct sh_opcode *op2;
{
  unsigned int f1;

  f1 = op1->flags;

  if ((f1 & LOAD) == 0)
    return FALSE;

  /* If both SETS1 and SETSSP are set, that means a load to a special
     register using postincrement addressing mode, which we don't care
     about here.  */
  if ((f1 & SETS1) != 0
      && (f1 & SETSSP) == 0
      && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
    return TRUE;

  if ((f1 & SETSR0) != 0
      && sh_insn_uses_reg (i2, op2, 0))
    return TRUE;

  if ((f1 & SETSF1) != 0
      && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
    return TRUE;

  return FALSE;
}
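
/* A worked example (illustrative): "mov.l @r1,r2" is a LOAD that sets
   r2 via SETS1, so for a following "add r2,r3", which uses r2,
   sh_load_use returns TRUE; the swapping code below then avoids
   placing the two back to back, since the use would stall waiting
   for the load.  */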
/* Try to align loads and stores within a span of memory.  This is
   called by both the ELF and the COFF sh targets.  ABFD and SEC are
   the BFD and section we are examining.  CONTENTS is the contents of
   the section.  SWAP is the routine to call to swap two instructions.
   RELOCS is a pointer to the internal relocation information, to be
   passed to SWAP.  PLABEL is a pointer to the current label in a
   sorted list of labels; LABEL_END is the end of the list.  START and
   STOP are the range of memory to examine.  If a swap is made,
   *PSWAPPED is set to TRUE.  */

bfd_boolean
_bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
                         plabel, label_end, start, stop, pswapped)
     bfd *abfd;
     asection *sec;
     bfd_byte *contents;
     bfd_boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
     PTR relocs;
     bfd_vma **plabel;
     bfd_vma *label_end;
     bfd_vma start;
     bfd_vma stop;
     bfd_boolean *pswapped;
{
  int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
             || abfd->arch_info->mach == bfd_mach_sh3_dsp);
  bfd_vma i;

  /* The SH4 has a Harvard architecture, hence aligning loads is not
     desirable.  In fact, it is counter-productive, since it interferes
     with the schedules generated by the compiler.  */
  if (abfd->arch_info->mach == bfd_mach_sh4)
    return TRUE;

  /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
     instructions.  */
  if (dsp)
    {
      sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
      sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef [0];
    }

  /* Instructions should be aligned on 2 byte boundaries.  */
  if ((start & 1) == 1)
    ++start;

  /* Now look through the unaligned addresses.  */
  i = start;
  if ((i & 2) == 0)
    i += 2;
  for (; i < stop; i += 4)
    {
      unsigned int insn;
      const struct sh_opcode *op;
      unsigned int prev_insn = 0;
      const struct sh_opcode *prev_op = NULL;

      insn = bfd_get_16 (abfd, contents + i);
      op = sh_insn_info (insn);
      if (op == NULL
          || (op->flags & (LOAD | STORE)) == 0)
        continue;

      /* This is a load or store which is not on a four byte boundary.  */

      while (*plabel < label_end && **plabel < i)
        ++*plabel;

      if (i > start)
        {
          prev_insn = bfd_get_16 (abfd, contents + i - 2);
          /* If INSN is the field b of a parallel processing insn, it is not
             a load / store after all.  Note that the test here might mistake
             the field_b of a pcopy insn for the starting code of a parallel
             processing insn; this might miss a swapping opportunity, but at
             least we're on the safe side.  */
          if (dsp && (prev_insn & 0xfc00) == 0xf800)
            continue;

          /* Check if prev_insn is actually the field b of a parallel
             processing insn.  Again, this can give a spurious match
             after a pcopy.  */
          if (dsp && i - 2 > start)
            {
              unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);

              if ((pprev_insn & 0xfc00) == 0xf800)
                prev_op = NULL;
              else
                prev_op = sh_insn_info (prev_insn);
            }
          else
            prev_op = sh_insn_info (prev_insn);

          /* If the load/store instruction is in a delay slot, we
             can't swap.  */
          if (prev_op == NULL
              || (prev_op->flags & DELAY) != 0)
            continue;
        }
      if (i > start
          && (*plabel >= label_end || **plabel != i)
          && prev_op != NULL
          && (prev_op->flags & (LOAD | STORE)) == 0
          && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
        {
          bfd_boolean ok;

          /* The load/store instruction does not have a label, and
             there is a previous instruction; PREV_INSN is not
             itself a load/store instruction, and PREV_INSN and
             INSN do not conflict.  */

          ok = TRUE;

          if (i >= start + 4)
            {
              unsigned int prev2_insn;
              const struct sh_opcode *prev2_op;

              prev2_insn = bfd_get_16 (abfd, contents + i - 4);
              prev2_op = sh_insn_info (prev2_insn);

              /* If the instruction before PREV_INSN has a delay
                 slot--that is, PREV_INSN is in a delay slot--we
                 can not swap.  */
              if (prev2_op == NULL
                  || (prev2_op->flags & DELAY) != 0)
                ok = FALSE;

              /* If the instruction before PREV_INSN is a load,
                 and it sets a register which INSN uses, then
                 putting INSN immediately after PREV_INSN will
                 cause a pipeline bubble, so there is no point to
                 making the swap.  */
              if (ok
                  && (prev2_op->flags & LOAD) != 0
                  && sh_load_use (prev2_insn, prev2_op, insn, op))
                ok = FALSE;
            }

          if (ok)
            {
              if (! (*swap) (abfd, sec, relocs, contents, i - 2))
                return FALSE;
              *pswapped = TRUE;
              continue;
            }
        }

      while (*plabel < label_end && **plabel < i + 2)
        ++*plabel;

      if (i + 2 < stop
          && (*plabel >= label_end || **plabel != i + 2))
        {
          unsigned int next_insn;
          const struct sh_opcode *next_op;

          /* There is an instruction after the load/store
             instruction, and it does not have a label.  */
          next_insn = bfd_get_16 (abfd, contents + i + 2);
          next_op = sh_insn_info (next_insn);
          if (next_op != NULL
              && (next_op->flags & (LOAD | STORE)) == 0
              && ! sh_insns_conflict (insn, op, next_insn, next_op))
            {
              bfd_boolean ok;

              /* NEXT_INSN is not itself a load/store instruction,
                 and it does not conflict with INSN.  */

              ok = TRUE;

              /* If PREV_INSN is a load, and it sets a register
                 which NEXT_INSN uses, then putting NEXT_INSN
                 immediately after PREV_INSN will cause a pipeline
                 bubble, so there is no reason to make this swap.  */
              if (prev_op != NULL
                  && (prev_op->flags & LOAD) != 0
                  && sh_load_use (prev_insn, prev_op, next_insn, next_op))
                ok = FALSE;

              /* If INSN is a load, and it sets a register which
                 the insn after NEXT_INSN uses, then doing the
                 swap will cause a pipeline bubble, so there is no
                 reason to make the swap.  However, if the insn
                 after NEXT_INSN is itself a load or store
                 instruction, then it is misaligned, so
                 optimistically hope that it will be swapped
                 itself, and just live with the pipeline bubble if
                 it isn't.  */
              if (ok
                  && i + 4 < stop
                  && (op->flags & LOAD) != 0)
                {
                  unsigned int next2_insn;
                  const struct sh_opcode *next2_op;

                  next2_insn = bfd_get_16 (abfd, contents + i + 4);
                  next2_op = sh_insn_info (next2_insn);
                  if (next2_op == NULL
                      || ((next2_op->flags & (LOAD | STORE)) == 0
                          && sh_load_use (insn, op, next2_insn, next2_op)))
                    ok = FALSE;
                }

              if (ok)
                {
                  if (! (*swap) (abfd, sec, relocs, contents, i))
                    return FALSE;
                  *pswapped = TRUE;
                  continue;
                }
            }
        }
    }

  return TRUE;
}

#endif /* not COFF_IMAGE_WITH_PE */
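
/* An illustrative walk of the scan above (addresses made up): with
   start == 0x100 and stop == 0x110, start is already four byte
   aligned, so the loop begins at i == 0x102 and visits 0x102, 0x106,
   0x10a and 0x10e - exactly the 16-bit slots that are not on a four
   byte boundary and are therefore candidates for swapping.  */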
/* Look for loads and stores which we can align to four byte
   boundaries.  See the longer comment above sh_relax_section for why
   this is desirable.  This sets *PSWAPPED if some instruction was
   swapped.  */

static bfd_boolean
sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
     bfd *abfd;
     asection *sec;
     struct internal_reloc *internal_relocs;
     bfd_byte *contents;
     bfd_boolean *pswapped;
{
  struct internal_reloc *irel, *irelend;
  bfd_vma *labels = NULL;
  bfd_vma *label, *label_end;
  bfd_size_type amt;

  *pswapped = FALSE;

  irelend = internal_relocs + sec->reloc_count;

  /* Get all the addresses with labels on them.  */
  amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
  labels = (bfd_vma *) bfd_malloc (amt);
  if (labels == NULL)
    goto error_return;
  label_end = labels;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      if (irel->r_type == R_SH_LABEL)
        {
          *label_end = irel->r_vaddr - sec->vma;
          ++label_end;
        }
    }

  /* Note that the assembler currently always outputs relocs in
     address order.  If that ever changes, this code will need to sort
     the label values and the relocs.  */

  label = labels;

  for (irel = internal_relocs; irel < irelend; irel++)
    {
      bfd_vma start, stop;

      if (irel->r_type != R_SH_CODE)
        continue;

      start = irel->r_vaddr - sec->vma;

      for (irel++; irel < irelend; irel++)
        if (irel->r_type == R_SH_DATA)
          break;
      if (irel < irelend)
        stop = irel->r_vaddr - sec->vma;
      else
        stop = sec->size;

      if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
                                     (PTR) internal_relocs, &label,
                                     label_end, start, stop, pswapped))
        goto error_return;
    }

  free (labels);

  return TRUE;

 error_return:
  if (labels != NULL)
    free (labels);
  return FALSE;
}
/* Swap two SH instructions.  */

static bfd_boolean
sh_swap_insns (abfd, sec, relocs, contents, addr)
     bfd *abfd;
     asection *sec;
     PTR relocs;
     bfd_byte *contents;
     bfd_vma addr;
{
  struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
  unsigned short i1, i2;
  struct internal_reloc *irel, *irelend;

  /* Swap the instructions themselves.  */
  i1 = bfd_get_16 (abfd, contents + addr);
  i2 = bfd_get_16 (abfd, contents + addr + 2);
  bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
  bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);

  /* Adjust all reloc addresses.  */
  irelend = internal_relocs + sec->reloc_count;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      int type, add;

      /* There are a few special types of relocs that we don't want to
         adjust.  These relocs do not apply to the instruction itself,
         but are only associated with the address.  */
      type = irel->r_type;
      if (type == R_SH_ALIGN
          || type == R_SH_CODE
          || type == R_SH_DATA
          || type == R_SH_LABEL)
        continue;

      /* If an R_SH_USES reloc points to one of the addresses being
         swapped, we must adjust it.  It would be incorrect to do this
         for a jump, though, since we want to execute both
         instructions after the jump.  (We have avoided swapping
         around a label, so the jump will not wind up executing an
         instruction it shouldn't).  */
      if (type == R_SH_USES)
        {
          bfd_vma off;

          off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
          if (off == addr)
            irel->r_offset += 2;
          else if (off == addr + 2)
            irel->r_offset -= 2;
        }

      if (irel->r_vaddr - sec->vma == addr)
        {
          irel->r_vaddr += 2;
          add = -2;
        }
      else if (irel->r_vaddr - sec->vma == addr + 2)
        {
          irel->r_vaddr -= 2;
          add = 2;
        }
      else
        add = 0;

      if (add != 0)
        {
          bfd_byte *loc;
          unsigned short insn, oinsn;
          bfd_boolean overflow;

          loc = contents + irel->r_vaddr - sec->vma;
          overflow = FALSE;
          switch (type)
            {
            default:
              break;

            case R_SH_PCDISP8BY2:
            case R_SH_PCRELIMM8BY2:
              insn = bfd_get_16 (abfd, loc);
              oinsn = insn;
              insn += add / 2;
              if ((oinsn & 0xff00) != (insn & 0xff00))
                overflow = TRUE;
              bfd_put_16 (abfd, (bfd_vma) insn, loc);
              break;

            case R_SH_PCDISP:
              insn = bfd_get_16 (abfd, loc);
              oinsn = insn;
              insn += add / 2;
              if ((oinsn & 0xf000) != (insn & 0xf000))
                overflow = TRUE;
              bfd_put_16 (abfd, (bfd_vma) insn, loc);
              break;

            case R_SH_PCRELIMM8BY4:
              /* This reloc ignores the least significant 3 bits of
                 the program counter before adding in the offset.
                 This means that if ADDR is at an even address, the
                 swap will not affect the offset.  If ADDR is at an
                 odd address, then the instruction will be crossing a
                 four byte boundary, and must be adjusted.  */
              if ((addr & 3) != 0)
                {
                  insn = bfd_get_16 (abfd, loc);
                  oinsn = insn;
                  insn += add / 2;
                  if ((oinsn & 0xff00) != (insn & 0xff00))
                    overflow = TRUE;
                  bfd_put_16 (abfd, (bfd_vma) insn, loc);
                }

              break;
            }

          if (overflow)
            {
              ((*_bfd_error_handler)
               ("%B: 0x%lx: fatal: reloc overflow while relaxing",
                abfd, (unsigned long) irel->r_vaddr));
              bfd_set_error (bfd_error_bad_value);
              return FALSE;
            }
        }
    }

  return TRUE;
}
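
/* A worked example of the R_SH_USES adjustment above (illustrative
   numbers): with sec->vma == 0, a reloc at r_vaddr 0x20 with
   r_offset 0x8 names the address 0x20 + 4 + 0x8 == 0x2c.  If the
   swap is at ADDR == 0x2c, the instruction originally at 0x2c now
   sits at 0x2e, so r_offset is bumped to 0xa and the reloc keeps
   naming the same instruction; the mirror-image case at ADDR + 2 is
   handled by subtracting 2 instead.  */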
/* This is a modification of _bfd_coff_generic_relocate_section, which
   will handle SH relaxing.  */

static bfd_boolean
sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
                     relocs, syms, sections)
     bfd *output_bfd ATTRIBUTE_UNUSED;
     struct bfd_link_info *info;
     bfd *input_bfd;
     asection *input_section;
     bfd_byte *contents;
     struct internal_reloc *relocs;
     struct internal_syment *syms;
     asection **sections;
{
  struct internal_reloc *rel;
  struct internal_reloc *relend;

  rel = relocs;
  relend = rel + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      long symndx;
      struct coff_link_hash_entry *h;
      struct internal_syment *sym;
      bfd_vma addend;
      bfd_vma val;
      reloc_howto_type *howto;
      bfd_reloc_status_type rstat;

      /* Almost all relocs have to do with relaxing.  If any work must
         be done for them, it has been done in sh_relax_section.  */
      if (rel->r_type != R_SH_IMM32
#ifdef COFF_WITH_PE
          && rel->r_type != R_SH_IMM32CE
          && rel->r_type != R_SH_IMAGEBASE
#endif
          && rel->r_type != R_SH_PCDISP)
        continue;

      symndx = rel->r_symndx;

      if (symndx == -1)
        {
          h = NULL;
          sym = NULL;
        }
      else
        {
          if (symndx < 0
              || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
            {
              (*_bfd_error_handler)
                ("%B: illegal symbol index %ld in relocs",
                 input_bfd, symndx);
              bfd_set_error (bfd_error_bad_value);
              return FALSE;
            }
          h = obj_coff_sym_hashes (input_bfd)[symndx];
          sym = syms + symndx;
        }

      if (sym != NULL && sym->n_scnum != 0)
        addend = - sym->n_value;
      else
        addend = 0;

      if (rel->r_type == R_SH_PCDISP)
        addend -= 4;

      if (rel->r_type >= SH_COFF_HOWTO_COUNT)
        howto = NULL;
      else
        howto = &sh_coff_howtos[rel->r_type];

      if (howto == NULL)
        {
          bfd_set_error (bfd_error_bad_value);
          return FALSE;
        }

#ifdef COFF_WITH_PE
      if (rel->r_type == R_SH_IMAGEBASE)
        addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
#endif

      val = 0;

      if (h == NULL)
        {
          asection *sec;

          /* There is nothing to do for an internal PCDISP reloc.  */
          if (rel->r_type == R_SH_PCDISP)
            continue;

          if (symndx == -1)
            {
              sec = bfd_abs_section_ptr;
              val = 0;
            }
          else
            {
              sec = sections[symndx];
              val = (sec->output_section->vma
                     + sec->output_offset
                     + sym->n_value
                     - sec->vma);
            }
        }
      else
        {
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            {
              asection *sec;

              sec = h->root.u.def.section;
              val = (h->root.u.def.value
                     + sec->output_section->vma
                     + sec->output_offset);
            }
          else if (! info->relocatable)
            {
              if (! ((*info->callbacks->undefined_symbol)
                     (info, h->root.root.string, input_bfd, input_section,
                      rel->r_vaddr - input_section->vma, TRUE)))
                return FALSE;
            }
        }

      rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
                                        contents,
                                        rel->r_vaddr - input_section->vma,
                                        val, addend);

      switch (rstat)
        {
        default:
          abort ();
        case bfd_reloc_ok:
          break;
        case bfd_reloc_overflow:
          {
            const char *name;
            char buf[SYMNMLEN + 1];

            if (symndx == -1)
              name = "*ABS*";
            else if (h != NULL)
              name = NULL;
            else if (sym->_n._n_n._n_zeroes == 0
                     && sym->_n._n_n._n_offset != 0)
              name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
            else
              {
                strncpy (buf, sym->_n._n_name, SYMNMLEN);
                buf[SYMNMLEN] = '\0';
                name = buf;
              }

            if (! ((*info->callbacks->reloc_overflow)
                   (info, (h ? &h->root : NULL), name, howto->name,
                    (bfd_vma) 0, input_bfd, input_section,
                    rel->r_vaddr - input_section->vma)))
              return FALSE;
          }
        }
    }

  return TRUE;
}
/* This is a version of bfd_generic_get_relocated_section_contents
   which uses sh_relocate_section.  */

static bfd_byte *
sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
                                        data, relocatable, symbols)
     bfd *output_bfd;
     struct bfd_link_info *link_info;
     struct bfd_link_order *link_order;
     bfd_byte *data;
     bfd_boolean relocatable;
     asymbol **symbols;
{
  asection *input_section = link_order->u.indirect.section;
  bfd *input_bfd = input_section->owner;
  asection **sections = NULL;
  struct internal_reloc *internal_relocs = NULL;
  struct internal_syment *internal_syms = NULL;

  /* We only need to handle the case of relaxing, or of having a
     particular set of section contents, specially.  */
  if (relocatable
      || coff_section_data (input_bfd, input_section) == NULL
      || coff_section_data (input_bfd, input_section)->contents == NULL)
    return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
                                                       link_order, data,
                                                       relocatable,
                                                       symbols);

  memcpy (data, coff_section_data (input_bfd, input_section)->contents,
          (size_t) input_section->size);

  if ((input_section->flags & SEC_RELOC) != 0
      && input_section->reloc_count > 0)
    {
      bfd_size_type symesz = bfd_coff_symesz (input_bfd);
      bfd_byte *esym, *esymend;
      struct internal_syment *isymp;
      asection **secpp;
      bfd_size_type amt;

      if (! _bfd_coff_get_external_symbols (input_bfd))
        goto error_return;

      internal_relocs = (_bfd_coff_read_internal_relocs
                         (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
                          FALSE, (struct internal_reloc *) NULL));
      if (internal_relocs == NULL)
        goto error_return;

      amt = obj_raw_syment_count (input_bfd);
      amt *= sizeof (struct internal_syment);
      internal_syms = (struct internal_syment *) bfd_malloc (amt);
      if (internal_syms == NULL)
        goto error_return;

      amt = obj_raw_syment_count (input_bfd);
      amt *= sizeof (asection *);
      sections = (asection **) bfd_malloc (amt);
      if (sections == NULL)
        goto error_return;

      isymp = internal_syms;
      secpp = sections;
      esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
      esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
      while (esym < esymend)
        {
          bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);

          if (isymp->n_scnum != 0)
            *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
          else
            {
              if (isymp->n_value == 0)
                *secpp = bfd_und_section_ptr;
              else
                *secpp = bfd_com_section_ptr;
            }

          esym += (isymp->n_numaux + 1) * symesz;
          secpp += isymp->n_numaux + 1;
          isymp += isymp->n_numaux + 1;
        }

      if (! sh_relocate_section (output_bfd, link_info, input_bfd,
                                 input_section, data, internal_relocs,
                                 internal_syms, sections))
        goto error_return;

      free (sections);
      sections = NULL;
      free (internal_syms);
      internal_syms = NULL;
      free (internal_relocs);
      internal_relocs = NULL;
    }

  return data;

 error_return:
  if (internal_relocs != NULL)
    free (internal_relocs);
  if (internal_syms != NULL)
    free (internal_syms);
  if (sections != NULL)
    free (sections);
  return NULL;
}
/* The target vectors.  */

#ifndef TARGET_SHL_SYM
CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
#endif

#ifdef TARGET_SHL_SYM
#define TARGET_SYM TARGET_SHL_SYM
#else
#define TARGET_SYM shlcoff_vec
#endif

#ifndef TARGET_SHL_NAME
#define TARGET_SHL_NAME "coff-shl"
#endif

#ifdef COFF_WITH_PE
CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
                               SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
#else
CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
                               0, '_', NULL, COFF_SWAP_TABLE)
#endif

#ifndef TARGET_SHL_SYM
static const bfd_target * coff_small_object_p PARAMS ((bfd *));
static bfd_boolean coff_small_new_section_hook PARAMS ((bfd *, asection *));

/* Some people want versions of the SH COFF target which do not align
   to 16 byte boundaries.  We implement that by adding a couple of new
   target vectors.  These are just like the ones above, but they
   change the default section alignment.  To generate them in the
   assembler, use -small.  To use them in the linker, use -b
   coff-sh{l}-small and -oformat coff-sh{l}-small.

   Yes, this is a horrible hack.  A general solution for setting
   section alignment in COFF is rather complex.  ELF handles this
   correctly.  */

/* Only recognize the small versions if the target was not defaulted.
   Otherwise we won't recognize the non default endianness.  */

static const bfd_target *
coff_small_object_p (abfd)
     bfd *abfd;
{
  if (abfd->target_defaulted)
    {
      bfd_set_error (bfd_error_wrong_format);
      return NULL;
    }
  return coff_object_p (abfd);
}

/* Set the section alignment for the small versions.  */

static bfd_boolean
coff_small_new_section_hook (abfd, section)
     bfd *abfd;
     asection *section;
{
  if (! coff_new_section_hook (abfd, section))
    return FALSE;

  /* We must align to at least a four byte boundary, because longword
     accesses must be on a four byte boundary.  */
  if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
    section->alignment_power = 2;

  return TRUE;
}
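
/* Illustrative command lines for the small vectors described above
   (the tool names are assumptions that depend on how the toolchain
   was configured; they are not defined by this file):

     sh-coff-as -small -o foo.o foo.s
     sh-coff-ld -b coff-sh-small -oformat coff-sh-small -o foo foo.o

   With these vectors a section that would normally default to 2**4
   alignment is given 2**2 alignment instead.  */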
/* This is copied from bfd_coff_std_swap_table so that we can change
   the default section alignment power.  */

static const bfd_coff_backend_data bfd_coff_small_swap_table =
{
  coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
  coff_swap_aux_out, coff_swap_sym_out,
  coff_swap_lineno_out, coff_swap_reloc_out,
  coff_swap_filehdr_out, coff_swap_aouthdr_out,
  coff_swap_scnhdr_out,
  FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
#ifdef COFF_LONG_FILENAMES
  TRUE,
#else
  FALSE,
#endif
#ifdef COFF_LONG_SECTION_NAMES
  TRUE,
#else
  FALSE,
#endif
  2,
#ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
  TRUE,
#else
  FALSE,
#endif
#ifdef COFF_DEBUG_STRING_WIDE_PREFIX
  4,
#else
  2,
#endif
  coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
  coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
  coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
  coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
  coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
  coff_classify_symbol, coff_compute_section_file_positions,
  coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
  coff_adjust_symndx, coff_link_add_one_symbol,
  coff_link_output_has_begun, coff_final_link_postscript
};

#define coff_small_close_and_cleanup \
  coff_close_and_cleanup
#define coff_small_bfd_free_cached_info \
  coff_bfd_free_cached_info
#define coff_small_get_section_contents \
  coff_get_section_contents
#define coff_small_get_section_contents_in_window \
  coff_get_section_contents_in_window

extern const bfd_target shlcoff_small_vec;

const bfd_target shcoff_small_vec =
{
  "coff-sh-small",              /* name */
  bfd_target_coff_flavour,
  BFD_ENDIAN_BIG,               /* data byte order is big */
  BFD_ENDIAN_BIG,               /* header byte order is big */

  (HAS_RELOC | EXEC_P |         /* object flags */
   HAS_LINENO | HAS_DEBUG |
   HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),

  (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
  '_',                          /* leading symbol underscore */
  '/',                          /* ar_pad_char */
  15,                           /* ar_max_namelen */
  bfd_getb64, bfd_getb_signed_64, bfd_putb64,
  bfd_getb32, bfd_getb_signed_32, bfd_putb32,
  bfd_getb16, bfd_getb_signed_16, bfd_putb16,   /* data */
  bfd_getb64, bfd_getb_signed_64, bfd_putb64,
  bfd_getb32, bfd_getb_signed_32, bfd_putb32,
  bfd_getb16, bfd_getb_signed_16, bfd_putb16,   /* hdrs */

  {_bfd_dummy_target, coff_small_object_p,      /* bfd_check_format */
   bfd_generic_archive_p, _bfd_dummy_target},
  {bfd_false, coff_mkobject, _bfd_generic_mkarchive,    /* bfd_set_format */
   bfd_false},
  {bfd_false, coff_write_object_contents,       /* bfd_write_contents */
   _bfd_write_archive_contents, bfd_false},

  BFD_JUMP_TABLE_GENERIC (coff_small),
  BFD_JUMP_TABLE_COPY (coff),
  BFD_JUMP_TABLE_CORE (_bfd_nocore),
  BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
  BFD_JUMP_TABLE_SYMBOLS (coff),
  BFD_JUMP_TABLE_RELOCS (coff),
  BFD_JUMP_TABLE_WRITE (coff),
  BFD_JUMP_TABLE_LINK (coff),
  BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),

  & shlcoff_small_vec,

  (PTR) &bfd_coff_small_swap_table
};

const bfd_target shlcoff_small_vec =
{
  "coff-shl-small",             /* name */
  bfd_target_coff_flavour,
  BFD_ENDIAN_LITTLE,            /* data byte order is little */
  BFD_ENDIAN_LITTLE,            /* header byte order is little endian too */

  (HAS_RELOC | EXEC_P |         /* object flags */
   HAS_LINENO | HAS_DEBUG |
   HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),

  (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
  '_',                          /* leading symbol underscore */
  '/',                          /* ar_pad_char */
  15,                           /* ar_max_namelen */
  bfd_getl64, bfd_getl_signed_64, bfd_putl64,
  bfd_getl32, bfd_getl_signed_32, bfd_putl32,
  bfd_getl16, bfd_getl_signed_16, bfd_putl16,   /* data */
  bfd_getl64, bfd_getl_signed_64, bfd_putl64,
  bfd_getl32, bfd_getl_signed_32, bfd_putl32,
  bfd_getl16, bfd_getl_signed_16, bfd_putl16,   /* hdrs */

  {_bfd_dummy_target, coff_small_object_p,      /* bfd_check_format */
   bfd_generic_archive_p, _bfd_dummy_target},
  {bfd_false, coff_mkobject, _bfd_generic_mkarchive,    /* bfd_set_format */
   bfd_false},
  {bfd_false, coff_write_object_contents,       /* bfd_write_contents */
   _bfd_write_archive_contents, bfd_false},

  BFD_JUMP_TABLE_GENERIC (coff_small),
  BFD_JUMP_TABLE_COPY (coff),
  BFD_JUMP_TABLE_CORE (_bfd_nocore),
  BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
  BFD_JUMP_TABLE_SYMBOLS (coff),
  BFD_JUMP_TABLE_RELOCS (coff),
  BFD_JUMP_TABLE_WRITE (coff),
  BFD_JUMP_TABLE_LINK (coff),
  BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),

  NULL,

  (PTR) &bfd_coff_small_swap_table
};
#endif