/* BFD back-end for Renesas Super-H COFF binaries.
   Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
   2003, 2004, 2005, 2007 Free Software Foundation, Inc.
   Contributed by Cygnus Support.
   Written by Steve Chamberlain, <sac@cygnus.com>.
   Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */
#include "libiberty.h"
#include "coff/internal.h"
#ifndef COFF_IMAGE_WITH_PE
static bfd_boolean sh_align_load_span
  PARAMS ((bfd *, asection *, bfd_byte *,
           bfd_boolean (*) (bfd *, asection *, PTR, bfd_byte *, bfd_vma),
           PTR, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *));

#define _bfd_sh_align_load_span sh_align_load_span
#endif
/* Internal functions.  */

static bfd_reloc_status_type sh_reloc
  PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
static long get_symbol_value
  PARAMS ((asymbol *));
static bfd_boolean sh_relax_section
  PARAMS ((bfd *, asection *, struct bfd_link_info *, bfd_boolean *));
static bfd_boolean sh_relax_delete_bytes
  PARAMS ((bfd *, asection *, bfd_vma, int));
#ifndef COFF_IMAGE_WITH_PE
static const struct sh_opcode *sh_insn_info
  PARAMS ((unsigned int));
static bfd_boolean sh_align_loads
  PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *,
           bfd_boolean *));
static bfd_boolean sh_swap_insns
  PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
static bfd_boolean sh_relocate_section
  PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
           struct internal_reloc *, struct internal_syment *, asection **));
static bfd_byte *sh_coff_get_relocated_section_contents
  PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
           bfd_byte *, bfd_boolean, asymbol **));
static reloc_howto_type * sh_coff_reloc_type_lookup
  PARAMS ((bfd *, bfd_reloc_code_real_type));
#ifdef COFF_WITH_PE
/* Can't build import tables with 2**4 alignment.  */
#define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	2
#else
/* Default section alignment to 2**4.  */
#define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	4
#endif
#ifdef COFF_IMAGE_WITH_PE
/* Align PE executables.  */
#define COFF_PAGE_SIZE 0x1000
#endif

/* Generate long file names.  */
#define COFF_LONG_FILENAMES
#ifdef COFF_WITH_PE
static bfd_boolean in_reloc_p PARAMS ((bfd *, reloc_howto_type *));

/* Return TRUE if this relocation should
   appear in the output .reloc section.  */

static bfd_boolean
in_reloc_p (abfd, howto)
     bfd * abfd ATTRIBUTE_UNUSED;
     reloc_howto_type * howto;
{
  return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
}
#endif
/* The supported relocations.  There are a lot of relocations defined
   in coff/internal.h which we do not expect to ever see.  */
static reloc_howto_type sh_coff_howtos[] =
{
  EMPTY_HOWTO (0),
  EMPTY_HOWTO (1),
#ifdef COFF_WITH_PE
  HOWTO (R_SH_IMM32CE,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_imm32ce",		/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
#else
  EMPTY_HOWTO (2),
#endif
  EMPTY_HOWTO (3),		/* R_SH_PCREL8 */
  EMPTY_HOWTO (4),		/* R_SH_PCREL16 */
  EMPTY_HOWTO (5),		/* R_SH_HIGH8 */
  EMPTY_HOWTO (6),		/* R_SH_IMM24 */
  EMPTY_HOWTO (7),		/* R_SH_LOW16 */
  EMPTY_HOWTO (8),
  EMPTY_HOWTO (9),		/* R_SH_PCDISP8BY4 */
132 HOWTO (R_SH_PCDISP8BY2
, /* type */
134 1, /* size (0 = byte, 1 = short, 2 = long) */
136 TRUE
, /* pc_relative */
138 complain_overflow_signed
, /* complain_on_overflow */
139 sh_reloc
, /* special_function */
140 "r_pcdisp8by2", /* name */
141 TRUE
, /* partial_inplace */
144 TRUE
), /* pcrel_offset */
146 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
148 HOWTO (R_SH_PCDISP
, /* type */
150 1, /* size (0 = byte, 1 = short, 2 = long) */
152 TRUE
, /* pc_relative */
154 complain_overflow_signed
, /* complain_on_overflow */
155 sh_reloc
, /* special_function */
156 "r_pcdisp12by2", /* name */
157 TRUE
, /* partial_inplace */
158 0xfff, /* src_mask */
159 0xfff, /* dst_mask */
160 TRUE
), /* pcrel_offset */
164 HOWTO (R_SH_IMM32
, /* type */
166 2, /* size (0 = byte, 1 = short, 2 = long) */
168 FALSE
, /* pc_relative */
170 complain_overflow_bitfield
, /* complain_on_overflow */
171 sh_reloc
, /* special_function */
172 "r_imm32", /* name */
173 TRUE
, /* partial_inplace */
174 0xffffffff, /* src_mask */
175 0xffffffff, /* dst_mask */
176 FALSE
), /* pcrel_offset */
180 HOWTO (R_SH_IMAGEBASE
, /* type */
182 2, /* size (0 = byte, 1 = short, 2 = long) */
184 FALSE
, /* pc_relative */
186 complain_overflow_bitfield
, /* complain_on_overflow */
187 sh_reloc
, /* special_function */
189 TRUE
, /* partial_inplace */
190 0xffffffff, /* src_mask */
191 0xffffffff, /* dst_mask */
192 FALSE
), /* pcrel_offset */
  EMPTY_HOWTO (16),		/* R_SH_IMM8 */
  EMPTY_HOWTO (17),		/* R_SH_IMM8BY2 */
  EMPTY_HOWTO (18),		/* R_SH_IMM8BY4 */
  EMPTY_HOWTO (19),		/* R_SH_IMM4 */
  EMPTY_HOWTO (20),		/* R_SH_IMM4BY2 */
  EMPTY_HOWTO (21),		/* R_SH_IMM4BY4 */
202 HOWTO (R_SH_PCRELIMM8BY2
, /* type */
204 1, /* size (0 = byte, 1 = short, 2 = long) */
206 TRUE
, /* pc_relative */
208 complain_overflow_unsigned
, /* complain_on_overflow */
209 sh_reloc
, /* special_function */
210 "r_pcrelimm8by2", /* name */
211 TRUE
, /* partial_inplace */
214 TRUE
), /* pcrel_offset */
216 HOWTO (R_SH_PCRELIMM8BY4
, /* type */
218 1, /* size (0 = byte, 1 = short, 2 = long) */
220 TRUE
, /* pc_relative */
222 complain_overflow_unsigned
, /* complain_on_overflow */
223 sh_reloc
, /* special_function */
224 "r_pcrelimm8by4", /* name */
225 TRUE
, /* partial_inplace */
228 TRUE
), /* pcrel_offset */
230 HOWTO (R_SH_IMM16
, /* type */
232 1, /* size (0 = byte, 1 = short, 2 = long) */
234 FALSE
, /* pc_relative */
236 complain_overflow_bitfield
, /* complain_on_overflow */
237 sh_reloc
, /* special_function */
238 "r_imm16", /* name */
239 TRUE
, /* partial_inplace */
240 0xffff, /* src_mask */
241 0xffff, /* dst_mask */
242 FALSE
), /* pcrel_offset */
244 HOWTO (R_SH_SWITCH16
, /* type */
246 1, /* size (0 = byte, 1 = short, 2 = long) */
248 FALSE
, /* pc_relative */
250 complain_overflow_bitfield
, /* complain_on_overflow */
251 sh_reloc
, /* special_function */
252 "r_switch16", /* name */
253 TRUE
, /* partial_inplace */
254 0xffff, /* src_mask */
255 0xffff, /* dst_mask */
256 FALSE
), /* pcrel_offset */
258 HOWTO (R_SH_SWITCH32
, /* type */
260 2, /* size (0 = byte, 1 = short, 2 = long) */
262 FALSE
, /* pc_relative */
264 complain_overflow_bitfield
, /* complain_on_overflow */
265 sh_reloc
, /* special_function */
266 "r_switch32", /* name */
267 TRUE
, /* partial_inplace */
268 0xffffffff, /* src_mask */
269 0xffffffff, /* dst_mask */
270 FALSE
), /* pcrel_offset */
272 HOWTO (R_SH_USES
, /* type */
274 1, /* size (0 = byte, 1 = short, 2 = long) */
276 FALSE
, /* pc_relative */
278 complain_overflow_bitfield
, /* complain_on_overflow */
279 sh_reloc
, /* special_function */
281 TRUE
, /* partial_inplace */
282 0xffff, /* src_mask */
283 0xffff, /* dst_mask */
284 FALSE
), /* pcrel_offset */
286 HOWTO (R_SH_COUNT
, /* type */
288 2, /* size (0 = byte, 1 = short, 2 = long) */
290 FALSE
, /* pc_relative */
292 complain_overflow_bitfield
, /* complain_on_overflow */
293 sh_reloc
, /* special_function */
294 "r_count", /* name */
295 TRUE
, /* partial_inplace */
296 0xffffffff, /* src_mask */
297 0xffffffff, /* dst_mask */
298 FALSE
), /* pcrel_offset */
300 HOWTO (R_SH_ALIGN
, /* type */
302 2, /* size (0 = byte, 1 = short, 2 = long) */
304 FALSE
, /* pc_relative */
306 complain_overflow_bitfield
, /* complain_on_overflow */
307 sh_reloc
, /* special_function */
308 "r_align", /* name */
309 TRUE
, /* partial_inplace */
310 0xffffffff, /* src_mask */
311 0xffffffff, /* dst_mask */
312 FALSE
), /* pcrel_offset */
314 HOWTO (R_SH_CODE
, /* type */
316 2, /* size (0 = byte, 1 = short, 2 = long) */
318 FALSE
, /* pc_relative */
320 complain_overflow_bitfield
, /* complain_on_overflow */
321 sh_reloc
, /* special_function */
323 TRUE
, /* partial_inplace */
324 0xffffffff, /* src_mask */
325 0xffffffff, /* dst_mask */
326 FALSE
), /* pcrel_offset */
328 HOWTO (R_SH_DATA
, /* type */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
332 FALSE
, /* pc_relative */
334 complain_overflow_bitfield
, /* complain_on_overflow */
335 sh_reloc
, /* special_function */
337 TRUE
, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE
), /* pcrel_offset */
342 HOWTO (R_SH_LABEL
, /* type */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
346 FALSE
, /* pc_relative */
348 complain_overflow_bitfield
, /* complain_on_overflow */
349 sh_reloc
, /* special_function */
350 "r_label", /* name */
351 TRUE
, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE
), /* pcrel_offset */
356 HOWTO (R_SH_SWITCH8
, /* type */
358 0, /* size (0 = byte, 1 = short, 2 = long) */
360 FALSE
, /* pc_relative */
362 complain_overflow_bitfield
, /* complain_on_overflow */
363 sh_reloc
, /* special_function */
364 "r_switch8", /* name */
365 TRUE
, /* partial_inplace */
368 FALSE
) /* pcrel_offset */
#define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])

/* Check for a bad magic number.  */
#define BADMAG(x) SHBADMAG(x)

/* Customize coffcode.h (this is not currently used).  */

/* FIXME: This should not be set here.  */
#define __A_MAGIC_SET__

/* Swap the r_offset field in and out.  */
#define SWAP_IN_RELOC_OFFSET  H_GET_32
#define SWAP_OUT_RELOC_OFFSET H_PUT_32

/* Swap out extra information in the reloc structure.  */
#define SWAP_OUT_RELOC_EXTRA(abfd, src, dst)	\
  do						\
    {						\
      dst->r_stuff[0] = 'S';			\
      dst->r_stuff[1] = 'C';			\
    }						\
  while (0)
/* Get the value of a symbol, when performing a relocation.  */

static long
get_symbol_value (symbol)
     asymbol *symbol;
{
  bfd_vma relocation;

  if (bfd_is_com_section (symbol->section))
    relocation = 0;
  else
    relocation = (symbol->value +
		  symbol->section->output_section->vma +
		  symbol->section->output_offset);

  return relocation;
}
#ifdef COFF_WITH_PE
/* Convert an rtype to howto for the COFF backend linker.
   Copied from coff-i386.  */

#define coff_rtype_to_howto coff_sh_rtype_to_howto
static reloc_howto_type * coff_sh_rtype_to_howto
  PARAMS ((bfd *, asection *, struct internal_reloc *,
	   struct coff_link_hash_entry *, struct internal_syment *,
	   bfd_vma *));

static reloc_howto_type *
coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
     bfd * abfd ATTRIBUTE_UNUSED;
     asection * sec;
     struct internal_reloc * rel;
     struct coff_link_hash_entry * h;
     struct internal_syment * sym;
     bfd_vma * addendp;
{
  reloc_howto_type * howto;

  howto = sh_coff_howtos + rel->r_type;

  *addendp = 0;

  if (howto->pc_relative)
    *addendp += sec->vma;

  if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
    {
      /* This is a common symbol.  The section contents include the
	 size (sym->n_value) as an addend.  The relocate_section
	 function will be adding in the final value of the symbol.  We
	 need to subtract out the current size in order to get the
	 correct result.  */
      BFD_ASSERT (h != NULL);
    }

  if (howto->pc_relative)
    {
      *addendp -= 4;

      /* If the symbol is defined, then the generic code is going to
	 add back the symbol value in order to cancel out an
	 adjustment it made to the addend.  However, we set the addend
	 to 0 at the start of this function.  We need to adjust here,
	 to avoid the adjustment the generic code will make.  FIXME:
	 This is getting a bit hackish.  */
      if (sym != NULL && sym->n_scnum != 0)
	*addendp -= sym->n_value;
    }

  if (rel->r_type == R_SH_IMAGEBASE)
    *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;

  return howto;
}

#endif /* COFF_WITH_PE */
/* This structure is used to map BFD reloc codes to SH PE relocs.  */
struct shcoff_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;
  unsigned char shcoff_reloc_val;
};

#ifdef COFF_WITH_PE
/* An array mapping BFD reloc codes to SH PE relocs.  */
static const struct shcoff_reloc_map sh_reloc_map[] =
{
  { BFD_RELOC_32, R_SH_IMM32CE },
  { BFD_RELOC_RVA, R_SH_IMAGEBASE },
  { BFD_RELOC_CTOR, R_SH_IMM32CE },
};
#else
/* An array mapping BFD reloc codes to SH PE relocs.  */
static const struct shcoff_reloc_map sh_reloc_map[] =
{
  { BFD_RELOC_32, R_SH_IMM32 },
  { BFD_RELOC_CTOR, R_SH_IMM32 },
};
#endif

/* Given a BFD reloc code, return the howto structure for the
   corresponding SH PE reloc.  */
#define coff_bfd_reloc_type_lookup	sh_coff_reloc_type_lookup
#define coff_bfd_reloc_name_lookup	sh_coff_reloc_name_lookup

static reloc_howto_type *
sh_coff_reloc_type_lookup (abfd, code)
     bfd * abfd ATTRIBUTE_UNUSED;
     bfd_reloc_code_real_type code;
{
  unsigned int i;

  for (i = ARRAY_SIZE (sh_reloc_map); i--;)
    if (sh_reloc_map[i].bfd_reloc_val == code)
      return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];

  fprintf (stderr, "SH Error: unknown reloc type %d\n", code);
  return NULL;
}
static reloc_howto_type *
sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
    if (sh_coff_howtos[i].name != NULL
	&& strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
      return &sh_coff_howtos[i];

  return NULL;
}
/* This macro is used in coffcode.h to get the howto corresponding to
   an internal reloc.  */

#define RTYPE2HOWTO(relent, internal)		\
  ((relent)->howto =				\
   ((internal)->r_type < SH_COFF_HOWTO_COUNT	\
    ? &sh_coff_howtos[(internal)->r_type]	\
    : (reloc_howto_type *) NULL))

/* This is the same as the macro in coffcode.h, except that it copies
   r_offset into reloc_entry->addend for some relocs.  */
#define CALC_ADDEND(abfd, ptr, reloc, cache_ptr)		\
  {								\
    coff_symbol_type *coffsym = (coff_symbol_type *) NULL;	\
    if (ptr && bfd_asymbol_bfd (ptr) != abfd)			\
      coffsym = (obj_symbols (abfd)				\
		 + (cache_ptr->sym_ptr_ptr - symbols));		\
    else if (ptr)						\
      coffsym = coff_symbol_from (abfd, ptr);			\
    if (coffsym != (coff_symbol_type *) NULL			\
	&& coffsym->native->u.syment.n_scnum == 0)		\
      cache_ptr->addend = 0;					\
    else if (ptr && bfd_asymbol_bfd (ptr) == abfd		\
	     && ptr->section != (asection *) NULL)		\
      cache_ptr->addend = - (ptr->section->vma + ptr->value);	\
    else							\
      cache_ptr->addend = 0;					\
    if ((reloc).r_type == R_SH_SWITCH8				\
	|| (reloc).r_type == R_SH_SWITCH16			\
	|| (reloc).r_type == R_SH_SWITCH32			\
	|| (reloc).r_type == R_SH_USES				\
	|| (reloc).r_type == R_SH_COUNT				\
	|| (reloc).r_type == R_SH_ALIGN)			\
      cache_ptr->addend = (reloc).r_offset;			\
  }
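/* Note on the relocs listed in the final test above: for them,
   r_offset is bookkeeping rather than an ordinary addend.  For an
   R_SH_USES reloc it is the branch-style offset (measured from four
   bytes past the call instruction) to the mov.l that loads the call
   target, and for an R_SH_COUNT reloc it is the number of R_SH_USES
   references to that address; see the longer discussion before
   sh_relax_section below.  Copying the value into the addend merely
   preserves it when the relocs are canonicalized into generic BFD
   arelents (e.g. for objdump -r).  */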
/* This is the howto function for the SH relocations.  */

static bfd_reloc_status_type
sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
	  error_message)
     bfd *abfd;
     arelent *reloc_entry;
     asymbol *symbol_in;
     PTR data;
     asection *input_section;
     bfd *output_bfd;
     char **error_message ATTRIBUTE_UNUSED;
{
  unsigned long insn;
  bfd_vma sym_value;
  unsigned short r_type;
  bfd_vma addr = reloc_entry->address;
  bfd_byte *hit_data = addr + (bfd_byte *) data;

  r_type = reloc_entry->howto->type;

  if (output_bfd != NULL)
    {
      /* Partial linking--do nothing.  */
      reloc_entry->address += input_section->output_offset;
      return bfd_reloc_ok;
    }

  /* Almost all relocs have to do with relaxing.  If any work must be
     done for them, it has been done in sh_relax_section.  */
  if (r_type != R_SH_IMM32
#ifdef COFF_WITH_PE
      && r_type != R_SH_IMM32CE
      && r_type != R_SH_IMAGEBASE
#endif
      && (r_type != R_SH_PCDISP
	  || (symbol_in->flags & BSF_LOCAL) != 0))
    return bfd_reloc_ok;

  if (symbol_in != NULL
      && bfd_is_und_section (symbol_in->section))
    return bfd_reloc_undefined;

  sym_value = get_symbol_value (symbol_in);

  switch (r_type)
    {
    case R_SH_IMM32:
#ifdef COFF_WITH_PE
    case R_SH_IMM32CE:
#endif
      insn = bfd_get_32 (abfd, hit_data);
      insn += sym_value + reloc_entry->addend;
      bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
      break;
#ifdef COFF_WITH_PE
    case R_SH_IMAGEBASE:
      insn = bfd_get_32 (abfd, hit_data);
      insn += sym_value + reloc_entry->addend;
      insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
      bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
      break;
#endif
    case R_SH_PCDISP:
      insn = bfd_get_16 (abfd, hit_data);
      sym_value += reloc_entry->addend;
      sym_value -= (input_section->output_section->vma
		    + input_section->output_offset
		    + addr
		    + 4);
      sym_value += (insn & 0xfff) << 1;
      insn = (insn & 0xf000) | (sym_value & 0xfff);
      bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
      if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
	return bfd_reloc_overflow;
      break;
    default:
      abort ();
      break;
    }

  return bfd_reloc_ok;
}
#define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match

/* We can do relaxing.  */
#define coff_bfd_relax_section sh_relax_section

/* We use the special COFF backend linker.  */
#define coff_relocate_section sh_relocate_section

/* When relaxing, we need to use special code to get the relocated
   section contents.  */
#define coff_bfd_get_relocated_section_contents \
  sh_coff_get_relocated_section_contents

#include "coffcode.h"
/* This function handles relaxing on the SH.

   Function calls on the SH look like this:

       mov.l  L1,r0
       ...
       jsr    @r0
       ...
     L1:
       .long  function

   The compiler and assembler will cooperate to create R_SH_USES
   relocs on the jsr instructions.  The r_offset field of the
   R_SH_USES reloc is the PC relative offset to the instruction which
   loads the register (the r_offset field is computed as though it
   were a jump instruction, so the offset value is actually from four
   bytes past the instruction).  The linker can use this reloc to
   determine just which function is being called, and thus decide
   whether it is possible to replace the jsr with a bsr.

   If multiple function calls are all based on a single register load
   (i.e., the same function is called multiple times), the compiler
   guarantees that each function call will have an R_SH_USES reloc.
   Therefore, if the linker is able to convert each R_SH_USES reloc
   which refers to that address, it can safely eliminate the register
   load.

   When the assembler creates an R_SH_USES reloc, it examines it to
   determine which address is being loaded (L1 in the above example).
   It then counts the number of references to that address, and
   creates an R_SH_COUNT reloc at that address.  The r_offset field of
   the R_SH_COUNT reloc will be the number of references.  If the
   linker is able to eliminate a register load, it can use the
   R_SH_COUNT reloc to see whether it can also eliminate the function
   address.

   SH relaxing also handles another, unrelated, matter.  On the SH, if
   a load or store instruction is not aligned on a four byte boundary,
   the memory cycle interferes with the 32 bit instruction fetch,
   causing a one cycle bubble in the pipeline.  Therefore, we try to
   align load and store instructions on four byte boundaries if we
   can, by swapping them with one of the adjacent instructions.  */
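/* For illustration: when the call target is within range of a 12-bit
   pc-relative displacement, sh_relax_section below converts the
   R_SH_USES reloc on the call site into an R_SH_PCDISP reloc and
   rewrites the sequence above from

       mov.l  L1,r0
       ...
       jsr    @r0

   into a direct

       bsr    function

   It then deletes the two-byte mov.l, and, once the matching
   R_SH_COUNT at L1 drops to zero remaining uses, the four-byte
   constant at L1 as well, shrinking the section by up to six bytes
   per converted call.  */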
static bfd_boolean
sh_relax_section (abfd, sec, link_info, again)
     bfd *abfd;
     asection *sec;
     struct bfd_link_info *link_info;
     bfd_boolean *again;
{
  struct internal_reloc *internal_relocs;
  bfd_boolean have_code;
  struct internal_reloc *irel, *irelend;
  bfd_byte *contents = NULL;

  *again = FALSE;
723 if (link_info
->relocatable
724 || (sec
->flags
& SEC_RELOC
) == 0
725 || sec
->reloc_count
== 0)
728 if (coff_section_data (abfd
, sec
) == NULL
)
730 bfd_size_type amt
= sizeof (struct coff_section_tdata
);
731 sec
->used_by_bfd
= (PTR
) bfd_zalloc (abfd
, amt
);
732 if (sec
->used_by_bfd
== NULL
)
736 internal_relocs
= (_bfd_coff_read_internal_relocs
737 (abfd
, sec
, link_info
->keep_memory
,
738 (bfd_byte
*) NULL
, FALSE
,
739 (struct internal_reloc
*) NULL
));
740 if (internal_relocs
== NULL
)
745 irelend
= internal_relocs
+ sec
->reloc_count
;
746 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
748 bfd_vma laddr
, paddr
, symval
;
750 struct internal_reloc
*irelfn
, *irelscan
, *irelcount
;
751 struct internal_syment sym
;
754 if (irel
->r_type
== R_SH_CODE
)
757 if (irel
->r_type
!= R_SH_USES
)
760 /* Get the section contents. */
761 if (contents
== NULL
)
763 if (coff_section_data (abfd
, sec
)->contents
!= NULL
)
764 contents
= coff_section_data (abfd
, sec
)->contents
;
767 if (!bfd_malloc_and_get_section (abfd
, sec
, &contents
))
772 /* The r_offset field of the R_SH_USES reloc will point us to
773 the register load. The 4 is because the r_offset field is
774 computed as though it were a jump offset, which are based
775 from 4 bytes after the jump instruction. */
776 laddr
= irel
->r_vaddr
- sec
->vma
+ 4;
777 /* Careful to sign extend the 32-bit offset. */
778 laddr
+= ((irel
->r_offset
& 0xffffffff) ^ 0x80000000) - 0x80000000;
779 if (laddr
>= sec
->size
)
781 (*_bfd_error_handler
) ("%B: 0x%lx: warning: bad R_SH_USES offset",
782 abfd
, (unsigned long) irel
->r_vaddr
);
785 insn
= bfd_get_16 (abfd
, contents
+ laddr
);
787 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
788 if ((insn
& 0xf000) != 0xd000)
790 ((*_bfd_error_handler
)
791 ("%B: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
792 abfd
, (unsigned long) irel
->r_vaddr
, insn
));
796 /* Get the address from which the register is being loaded. The
797 displacement in the mov.l instruction is quadrupled. It is a
798 displacement from four bytes after the movl instruction, but,
799 before adding in the PC address, two least significant bits
800 of the PC are cleared. We assume that the section is aligned
801 on a four byte boundary. */
804 paddr
+= (laddr
+ 4) &~ (bfd_vma
) 3;
805 if (paddr
>= sec
->size
)
807 ((*_bfd_error_handler
)
808 ("%B: 0x%lx: warning: bad R_SH_USES load offset",
809 abfd
, (unsigned long) irel
->r_vaddr
));
813 /* Get the reloc for the address from which the register is
814 being loaded. This reloc will tell us which function is
815 actually being called. */
817 for (irelfn
= internal_relocs
; irelfn
< irelend
; irelfn
++)
818 if (irelfn
->r_vaddr
== paddr
820 && (irelfn
->r_type
== R_SH_IMM32
821 || irelfn
->r_type
== R_SH_IMM32CE
822 || irelfn
->r_type
== R_SH_IMAGEBASE
)
825 && irelfn
->r_type
== R_SH_IMM32
829 if (irelfn
>= irelend
)
831 ((*_bfd_error_handler
)
832 ("%B: 0x%lx: warning: could not find expected reloc",
833 abfd
, (unsigned long) paddr
));
837 /* Get the value of the symbol referred to by the reloc. */
838 if (! _bfd_coff_get_external_symbols (abfd
))
840 bfd_coff_swap_sym_in (abfd
,
841 ((bfd_byte
*) obj_coff_external_syms (abfd
)
843 * bfd_coff_symesz (abfd
))),
845 if (sym
.n_scnum
!= 0 && sym
.n_scnum
!= sec
->target_index
)
847 ((*_bfd_error_handler
)
848 ("%B: 0x%lx: warning: symbol in unexpected section",
849 abfd
, (unsigned long) paddr
));
853 if (sym
.n_sclass
!= C_EXT
)
855 symval
= (sym
.n_value
857 + sec
->output_section
->vma
858 + sec
->output_offset
);
862 struct coff_link_hash_entry
*h
;
864 h
= obj_coff_sym_hashes (abfd
)[irelfn
->r_symndx
];
865 BFD_ASSERT (h
!= NULL
);
866 if (h
->root
.type
!= bfd_link_hash_defined
867 && h
->root
.type
!= bfd_link_hash_defweak
)
869 /* This appears to be a reference to an undefined
870 symbol. Just ignore it--it will be caught by the
871 regular reloc processing. */
875 symval
= (h
->root
.u
.def
.value
876 + h
->root
.u
.def
.section
->output_section
->vma
877 + h
->root
.u
.def
.section
->output_offset
);
880 symval
+= bfd_get_32 (abfd
, contents
+ paddr
- sec
->vma
);
882 /* See if this function call can be shortened. */
886 + sec
->output_section
->vma
889 if (foff
< -0x1000 || foff
>= 0x1000)
891 /* After all that work, we can't shorten this function call. */
895 /* Shorten the function call. */
897 /* For simplicity of coding, we are going to modify the section
898 contents, the section relocs, and the BFD symbol table. We
899 must tell the rest of the code not to free up this
900 information. It would be possible to instead create a table
901 of changes which have to be made, as is done in coff-mips.c;
902 that would be more work, but would require less memory when
903 the linker is run. */
905 coff_section_data (abfd
, sec
)->relocs
= internal_relocs
;
906 coff_section_data (abfd
, sec
)->keep_relocs
= TRUE
;
908 coff_section_data (abfd
, sec
)->contents
= contents
;
909 coff_section_data (abfd
, sec
)->keep_contents
= TRUE
;
911 obj_coff_keep_syms (abfd
) = TRUE
;
913 /* Replace the jsr with a bsr. */
915 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
916 replace the jsr with a bsr. */
917 irel
->r_type
= R_SH_PCDISP
;
918 irel
->r_symndx
= irelfn
->r_symndx
;
919 if (sym
.n_sclass
!= C_EXT
)
921 /* If this needs to be changed because of future relaxing,
922 it will be handled here like other internal PCDISP
925 (bfd_vma
) 0xb000 | ((foff
>> 1) & 0xfff),
926 contents
+ irel
->r_vaddr
- sec
->vma
);
930 /* We can't fully resolve this yet, because the external
931 symbol value may be changed by future relaxing. We let
932 the final link phase handle it. */
933 bfd_put_16 (abfd
, (bfd_vma
) 0xb000,
934 contents
+ irel
->r_vaddr
- sec
->vma
);
937 /* See if there is another R_SH_USES reloc referring to the same
939 for (irelscan
= internal_relocs
; irelscan
< irelend
; irelscan
++)
940 if (irelscan
->r_type
== R_SH_USES
941 && laddr
== irelscan
->r_vaddr
- sec
->vma
+ 4 + irelscan
->r_offset
)
943 if (irelscan
< irelend
)
945 /* Some other function call depends upon this register load,
946 and we have not yet converted that function call.
947 Indeed, we may never be able to convert it. There is
948 nothing else we can do at this point. */
952 /* Look for a R_SH_COUNT reloc on the location where the
953 function address is stored. Do this before deleting any
954 bytes, to avoid confusion about the address. */
955 for (irelcount
= internal_relocs
; irelcount
< irelend
; irelcount
++)
956 if (irelcount
->r_vaddr
== paddr
957 && irelcount
->r_type
== R_SH_COUNT
)
960 /* Delete the register load. */
961 if (! sh_relax_delete_bytes (abfd
, sec
, laddr
, 2))
964 /* That will change things, so, just in case it permits some
965 other function call to come within range, we should relax
966 again. Note that this is not required, and it may be slow. */
969 /* Now check whether we got a COUNT reloc. */
970 if (irelcount
>= irelend
)
972 ((*_bfd_error_handler
)
973 ("%B: 0x%lx: warning: could not find expected COUNT reloc",
974 abfd
, (unsigned long) paddr
));
978 /* The number of uses is stored in the r_offset field. We've
980 if (irelcount
->r_offset
== 0)
982 ((*_bfd_error_handler
) ("%B: 0x%lx: warning: bad count",
983 abfd
, (unsigned long) paddr
));
987 --irelcount
->r_offset
;
989 /* If there are no more uses, we can delete the address. Reload
990 the address from irelfn, in case it was changed by the
991 previous call to sh_relax_delete_bytes. */
992 if (irelcount
->r_offset
== 0)
994 if (! sh_relax_delete_bytes (abfd
, sec
,
995 irelfn
->r_vaddr
- sec
->vma
, 4))
999 /* We've done all we can with that function call. */
1002 /* Look for load and store instructions that we can align on four
1006 bfd_boolean swapped
;
1008 /* Get the section contents. */
1009 if (contents
== NULL
)
1011 if (coff_section_data (abfd
, sec
)->contents
!= NULL
)
1012 contents
= coff_section_data (abfd
, sec
)->contents
;
1015 if (!bfd_malloc_and_get_section (abfd
, sec
, &contents
))
1020 if (! sh_align_loads (abfd
, sec
, internal_relocs
, contents
, &swapped
))
1025 coff_section_data (abfd
, sec
)->relocs
= internal_relocs
;
1026 coff_section_data (abfd
, sec
)->keep_relocs
= TRUE
;
1028 coff_section_data (abfd
, sec
)->contents
= contents
;
1029 coff_section_data (abfd
, sec
)->keep_contents
= TRUE
;
1031 obj_coff_keep_syms (abfd
) = TRUE
;
1035 if (internal_relocs
!= NULL
1036 && internal_relocs
!= coff_section_data (abfd
, sec
)->relocs
)
1038 if (! link_info
->keep_memory
)
1039 free (internal_relocs
);
1041 coff_section_data (abfd
, sec
)->relocs
= internal_relocs
;
1044 if (contents
!= NULL
&& contents
!= coff_section_data (abfd
, sec
)->contents
)
1046 if (! link_info
->keep_memory
)
1049 /* Cache the section contents for coff_link_input_bfd. */
1050 coff_section_data (abfd
, sec
)->contents
= contents
;
1056 if (internal_relocs
!= NULL
1057 && internal_relocs
!= coff_section_data (abfd
, sec
)->relocs
)
1058 free (internal_relocs
);
1059 if (contents
!= NULL
&& contents
!= coff_section_data (abfd
, sec
)->contents
)
1064 /* Delete some bytes from a section while relaxing. */
1067 sh_relax_delete_bytes (abfd
, sec
, addr
, count
)
1074 struct internal_reloc
*irel
, *irelend
;
1075 struct internal_reloc
*irelalign
;
1077 bfd_byte
*esym
, *esymend
;
1078 bfd_size_type symesz
;
1079 struct coff_link_hash_entry
**sym_hash
;
1082 contents
= coff_section_data (abfd
, sec
)->contents
;
  /* The deletion must stop at the next ALIGN reloc for an alignment
     power larger than the number of bytes we are deleting.  */
1090 irel
= coff_section_data (abfd
, sec
)->relocs
;
1091 irelend
= irel
+ sec
->reloc_count
;
1092 for (; irel
< irelend
; irel
++)
1094 if (irel
->r_type
== R_SH_ALIGN
1095 && irel
->r_vaddr
- sec
->vma
> addr
1096 && count
< (1 << irel
->r_offset
))
1099 toaddr
= irel
->r_vaddr
- sec
->vma
;
1104 /* Actually delete the bytes. */
1105 memmove (contents
+ addr
, contents
+ addr
+ count
,
1106 (size_t) (toaddr
- addr
- count
));
1107 if (irelalign
== NULL
)
1113 #define NOP_OPCODE (0x0009)
1115 BFD_ASSERT ((count
& 1) == 0);
1116 for (i
= 0; i
< count
; i
+= 2)
1117 bfd_put_16 (abfd
, (bfd_vma
) NOP_OPCODE
, contents
+ toaddr
- count
+ i
);
1120 /* Adjust all the relocs. */
1121 for (irel
= coff_section_data (abfd
, sec
)->relocs
; irel
< irelend
; irel
++)
1123 bfd_vma nraddr
, stop
;
1126 struct internal_syment sym
;
1127 int off
, adjust
, oinsn
;
1128 bfd_signed_vma voff
= 0;
1129 bfd_boolean overflow
;
1131 /* Get the new reloc address. */
1132 nraddr
= irel
->r_vaddr
- sec
->vma
;
1133 if ((irel
->r_vaddr
- sec
->vma
> addr
1134 && irel
->r_vaddr
- sec
->vma
< toaddr
)
1135 || (irel
->r_type
== R_SH_ALIGN
1136 && irel
->r_vaddr
- sec
->vma
== toaddr
))
1139 /* See if this reloc was for the bytes we have deleted, in which
1140 case we no longer care about it. Don't delete relocs which
1141 represent addresses, though. */
1142 if (irel
->r_vaddr
- sec
->vma
>= addr
1143 && irel
->r_vaddr
- sec
->vma
< addr
+ count
1144 && irel
->r_type
!= R_SH_ALIGN
1145 && irel
->r_type
!= R_SH_CODE
1146 && irel
->r_type
!= R_SH_DATA
1147 && irel
->r_type
!= R_SH_LABEL
)
1148 irel
->r_type
= R_SH_UNUSED
;
1150 /* If this is a PC relative reloc, see if the range it covers
1151 includes the bytes we have deleted. */
1152 switch (irel
->r_type
)
1157 case R_SH_PCDISP8BY2
:
1159 case R_SH_PCRELIMM8BY2
:
1160 case R_SH_PCRELIMM8BY4
:
1161 start
= irel
->r_vaddr
- sec
->vma
;
1162 insn
= bfd_get_16 (abfd
, contents
+ nraddr
);
1166 switch (irel
->r_type
)
1169 start
= stop
= addr
;
1175 case R_SH_IMAGEBASE
:
	  /* If this reloc is against a symbol defined in this
	     section, and the symbol will not be adjusted below, we
	     must check the addend to see whether it will put the value
	     in range to be adjusted, and hence must be changed.  */
1181 bfd_coff_swap_sym_in (abfd
,
1182 ((bfd_byte
*) obj_coff_external_syms (abfd
)
1184 * bfd_coff_symesz (abfd
))),
1186 if (sym
.n_sclass
!= C_EXT
1187 && sym
.n_scnum
== sec
->target_index
1188 && ((bfd_vma
) sym
.n_value
<= addr
1189 || (bfd_vma
) sym
.n_value
>= toaddr
))
1193 val
= bfd_get_32 (abfd
, contents
+ nraddr
);
1195 if (val
> addr
&& val
< toaddr
)
1196 bfd_put_32 (abfd
, val
- count
, contents
+ nraddr
);
1198 start
= stop
= addr
;
1201 case R_SH_PCDISP8BY2
:
1205 stop
= (bfd_vma
) ((bfd_signed_vma
) start
+ 4 + off
* 2);
1209 bfd_coff_swap_sym_in (abfd
,
1210 ((bfd_byte
*) obj_coff_external_syms (abfd
)
1212 * bfd_coff_symesz (abfd
))),
1214 if (sym
.n_sclass
== C_EXT
)
1215 start
= stop
= addr
;
1221 stop
= (bfd_vma
) ((bfd_signed_vma
) start
+ 4 + off
* 2);
1225 case R_SH_PCRELIMM8BY2
:
1227 stop
= start
+ 4 + off
* 2;
1230 case R_SH_PCRELIMM8BY4
:
1232 stop
= (start
&~ (bfd_vma
) 3) + 4 + off
* 4;
	  /* These reloc types represent
	       .word L2-L1
	     The r_offset field holds the difference between the reloc
	     address and L1.  That is the start of the reloc, and
	     adding in the contents gives us the top.  We must adjust
	     both the r_offset field and the section contents.  */
1245 start
= irel
->r_vaddr
- sec
->vma
;
1246 stop
= (bfd_vma
) ((bfd_signed_vma
) start
- (long) irel
->r_offset
);
1250 && (stop
<= addr
|| stop
>= toaddr
))
1251 irel
->r_offset
+= count
;
1252 else if (stop
> addr
1254 && (start
<= addr
|| start
>= toaddr
))
1255 irel
->r_offset
-= count
;
1259 if (irel
->r_type
== R_SH_SWITCH16
)
1260 voff
= bfd_get_signed_16 (abfd
, contents
+ nraddr
);
1261 else if (irel
->r_type
== R_SH_SWITCH8
)
1262 voff
= bfd_get_8 (abfd
, contents
+ nraddr
);
1264 voff
= bfd_get_signed_32 (abfd
, contents
+ nraddr
);
1265 stop
= (bfd_vma
) ((bfd_signed_vma
) start
+ voff
);
1270 start
= irel
->r_vaddr
- sec
->vma
;
1271 stop
= (bfd_vma
) ((bfd_signed_vma
) start
1272 + (long) irel
->r_offset
1279 && (stop
<= addr
|| stop
>= toaddr
))
1281 else if (stop
> addr
1283 && (start
<= addr
|| start
>= toaddr
))
1292 switch (irel
->r_type
)
1298 case R_SH_PCDISP8BY2
:
1299 case R_SH_PCRELIMM8BY2
:
1301 if ((oinsn
& 0xff00) != (insn
& 0xff00))
1303 bfd_put_16 (abfd
, (bfd_vma
) insn
, contents
+ nraddr
);
1308 if ((oinsn
& 0xf000) != (insn
& 0xf000))
1310 bfd_put_16 (abfd
, (bfd_vma
) insn
, contents
+ nraddr
);
1313 case R_SH_PCRELIMM8BY4
:
1314 BFD_ASSERT (adjust
== count
|| count
>= 4);
1319 if ((irel
->r_vaddr
& 3) == 0)
1322 if ((oinsn
& 0xff00) != (insn
& 0xff00))
1324 bfd_put_16 (abfd
, (bfd_vma
) insn
, contents
+ nraddr
);
1329 if (voff
< 0 || voff
>= 0xff)
1331 bfd_put_8 (abfd
, (bfd_vma
) voff
, contents
+ nraddr
);
1336 if (voff
< - 0x8000 || voff
>= 0x8000)
1338 bfd_put_signed_16 (abfd
, (bfd_vma
) voff
, contents
+ nraddr
);
1343 bfd_put_signed_32 (abfd
, (bfd_vma
) voff
, contents
+ nraddr
);
1347 irel
->r_offset
+= adjust
;
1353 ((*_bfd_error_handler
)
1354 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
1355 abfd
, (unsigned long) irel
->r_vaddr
));
1356 bfd_set_error (bfd_error_bad_value
);
1361 irel
->r_vaddr
= nraddr
+ sec
->vma
;
  /* Look through all the other sections.  If they contain any IMM32
     relocs against internal symbols which we are not going to adjust
     below, we may need to adjust the addends.  */
1367 for (o
= abfd
->sections
; o
!= NULL
; o
= o
->next
)
1369 struct internal_reloc
*internal_relocs
;
1370 struct internal_reloc
*irelscan
, *irelscanend
;
1371 bfd_byte
*ocontents
;
1374 || (o
->flags
& SEC_RELOC
) == 0
1375 || o
->reloc_count
== 0)
1378 /* We always cache the relocs. Perhaps, if info->keep_memory is
1379 FALSE, we should free them, if we are permitted to, when we
1380 leave sh_coff_relax_section. */
1381 internal_relocs
= (_bfd_coff_read_internal_relocs
1382 (abfd
, o
, TRUE
, (bfd_byte
*) NULL
, FALSE
,
1383 (struct internal_reloc
*) NULL
));
1384 if (internal_relocs
== NULL
)
1388 irelscanend
= internal_relocs
+ o
->reloc_count
;
1389 for (irelscan
= internal_relocs
; irelscan
< irelscanend
; irelscan
++)
1391 struct internal_syment sym
;
1394 if (irelscan
->r_type
!= R_SH_IMM32
1395 && irelscan
->r_type
!= R_SH_IMAGEBASE
1396 && irelscan
->r_type
!= R_SH_IMM32CE
)
1398 if (irelscan
->r_type
!= R_SH_IMM32
)
1402 bfd_coff_swap_sym_in (abfd
,
1403 ((bfd_byte
*) obj_coff_external_syms (abfd
)
1404 + (irelscan
->r_symndx
1405 * bfd_coff_symesz (abfd
))),
1407 if (sym
.n_sclass
!= C_EXT
1408 && sym
.n_scnum
== sec
->target_index
1409 && ((bfd_vma
) sym
.n_value
<= addr
1410 || (bfd_vma
) sym
.n_value
>= toaddr
))
1414 if (ocontents
== NULL
)
1416 if (coff_section_data (abfd
, o
)->contents
!= NULL
)
1417 ocontents
= coff_section_data (abfd
, o
)->contents
;
1420 if (!bfd_malloc_and_get_section (abfd
, o
, &ocontents
))
1422 /* We always cache the section contents.
1423 Perhaps, if info->keep_memory is FALSE, we
1424 should free them, if we are permitted to,
1425 when we leave sh_coff_relax_section. */
1426 coff_section_data (abfd
, o
)->contents
= ocontents
;
1430 val
= bfd_get_32 (abfd
, ocontents
+ irelscan
->r_vaddr
- o
->vma
);
1432 if (val
> addr
&& val
< toaddr
)
1433 bfd_put_32 (abfd
, val
- count
,
1434 ocontents
+ irelscan
->r_vaddr
- o
->vma
);
1436 coff_section_data (abfd
, o
)->keep_contents
= TRUE
;
1441 /* Adjusting the internal symbols will not work if something has
1442 already retrieved the generic symbols. It would be possible to
1443 make this work by adjusting the generic symbols at the same time.
1444 However, this case should not arise in normal usage. */
1445 if (obj_symbols (abfd
) != NULL
1446 || obj_raw_syments (abfd
) != NULL
)
1448 ((*_bfd_error_handler
)
1449 ("%B: fatal: generic symbols retrieved before relaxing", abfd
));
1450 bfd_set_error (bfd_error_invalid_operation
);
1454 /* Adjust all the symbols. */
1455 sym_hash
= obj_coff_sym_hashes (abfd
);
1456 symesz
= bfd_coff_symesz (abfd
);
1457 esym
= (bfd_byte
*) obj_coff_external_syms (abfd
);
1458 esymend
= esym
+ obj_raw_syment_count (abfd
) * symesz
;
1459 while (esym
< esymend
)
1461 struct internal_syment isym
;
1463 bfd_coff_swap_sym_in (abfd
, (PTR
) esym
, (PTR
) &isym
);
1465 if (isym
.n_scnum
== sec
->target_index
1466 && (bfd_vma
) isym
.n_value
> addr
1467 && (bfd_vma
) isym
.n_value
< toaddr
)
1469 isym
.n_value
-= count
;
1471 bfd_coff_swap_sym_out (abfd
, (PTR
) &isym
, (PTR
) esym
);
1473 if (*sym_hash
!= NULL
)
1475 BFD_ASSERT ((*sym_hash
)->root
.type
== bfd_link_hash_defined
1476 || (*sym_hash
)->root
.type
== bfd_link_hash_defweak
);
1477 BFD_ASSERT ((*sym_hash
)->root
.u
.def
.value
>= addr
1478 && (*sym_hash
)->root
.u
.def
.value
< toaddr
);
1479 (*sym_hash
)->root
.u
.def
.value
-= count
;
1483 esym
+= (isym
.n_numaux
+ 1) * symesz
;
1484 sym_hash
+= isym
.n_numaux
+ 1;
1487 /* See if we can move the ALIGN reloc forward. We have adjusted
1488 r_vaddr for it already. */
1489 if (irelalign
!= NULL
)
1491 bfd_vma alignto
, alignaddr
;
1493 alignto
= BFD_ALIGN (toaddr
, 1 << irelalign
->r_offset
);
1494 alignaddr
= BFD_ALIGN (irelalign
->r_vaddr
- sec
->vma
,
1495 1 << irelalign
->r_offset
);
1496 if (alignto
!= alignaddr
)
1498 /* Tail recursion. */
1499 return sh_relax_delete_bytes (abfd
, sec
, alignaddr
,
1500 (int) (alignto
- alignaddr
));
/* This is yet another version of the SH opcode table, used to rapidly
   get information about a particular instruction.  */

/* The opcode map is represented by an array of these structures.  The
   array is indexed by the high order four bits in the instruction.  */

struct sh_major_opcode
{
  /* A pointer to the instruction list.  This is an array which
     contains all the instructions with this major opcode.  */
  const struct sh_minor_opcode *minor_opcodes;
  /* The number of elements in minor_opcodes.  */
  unsigned short count;
};

/* This structure holds information for a set of SH opcodes.  The
   instruction code is anded with the mask value, and the resulting
   value is used to search the sorted opcode list.  */

struct sh_minor_opcode
{
  /* The sorted opcode list.  */
  const struct sh_opcode *opcodes;
  /* The number of elements in opcodes.  */
  unsigned short count;
  /* The mask value to use when searching the opcode list.  */
  unsigned short mask;
};

/* This structure holds information for an SH instruction.  An array
   of these structures is sorted in order by opcode.  */

struct sh_opcode
{
  /* The code for this instruction, after it has been anded with the
     mask value in the sh_minor_opcode structure.  */
  unsigned short opcode;
  /* Flags for this instruction.  */
  unsigned long flags;
};
/* Flags which appear in the sh_opcode structure.  */

/* This instruction loads a value from memory.  */
#define LOAD (0x1)

/* This instruction stores a value to memory.  */
#define STORE (0x2)

/* This instruction is a branch.  */
#define BRANCH (0x4)

/* This instruction has a delay slot.  */
#define DELAY (0x8)

/* This instruction uses the value in the register in the field at
   mask 0x0f00 of the instruction.  */
#define USES1 (0x10)
#define USES1_REG(x) ((x & 0x0f00) >> 8)

/* This instruction uses the value in the register in the field at
   mask 0x00f0 of the instruction.  */
#define USES2 (0x20)
#define USES2_REG(x) ((x & 0x00f0) >> 4)

/* This instruction uses the value in register 0.  */
#define USESR0 (0x40)

/* This instruction sets the value in the register in the field at
   mask 0x0f00 of the instruction.  */
#define SETS1 (0x80)
#define SETS1_REG(x) ((x & 0x0f00) >> 8)

/* This instruction sets the value in the register in the field at
   mask 0x00f0 of the instruction.  */
#define SETS2 (0x100)
#define SETS2_REG(x) ((x & 0x00f0) >> 4)

/* This instruction sets register 0.  */
#define SETSR0 (0x200)

/* This instruction sets a special register.  */
#define SETSSP (0x400)

/* This instruction uses a special register.  */
#define USESSP (0x800)

/* This instruction uses the floating point register in the field at
   mask 0x0f00 of the instruction.  */
#define USESF1 (0x1000)
#define USESF1_REG(x) ((x & 0x0f00) >> 8)

/* This instruction uses the floating point register in the field at
   mask 0x00f0 of the instruction.  */
#define USESF2 (0x2000)
#define USESF2_REG(x) ((x & 0x00f0) >> 4)

/* This instruction uses floating point register 0.  */
#define USESF0 (0x4000)

/* This instruction sets the floating point register in the field at
   mask 0x0f00 of the instruction.  */
#define SETSF1 (0x8000)
#define SETSF1_REG(x) ((x & 0x0f00) >> 8)

#define USESAS (0x10000)
#define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
#define USESR8 (0x20000)
#define SETSAS (0x40000)
#define SETSAS_REG(x) USESAS_REG (x)

#define MAP(a) a, sizeof a / sizeof a[0]
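/* Worked example: the instruction word 0x314c is "add r4,r1".  Its
   high four bits (0x3) select sh_opcodes[3], whose single minor table
   uses mask 0xf00f, giving 0x300c; that table entry carries
   SETS1 | USES1 | USES2.  USES1_REG (0x314c) and SETS1_REG (0x314c)
   both yield 1 (rn, here r1), and USES2_REG (0x314c) yields 4 (rm,
   here r4).  This is how sh_insn_uses_reg and friends decide whether
   two adjacent instructions may safely be swapped when aligning loads
   and stores.  */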
#ifndef COFF_IMAGE_WITH_PE
static bfd_boolean sh_insn_uses_reg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_sets_reg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_uses_or_sets_reg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_uses_freg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_sets_freg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insn_uses_or_sets_freg
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
static bfd_boolean sh_insns_conflict
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
	   const struct sh_opcode *));
static bfd_boolean sh_load_use
  PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
	   const struct sh_opcode *));
1640 /* The opcode maps. */
static const struct sh_opcode sh_opcode00[] =
{
  { 0x0008, SETSSP },			/* clrt */
  { 0x0009, 0 },			/* nop */
  { 0x000b, BRANCH | DELAY | USESSP },	/* rts */
  { 0x0018, SETSSP },			/* sett */
  { 0x0019, SETSSP },			/* div0u */
  { 0x001b, 0 },			/* sleep */
  { 0x0028, SETSSP },			/* clrmac */
  { 0x002b, BRANCH | DELAY | SETSSP },	/* rte */
  { 0x0038, USESSP | SETSSP },		/* ldtlb */
  { 0x0048, SETSSP },			/* clrs */
  { 0x0058, SETSSP }			/* sets */
};
[] =
1659 { 0x0003, BRANCH
| DELAY
| USES1
| SETSSP
}, /* bsrf rn */
1660 { 0x000a, SETS1
| USESSP
}, /* sts mach,rn */
1661 { 0x001a, SETS1
| USESSP
}, /* sts macl,rn */
1662 { 0x0023, BRANCH
| DELAY
| USES1
}, /* braf rn */
1663 { 0x0029, SETS1
| USESSP
}, /* movt rn */
1664 { 0x002a, SETS1
| USESSP
}, /* sts pr,rn */
1665 { 0x005a, SETS1
| USESSP
}, /* sts fpul,rn */
1666 { 0x006a, SETS1
| USESSP
}, /* sts fpscr,rn / sts dsr,rn */
1667 { 0x0083, LOAD
| USES1
}, /* pref @rn */
1668 { 0x007a, SETS1
| USESSP
}, /* sts a0,rn */
1669 { 0x008a, SETS1
| USESSP
}, /* sts x0,rn */
1670 { 0x009a, SETS1
| USESSP
}, /* sts x1,rn */
1671 { 0x00aa, SETS1
| USESSP
}, /* sts y0,rn */
1672 { 0x00ba, SETS1
| USESSP
} /* sts y1,rn */
1675 static const struct sh_opcode sh_opcode02
[] =
1677 { 0x0002, SETS1
| USESSP
}, /* stc <special_reg>,rn */
1678 { 0x0004, STORE
| USES1
| USES2
| USESR0
}, /* mov.b rm,@(r0,rn) */
1679 { 0x0005, STORE
| USES1
| USES2
| USESR0
}, /* mov.w rm,@(r0,rn) */
1680 { 0x0006, STORE
| USES1
| USES2
| USESR0
}, /* mov.l rm,@(r0,rn) */
1681 { 0x0007, SETSSP
| USES1
| USES2
}, /* mul.l rm,rn */
1682 { 0x000c, LOAD
| SETS1
| USES2
| USESR0
}, /* mov.b @(r0,rm),rn */
1683 { 0x000d, LOAD
| SETS1
| USES2
| USESR0
}, /* mov.w @(r0,rm),rn */
1684 { 0x000e, LOAD
| SETS1
| USES2
| USESR0
}, /* mov.l @(r0,rm),rn */
1685 { 0x000f, LOAD
|SETS1
|SETS2
|SETSSP
|USES1
|USES2
|USESSP
}, /* mac.l @rm+,@rn+ */
1688 static const struct sh_minor_opcode sh_opcode0
[] =
1690 { MAP (sh_opcode00
), 0xffff },
1691 { MAP (sh_opcode01
), 0xf0ff },
1692 { MAP (sh_opcode02
), 0xf00f }
1695 static const struct sh_opcode sh_opcode10
[] =
1697 { 0x1000, STORE
| USES1
| USES2
} /* mov.l rm,@(disp,rn) */
1700 static const struct sh_minor_opcode sh_opcode1
[] =
1702 { MAP (sh_opcode10
), 0xf000 }
1705 static const struct sh_opcode sh_opcode20
[] =
1707 { 0x2000, STORE
| USES1
| USES2
}, /* mov.b rm,@rn */
1708 { 0x2001, STORE
| USES1
| USES2
}, /* mov.w rm,@rn */
1709 { 0x2002, STORE
| USES1
| USES2
}, /* mov.l rm,@rn */
1710 { 0x2004, STORE
| SETS1
| USES1
| USES2
}, /* mov.b rm,@-rn */
1711 { 0x2005, STORE
| SETS1
| USES1
| USES2
}, /* mov.w rm,@-rn */
1712 { 0x2006, STORE
| SETS1
| USES1
| USES2
}, /* mov.l rm,@-rn */
1713 { 0x2007, SETSSP
| USES1
| USES2
| USESSP
}, /* div0s */
1714 { 0x2008, SETSSP
| USES1
| USES2
}, /* tst rm,rn */
1715 { 0x2009, SETS1
| USES1
| USES2
}, /* and rm,rn */
1716 { 0x200a, SETS1
| USES1
| USES2
}, /* xor rm,rn */
1717 { 0x200b, SETS1
| USES1
| USES2
}, /* or rm,rn */
1718 { 0x200c, SETSSP
| USES1
| USES2
}, /* cmp/str rm,rn */
1719 { 0x200d, SETS1
| USES1
| USES2
}, /* xtrct rm,rn */
1720 { 0x200e, SETSSP
| USES1
| USES2
}, /* mulu.w rm,rn */
1721 { 0x200f, SETSSP
| USES1
| USES2
} /* muls.w rm,rn */
1724 static const struct sh_minor_opcode sh_opcode2
[] =
1726 { MAP (sh_opcode20
), 0xf00f }
1729 static const struct sh_opcode sh_opcode30
[] =
1731 { 0x3000, SETSSP
| USES1
| USES2
}, /* cmp/eq rm,rn */
1732 { 0x3002, SETSSP
| USES1
| USES2
}, /* cmp/hs rm,rn */
1733 { 0x3003, SETSSP
| USES1
| USES2
}, /* cmp/ge rm,rn */
1734 { 0x3004, SETSSP
| USESSP
| USES1
| USES2
}, /* div1 rm,rn */
1735 { 0x3005, SETSSP
| USES1
| USES2
}, /* dmulu.l rm,rn */
1736 { 0x3006, SETSSP
| USES1
| USES2
}, /* cmp/hi rm,rn */
1737 { 0x3007, SETSSP
| USES1
| USES2
}, /* cmp/gt rm,rn */
1738 { 0x3008, SETS1
| USES1
| USES2
}, /* sub rm,rn */
1739 { 0x300a, SETS1
| SETSSP
| USES1
| USES2
| USESSP
}, /* subc rm,rn */
1740 { 0x300b, SETS1
| SETSSP
| USES1
| USES2
}, /* subv rm,rn */
1741 { 0x300c, SETS1
| USES1
| USES2
}, /* add rm,rn */
1742 { 0x300d, SETSSP
| USES1
| USES2
}, /* dmuls.l rm,rn */
1743 { 0x300e, SETS1
| SETSSP
| USES1
| USES2
| USESSP
}, /* addc rm,rn */
1744 { 0x300f, SETS1
| SETSSP
| USES1
| USES2
} /* addv rm,rn */
1747 static const struct sh_minor_opcode sh_opcode3
[] =
1749 { MAP (sh_opcode30
), 0xf00f }
1752 static const struct sh_opcode sh_opcode40
[] =
1754 { 0x4000, SETS1
| SETSSP
| USES1
}, /* shll rn */
1755 { 0x4001, SETS1
| SETSSP
| USES1
}, /* shlr rn */
1756 { 0x4002, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l mach,@-rn */
1757 { 0x4004, SETS1
| SETSSP
| USES1
}, /* rotl rn */
1758 { 0x4005, SETS1
| SETSSP
| USES1
}, /* rotr rn */
1759 { 0x4006, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,mach */
1760 { 0x4008, SETS1
| USES1
}, /* shll2 rn */
1761 { 0x4009, SETS1
| USES1
}, /* shlr2 rn */
1762 { 0x400a, SETSSP
| USES1
}, /* lds rm,mach */
1763 { 0x400b, BRANCH
| DELAY
| USES1
}, /* jsr @rn */
1764 { 0x4010, SETS1
| SETSSP
| USES1
}, /* dt rn */
1765 { 0x4011, SETSSP
| USES1
}, /* cmp/pz rn */
1766 { 0x4012, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l macl,@-rn */
1767 { 0x4014, SETSSP
| USES1
}, /* setrc rm */
1768 { 0x4015, SETSSP
| USES1
}, /* cmp/pl rn */
1769 { 0x4016, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,macl */
1770 { 0x4018, SETS1
| USES1
}, /* shll8 rn */
1771 { 0x4019, SETS1
| USES1
}, /* shlr8 rn */
1772 { 0x401a, SETSSP
| USES1
}, /* lds rm,macl */
1773 { 0x401b, LOAD
| SETSSP
| USES1
}, /* tas.b @rn */
1774 { 0x4020, SETS1
| SETSSP
| USES1
}, /* shal rn */
1775 { 0x4021, SETS1
| SETSSP
| USES1
}, /* shar rn */
1776 { 0x4022, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l pr,@-rn */
1777 { 0x4024, SETS1
| SETSSP
| USES1
| USESSP
}, /* rotcl rn */
1778 { 0x4025, SETS1
| SETSSP
| USES1
| USESSP
}, /* rotcr rn */
1779 { 0x4026, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,pr */
1780 { 0x4028, SETS1
| USES1
}, /* shll16 rn */
1781 { 0x4029, SETS1
| USES1
}, /* shlr16 rn */
1782 { 0x402a, SETSSP
| USES1
}, /* lds rm,pr */
1783 { 0x402b, BRANCH
| DELAY
| USES1
}, /* jmp @rn */
1784 { 0x4052, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l fpul,@-rn */
1785 { 0x4056, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,fpul */
1786 { 0x405a, SETSSP
| USES1
}, /* lds.l rm,fpul */
1787 { 0x4062, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l fpscr / dsr,@-rn */
1788 { 0x4066, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,fpscr / dsr */
1789 { 0x406a, SETSSP
| USES1
}, /* lds rm,fpscr / lds rm,dsr */
1790 { 0x4072, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l a0,@-rn */
1791 { 0x4076, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,a0 */
1792 { 0x407a, SETSSP
| USES1
}, /* lds.l rm,a0 */
1793 { 0x4082, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l x0,@-rn */
1794 { 0x4086, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,x0 */
1795 { 0x408a, SETSSP
| USES1
}, /* lds.l rm,x0 */
1796 { 0x4092, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l x1,@-rn */
1797 { 0x4096, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,x1 */
1798 { 0x409a, SETSSP
| USES1
}, /* lds.l rm,x1 */
1799 { 0x40a2, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l y0,@-rn */
1800 { 0x40a6, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,y0 */
1801 { 0x40aa, SETSSP
| USES1
}, /* lds.l rm,y0 */
1802 { 0x40b2, STORE
| SETS1
| USES1
| USESSP
}, /* sts.l y1,@-rn */
1803 { 0x40b6, LOAD
| SETS1
| SETSSP
| USES1
}, /* lds.l @rm+,y1 */
1804 { 0x40ba, SETSSP
| USES1
} /* lds.l rm,y1 */
1807 static const struct sh_opcode sh_opcode41
[] =
1809 { 0x4003, STORE
| SETS1
| USES1
| USESSP
}, /* stc.l <special_reg>,@-rn */
1810 { 0x4007, LOAD
| SETS1
| SETSSP
| USES1
}, /* ldc.l @rm+,<special_reg> */
1811 { 0x400c, SETS1
| USES1
| USES2
}, /* shad rm,rn */
1812 { 0x400d, SETS1
| USES1
| USES2
}, /* shld rm,rn */
1813 { 0x400e, SETSSP
| USES1
}, /* ldc rm,<special_reg> */
1814 { 0x400f, LOAD
|SETS1
|SETS2
|SETSSP
|USES1
|USES2
|USESSP
}, /* mac.w @rm+,@rn+ */
1817 static const struct sh_minor_opcode sh_opcode4
[] =
1819 { MAP (sh_opcode40
), 0xf0ff },
1820 { MAP (sh_opcode41
), 0xf00f }
1823 static const struct sh_opcode sh_opcode50
[] =
1825 { 0x5000, LOAD
| SETS1
| USES2
} /* mov.l @(disp,rm),rn */
1828 static const struct sh_minor_opcode sh_opcode5
[] =
1830 { MAP (sh_opcode50
), 0xf000 }
1833 static const struct sh_opcode sh_opcode60
[] =
1835 { 0x6000, LOAD
| SETS1
| USES2
}, /* mov.b @rm,rn */
1836 { 0x6001, LOAD
| SETS1
| USES2
}, /* mov.w @rm,rn */
1837 { 0x6002, LOAD
| SETS1
| USES2
}, /* mov.l @rm,rn */
1838 { 0x6003, SETS1
| USES2
}, /* mov rm,rn */
1839 { 0x6004, LOAD
| SETS1
| SETS2
| USES2
}, /* mov.b @rm+,rn */
1840 { 0x6005, LOAD
| SETS1
| SETS2
| USES2
}, /* mov.w @rm+,rn */
1841 { 0x6006, LOAD
| SETS1
| SETS2
| USES2
}, /* mov.l @rm+,rn */
1842 { 0x6007, SETS1
| USES2
}, /* not rm,rn */
1843 { 0x6008, SETS1
| USES2
}, /* swap.b rm,rn */
1844 { 0x6009, SETS1
| USES2
}, /* swap.w rm,rn */
1845 { 0x600a, SETS1
| SETSSP
| USES2
| USESSP
}, /* negc rm,rn */
1846 { 0x600b, SETS1
| USES2
}, /* neg rm,rn */
1847 { 0x600c, SETS1
| USES2
}, /* extu.b rm,rn */
1848 { 0x600d, SETS1
| USES2
}, /* extu.w rm,rn */
1849 { 0x600e, SETS1
| USES2
}, /* exts.b rm,rn */
1850 { 0x600f, SETS1
| USES2
} /* exts.w rm,rn */
1853 static const struct sh_minor_opcode sh_opcode6
[] =
1855 { MAP (sh_opcode60
), 0xf00f }
1858 static const struct sh_opcode sh_opcode70
[] =
1860 { 0x7000, SETS1
| USES1
} /* add #imm,rn */
1863 static const struct sh_minor_opcode sh_opcode7
[] =
1865 { MAP (sh_opcode70
), 0xf000 }
1868 static const struct sh_opcode sh_opcode80
[] =
1870 { 0x8000, STORE
| USES2
| USESR0
}, /* mov.b r0,@(disp,rn) */
1871 { 0x8100, STORE
| USES2
| USESR0
}, /* mov.w r0,@(disp,rn) */
1872 { 0x8200, SETSSP
}, /* setrc #imm */
1873 { 0x8400, LOAD
| SETSR0
| USES2
}, /* mov.b @(disp,rm),r0 */
1874 { 0x8500, LOAD
| SETSR0
| USES2
}, /* mov.w @(disp,rn),r0 */
1875 { 0x8800, SETSSP
| USESR0
}, /* cmp/eq #imm,r0 */
1876 { 0x8900, BRANCH
| USESSP
  },					/* bt label */
  { 0x8b00, BRANCH | USESSP },		/* bf label */
  { 0x8c00, SETSSP },			/* ldrs @(disp,pc) */
  { 0x8d00, BRANCH | DELAY | USESSP },	/* bt/s label */
  { 0x8e00, SETSSP },			/* ldre @(disp,pc) */
  { 0x8f00, BRANCH | DELAY | USESSP }	/* bf/s label */
};

static const struct sh_minor_opcode sh_opcode8[] =
{
  { MAP (sh_opcode80), 0xff00 }
};

static const struct sh_opcode sh_opcode90[] =
{
  { 0x9000, LOAD | SETS1 }		/* mov.w @(disp,pc),rn */
};

static const struct sh_minor_opcode sh_opcode9[] =
{
  { MAP (sh_opcode90), 0xf000 }
};

static const struct sh_opcode sh_opcodea0[] =
{
  { 0xa000, BRANCH | DELAY }		/* bra label */
};

static const struct sh_minor_opcode sh_opcodea[] =
{
  { MAP (sh_opcodea0), 0xf000 }
};

static const struct sh_opcode sh_opcodeb0[] =
{
  { 0xb000, BRANCH | DELAY }		/* bsr label */
};

static const struct sh_minor_opcode sh_opcodeb[] =
{
  { MAP (sh_opcodeb0), 0xf000 }
};

static const struct sh_opcode sh_opcodec0[] =
{
  { 0xc000, STORE | USESR0 | USESSP },	/* mov.b r0,@(disp,gbr) */
  { 0xc100, STORE | USESR0 | USESSP },	/* mov.w r0,@(disp,gbr) */
  { 0xc200, STORE | USESR0 | USESSP },	/* mov.l r0,@(disp,gbr) */
  { 0xc300, BRANCH | USESSP },		/* trapa #imm */
  { 0xc400, LOAD | SETSR0 | USESSP },	/* mov.b @(disp,gbr),r0 */
  { 0xc500, LOAD | SETSR0 | USESSP },	/* mov.w @(disp,gbr),r0 */
  { 0xc600, LOAD | SETSR0 | USESSP },	/* mov.l @(disp,gbr),r0 */
  { 0xc700, SETSR0 },			/* mova @(disp,pc),r0 */
  { 0xc800, SETSSP | USESR0 },		/* tst #imm,r0 */
  { 0xc900, SETSR0 | USESR0 },		/* and #imm,r0 */
  { 0xca00, SETSR0 | USESR0 },		/* xor #imm,r0 */
  { 0xcb00, SETSR0 | USESR0 },		/* or #imm,r0 */
  { 0xcc00, LOAD | SETSSP | USESR0 | USESSP },	/* tst.b #imm,@(r0,gbr) */
  { 0xcd00, LOAD | STORE | USESR0 | USESSP },	/* and.b #imm,@(r0,gbr) */
  { 0xce00, LOAD | STORE | USESR0 | USESSP },	/* xor.b #imm,@(r0,gbr) */
  { 0xcf00, LOAD | STORE | USESR0 | USESSP }	/* or.b #imm,@(r0,gbr) */
};

static const struct sh_minor_opcode sh_opcodec[] =
{
  { MAP (sh_opcodec0), 0xff00 }
};

static const struct sh_opcode sh_opcoded0[] =
{
  { 0xd000, LOAD | SETS1 }		/* mov.l @(disp,pc),rn */
};

static const struct sh_minor_opcode sh_opcoded[] =
{
  { MAP (sh_opcoded0), 0xf000 }
};

static const struct sh_opcode sh_opcodee0[] =
{
  { 0xe000, SETS1 }			/* mov #imm,rn */
};

static const struct sh_minor_opcode sh_opcodee[] =
{
  { MAP (sh_opcodee0), 0xf000 }
};

static const struct sh_opcode sh_opcodef0[] =
{
  { 0xf000, SETSF1 | USESF1 | USESF2 },	/* fadd fm,fn */
  { 0xf001, SETSF1 | USESF1 | USESF2 },	/* fsub fm,fn */
  { 0xf002, SETSF1 | USESF1 | USESF2 },	/* fmul fm,fn */
  { 0xf003, SETSF1 | USESF1 | USESF2 },	/* fdiv fm,fn */
  { 0xf004, SETSSP | USESF1 | USESF2 },	/* fcmp/eq fm,fn */
  { 0xf005, SETSSP | USESF1 | USESF2 },	/* fcmp/gt fm,fn */
  { 0xf006, LOAD | SETSF1 | USES2 | USESR0 },	/* fmov.s @(r0,rm),fn */
  { 0xf007, STORE | USES1 | USESF2 | USESR0 },	/* fmov.s fm,@(r0,rn) */
  { 0xf008, LOAD | SETSF1 | USES2 },	/* fmov.s @rm,fn */
  { 0xf009, LOAD | SETS2 | SETSF1 | USES2 },	/* fmov.s @rm+,fn */
  { 0xf00a, STORE | USES1 | USESF2 },	/* fmov.s fm,@rn */
  { 0xf00b, STORE | SETS1 | USES1 | USESF2 },	/* fmov.s fm,@-rn */
  { 0xf00c, SETSF1 | USESF2 },		/* fmov fm,fn */
  { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 }	/* fmac f0,fm,fn */
};

static const struct sh_opcode sh_opcodef1[] =
{
  { 0xf00d, SETSF1 | USESSP },		/* fsts fpul,fn */
  { 0xf01d, SETSSP | USESF1 },		/* flds fn,fpul */
  { 0xf02d, SETSF1 | USESSP },		/* float fpul,fn */
  { 0xf03d, SETSSP | USESF1 },		/* ftrc fn,fpul */
  { 0xf04d, SETSF1 | USESF1 },		/* fneg fn */
  { 0xf05d, SETSF1 | USESF1 },		/* fabs fn */
  { 0xf06d, SETSF1 | USESF1 },		/* fsqrt fn */
  { 0xf07d, SETSSP | USESF1 },		/* ftst/nan fn */
  { 0xf08d, SETSF1 },			/* fldi0 fn */
  { 0xf09d, SETSF1 }			/* fldi1 fn */
};

static const struct sh_minor_opcode sh_opcodef[] =
{
  { MAP (sh_opcodef0), 0xf00f },
  { MAP (sh_opcodef1), 0xf0ff }
};

static struct sh_major_opcode sh_opcodes[] =
{
  { MAP (sh_opcode0) },
  { MAP (sh_opcode1) },
  { MAP (sh_opcode2) },
  { MAP (sh_opcode3) },
  { MAP (sh_opcode4) },
  { MAP (sh_opcode5) },
  { MAP (sh_opcode6) },
  { MAP (sh_opcode7) },
  { MAP (sh_opcode8) },
  { MAP (sh_opcode9) },
  { MAP (sh_opcodea) },
  { MAP (sh_opcodeb) },
  { MAP (sh_opcodec) },
  { MAP (sh_opcoded) },
  { MAP (sh_opcodee) },
  { MAP (sh_opcodef) }
};
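
/* The sh_opcodes table above is indexed by the top nibble of an
   instruction.  Each major entry holds a list of minor entries, and
   each minor entry pairs an array of sh_opcode values with the mask
   that must be applied to an instruction before comparing it against
   those values (see sh_insn_info below).  */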

/* The double data transfer / parallel processing insns are not
   described here.  This will cause sh_align_load_span to leave them alone.  */

static const struct sh_opcode sh_dsp_opcodef0[] =
{
  { 0xf400, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @-as,ds */
  { 0xf401, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@-as */
  { 0xf404, USESAS | LOAD | SETSSP },		/* movs.x @as,ds */
  { 0xf405, USESAS | STORE | USESSP },		/* movs.x ds,@as */
  { 0xf408, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @as+,ds */
  { 0xf409, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@as+ */
  { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 },	/* movs.x @as+r8,ds */
  { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 }	/* movs.x ds,@as+r8 */
};

static const struct sh_minor_opcode sh_dsp_opcodef[] =
{
  { MAP (sh_dsp_opcodef0), 0xfc0d }
};
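
/* When linking sh[3]-dsp code, sh_opcodes[0xf] is pointed at this table
   (see _bfd_sh_align_load_span below), so only the single data transfer
   movs.x forms listed above are recognized; the parallel processing
   encodings match no entry and are therefore left alone, as the comment
   above says.  */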

/* Given an instruction, return a pointer to the corresponding
   sh_opcode structure.  Return NULL if the instruction is not
   recognized.  */

static const struct sh_opcode *
sh_insn_info (insn)
     unsigned int insn;
{
  const struct sh_major_opcode *maj;
  const struct sh_minor_opcode *min, *minend;

  maj = &sh_opcodes[(insn & 0xf000) >> 12];
  min = maj->minor_opcodes;
  minend = min + maj->count;
  for (; min < minend; min++)
    {
      unsigned int l;
      const struct sh_opcode *op, *opend;

      l = insn & min->mask;
      op = min->opcodes;
      opend = op + min->count;

      /* Since the opcodes tables are sorted, we could use a binary
         search here if the count were above some cutoff value.  */
      for (; op < opend; op++)
        if (op->opcode == l)
          return op;
    }

  return NULL;
}
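
/* As an example, a "mova @(disp,pc),r0" instruction has 0xc in its top
   nibble, so sh_insn_info indexes sh_opcodes[0xc]; the single minor
   entry there carries the mask 0xff00, the instruction is reduced to
   0xc700, and the entry flagged SETSR0 above is returned.  Encodings
   which match no entry yield NULL and are simply skipped by the
   alignment code.  */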

/* See whether an instruction uses or sets a general purpose register */

static bfd_boolean
sh_insn_uses_or_sets_reg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  if (sh_insn_uses_reg (insn, op, reg))
    return TRUE;

  return sh_insn_sets_reg (insn, op, reg);
}

/* See whether an instruction uses a general purpose register.  */

static bfd_boolean
sh_insn_uses_reg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  unsigned int f;

  f = op->flags;

  if ((f & USES1) != 0
      && USES1_REG (insn) == reg)
    return TRUE;
  if ((f & USES2) != 0
      && USES2_REG (insn) == reg)
    return TRUE;
  if ((f & USESR0) != 0
      && reg == 0)
    return TRUE;
  if ((f & USESAS) && reg == USESAS_REG (insn))
    return TRUE;
  if ((f & USESR8) && reg == 8)
    return TRUE;

  return FALSE;
}

/* See whether an instruction sets a general purpose register.  */

static bfd_boolean
sh_insn_sets_reg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  unsigned int f;

  f = op->flags;

  if ((f & SETS1) != 0
      && SETS1_REG (insn) == reg)
    return TRUE;
  if ((f & SETS2) != 0
      && SETS2_REG (insn) == reg)
    return TRUE;
  if ((f & SETSR0) != 0
      && reg == 0)
    return TRUE;
  if ((f & SETSAS) && reg == SETSAS_REG (insn))
    return TRUE;

  return FALSE;
}

/* See whether an instruction uses or sets a floating point register */

static bfd_boolean
sh_insn_uses_or_sets_freg (insn, op, reg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int reg;
{
  if (sh_insn_uses_freg (insn, op, reg))
    return TRUE;

  return sh_insn_sets_freg (insn, op, reg);
}

/* See whether an instruction uses a floating point register.  */

static bfd_boolean
sh_insn_uses_freg (insn, op, freg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int freg;
{
  unsigned int f;

  f = op->flags;

  /* We can't tell if this is a double-precision insn, so just play safe
     and assume that it might be.  That means we must not only test FREG
     against itself, but also test an even FREG against FREG+1 (in case
     the using insn uses just the low part of a double precision value)
     and an odd FREG against FREG-1 (in case the setting insn sets just
     the low part of a double precision value).  What this all boils
     down to is that we have to ignore the lowest bit of the register
     number.  */

  if ((f & USESF1) != 0
      && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
    return TRUE;
  if ((f & USESF2) != 0
      && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
    return TRUE;
  if ((f & USESF0) != 0
      && freg == 0)
    return TRUE;

  return FALSE;
}

/* See whether an instruction sets a floating point register.  */

static bfd_boolean
sh_insn_sets_freg (insn, op, freg)
     unsigned int insn;
     const struct sh_opcode *op;
     unsigned int freg;
{
  unsigned int f;

  f = op->flags;

  /* We can't tell if this is a double-precision insn, so just play safe
     and assume that it might be.  As above, this boils down to ignoring
     the lowest bit of the register number.  */

  if ((f & SETSF1) != 0
      && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
    return TRUE;

  return FALSE;
}

/* See whether instructions I1 and I2 conflict, assuming I1 comes
   before I2.  OP1 and OP2 are the corresponding sh_opcode structures.
   This should return TRUE if there is a conflict, or FALSE if the
   instructions can be swapped safely.  */

static bfd_boolean
sh_insns_conflict (i1, op1, i2, op2)
     unsigned int i1;
     const struct sh_opcode *op1;
     unsigned int i2;
     const struct sh_opcode *op2;
{
  unsigned int f1, f2;

  f1 = op1->flags;
  f2 = op2->flags;

  /* Load of fpscr conflicts with floating point operations.
     FIXME: shouldn't test raw opcodes here.  */
  if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
      || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
    return TRUE;

  if ((f1 & (BRANCH | DELAY)) != 0
      || (f2 & (BRANCH | DELAY)) != 0)
    return TRUE;

  if (((f1 | f2) & SETSSP)
      && (f1 & (SETSSP | USESSP))
      && (f2 & (SETSSP | USESSP)))
    return TRUE;

  if ((f1 & SETS1) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
    return TRUE;
  if ((f1 & SETS2) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
    return TRUE;
  if ((f1 & SETSR0) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, 0))
    return TRUE;
  if ((f1 & SETSAS)
      && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
    return TRUE;
  if ((f1 & SETSF1) != 0
      && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
    return TRUE;

  if ((f2 & SETS1) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
    return TRUE;
  if ((f2 & SETS2) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
    return TRUE;
  if ((f2 & SETSR0) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, 0))
    return TRUE;
  if ((f2 & SETSAS)
      && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
    return TRUE;
  if ((f2 & SETSF1) != 0
      && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
    return TRUE;

  /* The instructions do not conflict.  */
  return FALSE;
}
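
/* In other words: a pair conflicts when either instruction is a branch
   or occupies a delay slot, when both touch a special register and at
   least one sets it, or when one sets a general purpose or floating
   point register that the other uses or sets.  Only pairs for which
   none of the tests above fire may be swapped.  */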

/* I1 is a load instruction, and I2 is some other instruction.  Return
   TRUE if I1 loads a register which I2 uses.  */

static bfd_boolean
sh_load_use (i1, op1, i2, op2)
     unsigned int i1;
     const struct sh_opcode *op1;
     unsigned int i2;
     const struct sh_opcode *op2;
{
  unsigned int f1;

  f1 = op1->flags;

  if ((f1 & LOAD) == 0)
    return FALSE;

  /* If both SETS1 and SETSSP are set, that means a load to a special
     register using postincrement addressing mode, which we don't care
     about here.  */
  if ((f1 & SETS1) != 0
      && (f1 & SETSSP) == 0
      && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
    return TRUE;

  if ((f1 & SETSR0) != 0
      && sh_insn_uses_reg (i2, op2, 0))
    return TRUE;

  if ((f1 & SETSF1) != 0
      && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
    return TRUE;

  return FALSE;
}
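
/* This matters because a load followed immediately by an instruction
   which uses the loaded register stalls the pipeline; the alignment
   code below uses sh_load_use to avoid creating such pairs when it
   reorders instructions.  */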

/* Try to align loads and stores within a span of memory.  This is
   called by both the ELF and the COFF sh targets.  ABFD and SEC are
   the BFD and section we are examining.  CONTENTS is the contents of
   the section.  SWAP is the routine to call to swap two instructions.
   RELOCS is a pointer to the internal relocation information, to be
   passed to SWAP.  PLABEL is a pointer to the current label in a
   sorted list of labels; LABEL_END is the end of the list.  START and
   STOP are the range of memory to examine.  If a swap is made,
   *PSWAPPED is set to TRUE.  */

bfd_boolean
_bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
                         plabel, label_end, start, stop, pswapped)
     bfd *abfd;
     asection *sec;
     bfd_byte *contents;
     bfd_boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
     PTR relocs;
     bfd_vma **plabel;
     bfd_vma *label_end;
     bfd_vma start;
     bfd_vma stop;
     bfd_boolean *pswapped;
{
  int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
             || abfd->arch_info->mach == bfd_mach_sh3_dsp);
  bfd_vma i;

  /* The SH4 has a Harvard architecture, hence aligning loads is not
     desirable.  In fact, it is counter-productive, since it interferes
     with the schedules generated by the compiler.  */
  if (abfd->arch_info->mach == bfd_mach_sh4)
    return TRUE;

  /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
     instructions.  */
  if (dsp)
    {
      sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
      sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef[0];
    }

  /* Instructions should be aligned on 2 byte boundaries.  */
  if ((start & 1) == 1)
    ++start;

  /* Now look through the unaligned addresses.  */
  i = start;
  if ((i & 2) == 0)
    i += 2;
  for (; i < stop; i += 4)
    {
      unsigned int insn;
      const struct sh_opcode *op;
      unsigned int prev_insn = 0;
      const struct sh_opcode *prev_op = NULL;

      insn = bfd_get_16 (abfd, contents + i);
      op = sh_insn_info (insn);
      if (op == NULL
          || (op->flags & (LOAD | STORE)) == 0)
        continue;

      /* This is a load or store which is not on a four byte boundary.  */

      while (*plabel < label_end && **plabel < i)
        ++*plabel;

      if (i > start)
        {
          prev_insn = bfd_get_16 (abfd, contents + i - 2);
          /* If INSN is the field b of a parallel processing insn, it is not
             a load / store after all.  Note that the test here might mistake
             the field_b of a pcopy insn for the starting code of a parallel
             processing insn; this might miss a swapping opportunity, but at
             least we're on the safe side.  */
          if (dsp && (prev_insn & 0xfc00) == 0xf800)
            continue;

          /* Check if prev_insn is actually the field b of a parallel
             processing insn.  Again, this can give a spurious match
             after a pcopy.  */
          if (dsp && i - 2 > start)
            {
              unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);

              if ((pprev_insn & 0xfc00) == 0xf800)
                prev_op = NULL;
              else
                prev_op = sh_insn_info (prev_insn);
            }
          else
            prev_op = sh_insn_info (prev_insn);

          /* If the load/store instruction is in a delay slot, we
             can't swap.  */
          if (prev_op == NULL
              || (prev_op->flags & DELAY) != 0)
            continue;
        }
      if (i > start
          && (*plabel >= label_end || **plabel != i)
          && prev_op != NULL
          && (prev_op->flags & (LOAD | STORE)) == 0
          && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
        {
          bfd_boolean ok;

          /* The load/store instruction does not have a label, and
             there is a previous instruction; PREV_INSN is not
             itself a load/store instruction, and PREV_INSN and
             INSN do not conflict.  */

          ok = TRUE;

          if (i >= start + 4)
            {
              unsigned int prev2_insn;
              const struct sh_opcode *prev2_op;

              prev2_insn = bfd_get_16 (abfd, contents + i - 4);
              prev2_op = sh_insn_info (prev2_insn);

              /* If the instruction before PREV_INSN has a delay
                 slot--that is, PREV_INSN is in a delay slot--we
                 can not swap.  */
              if (prev2_op == NULL
                  || (prev2_op->flags & DELAY) != 0)
                ok = FALSE;

              /* If the instruction before PREV_INSN is a load,
                 and it sets a register which INSN uses, then
                 putting INSN immediately after PREV_INSN will
                 cause a pipeline bubble, so there is no point to
                 making the swap.  */
              if (ok
                  && (prev2_op->flags & LOAD) != 0
                  && sh_load_use (prev2_insn, prev2_op, insn, op))
                ok = FALSE;
            }

          if (ok)
            {
              if (! (*swap) (abfd, sec, relocs, contents, i - 2))
                return FALSE;
              *pswapped = TRUE;
              continue;
            }
        }

      while (*plabel < label_end && **plabel < i + 2)
        ++*plabel;

      if (i + 2 < stop
          && (*plabel >= label_end || **plabel != i + 2))
        {
          unsigned int next_insn;
          const struct sh_opcode *next_op;

          /* There is an instruction after the load/store
             instruction, and it does not have a label.  */
          next_insn = bfd_get_16 (abfd, contents + i + 2);
          next_op = sh_insn_info (next_insn);
          if (next_op != NULL
              && (next_op->flags & (LOAD | STORE)) == 0
              && ! sh_insns_conflict (insn, op, next_insn, next_op))
            {
              bfd_boolean ok;

              /* NEXT_INSN is not itself a load/store instruction,
                 and it does not conflict with INSN.  */

              ok = TRUE;

              /* If PREV_INSN is a load, and it sets a register
                 which NEXT_INSN uses, then putting NEXT_INSN
                 immediately after PREV_INSN will cause a pipeline
                 bubble, so there is no reason to make this swap.  */
              if (prev_op != NULL
                  && (prev_op->flags & LOAD) != 0
                  && sh_load_use (prev_insn, prev_op, next_insn, next_op))
                ok = FALSE;

              /* If INSN is a load, and it sets a register which
                 the insn after NEXT_INSN uses, then doing the
                 swap will cause a pipeline bubble, so there is no
                 reason to make the swap.  However, if the insn
                 after NEXT_INSN is itself a load or store
                 instruction, then it is misaligned, so
                 optimistically hope that it will be swapped
                 itself, and just live with the pipeline bubble if
                 it isn't.  */
              if (ok
                  && i + 4 < stop
                  && (op->flags & LOAD) != 0)
                {
                  unsigned int next2_insn;
                  const struct sh_opcode *next2_op;

                  next2_insn = bfd_get_16 (abfd, contents + i + 4);
                  next2_op = sh_insn_info (next2_insn);
                  if (next2_op == NULL
                      || ((next2_op->flags & (LOAD | STORE)) == 0
                          && sh_load_use (insn, op, next2_insn, next2_op)))
                    ok = FALSE;
                }

              if (ok)
                {
                  if (! (*swap) (abfd, sec, relocs, contents, i))
                    return FALSE;
                  *pswapped = TRUE;
                  continue;
                }
            }
        }
    }

  return TRUE;
}
#endif /* not COFF_IMAGE_WITH_PE */
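
/* A misaligned load or store can be moved either backwards, by swapping
   it with the preceding instruction, or forwards, by swapping it with
   the following one; the two (*swap) calls above correspond to those
   two cases, and *PSWAPPED records that the section contents changed.  */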

/* Look for loads and stores which we can align to four byte
   boundaries.  See the longer comment above sh_relax_section for why
   this is desirable.  This sets *PSWAPPED if some instruction was
   swapped.  */

static bfd_boolean
sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
     bfd *abfd;
     asection *sec;
     struct internal_reloc *internal_relocs;
     bfd_byte *contents;
     bfd_boolean *pswapped;
{
  struct internal_reloc *irel, *irelend;
  bfd_vma *labels = NULL;
  bfd_vma *label, *label_end;
  bfd_size_type amt;

  *pswapped = FALSE;

  irelend = internal_relocs + sec->reloc_count;

  /* Get all the addresses with labels on them.  */
  amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
  labels = (bfd_vma *) bfd_malloc (amt);
  if (labels == NULL)
    goto error_return;
  label_end = labels;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      if (irel->r_type == R_SH_LABEL)
        {
          *label_end = irel->r_vaddr - sec->vma;
          ++label_end;
        }
    }

  /* Note that the assembler currently always outputs relocs in
     address order.  If that ever changes, this code will need to sort
     the label values and the relocs.  */

  label = labels;

  for (irel = internal_relocs; irel < irelend; irel++)
    {
      bfd_vma start, stop;

      if (irel->r_type != R_SH_CODE)
        continue;

      start = irel->r_vaddr - sec->vma;

      for (irel++; irel < irelend; irel++)
        if (irel->r_type == R_SH_DATA)
          break;
      if (irel < irelend)
        stop = irel->r_vaddr - sec->vma;
      else
        stop = sec->size;

      if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
                                     (PTR) internal_relocs, &label,
                                     label_end, start, stop, pswapped))
        goto error_return;
    }

  free (labels);

  return TRUE;

 error_return:
  if (labels != NULL)
    free (labels);
  return FALSE;
}
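
/* Only the code between an R_SH_CODE marker and the next R_SH_DATA
   marker is scanned, and the R_SH_LABEL addresses gathered above are
   passed along so that _bfd_sh_align_load_span never moves an
   instruction which has a label on it.  */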

/* Swap two SH instructions.  */

static bfd_boolean
sh_swap_insns (abfd, sec, relocs, contents, addr)
     bfd *abfd;
     asection *sec;
     PTR relocs;
     bfd_byte *contents;
     bfd_vma addr;
{
  struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
  unsigned short i1, i2;
  struct internal_reloc *irel, *irelend;

  /* Swap the instructions themselves.  */
  i1 = bfd_get_16 (abfd, contents + addr);
  i2 = bfd_get_16 (abfd, contents + addr + 2);
  bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
  bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);

  /* Adjust all reloc addresses.  */
  irelend = internal_relocs + sec->reloc_count;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      int type, add;

      /* There are a few special types of relocs that we don't want to
         adjust.  These relocs do not apply to the instruction itself,
         but are only associated with the address.  */
      type = irel->r_type;
      if (type == R_SH_ALIGN
          || type == R_SH_CODE
          || type == R_SH_DATA
          || type == R_SH_LABEL)
        continue;

      /* If an R_SH_USES reloc points to one of the addresses being
         swapped, we must adjust it.  It would be incorrect to do this
         for a jump, though, since we want to execute both
         instructions after the jump.  (We have avoided swapping
         around a label, so the jump will not wind up executing an
         instruction it shouldn't).  */
      if (type == R_SH_USES)
        {
          bfd_vma off;

          off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
          if (off == addr)
            irel->r_offset += 2;
          else if (off == addr + 2)
            irel->r_offset -= 2;
        }

      if (irel->r_vaddr - sec->vma == addr)
        {
          irel->r_vaddr += 2;
          add = -2;
        }
      else if (irel->r_vaddr - sec->vma == addr + 2)
        {
          irel->r_vaddr -= 2;
          add = 2;
        }
      else
        add = 0;

      if (add != 0)
        {
          bfd_byte *loc;
          unsigned short insn, oinsn;
          bfd_boolean overflow;

          loc = contents + irel->r_vaddr - sec->vma;
          overflow = FALSE;
          switch (type)
            {
            default:
              break;

            case R_SH_PCDISP8BY2:
            case R_SH_PCRELIMM8BY2:
              insn = bfd_get_16 (abfd, loc);
              oinsn = insn;
              insn += add / 2;
              if ((oinsn & 0xff00) != (insn & 0xff00))
                overflow = TRUE;
              bfd_put_16 (abfd, (bfd_vma) insn, loc);
              break;

            case R_SH_PCDISP:
              insn = bfd_get_16 (abfd, loc);
              oinsn = insn;
              insn += add / 2;
              if ((oinsn & 0xf000) != (insn & 0xf000))
                overflow = TRUE;
              bfd_put_16 (abfd, (bfd_vma) insn, loc);
              break;

            case R_SH_PCRELIMM8BY4:
              /* This reloc ignores the least significant 3 bits of
                 the program counter before adding in the offset.
                 This means that if ADDR is at an even address, the
                 swap will not affect the offset.  If ADDR is at an
                 odd address, then the instruction will be crossing a
                 four byte boundary, and must be adjusted.  */
              if ((addr & 3) != 0)
                {
                  insn = bfd_get_16 (abfd, loc);
                  oinsn = insn;
                  insn += add / 2;
                  if ((oinsn & 0xff00) != (insn & 0xff00))
                    overflow = TRUE;
                  bfd_put_16 (abfd, (bfd_vma) insn, loc);
                }

              break;
            }

          if (overflow)
            {
              ((*_bfd_error_handler)
               ("%B: 0x%lx: fatal: reloc overflow while relaxing",
                abfd, (unsigned long) irel->r_vaddr));
              bfd_set_error (bfd_error_bad_value);
              return FALSE;
            }
        }
    }

  return TRUE;
}
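
/* Example of the R_SH_USES adjustment above: when the instructions at
   ADDR and ADDR + 2 are exchanged, an R_SH_USES reloc whose target
   address (r_vaddr - vma + 4 + r_offset) was ADDR must now reach
   ADDR + 2, so its r_offset grows by 2; one which pointed at ADDR + 2
   shrinks by 2 instead.  */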

/* This is a modification of _bfd_coff_generic_relocate_section, which
   will handle SH relaxing.  */

static bfd_boolean
sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
                     relocs, syms, sections)
     bfd *output_bfd ATTRIBUTE_UNUSED;
     struct bfd_link_info *info;
     bfd *input_bfd;
     asection *input_section;
     bfd_byte *contents;
     struct internal_reloc *relocs;
     struct internal_syment *syms;
     asection **sections;
{
  struct internal_reloc *rel;
  struct internal_reloc *relend;

  rel = relocs;
  relend = rel + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      long symndx;
      struct coff_link_hash_entry *h;
      struct internal_syment *sym;
      bfd_vma addend;
      bfd_vma val;
      reloc_howto_type *howto;
      bfd_reloc_status_type rstat;

      /* Almost all relocs have to do with relaxing.  If any work must
         be done for them, it has been done in sh_relax_section.  */
      if (rel->r_type != R_SH_IMM32
#ifdef COFF_WITH_PE
          && rel->r_type != R_SH_IMM32CE
          && rel->r_type != R_SH_IMAGEBASE
#endif
          && rel->r_type != R_SH_PCDISP)
        continue;

      symndx = rel->r_symndx;

      if (symndx == -1)
        {
          h = NULL;
          sym = NULL;
        }
      else
        {
          if (symndx < 0
              || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
            {
              (*_bfd_error_handler)
                ("%B: illegal symbol index %ld in relocs",
                 input_bfd, symndx);
              bfd_set_error (bfd_error_bad_value);
              return FALSE;
            }
          h = obj_coff_sym_hashes (input_bfd)[symndx];
          sym = syms + symndx;
        }

      if (sym != NULL && sym->n_scnum != 0)
        addend = - sym->n_value;
      else
        addend = 0;

      if (rel->r_type == R_SH_PCDISP)
        addend -= 4;

      if (rel->r_type >= SH_COFF_HOWTO_COUNT)
        howto = NULL;
      else
        howto = &sh_coff_howtos[rel->r_type];

      if (howto == NULL)
        {
          bfd_set_error (bfd_error_bad_value);
          return FALSE;
        }

#ifdef COFF_WITH_PE
      if (rel->r_type == R_SH_IMAGEBASE)
        addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
#endif

      val = 0;

      if (h == NULL)
        {
          asection *sec;

          /* There is nothing to do for an internal PCDISP reloc.  */
          if (rel->r_type == R_SH_PCDISP)
            continue;

          if (symndx == -1)
            {
              sec = bfd_abs_section_ptr;
              val = 0;
            }
          else
            {
              sec = sections[symndx];
              val = (sec->output_section->vma
                     + sec->output_offset
                     + sym->n_value
                     - sec->vma);
            }
        }
      else
        {
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            {
              asection *sec;

              sec = h->root.u.def.section;
              val = (h->root.u.def.value
                     + sec->output_section->vma
                     + sec->output_offset);
            }
          else if (! info->relocatable)
            {
              if (! ((*info->callbacks->undefined_symbol)
                     (info, h->root.root.string, input_bfd, input_section,
                      rel->r_vaddr - input_section->vma, TRUE)))
                return FALSE;
            }
        }

      rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
                                        contents,
                                        rel->r_vaddr - input_section->vma,
                                        val, addend);

      switch (rstat)
        {
        default:
          abort ();
        case bfd_reloc_ok:
          break;
        case bfd_reloc_overflow:
          {
            const char *name;
            char buf[SYMNMLEN + 1];

            if (symndx == -1)
              name = "*ABS*";
            else if (h != NULL)
              name = NULL;
            else if (sym->_n._n_n._n_zeroes == 0
                     && sym->_n._n_n._n_offset != 0)
              name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
            else
              {
                strncpy (buf, sym->_n._n_name, SYMNMLEN);
                buf[SYMNMLEN] = '\0';
                name = buf;
              }

            if (! ((*info->callbacks->reloc_overflow)
                   (info, (h ? &h->root : NULL), name, howto->name,
                    (bfd_vma) 0, input_bfd, input_section,
                    rel->r_vaddr - input_section->vma)))
              return FALSE;
          }
        }
    }

  return TRUE;
}

/* This is a version of bfd_generic_get_relocated_section_contents
   which uses sh_relocate_section.  */

static bfd_byte *
sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
                                        data, relocatable, symbols)
     bfd *output_bfd;
     struct bfd_link_info *link_info;
     struct bfd_link_order *link_order;
     bfd_byte *data;
     bfd_boolean relocatable;
     asymbol **symbols;
{
  asection *input_section = link_order->u.indirect.section;
  bfd *input_bfd = input_section->owner;
  asection **sections = NULL;
  struct internal_reloc *internal_relocs = NULL;
  struct internal_syment *internal_syms = NULL;

  /* We only need to handle the case of relaxing, or of having a
     particular set of section contents, specially.  */
  if (relocatable
      || coff_section_data (input_bfd, input_section) == NULL
      || coff_section_data (input_bfd, input_section)->contents == NULL)
    return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
                                                       link_order, data,
                                                       relocatable, symbols);

  memcpy (data, coff_section_data (input_bfd, input_section)->contents,
          (size_t) input_section->size);

  if ((input_section->flags & SEC_RELOC) != 0
      && input_section->reloc_count > 0)
    {
      bfd_size_type symesz = bfd_coff_symesz (input_bfd);
      bfd_byte *esym, *esymend;
      struct internal_syment *isymp;
      asection **secpp;
      bfd_size_type amt;

      if (! _bfd_coff_get_external_symbols (input_bfd))
        goto error_return;

      internal_relocs = (_bfd_coff_read_internal_relocs
                         (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
                          FALSE, (struct internal_reloc *) NULL));
      if (internal_relocs == NULL)
        goto error_return;

      amt = obj_raw_syment_count (input_bfd);
      amt *= sizeof (struct internal_syment);
      internal_syms = (struct internal_syment *) bfd_malloc (amt);
      if (internal_syms == NULL)
        goto error_return;

      amt = obj_raw_syment_count (input_bfd);
      amt *= sizeof (asection *);
      sections = (asection **) bfd_malloc (amt);
      if (sections == NULL)
        goto error_return;

      isymp = internal_syms;
      secpp = sections;
      esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
      esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
      while (esym < esymend)
        {
          bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);

          if (isymp->n_scnum != 0)
            *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
          else
            {
              if (isymp->n_value == 0)
                *secpp = bfd_und_section_ptr;
              else
                *secpp = bfd_com_section_ptr;
            }

          esym += (isymp->n_numaux + 1) * symesz;
          secpp += isymp->n_numaux + 1;
          isymp += isymp->n_numaux + 1;
        }

      if (! sh_relocate_section (output_bfd, link_info, input_bfd,
                                 input_section, data, internal_relocs,
                                 internal_syms, sections))
        goto error_return;

      free (sections);
      sections = NULL;
      free (internal_syms);
      internal_syms = NULL;
      free (internal_relocs);
      internal_relocs = NULL;
    }

  return data;

 error_return:
  if (internal_relocs != NULL)
    free (internal_relocs);
  if (internal_syms != NULL)
    free (internal_syms);
  if (sections != NULL)
    free (sections);
  return NULL;
}

/* The target vectors.  */

#ifndef TARGET_SHL_SYM
CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
#endif

#ifdef TARGET_SHL_SYM
#define TARGET_SYM TARGET_SHL_SYM
#else
#define TARGET_SYM shlcoff_vec
#endif

#ifndef TARGET_SHL_NAME
#define TARGET_SHL_NAME "coff-shl"
#endif

#ifdef COFF_WITH_PE
CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
                               SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
#else
CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
                               0, '_', NULL, COFF_SWAP_TABLE)
#endif

#ifndef TARGET_SHL_SYM
static const bfd_target * coff_small_object_p PARAMS ((bfd *));
static bfd_boolean coff_small_new_section_hook PARAMS ((bfd *, asection *));

/* Some people want versions of the SH COFF target which do not align
   to 16 byte boundaries.  We implement that by adding a couple of new
   target vectors.  These are just like the ones above, but they
   change the default section alignment.  To generate them in the
   assembler, use -small.  To use them in the linker, use -b
   coff-sh{l}-small and -oformat coff-sh{l}-small.

   Yes, this is a horrible hack.  A general solution for setting
   section alignment in COFF is rather complex.  ELF handles this
   correctly.  */

/* Only recognize the small versions if the target was not defaulted.
   Otherwise we won't recognize the non default endianness.  */

static const bfd_target *
coff_small_object_p (abfd)
     bfd *abfd;
{
  if (abfd->target_defaulted)
    {
      bfd_set_error (bfd_error_wrong_format);
      return NULL;
    }
  return coff_object_p (abfd);
}

/* Set the section alignment for the small versions.  */

static bfd_boolean
coff_small_new_section_hook (abfd, section)
     bfd *abfd;
     asection *section;
{
  if (! coff_new_section_hook (abfd, section))
    return FALSE;

  /* We must align to at least a four byte boundary, because longword
     accesses must be on a four byte boundary.  */
  if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
    section->alignment_power = 2;

  return TRUE;
}
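
/* For example (illustrative command line only), a little-endian
   small-alignment link could be requested with:
     ld -b coff-shl-small input.o -oformat coff-shl-small -o output
   using the -b / -oformat options mentioned in the comment above.  */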

/* This is copied from bfd_coff_std_swap_table so that we can change
   the default section alignment power.  */

static const bfd_coff_backend_data bfd_coff_small_swap_table =
{
  coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
  coff_swap_aux_out, coff_swap_sym_out,
  coff_swap_lineno_out, coff_swap_reloc_out,
  coff_swap_filehdr_out, coff_swap_aouthdr_out,
  coff_swap_scnhdr_out,
  FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
#ifdef COFF_LONG_FILENAMES
  TRUE,
#else
  FALSE,
#endif
#ifdef COFF_LONG_SECTION_NAMES
  TRUE,
#else
  FALSE,
#endif
  2,
#ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
  TRUE,
#else
  FALSE,
#endif
#ifdef COFF_DEBUG_STRING_WIDE_PREFIX
  4,
#else
  2,
#endif
  coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
  coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
  coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
  coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
  coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
  coff_classify_symbol, coff_compute_section_file_positions,
  coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
  coff_adjust_symndx, coff_link_add_one_symbol,
  coff_link_output_has_begun, coff_final_link_postscript
};

#define coff_small_close_and_cleanup \
  coff_close_and_cleanup
#define coff_small_bfd_free_cached_info \
  coff_bfd_free_cached_info
#define coff_small_get_section_contents \
  coff_get_section_contents
#define coff_small_get_section_contents_in_window \
  coff_get_section_contents_in_window

extern const bfd_target shlcoff_small_vec;

const bfd_target shcoff_small_vec =
{
  "coff-sh-small",		/* name */
  bfd_target_coff_flavour,
  BFD_ENDIAN_BIG,		/* data byte order is big */
  BFD_ENDIAN_BIG,		/* header byte order is big */

  (HAS_RELOC | EXEC_P |		/* object flags */
   HAS_LINENO | HAS_DEBUG |
   HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),

  (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
  '_',				/* leading symbol underscore */
  '/',				/* ar_pad_char */
  15,				/* ar_max_namelen */
  bfd_getb64, bfd_getb_signed_64, bfd_putb64,
  bfd_getb32, bfd_getb_signed_32, bfd_putb32,
  bfd_getb16, bfd_getb_signed_16, bfd_putb16,	/* data */
  bfd_getb64, bfd_getb_signed_64, bfd_putb64,
  bfd_getb32, bfd_getb_signed_32, bfd_putb32,
  bfd_getb16, bfd_getb_signed_16, bfd_putb16,	/* hdrs */

  {_bfd_dummy_target, coff_small_object_p,	/* bfd_check_format */
   bfd_generic_archive_p, _bfd_dummy_target},
  {bfd_false, coff_mkobject, _bfd_generic_mkarchive,	/* bfd_set_format */
   bfd_false},
  {bfd_false, coff_write_object_contents,	/* bfd_write_contents */
   _bfd_write_archive_contents, bfd_false},

  BFD_JUMP_TABLE_GENERIC (coff_small),
  BFD_JUMP_TABLE_COPY (coff),
  BFD_JUMP_TABLE_CORE (_bfd_nocore),
  BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
  BFD_JUMP_TABLE_SYMBOLS (coff),
  BFD_JUMP_TABLE_RELOCS (coff),
  BFD_JUMP_TABLE_WRITE (coff),
  BFD_JUMP_TABLE_LINK (coff),
  BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),

  & shlcoff_small_vec,

  (PTR) &bfd_coff_small_swap_table
};

const bfd_target shlcoff_small_vec =
{
  "coff-shl-small",		/* name */
  bfd_target_coff_flavour,
  BFD_ENDIAN_LITTLE,		/* data byte order is little */
  BFD_ENDIAN_LITTLE,		/* header byte order is little endian too */

  (HAS_RELOC | EXEC_P |		/* object flags */
   HAS_LINENO | HAS_DEBUG |
   HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),

  (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
  '_',				/* leading symbol underscore */
  '/',				/* ar_pad_char */
  15,				/* ar_max_namelen */
  bfd_getl64, bfd_getl_signed_64, bfd_putl64,
  bfd_getl32, bfd_getl_signed_32, bfd_putl32,
  bfd_getl16, bfd_getl_signed_16, bfd_putl16,	/* data */
  bfd_getl64, bfd_getl_signed_64, bfd_putl64,
  bfd_getl32, bfd_getl_signed_32, bfd_putl32,
  bfd_getl16, bfd_getl_signed_16, bfd_putl16,	/* hdrs */

  {_bfd_dummy_target, coff_small_object_p,	/* bfd_check_format */
   bfd_generic_archive_p, _bfd_dummy_target},
  {bfd_false, coff_mkobject, _bfd_generic_mkarchive,	/* bfd_set_format */
   bfd_false},
  {bfd_false, coff_write_object_contents,	/* bfd_write_contents */
   _bfd_write_archive_contents, bfd_false},

  BFD_JUMP_TABLE_GENERIC (coff_small),
  BFD_JUMP_TABLE_COPY (coff),
  BFD_JUMP_TABLE_CORE (_bfd_nocore),
  BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
  BFD_JUMP_TABLE_SYMBOLS (coff),
  BFD_JUMP_TABLE_RELOCS (coff),
  BFD_JUMP_TABLE_WRITE (coff),
  BFD_JUMP_TABLE_LINK (coff),
  BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),

  & shcoff_small_vec,

  (PTR) &bfd_coff_small_swap_table