1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
27 #include "elf32-spu.h"
29 /* We use RELA style relocs. Don't define USE_REL. */
31 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
35 /* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
38 static reloc_howto_type elf_howto_table
[] = {
39 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
40 bfd_elf_generic_reloc
, "SPU_NONE",
41 FALSE
, 0, 0x00000000, FALSE
),
42 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
43 bfd_elf_generic_reloc
, "SPU_ADDR10",
44 FALSE
, 0, 0x00ffc000, FALSE
),
45 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
46 bfd_elf_generic_reloc
, "SPU_ADDR16",
47 FALSE
, 0, 0x007fff80, FALSE
),
48 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
49 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
50 FALSE
, 0, 0x007fff80, FALSE
),
51 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
52 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
53 FALSE
, 0, 0x007fff80, FALSE
),
54 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
55 bfd_elf_generic_reloc
, "SPU_ADDR18",
56 FALSE
, 0, 0x01ffff80, FALSE
),
57 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
58 bfd_elf_generic_reloc
, "SPU_ADDR32",
59 FALSE
, 0, 0xffffffff, FALSE
),
60 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
61 bfd_elf_generic_reloc
, "SPU_REL16",
62 FALSE
, 0, 0x007fff80, TRUE
),
63 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
64 bfd_elf_generic_reloc
, "SPU_ADDR7",
65 FALSE
, 0, 0x001fc000, FALSE
),
66 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
67 spu_elf_rel9
, "SPU_REL9",
68 FALSE
, 0, 0x0180007f, TRUE
),
69 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
70 spu_elf_rel9
, "SPU_REL9I",
71 FALSE
, 0, 0x0000c07f, TRUE
),
72 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
73 bfd_elf_generic_reloc
, "SPU_ADDR10I",
74 FALSE
, 0, 0x00ffc000, FALSE
),
75 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
76 bfd_elf_generic_reloc
, "SPU_ADDR16I",
77 FALSE
, 0, 0x007fff80, FALSE
),
78 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
79 bfd_elf_generic_reloc
, "SPU_REL32",
80 FALSE
, 0, 0xffffffff, TRUE
),
81 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
82 bfd_elf_generic_reloc
, "SPU_ADDR16X",
83 FALSE
, 0, 0x007fff80, FALSE
),
84 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
85 bfd_elf_generic_reloc
, "SPU_PPU32",
86 FALSE
, 0, 0xffffffff, FALSE
),
87 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
88 bfd_elf_generic_reloc
, "SPU_PPU64",
92 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
93 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
97 static enum elf_spu_reloc_type
98 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
104 case BFD_RELOC_SPU_IMM10W
:
106 case BFD_RELOC_SPU_IMM16W
:
108 case BFD_RELOC_SPU_LO16
:
109 return R_SPU_ADDR16_LO
;
110 case BFD_RELOC_SPU_HI16
:
111 return R_SPU_ADDR16_HI
;
112 case BFD_RELOC_SPU_IMM18
:
114 case BFD_RELOC_SPU_PCREL16
:
116 case BFD_RELOC_SPU_IMM7
:
118 case BFD_RELOC_SPU_IMM8
:
120 case BFD_RELOC_SPU_PCREL9a
:
122 case BFD_RELOC_SPU_PCREL9b
:
124 case BFD_RELOC_SPU_IMM10
:
125 return R_SPU_ADDR10I
;
126 case BFD_RELOC_SPU_IMM16
:
127 return R_SPU_ADDR16I
;
130 case BFD_RELOC_32_PCREL
:
132 case BFD_RELOC_SPU_PPU32
:
134 case BFD_RELOC_SPU_PPU64
:
140 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
142 Elf_Internal_Rela
*dst
)
144 enum elf_spu_reloc_type r_type
;
146 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
147 BFD_ASSERT (r_type
< R_SPU_max
);
148 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
151 static reloc_howto_type
*
152 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
153 bfd_reloc_code_real_type code
)
155 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
157 if (r_type
== R_SPU_NONE
)
160 return elf_howto_table
+ r_type
;
163 static reloc_howto_type
*
164 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
169 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
170 if (elf_howto_table
[i
].name
!= NULL
171 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
172 return &elf_howto_table
[i
];
177 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
179 static bfd_reloc_status_type
180 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
181 void *data
, asection
*input_section
,
182 bfd
*output_bfd
, char **error_message
)
184 bfd_size_type octets
;
188 /* If this is a relocatable link (output_bfd test tells us), just
189 call the generic function. Any adjustment will be done at final
191 if (output_bfd
!= NULL
)
192 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
193 input_section
, output_bfd
, error_message
);
195 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
196 return bfd_reloc_outofrange
;
197 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
199 /* Get symbol value. */
201 if (!bfd_is_com_section (symbol
->section
))
203 if (symbol
->section
->output_section
)
204 val
+= symbol
->section
->output_section
->vma
;
206 val
+= reloc_entry
->addend
;
208 /* Make it pc-relative. */
209 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
212 if (val
+ 256 >= 512)
213 return bfd_reloc_overflow
;
215 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
217 /* Move two high bits of value to REL9I and REL9 position.
218 The mask will take care of selecting the right field. */
219 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
220 insn
&= ~reloc_entry
->howto
->dst_mask
;
221 insn
|= val
& reloc_entry
->howto
->dst_mask
;
222 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
227 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
229 if (!sec
->used_by_bfd
)
231 struct _spu_elf_section_data
*sdata
;
233 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
236 sec
->used_by_bfd
= sdata
;
239 return _bfd_elf_new_section_hook (abfd
, sec
);
242 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
243 strip --strip-unneeded will not remove them. */
246 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
248 if (sym
->name
!= NULL
249 && sym
->section
!= bfd_abs_section_ptr
250 && strncmp (sym
->name
, "_EAR_", 5) == 0)
251 sym
->flags
|= BSF_KEEP
;
254 /* SPU ELF linker hash table. */
256 struct spu_link_hash_table
258 struct elf_link_hash_table elf
;
260 /* The stub hash table. */
261 struct bfd_hash_table stub_hash_table
;
263 /* Shortcuts to overlay sections. */
267 struct elf_link_hash_entry
*ovly_load
;
269 /* An array of two output sections per overlay region, chosen such that
270 the first section vma is the overlay buffer vma (ie. the section has
271 the lowest vma in the group that occupy the region), and the second
272 section vma+size specifies the end of the region. We keep pointers
273 to sections like this because section vmas may change when laying
275 asection
**ovl_region
;
277 /* Number of overlay buffers. */
278 unsigned int num_buf
;
280 /* Total number of overlays. */
281 unsigned int num_overlays
;
283 /* Set if we should emit symbols for stubs. */
284 unsigned int emit_stub_syms
:1;
286 /* Set if we want stubs on calls out of overlay regions to
287 non-overlay regions. */
288 unsigned int non_overlay_stubs
: 1;
291 unsigned int stub_overflow
: 1;
293 /* Set if stack size analysis should be done. */
294 unsigned int stack_analysis
: 1;
296 /* Set if __stack_* syms will be emitted. */
297 unsigned int emit_stack_syms
: 1;
300 #define spu_hash_table(p) \
301 ((struct spu_link_hash_table *) ((p)->hash))
303 struct spu_stub_hash_entry
305 struct bfd_hash_entry root
;
307 /* Destination of this stub. */
308 asection
*target_section
;
311 /* Offset of entry in stub section. */
314 /* Offset from this stub to stub that loads the overlay index. */
318 /* Create an entry in a spu stub hash table. */
320 static struct bfd_hash_entry
*
321 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
322 struct bfd_hash_table
*table
,
325 /* Allocate the structure if it has not already been allocated by a
329 entry
= bfd_hash_allocate (table
, sizeof (struct spu_stub_hash_entry
));
334 /* Call the allocation method of the superclass. */
335 entry
= bfd_hash_newfunc (entry
, table
, string
);
338 struct spu_stub_hash_entry
*sh
= (struct spu_stub_hash_entry
*) entry
;
340 sh
->target_section
= NULL
;
349 /* Create a spu ELF linker hash table. */
351 static struct bfd_link_hash_table
*
352 spu_elf_link_hash_table_create (bfd
*abfd
)
354 struct spu_link_hash_table
*htab
;
356 htab
= bfd_malloc (sizeof (*htab
));
360 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
361 _bfd_elf_link_hash_newfunc
,
362 sizeof (struct elf_link_hash_entry
)))
368 /* Init the stub hash table too. */
369 if (!bfd_hash_table_init (&htab
->stub_hash_table
, stub_hash_newfunc
,
370 sizeof (struct spu_stub_hash_entry
)))
373 memset (&htab
->stub
, 0,
374 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, stub
));
376 return &htab
->elf
.root
;
379 /* Free the derived linker hash table. */
382 spu_elf_link_hash_table_free (struct bfd_link_hash_table
*hash
)
384 struct spu_link_hash_table
*ret
= (struct spu_link_hash_table
*) hash
;
386 bfd_hash_table_free (&ret
->stub_hash_table
);
387 _bfd_generic_link_hash_table_free (hash
);
390 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
391 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
392 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
395 get_sym_h (struct elf_link_hash_entry
**hp
,
396 Elf_Internal_Sym
**symp
,
398 Elf_Internal_Sym
**locsymsp
,
399 unsigned long r_symndx
,
402 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
404 if (r_symndx
>= symtab_hdr
->sh_info
)
406 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
407 struct elf_link_hash_entry
*h
;
409 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
410 while (h
->root
.type
== bfd_link_hash_indirect
411 || h
->root
.type
== bfd_link_hash_warning
)
412 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
422 asection
*symsec
= NULL
;
423 if (h
->root
.type
== bfd_link_hash_defined
424 || h
->root
.type
== bfd_link_hash_defweak
)
425 symsec
= h
->root
.u
.def
.section
;
431 Elf_Internal_Sym
*sym
;
432 Elf_Internal_Sym
*locsyms
= *locsymsp
;
436 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
439 size_t symcount
= symtab_hdr
->sh_info
;
441 /* If we are reading symbols into the contents, then
442 read the global syms too. This is done to cache
443 syms for later stack analysis. */
444 if ((unsigned char **) locsymsp
== &symtab_hdr
->contents
)
445 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
446 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
453 sym
= locsyms
+ r_symndx
;
463 asection
*symsec
= NULL
;
464 if ((sym
->st_shndx
!= SHN_UNDEF
465 && sym
->st_shndx
< SHN_LORESERVE
)
466 || sym
->st_shndx
> SHN_HIRESERVE
)
467 symsec
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
475 /* Build a name for an entry in the stub hash table. We can't use a
476 local symbol name because ld -r might generate duplicate local symbols. */
479 spu_stub_name (const asection
*sym_sec
,
480 const struct elf_link_hash_entry
*h
,
481 const Elf_Internal_Rela
*rel
)
488 len
= strlen (h
->root
.root
.string
) + 1 + 8 + 1;
489 stub_name
= bfd_malloc (len
);
490 if (stub_name
== NULL
)
493 sprintf (stub_name
, "%s+%x",
495 (int) rel
->r_addend
& 0xffffffff);
500 len
= 8 + 1 + 8 + 1 + 8 + 1;
501 stub_name
= bfd_malloc (len
);
502 if (stub_name
== NULL
)
505 sprintf (stub_name
, "%x:%x+%x",
506 sym_sec
->id
& 0xffffffff,
507 (int) ELF32_R_SYM (rel
->r_info
) & 0xffffffff,
508 (int) rel
->r_addend
& 0xffffffff);
509 len
= strlen (stub_name
);
512 if (stub_name
[len
- 2] == '+'
513 && stub_name
[len
- 1] == '0'
514 && stub_name
[len
] == 0)
515 stub_name
[len
- 2] = 0;
520 /* Create the note section if not already present. This is done early so
521 that the linker maps the sections to the right place in the output. */
524 spu_elf_create_sections (bfd
*output_bfd
,
525 struct bfd_link_info
*info
,
530 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
532 /* Stash some options away where we can get at them later. */
533 htab
->stack_analysis
= stack_analysis
;
534 htab
->emit_stack_syms
= emit_stack_syms
;
536 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->next
)
537 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
542 /* Make SPU_PTNOTE_SPUNAME section. */
549 ibfd
= info
->input_bfds
;
550 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
551 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
553 || !bfd_set_section_alignment (ibfd
, s
, 4))
556 name_len
= strlen (bfd_get_filename (output_bfd
)) + 1;
557 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
558 size
+= (name_len
+ 3) & -4;
560 if (!bfd_set_section_size (ibfd
, s
, size
))
563 data
= bfd_zalloc (ibfd
, size
);
567 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
568 bfd_put_32 (ibfd
, name_len
, data
+ 4);
569 bfd_put_32 (ibfd
, 1, data
+ 8);
570 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
571 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
572 bfd_get_filename (output_bfd
), name_len
);
579 /* qsort predicate to sort sections by vma. */
582 sort_sections (const void *a
, const void *b
)
584 const asection
*const *s1
= a
;
585 const asection
*const *s2
= b
;
586 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
589 return delta
< 0 ? -1 : 1;
591 return (*s1
)->index
- (*s2
)->index
;
594 /* Identify overlays in the output bfd, and number them. */
597 spu_elf_find_overlays (bfd
*output_bfd
, struct bfd_link_info
*info
)
599 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
600 asection
**alloc_sec
;
601 unsigned int i
, n
, ovl_index
, num_buf
;
605 if (output_bfd
->section_count
< 2)
608 alloc_sec
= bfd_malloc (output_bfd
->section_count
* sizeof (*alloc_sec
));
609 if (alloc_sec
== NULL
)
612 /* Pick out all the alloced sections. */
613 for (n
= 0, s
= output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
614 if ((s
->flags
& SEC_ALLOC
) != 0
615 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
625 /* Sort them by vma. */
626 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
628 /* Look for overlapping vmas. Any with overlap must be overlays.
629 Count them. Also count the number of overlay regions and for
630 each region save a section from that region with the lowest vma
631 and another section with the highest end vma. */
632 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
633 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
636 if (s
->vma
< ovl_end
)
638 asection
*s0
= alloc_sec
[i
- 1];
640 if (spu_elf_section_data (s0
)->ovl_index
== 0)
642 spu_elf_section_data (s0
)->ovl_index
= ++ovl_index
;
643 alloc_sec
[num_buf
* 2] = s0
;
644 alloc_sec
[num_buf
* 2 + 1] = s0
;
647 spu_elf_section_data (s
)->ovl_index
= ++ovl_index
;
648 if (ovl_end
< s
->vma
+ s
->size
)
650 ovl_end
= s
->vma
+ s
->size
;
651 alloc_sec
[num_buf
* 2 - 1] = s
;
655 ovl_end
= s
->vma
+ s
->size
;
658 htab
->num_overlays
= ovl_index
;
659 htab
->num_buf
= num_buf
;
666 alloc_sec
= bfd_realloc (alloc_sec
, num_buf
* 2 * sizeof (*alloc_sec
));
667 if (alloc_sec
== NULL
)
670 htab
->ovl_region
= alloc_sec
;
674 /* One of these per stub. */
675 #define SIZEOF_STUB1 8
676 #define ILA_79 0x4200004f /* ila $79,function_address */
677 #define BR 0x32000000 /* br stub2 */
679 /* One of these per overlay. */
680 #define SIZEOF_STUB2 8
681 #define ILA_78 0x4200004e /* ila $78,overlay_number */
683 #define NOP 0x40200000
685 /* Return true for all relative and absolute branch instructions.
693 brhnz 00100011 0.. */
696 is_branch (const unsigned char *insn
)
698 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
701 /* Return true for branch hint instructions.
706 is_hint (const unsigned char *insn
)
708 return (insn
[0] & 0xfc) == 0x10;
711 /* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
714 needs_ovl_stub (const char *sym_name
,
716 asection
*input_section
,
717 struct spu_link_hash_table
*htab
,
718 bfd_boolean is_branch
)
720 if (htab
->num_overlays
== 0)
724 || sym_sec
->output_section
== NULL
725 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
728 /* setjmp always goes via an overlay stub, because then the return
729 and hence the longjmp goes via __ovly_return. That magically
730 makes setjmp/longjmp between overlays work. */
731 if (strncmp (sym_name
, "setjmp", 6) == 0
732 && (sym_name
[6] == '\0' || sym_name
[6] == '@'))
735 /* Usually, symbols in non-overlay sections don't need stubs. */
736 if (spu_elf_section_data (sym_sec
->output_section
)->ovl_index
== 0
737 && !htab
->non_overlay_stubs
)
740 /* A reference from some other section to a symbol in an overlay
741 section needs a stub. */
742 if (spu_elf_section_data (sym_sec
->output_section
)->ovl_index
743 != spu_elf_section_data (input_section
->output_section
)->ovl_index
)
746 /* If this insn isn't a branch then we are possibly taking the
747 address of a function and passing it out somehow. */
752 struct bfd_hash_table
*stub_hash_table
;
753 struct spu_stub_hash_entry
**sh
;
758 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
762 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
764 /* Symbols starting with _SPUEAR_ need a stub because they may be
765 invoked by the PPU. */
766 if ((h
->root
.type
== bfd_link_hash_defined
767 || h
->root
.type
== bfd_link_hash_defweak
)
769 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
771 struct stubarr
*stubs
= inf
;
772 static Elf_Internal_Rela zero_rel
;
773 char *stub_name
= spu_stub_name (h
->root
.u
.def
.section
, h
, &zero_rel
);
774 struct spu_stub_hash_entry
*sh
;
776 if (stub_name
== NULL
)
782 sh
= (struct spu_stub_hash_entry
*)
783 bfd_hash_lookup (stubs
->stub_hash_table
, stub_name
, TRUE
, FALSE
);
790 /* If this entry isn't new, we already have a stub. */
791 if (sh
->target_section
!= NULL
)
797 sh
->target_section
= h
->root
.u
.def
.section
;
798 sh
->target_off
= h
->root
.u
.def
.value
;
805 /* Called via bfd_hash_traverse to set up pointers to all symbols
806 in the stub hash table. */
809 populate_stubs (struct bfd_hash_entry
*bh
, void *inf
)
811 struct stubarr
*stubs
= inf
;
813 stubs
->sh
[--stubs
->count
] = (struct spu_stub_hash_entry
*) bh
;
817 /* qsort predicate to sort stubs by overlay number. */
820 sort_stubs (const void *a
, const void *b
)
822 const struct spu_stub_hash_entry
*const *sa
= a
;
823 const struct spu_stub_hash_entry
*const *sb
= b
;
827 i
= spu_elf_section_data ((*sa
)->target_section
->output_section
)->ovl_index
;
828 i
-= spu_elf_section_data ((*sb
)->target_section
->output_section
)->ovl_index
;
832 d
= ((*sa
)->target_section
->output_section
->vma
833 + (*sa
)->target_section
->output_offset
835 - (*sb
)->target_section
->output_section
->vma
836 - (*sb
)->target_section
->output_offset
837 - (*sb
)->target_off
);
839 return d
< 0 ? -1 : 1;
841 /* Two functions at the same address. Aliases perhaps. */
842 i
= strcmp ((*sb
)->root
.string
, (*sa
)->root
.string
);
847 /* Allocate space for overlay call and return stubs. */
850 spu_elf_size_stubs (bfd
*output_bfd
,
851 struct bfd_link_info
*info
,
852 int non_overlay_stubs
,
858 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
860 struct stubarr stubs
;
864 htab
->non_overlay_stubs
= non_overlay_stubs
;
865 stubs
.stub_hash_table
= &htab
->stub_hash_table
;
868 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
870 extern const bfd_target bfd_elf32_spu_vec
;
871 Elf_Internal_Shdr
*symtab_hdr
;
873 Elf_Internal_Sym
*local_syms
= NULL
;
876 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
879 /* We'll need the symbol table in a second. */
880 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
881 if (symtab_hdr
->sh_info
== 0)
884 /* Arrange to read and keep global syms for later stack analysis. */
887 psyms
= &symtab_hdr
->contents
;
889 /* Walk over each section attached to the input bfd. */
890 for (section
= ibfd
->sections
; section
!= NULL
; section
= section
->next
)
892 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
894 /* If there aren't any relocs, then there's nothing more to do. */
895 if ((section
->flags
& SEC_RELOC
) == 0
896 || (section
->flags
& SEC_ALLOC
) == 0
897 || (section
->flags
& SEC_LOAD
) == 0
898 || section
->reloc_count
== 0)
901 /* If this section is a link-once section that will be
902 discarded, then don't create any stubs. */
903 if (section
->output_section
== NULL
904 || section
->output_section
->owner
!= output_bfd
)
907 /* Get the relocs. */
909 = _bfd_elf_link_read_relocs (ibfd
, section
, NULL
, NULL
,
911 if (internal_relocs
== NULL
)
912 goto error_ret_free_local
;
914 /* Now examine each relocation. */
915 irela
= internal_relocs
;
916 irelaend
= irela
+ section
->reloc_count
;
917 for (; irela
< irelaend
; irela
++)
919 enum elf_spu_reloc_type r_type
;
922 Elf_Internal_Sym
*sym
;
923 struct elf_link_hash_entry
*h
;
924 const char *sym_name
;
926 struct spu_stub_hash_entry
*sh
;
927 unsigned int sym_type
;
928 enum _insn_type
{ non_branch
, branch
, call
} insn_type
;
930 r_type
= ELF32_R_TYPE (irela
->r_info
);
931 r_indx
= ELF32_R_SYM (irela
->r_info
);
933 if (r_type
>= R_SPU_max
)
935 bfd_set_error (bfd_error_bad_value
);
936 goto error_ret_free_internal
;
939 /* Determine the reloc target section. */
940 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, ibfd
))
941 goto error_ret_free_internal
;
944 || sym_sec
->output_section
== NULL
945 || sym_sec
->output_section
->owner
!= output_bfd
)
948 /* Ensure no stubs for user supplied overlay manager syms. */
950 && (strcmp (h
->root
.root
.string
, "__ovly_load") == 0
951 || strcmp (h
->root
.root
.string
, "__ovly_return") == 0))
954 insn_type
= non_branch
;
955 if (r_type
== R_SPU_REL16
956 || r_type
== R_SPU_ADDR16
)
958 unsigned char insn
[4];
960 if (!bfd_get_section_contents (ibfd
, section
, insn
,
962 goto error_ret_free_internal
;
964 if (is_branch (insn
) || is_hint (insn
))
967 if ((insn
[0] & 0xfd) == 0x31)
972 /* We are only interested in function symbols. */
976 sym_name
= h
->root
.root
.string
;
980 sym_type
= ELF_ST_TYPE (sym
->st_info
);
981 sym_name
= bfd_elf_sym_name (sym_sec
->owner
,
986 if (sym_type
!= STT_FUNC
)
988 /* It's common for people to write assembly and forget
989 to give function symbols the right type. Handle
990 calls to such symbols, but warn so that (hopefully)
991 people will fix their code. We need the symbol
992 type to be correct to distinguish function pointer
993 initialisation from other pointer initialisation. */
994 if (insn_type
== call
)
995 (*_bfd_error_handler
) (_("warning: call to non-function"
996 " symbol %s defined in %B"),
997 sym_sec
->owner
, sym_name
);
1002 if (!needs_ovl_stub (sym_name
, sym_sec
, section
, htab
,
1003 insn_type
!= non_branch
))
1006 stub_name
= spu_stub_name (sym_sec
, h
, irela
);
1007 if (stub_name
== NULL
)
1008 goto error_ret_free_internal
;
1010 sh
= (struct spu_stub_hash_entry
*)
1011 bfd_hash_lookup (&htab
->stub_hash_table
, stub_name
,
1016 error_ret_free_internal
:
1017 if (elf_section_data (section
)->relocs
!= internal_relocs
)
1018 free (internal_relocs
);
1019 error_ret_free_local
:
1020 if (local_syms
!= NULL
1021 && (symtab_hdr
->contents
1022 != (unsigned char *) local_syms
))
1027 /* If this entry isn't new, we already have a stub. */
1028 if (sh
->target_section
!= NULL
)
1034 sh
->target_section
= sym_sec
;
1036 sh
->target_off
= h
->root
.u
.def
.value
;
1038 sh
->target_off
= sym
->st_value
;
1039 sh
->target_off
+= irela
->r_addend
;
1044 /* We're done with the internal relocs, free them. */
1045 if (elf_section_data (section
)->relocs
!= internal_relocs
)
1046 free (internal_relocs
);
1049 if (local_syms
!= NULL
1050 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1052 if (!info
->keep_memory
)
1055 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1059 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, &stubs
);
1064 if (stubs
.count
== 0)
1067 ibfd
= info
->input_bfds
;
1068 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1069 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1070 htab
->stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1072 if (htab
->stub
== NULL
1073 || !bfd_set_section_alignment (ibfd
, htab
->stub
, 2))
1076 flags
= (SEC_ALLOC
| SEC_LOAD
1077 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1078 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1079 *ovtab
= htab
->ovtab
;
1080 if (htab
->ovtab
== NULL
1081 || !bfd_set_section_alignment (ibfd
, htab
->stub
, 4))
1084 *toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1086 || !bfd_set_section_alignment (ibfd
, *toe
, 4))
1090 /* Retrieve all the stubs and sort. */
1091 stubs
.sh
= bfd_malloc (stubs
.count
* sizeof (*stubs
.sh
));
1092 if (stubs
.sh
== NULL
)
1095 bfd_hash_traverse (&htab
->stub_hash_table
, populate_stubs
, &stubs
);
1096 BFD_ASSERT (stubs
.count
== 0);
1099 qsort (stubs
.sh
, stubs
.count
, sizeof (*stubs
.sh
), sort_stubs
);
1101 /* Now that the stubs are sorted, place them in the stub section.
1102 Stubs are grouped per overlay
1116 for (i
= 0; i
< stubs
.count
; i
++)
1118 if (spu_elf_section_data (stubs
.sh
[group
]->target_section
1119 ->output_section
)->ovl_index
1120 != spu_elf_section_data (stubs
.sh
[i
]->target_section
1121 ->output_section
)->ovl_index
)
1123 htab
->stub
->size
+= SIZEOF_STUB2
;
1124 for (; group
!= i
; group
++)
1125 stubs
.sh
[group
]->delta
1126 = stubs
.sh
[i
- 1]->off
- stubs
.sh
[group
]->off
;
1129 || ((stubs
.sh
[i
- 1]->target_section
->output_section
->vma
1130 + stubs
.sh
[i
- 1]->target_section
->output_offset
1131 + stubs
.sh
[i
- 1]->target_off
)
1132 != (stubs
.sh
[i
]->target_section
->output_section
->vma
1133 + stubs
.sh
[i
]->target_section
->output_offset
1134 + stubs
.sh
[i
]->target_off
)))
1136 stubs
.sh
[i
]->off
= htab
->stub
->size
;
1137 htab
->stub
->size
+= SIZEOF_STUB1
;
1140 stubs
.sh
[i
]->off
= stubs
.sh
[i
- 1]->off
;
1143 htab
->stub
->size
+= SIZEOF_STUB2
;
1144 for (; group
!= i
; group
++)
1145 stubs
.sh
[group
]->delta
= stubs
.sh
[i
- 1]->off
- stubs
.sh
[group
]->off
;
1147 /* htab->ovtab consists of two arrays.
1157 . } _ovly_buf_table[]; */
1159 htab
->ovtab
->alignment_power
= 4;
1160 htab
->ovtab
->size
= htab
->num_overlays
* 16 + htab
->num_buf
* 4;
1165 /* Functions to handle embedded spu_ovl.o object. */
1168 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1174 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1180 struct _ovl_stream
*os
;
1184 os
= (struct _ovl_stream
*) stream
;
1185 max
= (const char *) os
->end
- (const char *) os
->start
;
1187 if ((ufile_ptr
) offset
>= max
)
1191 if (count
> max
- offset
)
1192 count
= max
- offset
;
1194 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1199 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1201 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1208 return *ovl_bfd
!= NULL
;
1211 /* Fill in the ila and br for a stub. On the last stub for a group,
1212 write the stub that sets the overlay number too. */
1215 write_one_stub (struct bfd_hash_entry
*bh
, void *inf
)
1217 struct spu_stub_hash_entry
*ent
= (struct spu_stub_hash_entry
*) bh
;
1218 struct spu_link_hash_table
*htab
= inf
;
1219 asection
*sec
= htab
->stub
;
1220 asection
*s
= ent
->target_section
;
1224 val
= ent
->target_off
+ s
->output_offset
+ s
->output_section
->vma
;
1225 bfd_put_32 (sec
->owner
, ILA_79
+ ((val
<< 7) & 0x01ffff80),
1226 sec
->contents
+ ent
->off
);
1227 val
= ent
->delta
+ 4;
1228 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
1229 sec
->contents
+ ent
->off
+ 4);
1231 /* If this is the last stub of this group, write stub2. */
1232 if (ent
->delta
== 0)
1234 bfd_put_32 (sec
->owner
, NOP
,
1235 sec
->contents
+ ent
->off
+ 4);
1237 ovl
= spu_elf_section_data (s
->output_section
)->ovl_index
;
1238 bfd_put_32 (sec
->owner
, ILA_78
+ ((ovl
<< 7) & 0x01ffff80),
1239 sec
->contents
+ ent
->off
+ 8);
1241 val
= (htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
1242 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
1243 + htab
->ovly_load
->root
.u
.def
.value
1244 - (sec
->output_section
->vma
1245 + sec
->output_offset
1248 if (val
+ 0x20000 >= 0x40000)
1249 htab
->stub_overflow
= TRUE
;
1251 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
1252 sec
->contents
+ ent
->off
+ 12);
1255 if (htab
->emit_stub_syms
)
1257 struct elf_link_hash_entry
*h
;
1261 len1
= sizeof ("00000000.ovl_call.") - 1;
1262 len2
= strlen (ent
->root
.string
);
1263 name
= bfd_malloc (len1
+ len2
+ 1);
1266 memcpy (name
, "00000000.ovl_call.", len1
);
1267 memcpy (name
+ len1
, ent
->root
.string
, len2
+ 1);
1268 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1272 if (h
->root
.type
== bfd_link_hash_new
)
1274 h
->root
.type
= bfd_link_hash_defined
;
1275 h
->root
.u
.def
.section
= sec
;
1276 h
->root
.u
.def
.value
= ent
->off
;
1277 h
->size
= (ent
->delta
== 0
1278 ? SIZEOF_STUB1
+ SIZEOF_STUB2
: SIZEOF_STUB1
);
1282 h
->ref_regular_nonweak
= 1;
1283 h
->forced_local
= 1;
1291 /* Define an STT_OBJECT symbol. */
1293 static struct elf_link_hash_entry
*
1294 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1296 struct elf_link_hash_entry
*h
;
1298 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1302 if (h
->root
.type
!= bfd_link_hash_defined
1305 h
->root
.type
= bfd_link_hash_defined
;
1306 h
->root
.u
.def
.section
= htab
->ovtab
;
1307 h
->type
= STT_OBJECT
;
1310 h
->ref_regular_nonweak
= 1;
1315 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1316 h
->root
.u
.def
.section
->owner
,
1317 h
->root
.root
.string
);
1318 bfd_set_error (bfd_error_bad_value
);
1325 /* Fill in all stubs and the overlay tables. */
1328 spu_elf_build_stubs (struct bfd_link_info
*info
, int emit_syms
, asection
*toe
)
1330 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1331 struct elf_link_hash_entry
*h
;
1337 htab
->emit_stub_syms
= emit_syms
;
1338 htab
->stub
->contents
= bfd_zalloc (htab
->stub
->owner
, htab
->stub
->size
);
1339 if (htab
->stub
->contents
== NULL
)
1342 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1343 htab
->ovly_load
= h
;
1344 BFD_ASSERT (h
!= NULL
1345 && (h
->root
.type
== bfd_link_hash_defined
1346 || h
->root
.type
== bfd_link_hash_defweak
)
1349 s
= h
->root
.u
.def
.section
->output_section
;
1350 if (spu_elf_section_data (s
)->ovl_index
)
1352 (*_bfd_error_handler
) (_("%s in overlay section"),
1353 h
->root
.u
.def
.section
->owner
);
1354 bfd_set_error (bfd_error_bad_value
);
1358 /* Write out all the stubs. */
1359 bfd_hash_traverse (&htab
->stub_hash_table
, write_one_stub
, htab
);
1361 if (htab
->stub_overflow
)
1363 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1364 bfd_set_error (bfd_error_bad_value
);
1368 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1369 if (htab
->ovtab
->contents
== NULL
)
1372 /* Write out _ovly_table. */
1373 p
= htab
->ovtab
->contents
;
1374 obfd
= htab
->ovtab
->output_section
->owner
;
1375 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1377 unsigned int ovl_index
= spu_elf_section_data (s
)->ovl_index
;
1381 unsigned int lo
, hi
, mid
;
1382 unsigned long off
= (ovl_index
- 1) * 16;
1383 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1384 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1385 /* file_off written later in spu_elf_modify_program_headers. */
1391 mid
= (lo
+ hi
) >> 1;
1392 if (htab
->ovl_region
[2 * mid
+ 1]->vma
1393 + htab
->ovl_region
[2 * mid
+ 1]->size
<= s
->vma
)
1395 else if (htab
->ovl_region
[2 * mid
]->vma
> s
->vma
)
1399 bfd_put_32 (htab
->ovtab
->owner
, mid
+ 1, p
+ off
+ 12);
1403 BFD_ASSERT (lo
< hi
);
1407 /* Write out _ovly_buf_table. */
1408 p
= htab
->ovtab
->contents
+ htab
->num_overlays
* 16;
1409 for (i
= 0; i
< htab
->num_buf
; i
++)
1411 bfd_put_32 (htab
->ovtab
->owner
, 0, p
);
1415 h
= define_ovtab_symbol (htab
, "_ovly_table");
1418 h
->root
.u
.def
.value
= 0;
1419 h
->size
= htab
->num_overlays
* 16;
1421 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1424 h
->root
.u
.def
.value
= htab
->num_overlays
* 16;
1427 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1430 h
->root
.u
.def
.value
= htab
->num_overlays
* 16;
1431 h
->size
= htab
->num_buf
* 4;
1433 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1436 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + htab
->num_buf
* 4;
1439 h
= define_ovtab_symbol (htab
, "_EAR_");
1442 h
->root
.u
.def
.section
= toe
;
1443 h
->root
.u
.def
.value
= 0;
1449 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1450 Search for stack adjusting insns, and return the sp delta. */
1453 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1458 memset (reg
, 0, sizeof (reg
));
1459 for (unrecog
= 0; offset
+ 4 <= sec
->size
&& unrecog
< 32; offset
+= 4)
1461 unsigned char buf
[4];
1465 /* Assume no relocs on stack adjusing insns. */
1466 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1469 if (buf
[0] == 0x24 /* stqd */)
1473 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1474 /* Partly decoded immediate field. */
1475 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1477 if (buf
[0] == 0x1c /* ai */)
1480 imm
= (imm
^ 0x200) - 0x200;
1481 reg
[rt
] = reg
[ra
] + imm
;
1483 if (rt
== 1 /* sp */)
1490 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1492 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1494 reg
[rt
] = reg
[ra
] + reg
[rb
];
1498 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1500 if (buf
[0] >= 0x42 /* ila */)
1501 imm
|= (buf
[0] & 1) << 17;
1506 if (buf
[0] == 0x40 /* il */)
1508 if ((buf
[1] & 0x80) == 0)
1510 imm
= (imm
^ 0x8000) - 0x8000;
1512 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1518 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1520 reg
[rt
] |= imm
& 0xffff;
1523 else if (buf
[0] == 0x04 /* ori */)
1526 imm
= (imm
^ 0x200) - 0x200;
1527 reg
[rt
] = reg
[ra
] | imm
;
1530 else if ((buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1531 || (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */))
1533 /* Used in pic reg load. Say rt is trashed. */
1537 else if (is_branch (buf
))
1538 /* If we hit a branch then we must be out of the prologue. */
1547 /* qsort predicate to sort symbols by section and value. */
1549 static Elf_Internal_Sym
*sort_syms_syms
;
1550 static asection
**sort_syms_psecs
;
1553 sort_syms (const void *a
, const void *b
)
1555 Elf_Internal_Sym
*const *s1
= a
;
1556 Elf_Internal_Sym
*const *s2
= b
;
1557 asection
*sec1
,*sec2
;
1558 bfd_signed_vma delta
;
1560 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1561 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1564 return sec1
->index
- sec2
->index
;
1566 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1568 return delta
< 0 ? -1 : 1;
1570 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1572 return delta
< 0 ? -1 : 1;
1574 return *s1
< *s2
? -1 : 1;
1579 struct function_info
*fun
;
1580 struct call_info
*next
;
1584 struct function_info
1586 /* List of functions called. Also branches to hot/cold part of
1588 struct call_info
*call_list
;
1589 /* For hot/cold part of function, point to owner. */
1590 struct function_info
*start
;
1591 /* Symbol at start of function. */
1593 Elf_Internal_Sym
*sym
;
1594 struct elf_link_hash_entry
*h
;
1596 /* Function section. */
1598 /* Address range of (this part of) function. */
1602 /* Set if global symbol. */
1603 unsigned int global
: 1;
1604 /* Set if known to be start of function (as distinct from a hunk
1605 in hot/cold section. */
1606 unsigned int is_func
: 1;
1607 /* Flags used during call tree traversal. */
1608 unsigned int visit1
: 1;
1609 unsigned int non_root
: 1;
1610 unsigned int visit2
: 1;
1611 unsigned int marking
: 1;
1612 unsigned int visit3
: 1;
1615 struct spu_elf_stack_info
1619 /* Variable size array describing functions, one per contiguous
1620 address range belonging to a function. */
1621 struct function_info fun
[1];
1624 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1625 entries for section SEC. */
1627 static struct spu_elf_stack_info
*
1628 alloc_stack_info (asection
*sec
, int max_fun
)
1630 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1633 amt
= sizeof (struct spu_elf_stack_info
);
1634 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1635 sec_data
->stack_info
= bfd_zmalloc (amt
);
1636 if (sec_data
->stack_info
!= NULL
)
1637 sec_data
->stack_info
->max_fun
= max_fun
;
1638 return sec_data
->stack_info
;
1641 /* Add a new struct function_info describing a (part of a) function
1642 starting at SYM_H. Keep the array sorted by address. */
1644 static struct function_info
*
1645 maybe_insert_function (asection
*sec
,
1648 bfd_boolean is_func
)
1650 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1651 struct spu_elf_stack_info
*sinfo
= sec_data
->stack_info
;
1657 sinfo
= alloc_stack_info (sec
, 20);
1664 Elf_Internal_Sym
*sym
= sym_h
;
1665 off
= sym
->st_value
;
1666 size
= sym
->st_size
;
1670 struct elf_link_hash_entry
*h
= sym_h
;
1671 off
= h
->root
.u
.def
.value
;
1675 for (i
= sinfo
->num_fun
; --i
>= 0; )
1676 if (sinfo
->fun
[i
].lo
<= off
)
1681 /* Don't add another entry for an alias, but do update some
1683 if (sinfo
->fun
[i
].lo
== off
)
1685 /* Prefer globals over local syms. */
1686 if (global
&& !sinfo
->fun
[i
].global
)
1688 sinfo
->fun
[i
].global
= TRUE
;
1689 sinfo
->fun
[i
].u
.h
= sym_h
;
1692 sinfo
->fun
[i
].is_func
= TRUE
;
1693 return &sinfo
->fun
[i
];
1695 /* Ignore a zero-size symbol inside an existing function. */
1696 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1697 return &sinfo
->fun
[i
];
1700 if (++i
< sinfo
->num_fun
)
1701 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1702 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1703 else if (i
>= sinfo
->max_fun
)
1705 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1706 bfd_size_type old
= amt
;
1708 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1709 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1710 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1711 sinfo
= bfd_realloc (sinfo
, amt
);
1714 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1715 sec_data
->stack_info
= sinfo
;
1717 sinfo
->fun
[i
].is_func
= is_func
;
1718 sinfo
->fun
[i
].global
= global
;
1719 sinfo
->fun
[i
].sec
= sec
;
1721 sinfo
->fun
[i
].u
.h
= sym_h
;
1723 sinfo
->fun
[i
].u
.sym
= sym_h
;
1724 sinfo
->fun
[i
].lo
= off
;
1725 sinfo
->fun
[i
].hi
= off
+ size
;
1726 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1727 sinfo
->num_fun
+= 1;
1728 return &sinfo
->fun
[i
];
1731 /* Return the name of FUN. */
1734 func_name (struct function_info
*fun
)
1738 Elf_Internal_Shdr
*symtab_hdr
;
1740 while (fun
->start
!= NULL
)
1744 return fun
->u
.h
->root
.root
.string
;
1747 if (fun
->u
.sym
->st_name
== 0)
1749 size_t len
= strlen (sec
->name
);
1750 char *name
= bfd_malloc (len
+ 10);
1753 sprintf (name
, "%s+%lx", sec
->name
,
1754 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1758 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1759 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1762 /* Read the instruction at OFF in SEC. Return true iff the instruction
1763 is a nop, lnop, or stop 0 (all zero insn). */
1766 is_nop (asection
*sec
, bfd_vma off
)
1768 unsigned char insn
[4];
1770 if (off
+ 4 > sec
->size
1771 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1773 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1775 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1780 /* Extend the range of FUN to cover nop padding up to LIMIT.
1781 Return TRUE iff some instruction other than a NOP was found. */
1784 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1786 bfd_vma off
= (fun
->hi
+ 3) & -4;
1788 while (off
< limit
&& is_nop (fun
->sec
, off
))
1799 /* Check and fix overlapping function ranges. Return TRUE iff there
1800 are gaps in the current info we have about functions in SEC. */
1803 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1805 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1806 struct spu_elf_stack_info
*sinfo
= sec_data
->stack_info
;
1808 bfd_boolean gaps
= FALSE
;
1813 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1814 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1816 /* Fix overlapping symbols. */
1817 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1818 const char *f2
= func_name (&sinfo
->fun
[i
]);
1820 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1821 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1823 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
1826 if (sinfo
->num_fun
== 0)
1830 if (sinfo
->fun
[0].lo
!= 0)
1832 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
1834 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
1836 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
1837 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
1839 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
1845 /* Search current function info for a function that contains address
1846 OFFSET in section SEC. */
1848 static struct function_info
*
1849 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
1851 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1852 struct spu_elf_stack_info
*sinfo
= sec_data
->stack_info
;
1856 hi
= sinfo
->num_fun
;
1859 mid
= (lo
+ hi
) / 2;
1860 if (offset
< sinfo
->fun
[mid
].lo
)
1862 else if (offset
>= sinfo
->fun
[mid
].hi
)
1865 return &sinfo
->fun
[mid
];
1867 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
1872 /* Add CALLEE to CALLER call list if not already present. */
1875 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
1877 struct call_info
*p
;
1878 for (p
= caller
->call_list
; p
!= NULL
; p
= p
->next
)
1879 if (p
->fun
== callee
->fun
)
1881 /* Tail calls use less stack than normal calls. Retain entry
1882 for normal call over one for tail call. */
1883 if (p
->is_tail
> callee
->is_tail
)
1884 p
->is_tail
= callee
->is_tail
;
1887 callee
->next
= caller
->call_list
;
1888 caller
->call_list
= callee
;
1892 /* Rummage through the relocs for SEC, looking for function calls.
1893 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1894 mark destination symbols on calls as being functions. Also
1895 look at branches, which may be tail calls or go to hot/cold
1896 section part of same function. */
1899 mark_functions_via_relocs (asection
*sec
,
1900 struct bfd_link_info
*info
,
1903 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1904 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1905 Elf_Internal_Sym
*syms
;
1907 static bfd_boolean warned
;
1909 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
1911 if (internal_relocs
== NULL
)
1914 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1915 psyms
= &symtab_hdr
->contents
;
1916 syms
= *(Elf_Internal_Sym
**) psyms
;
1917 irela
= internal_relocs
;
1918 irelaend
= irela
+ sec
->reloc_count
;
1919 for (; irela
< irelaend
; irela
++)
1921 enum elf_spu_reloc_type r_type
;
1922 unsigned int r_indx
;
1924 Elf_Internal_Sym
*sym
;
1925 struct elf_link_hash_entry
*h
;
1927 unsigned char insn
[4];
1928 bfd_boolean is_call
;
1929 struct function_info
*caller
;
1930 struct call_info
*callee
;
1932 r_type
= ELF32_R_TYPE (irela
->r_info
);
1933 if (r_type
!= R_SPU_REL16
1934 && r_type
!= R_SPU_ADDR16
)
1937 r_indx
= ELF32_R_SYM (irela
->r_info
);
1938 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
1942 || sym_sec
->output_section
== NULL
1943 || sym_sec
->output_section
->owner
!= sec
->output_section
->owner
)
1946 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
1947 irela
->r_offset
, 4))
1949 if (!is_branch (insn
))
1952 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
1953 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
1957 if (!call_tree
|| !warned
)
1958 info
->callbacks
->einfo (_("%B(%A+0x%v): call to non-code section"
1959 " %B(%A), stack analysis incomplete\n"),
1960 sec
->owner
, sec
, irela
->r_offset
,
1961 sym_sec
->owner
, sym_sec
);
1965 is_call
= (insn
[0] & 0xfd) == 0x31;
1968 val
= h
->root
.u
.def
.value
;
1970 val
= sym
->st_value
;
1971 val
+= irela
->r_addend
;
1975 struct function_info
*fun
;
1977 if (irela
->r_addend
!= 0)
1979 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
1982 fake
->st_value
= val
;
1984 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
1988 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
1990 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
1993 if (irela
->r_addend
!= 0
1994 && fun
->u
.sym
!= sym
)
1999 caller
= find_function (sec
, irela
->r_offset
, info
);
2002 callee
= bfd_malloc (sizeof *callee
);
2006 callee
->fun
= find_function (sym_sec
, val
, info
);
2007 if (callee
->fun
== NULL
)
2009 callee
->is_tail
= !is_call
;
2010 if (!insert_callee (caller
, callee
))
2013 && !callee
->fun
->is_func
2014 && callee
->fun
->stack
== 0)
2016 /* This is either a tail call or a branch from one part of
2017 the function to another, ie. hot/cold section. If the
2018 destination has been called by some other function then
2019 it is a separate function. We also assume that functions
2020 are not split across input files. */
2021 if (callee
->fun
->start
!= NULL
2022 || sec
->owner
!= sym_sec
->owner
)
2024 callee
->fun
->start
= NULL
;
2025 callee
->fun
->is_func
= TRUE
;
2028 callee
->fun
->start
= caller
;
2035 /* Handle something like .init or .fini, which has a piece of a function.
2036 These sections are pasted together to form a single function. */
2039 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2041 struct bfd_link_order
*l
;
2042 struct _spu_elf_section_data
*sec_data
;
2043 struct spu_elf_stack_info
*sinfo
;
2044 Elf_Internal_Sym
*fake
;
2045 struct function_info
*fun
, *fun_start
;
2047 fake
= bfd_zmalloc (sizeof (*fake
));
2051 fake
->st_size
= sec
->size
;
2053 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2054 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2058 /* Find a function immediately preceding this section. */
2060 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2062 if (l
->u
.indirect
.section
== sec
)
2064 if (fun_start
!= NULL
)
2066 if (fun_start
->start
)
2067 fun_start
= fun_start
->start
;
2068 fun
->start
= fun_start
;
2072 if (l
->type
== bfd_indirect_link_order
2073 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2074 && (sinfo
= sec_data
->stack_info
) != NULL
2075 && sinfo
->num_fun
!= 0)
2076 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2079 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2083 /* We're only interested in code sections. */
2086 interesting_section (asection
*s
, bfd
*obfd
, struct spu_link_hash_table
*htab
)
2088 return (s
!= htab
->stub
2089 && s
->output_section
!= NULL
2090 && s
->output_section
->owner
== obfd
2091 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2092 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2096 /* Map address ranges in code sections to functions. */
2099 discover_functions (bfd
*output_bfd
, struct bfd_link_info
*info
)
2101 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2104 Elf_Internal_Sym
***psym_arr
;
2105 asection
***sec_arr
;
2106 bfd_boolean gaps
= FALSE
;
2109 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2112 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2113 if (psym_arr
== NULL
)
2115 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2116 if (sec_arr
== NULL
)
2120 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2122 ibfd
= ibfd
->link_next
, bfd_idx
++)
2124 extern const bfd_target bfd_elf32_spu_vec
;
2125 Elf_Internal_Shdr
*symtab_hdr
;
2128 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2129 asection
**psecs
, **p
;
2131 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2134 /* Read all the symbols. */
2135 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2136 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2140 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2143 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2145 symtab_hdr
->contents
= (void *) syms
;
2150 /* Select defined function symbols that are going to be output. */
2151 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2154 psym_arr
[bfd_idx
] = psyms
;
2155 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2158 sec_arr
[bfd_idx
] = psecs
;
2159 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2160 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2161 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2165 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2166 if (s
!= NULL
&& interesting_section (s
, output_bfd
, htab
))
2169 symcount
= psy
- psyms
;
2172 /* Sort them by section and offset within section. */
2173 sort_syms_syms
= syms
;
2174 sort_syms_psecs
= psecs
;
2175 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2177 /* Now inspect the function symbols. */
2178 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2180 asection
*s
= psecs
[*psy
- syms
];
2181 Elf_Internal_Sym
**psy2
;
2183 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2184 if (psecs
[*psy2
- syms
] != s
)
2187 if (!alloc_stack_info (s
, psy2
- psy
))
2192 /* First install info about properly typed and sized functions.
2193 In an ideal world this will cover all code sections, except
2194 when partitioning functions into hot and cold sections,
2195 and the horrible pasted together .init and .fini functions. */
2196 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2199 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2201 asection
*s
= psecs
[sy
- syms
];
2202 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2207 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2208 if (interesting_section (sec
, output_bfd
, htab
))
2209 gaps
|= check_function_ranges (sec
, info
);
2214 /* See if we can discover more function symbols by looking at
2216 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2218 ibfd
= ibfd
->link_next
, bfd_idx
++)
2222 if (psym_arr
[bfd_idx
] == NULL
)
2225 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2226 if (interesting_section (sec
, output_bfd
, htab
)
2227 && sec
->reloc_count
!= 0)
2229 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2234 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2236 ibfd
= ibfd
->link_next
, bfd_idx
++)
2238 Elf_Internal_Shdr
*symtab_hdr
;
2240 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2243 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2246 psecs
= sec_arr
[bfd_idx
];
2248 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2249 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2252 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2253 if (interesting_section (sec
, output_bfd
, htab
))
2254 gaps
|= check_function_ranges (sec
, info
);
2258 /* Finally, install all globals. */
2259 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2263 s
= psecs
[sy
- syms
];
2265 /* Global syms might be improperly typed functions. */
2266 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2267 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2269 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2274 /* Some of the symbols we've installed as marking the
2275 beginning of functions may have a size of zero. Extend
2276 the range of such functions to the beginning of the
2277 next symbol of interest. */
2278 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2279 if (interesting_section (sec
, output_bfd
, htab
))
2281 struct _spu_elf_section_data
*sec_data
;
2282 struct spu_elf_stack_info
*sinfo
;
2284 sec_data
= spu_elf_section_data (sec
);
2285 sinfo
= sec_data
->stack_info
;
2289 bfd_vma hi
= sec
->size
;
2291 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2293 sinfo
->fun
[fun_idx
].hi
= hi
;
2294 hi
= sinfo
->fun
[fun_idx
].lo
;
2297 /* No symbols in this section. Must be .init or .fini
2298 or something similar. */
2299 else if (!pasted_function (sec
, info
))
2305 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2307 ibfd
= ibfd
->link_next
, bfd_idx
++)
2309 if (psym_arr
[bfd_idx
] == NULL
)
2312 free (psym_arr
[bfd_idx
]);
2313 free (sec_arr
[bfd_idx
]);
2322 /* Mark nodes in the call graph that are called by some other node. */
2325 mark_non_root (struct function_info
*fun
)
2327 struct call_info
*call
;
2330 for (call
= fun
->call_list
; call
; call
= call
->next
)
2332 call
->fun
->non_root
= TRUE
;
2333 if (!call
->fun
->visit1
)
2334 mark_non_root (call
->fun
);
2338 /* Remove cycles from the call graph. */
2341 call_graph_traverse (struct function_info
*fun
, struct bfd_link_info
*info
)
2343 struct call_info
**callp
, *call
;
2346 fun
->marking
= TRUE
;
2348 callp
= &fun
->call_list
;
2349 while ((call
= *callp
) != NULL
)
2351 if (!call
->fun
->visit2
)
2352 call_graph_traverse (call
->fun
, info
);
2353 else if (call
->fun
->marking
)
2355 const char *f1
= func_name (fun
);
2356 const char *f2
= func_name (call
->fun
);
2358 info
->callbacks
->info (_("Stack analysis will ignore the call "
2361 *callp
= call
->next
;
2364 callp
= &call
->next
;
2366 fun
->marking
= FALSE
;
/* Populate call_list for each function.  */

static bfd_boolean
build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if (!interesting_section (sec, output_bfd, htab)
	      || sec->reloc_count == 0)
	    continue;

	  if (!mark_functions_via_relocs (sec, info, TRUE))
	    return FALSE;
	}

      /* Transfer call info from hot/cold section part of function
	 to main entry.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		if (sinfo->fun[i].start != NULL)
		  {
		    struct call_info *call = sinfo->fun[i].call_list;

		    while (call != NULL)
		      {
			struct call_info *call_next = call->next;
			if (!insert_callee (sinfo->fun[i].start, call))
			  free (call);
			call = call_next;
		      }
		    sinfo->fun[i].call_list = NULL;
		    sinfo->fun[i].non_root = TRUE;
		  }
	    }
	}
    }

  /* Find the call graph root(s).  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].visit1)
		  mark_non_root (&sinfo->fun[i]);
	    }
	}
    }

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].non_root)
		  call_graph_traverse (&sinfo->fun[i], info);
	    }
	}
    }

  return TRUE;
}
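
/* A note on the flags used by the call graph code in this file:
   visit1 guards the recursion in mark_non_root, and non_root marks
   functions that should not be treated as call graph roots (either
   because something calls them, or because they are hot/cold parts
   whose call lists were moved to the main entry above).  visit2 and
   marking implement the depth-first traversal in call_graph_traverse
   that detects and removes back edges.  */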
/* Descend the call graph for FUN, accumulating total stack required.  */

static bfd_vma
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   int emit_stack_syms)
{
  struct call_info *call;
  struct function_info *max = NULL;
  bfd_vma max_stack = fun->stack;
  bfd_vma stack;
  const char *f1;

  for (call = fun->call_list; call; call = call->next)
    {
      stack = sum_stack (call->fun, info, emit_stack_syms);
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail)
	stack += fun->stack;
      if (max_stack < stack)
	{
	  max_stack = stack;
	  max = call->fun;
	}
    }

  f1 = func_name (fun);
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"), f1, fun->stack, max_stack);

  if (fun->call_list)
    {
      info->callbacks->minfo (_("  calls:\n"));
      for (call = fun->call_list; call; call = call->next)
	{
	  const char *f2 = func_name (call->fun);
	  const char *ann1 = call->fun == max ? "*" : " ";
	  const char *ann2 = call->is_tail ? "t" : " ";

	  info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
	}
    }

  /* Now fun->stack holds cumulative stack.  */
  fun->stack = max_stack;

  if (emit_stack_syms)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return max_stack;

      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = max_stack;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	}
    }

  return max_stack;
}
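
/* The __stack_* symbols defined above are absolute symbols whose value
   is the cumulative stack estimate for the corresponding function.
   As an illustration only (not part of this file), a program linked
   with stack-size symbols enabled could read its own estimate with
   something like:

     extern const char __stack_main[];
     unsigned int main_stack = (unsigned int) __stack_main;

   Local functions get the form __stack_<section id>_<name>, matching
   the second sprintf above.  */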
/* Provide an estimate of total stack required.  */

static bfd_boolean
spu_elf_stack_analysis (bfd *output_bfd,
			struct bfd_link_info *info,
			int emit_stack_syms)
{
  bfd *ibfd;
  bfd_vma max_stack = 0;

  if (!discover_functions (output_bfd, info))
    return FALSE;

  if (!build_call_tree (output_bfd, info))
    return FALSE;

  info->callbacks->info (_("Stack size for call graph root nodes.\n"));
  info->callbacks->minfo (_("\nStack size for functions.  "
			    "Annotations: '*' max stack, 't' tail call\n"));
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].non_root)
		  {
		    bfd_vma stack;
		    const char *f1;

		    stack = sum_stack (&sinfo->fun[i], info,
				       emit_stack_syms);
		    f1 = func_name (&sinfo->fun[i]);
		    info->callbacks->info (_("  %s: 0x%v\n"),
					   f1, stack);
		    if (max_stack < stack)
		      max_stack = stack;
		  }
	    }
	}
    }

  info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
  return TRUE;
}
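
/* The summary for call graph root nodes goes through
   info->callbacks->info, while the detailed per-function lines above
   use ->minfo, which normally only appears in the linker map file.
   (This describes the generic ld callback behaviour as assumed here;
   it is not defined in this file.)  */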
/* Perform a final link.  */

static bfd_boolean
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (htab->stack_analysis
      && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
    info->callbacks->einfo ("%X%P: stack analysis error: %E\n");

  return bfd_elf_final_link (output_bfd, info);
}
/* Called when not normally emitting relocs, ie. !info->relocatable
   and !info->emitrelocations.  Returns a count of special relocs
   that need to be emitted.  */

static unsigned int
spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
{
  unsigned int count = 0;
  Elf_Internal_Rela *relend = relocs + sec->reloc_count;

  for (; relocs < relend; relocs++)
    {
      int r_type = ELF32_R_TYPE (relocs->r_info);
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	++count;
    }

  return count;
}
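
/* R_SPU_PPU32 and R_SPU_PPU64 relocs are not applied to the SPU image
   here; they are counted above and copied to the output so that the
   PowerPC side can resolve them when the SPU executable is embedded in
   a PPU object.  (The PPU-side processing lives outside this file;
   this note describes the apparent intent.)  */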
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */

static bfd_boolean
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  bfd_boolean ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;

  htab = spu_hash_table (info);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      bfd_boolean branch;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  emit_these_relocs = TRUE;
	  continue;
	}

      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  rel->r_info = 0;
	  rel->r_addend = 0;
	  continue;
	}

      if (info->relocatable)
	continue;

      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      branch = (is_branch (contents + rel->r_offset)
		|| is_hint (contents + rel->r_offset));
      if (needs_ovl_stub (sym_name, sec, input_section, htab, branch))
	{
	  char *stub_name;
	  struct spu_stub_hash_entry *sh;

	  stub_name = spu_stub_name (sec, h, rel);
	  if (stub_name == NULL)
	    return FALSE;

	  sh = (struct spu_stub_hash_entry *)
	    bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
	  if (sh != NULL)
	    {
	      relocation = (htab->stub->output_section->vma
			    + htab->stub->output_offset
			    + sh->off);
	      addend = 0;
	    }
	  free (stub_name);
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* Fall through.  */

	    common_error:
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->relocatable
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
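
/* Returning 2 (rather than TRUE) from relocate_section is the generic
   ELF linker's cue to emit the remaining relocs for this section even
   though -r/--emit-relocs was not given; elf_backend_count_relocs
   below tells it how much reloc section space those entries need.
   (This relies on elflink.c behaviour, described here for reference.)  */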
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static bfd_boolean
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->num_overlays != 0
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      static Elf_Internal_Rela zero_rel;
      char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
      struct spu_stub_hash_entry *sh;

      if (stub_name == NULL)
	return FALSE;
      sh = (struct spu_stub_hash_entry *)
	bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
      free (stub_name);
      if (sh != NULL)
	{
	  sym->st_shndx
	    = _bfd_elf_section_from_bfd_section (htab->stub->output_section->owner,
						 htab->stub->output_section);
	  sym->st_value = (htab->stub->output_section->vma
			   + htab->stub->output_offset
			   + sh->off);
	}
    }

  return TRUE;
}

static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
/* Set ELF header e_type for plugins.  */

static void
spu_elf_post_process_headers (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
}
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  int extra = htab->num_overlays;
  asection *sec;

  if (extra)
    ++extra;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		/* Split off the sections following S into a new
		   PT_LOAD segment.  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		/* S was not the first section of the segment, so it
		   needs a new PT_LOAD segment of its own.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }

  return TRUE;
}
/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive.  */

asection *
spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
{
  struct elf_segment_map *m;
  unsigned int i;

  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0
	    && (m->sections[i]->vma < lo
		|| m->sections[i]->vma > hi
		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
	  return m->sections[i];

  return NULL;
}
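
/* Illustrative use, not part of this file: the SPU local store is
   256k, so a caller can verify that everything fits with something
   along the lines of

     asection *s = spu_elf_check_vma (output_bfd, 0, 0x3ffff);
     if (s != NULL)
       report that S exceeds the local store range;

   where the exact range and diagnostics belong to the ld emulation
   code rather than to this function.  */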
/* Tweak phdrs before writing them out.  */

static bfd_boolean
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		unsigned int off = (o - 1) * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }
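
  /* Note on the table layout assumed by the code above: _ovly_table is
     built elsewhere in this file as one 16-byte entry per overlay, and
     the store at offset (o - 1) * 16 + 8 fills in the third word of
     entry o with the segment's file offset.  The remaining words hold
     the overlay's vma, size and buffer number as written when the
     table was created.  */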
  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned int adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned int adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
#define TARGET_BIG_SYM bfd_elf32_spu_vec
#define TARGET_BIG_NAME "elf32-spu"
#define ELF_ARCH bfd_arch_spu
#define ELF_MACHINE_CODE EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE 0x80
#define elf_backend_rela_normal 1
#define elf_backend_can_gc_sections 1

#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto spu_elf_info_to_howto
#define elf_backend_count_relocs spu_elf_count_relocs
#define elf_backend_relocate_section spu_elf_relocate_section
#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define bfd_elf32_new_section_hook spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_free spu_elf_link_hash_table_free

#define elf_backend_additional_program_headers spu_elf_additional_program_headers
#define elf_backend_modify_segment_map spu_elf_modify_segment_map
#define elf_backend_modify_program_headers spu_elf_modify_program_headers
#define elf_backend_post_process_headers spu_elf_post_process_headers
#define elf_backend_special_sections spu_elf_special_sections
#define bfd_elf32_bfd_final_link spu_elf_final_link

#include "elf32-target.h"