/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
93 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
94 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
95 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
106 case BFD_RELOC_SPU_IMM10W
:
108 case BFD_RELOC_SPU_IMM16W
:
110 case BFD_RELOC_SPU_LO16
:
111 return R_SPU_ADDR16_LO
;
112 case BFD_RELOC_SPU_HI16
:
113 return R_SPU_ADDR16_HI
;
114 case BFD_RELOC_SPU_IMM18
:
116 case BFD_RELOC_SPU_PCREL16
:
118 case BFD_RELOC_SPU_IMM7
:
120 case BFD_RELOC_SPU_IMM8
:
122 case BFD_RELOC_SPU_PCREL9a
:
124 case BFD_RELOC_SPU_PCREL9b
:
126 case BFD_RELOC_SPU_IMM10
:
127 return R_SPU_ADDR10I
;
128 case BFD_RELOC_SPU_IMM16
:
129 return R_SPU_ADDR16I
;
132 case BFD_RELOC_32_PCREL
:
134 case BFD_RELOC_SPU_PPU32
:
136 case BFD_RELOC_SPU_PPU64
:
142 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
144 Elf_Internal_Rela
*dst
)
146 enum elf_spu_reloc_type r_type
;
148 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
149 BFD_ASSERT (r_type
< R_SPU_max
);
150 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
153 static reloc_howto_type
*
154 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
155 bfd_reloc_code_real_type code
)
157 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
159 if (r_type
== R_SPU_NONE
)
162 return elf_howto_table
+ r_type
;
165 static reloc_howto_type
*
166 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
171 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
172 if (elf_howto_table
[i
].name
!= NULL
173 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
174 return &elf_howto_table
[i
];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
183 void *data
, asection
*input_section
,
184 bfd
*output_bfd
, char **error_message
)
186 bfd_size_type octets
;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd
!= NULL
)
194 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
195 input_section
, output_bfd
, error_message
);
197 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
198 return bfd_reloc_outofrange
;
199 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol
->section
))
205 if (symbol
->section
->output_section
)
206 val
+= symbol
->section
->output_section
->vma
;
208 val
+= reloc_entry
->addend
;
210 /* Make it pc-relative. */
211 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
214 if (val
+ 256 >= 512)
215 return bfd_reloc_overflow
;
217 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
222 insn
&= ~reloc_entry
->howto
->dst_mask
;
223 insn
|= val
& reloc_entry
->howto
->dst_mask
;
224 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
229 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
231 if (!sec
->used_by_bfd
)
233 struct _spu_elf_section_data
*sdata
;
235 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
238 sec
->used_by_bfd
= sdata
;
241 return _bfd_elf_new_section_hook (abfd
, sec
);
244 /* Set up overlay info for executables. */
247 spu_elf_object_p (bfd
*abfd
)
249 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
251 unsigned int i
, num_ovl
, num_buf
;
252 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
253 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
254 Elf_Internal_Phdr
*last_phdr
= NULL
;
256 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
257 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
262 if (last_phdr
== NULL
263 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
266 for (j
= 1; j
< elf_numsections (abfd
); j
++)
268 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
272 asection
*sec
= shdr
->bfd_section
;
273 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
274 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
286 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
288 if (sym
->name
!= NULL
289 && sym
->section
!= bfd_abs_section_ptr
290 && strncmp (sym
->name
, "_EAR_", 5) == 0)
291 sym
->flags
|= BSF_KEEP
;
294 /* SPU ELF linker hash table. */
296 struct spu_link_hash_table
298 struct elf_link_hash_table elf
;
300 /* Shortcuts to overlay sections. */
305 /* Count of stubs in each overlay section. */
306 unsigned int *stub_count
;
308 /* The stub section for each overlay section. */
311 struct elf_link_hash_entry
*ovly_load
;
312 struct elf_link_hash_entry
*ovly_return
;
313 unsigned long ovly_load_r_symndx
;
315 /* Number of overlay buffers. */
316 unsigned int num_buf
;
318 /* Total number of overlays. */
319 unsigned int num_overlays
;
321 /* How much memory we have. */
322 unsigned int local_store
;
323 /* Local store --auto-overlay should reserve for non-overlay
324 functions and data. */
325 unsigned int overlay_fixed
;
326 /* Local store --auto-overlay should reserve for stack and heap. */
327 unsigned int reserved
;
328 /* If reserved is not specified, stack analysis will calculate a value
329 for the stack. This parameter adjusts that value to allow for
330 negative sp access (the ABI says 2000 bytes below sp are valid,
331 and the overlay manager uses some of this area). */
332 int extra_stack_space
;
333 /* Count of overlay stubs needed in non-overlay area. */
334 unsigned int non_ovly_stub
;
336 /* Stash various callbacks for --auto-overlay. */
337 void (*spu_elf_load_ovl_mgr
) (void);
338 FILE *(*spu_elf_open_overlay_script
) (void);
339 void (*spu_elf_relink
) (void);
341 /* Bit 0 set if --auto-overlay.
342 Bit 1 set if --auto-relink.
343 Bit 2 set if --overlay-rodata. */
344 unsigned int auto_overlay
: 3;
345 #define AUTO_OVERLAY 1
346 #define AUTO_RELINK 2
347 #define OVERLAY_RODATA 4
349 /* Set if we should emit symbols for stubs. */
350 unsigned int emit_stub_syms
:1;
352 /* Set if we want stubs on calls out of overlay regions to
353 non-overlay regions. */
354 unsigned int non_overlay_stubs
: 1;
357 unsigned int stub_err
: 1;
359 /* Set if stack size analysis should be done. */
360 unsigned int stack_analysis
: 1;
362 /* Set if __stack_* syms will be emitted. */
363 unsigned int emit_stack_syms
: 1;
366 /* Hijack the generic got fields for overlay stub accounting. */
370 struct got_entry
*next
;
376 #define spu_hash_table(p) \
377 ((struct spu_link_hash_table *) ((p)->hash))
379 /* Create a spu ELF linker hash table. */
381 static struct bfd_link_hash_table
*
382 spu_elf_link_hash_table_create (bfd
*abfd
)
384 struct spu_link_hash_table
*htab
;
386 htab
= bfd_malloc (sizeof (*htab
));
390 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
391 _bfd_elf_link_hash_newfunc
,
392 sizeof (struct elf_link_hash_entry
)))
398 memset (&htab
->ovtab
, 0,
399 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
401 htab
->elf
.init_got_refcount
.refcount
= 0;
402 htab
->elf
.init_got_refcount
.glist
= NULL
;
403 htab
->elf
.init_got_offset
.offset
= 0;
404 htab
->elf
.init_got_offset
.glist
= NULL
;
405 return &htab
->elf
.root
;
408 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
409 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
410 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
413 get_sym_h (struct elf_link_hash_entry
**hp
,
414 Elf_Internal_Sym
**symp
,
416 Elf_Internal_Sym
**locsymsp
,
417 unsigned long r_symndx
,
420 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
422 if (r_symndx
>= symtab_hdr
->sh_info
)
424 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
425 struct elf_link_hash_entry
*h
;
427 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
428 while (h
->root
.type
== bfd_link_hash_indirect
429 || h
->root
.type
== bfd_link_hash_warning
)
430 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
440 asection
*symsec
= NULL
;
441 if (h
->root
.type
== bfd_link_hash_defined
442 || h
->root
.type
== bfd_link_hash_defweak
)
443 symsec
= h
->root
.u
.def
.section
;
449 Elf_Internal_Sym
*sym
;
450 Elf_Internal_Sym
*locsyms
= *locsymsp
;
454 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
456 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
458 0, NULL
, NULL
, NULL
);
463 sym
= locsyms
+ r_symndx
;
472 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
478 /* Create the note section if not already present. This is done early so
479 that the linker maps the sections to the right place in the output. */
482 spu_elf_create_sections (struct bfd_link_info
*info
,
487 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
489 /* Stash some options away where we can get at them later. */
490 htab
->stack_analysis
= stack_analysis
;
491 htab
->emit_stack_syms
= emit_stack_syms
;
493 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
494 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
499 /* Make SPU_PTNOTE_SPUNAME section. */
506 ibfd
= info
->input_bfds
;
507 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
508 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
510 || !bfd_set_section_alignment (ibfd
, s
, 4))
513 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
514 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
515 size
+= (name_len
+ 3) & -4;
517 if (!bfd_set_section_size (ibfd
, s
, size
))
520 data
= bfd_zalloc (ibfd
, size
);
524 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
525 bfd_put_32 (ibfd
, name_len
, data
+ 4);
526 bfd_put_32 (ibfd
, 1, data
+ 8);
527 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
528 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
529 bfd_get_filename (info
->output_bfd
), name_len
);
536 /* qsort predicate to sort sections by vma. */
539 sort_sections (const void *a
, const void *b
)
541 const asection
*const *s1
= a
;
542 const asection
*const *s2
= b
;
543 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
546 return delta
< 0 ? -1 : 1;
548 return (*s1
)->index
- (*s2
)->index
;
551 /* Identify overlays in the output bfd, and number them. */
554 spu_elf_find_overlays (struct bfd_link_info
*info
)
556 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
557 asection
**alloc_sec
;
558 unsigned int i
, n
, ovl_index
, num_buf
;
562 if (info
->output_bfd
->section_count
< 2)
566 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
567 if (alloc_sec
== NULL
)
570 /* Pick out all the alloced sections. */
571 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
572 if ((s
->flags
& SEC_ALLOC
) != 0
573 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
583 /* Sort them by vma. */
584 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
586 /* Look for overlapping vmas. Any with overlap must be overlays.
587 Count them. Also count the number of overlay regions. */
588 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
589 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
592 if (s
->vma
< ovl_end
)
594 asection
*s0
= alloc_sec
[i
- 1];
596 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
598 alloc_sec
[ovl_index
] = s0
;
599 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
600 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= ++num_buf
;
602 alloc_sec
[ovl_index
] = s
;
603 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
604 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
605 if (s0
->vma
!= s
->vma
)
607 info
->callbacks
->einfo (_("%X%P: overlay sections %A and %A "
608 "do not start at the same address.\n"),
612 if (ovl_end
< s
->vma
+ s
->size
)
613 ovl_end
= s
->vma
+ s
->size
;
616 ovl_end
= s
->vma
+ s
->size
;
619 htab
->num_overlays
= ovl_index
;
620 htab
->num_buf
= num_buf
;
621 htab
->ovl_sec
= alloc_sec
;
622 htab
->ovly_load
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
623 FALSE
, FALSE
, FALSE
);
624 htab
->ovly_return
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return",
625 FALSE
, FALSE
, FALSE
);
626 return ovl_index
!= 0;
/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.  */
#ifndef OVL_STUB_SIZE
/* Default to faster.  */
#define OVL_STUB_SIZE 16
/* #define OVL_STUB_SIZE 8 */
#endif

/* SPU instruction opcodes used when emitting stubs.  */
#define BRSL	0x33000000
#define BR	0x32000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
642 /* Return true for all relative and absolute branch instructions.
650 brhnz 00100011 0.. */
653 is_branch (const unsigned char *insn
)
655 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
658 /* Return true for all indirect branch instructions.
666 bihnz 00100101 011 */
669 is_indirect_branch (const unsigned char *insn
)
671 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
674 /* Return true for branch hint instructions.
679 is_hint (const unsigned char *insn
)
681 return (insn
[0] & 0xfc) == 0x10;
684 /* True if INPUT_SECTION might need overlay stubs. */
687 maybe_needs_stubs (asection
*input_section
, bfd
*output_bfd
)
689 /* No stubs for debug sections and suchlike. */
690 if ((input_section
->flags
& SEC_ALLOC
) == 0)
693 /* No stubs for link-once sections that will be discarded. */
694 if (input_section
->output_section
== NULL
695 || input_section
->output_section
->owner
!= output_bfd
)
698 /* Don't create stubs for .eh_frame references. */
699 if (strcmp (input_section
->name
, ".eh_frame") == 0)
713 /* Return non-zero if this reloc symbol should go via an overlay stub.
714 Return 2 if the stub must be in non-overlay area. */
716 static enum _stub_type
717 needs_ovl_stub (struct elf_link_hash_entry
*h
,
718 Elf_Internal_Sym
*sym
,
720 asection
*input_section
,
721 Elf_Internal_Rela
*irela
,
723 struct bfd_link_info
*info
)
725 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
726 enum elf_spu_reloc_type r_type
;
727 unsigned int sym_type
;
729 enum _stub_type ret
= no_stub
;
732 || sym_sec
->output_section
== NULL
733 || sym_sec
->output_section
->owner
!= info
->output_bfd
734 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
739 /* Ensure no stubs for user supplied overlay manager syms. */
740 if (h
== htab
->ovly_load
|| h
== htab
->ovly_return
)
743 /* setjmp always goes via an overlay stub, because then the return
744 and hence the longjmp goes via __ovly_return. That magically
745 makes setjmp/longjmp between overlays work. */
746 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
747 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
751 /* Usually, symbols in non-overlay sections don't need stubs. */
752 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
753 && !htab
->non_overlay_stubs
)
759 sym_type
= ELF_ST_TYPE (sym
->st_info
);
761 r_type
= ELF32_R_TYPE (irela
->r_info
);
763 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
767 if (contents
== NULL
)
770 if (!bfd_get_section_contents (input_section
->owner
,
777 contents
+= irela
->r_offset
;
779 if (is_branch (contents
) || is_hint (contents
))
782 if ((contents
[0] & 0xfd) == 0x31
783 && sym_type
!= STT_FUNC
786 /* It's common for people to write assembly and forget
787 to give function symbols the right type. Handle
788 calls to such symbols, but warn so that (hopefully)
789 people will fix their code. We need the symbol
790 type to be correct to distinguish function pointer
791 initialisation from other pointer initialisations. */
792 const char *sym_name
;
795 sym_name
= h
->root
.root
.string
;
798 Elf_Internal_Shdr
*symtab_hdr
;
799 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
800 sym_name
= bfd_elf_sym_name (input_section
->owner
,
805 (*_bfd_error_handler
) (_("warning: call to non-function"
806 " symbol %s defined in %B"),
807 sym_sec
->owner
, sym_name
);
813 if (sym_type
!= STT_FUNC
815 && (sym_sec
->flags
& SEC_CODE
) == 0)
818 /* A reference from some other section to a symbol in an overlay
819 section needs a stub. */
820 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
821 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
824 /* If this insn isn't a branch then we are possibly taking the
825 address of a function and passing it out somehow. */
826 return !branch
&& sym_type
== STT_FUNC
? nonovl_stub
: ret
;
830 count_stub (struct spu_link_hash_table
*htab
,
833 enum _stub_type stub_type
,
834 struct elf_link_hash_entry
*h
,
835 const Elf_Internal_Rela
*irela
)
837 unsigned int ovl
= 0;
838 struct got_entry
*g
, **head
;
841 /* If this instruction is a branch or call, we need a stub
842 for it. One stub per function per overlay.
843 If it isn't a branch, then we are taking the address of
844 this function so need a stub in the non-overlay area
845 for it. One stub per function. */
846 if (stub_type
!= nonovl_stub
)
847 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
850 head
= &h
->got
.glist
;
853 if (elf_local_got_ents (ibfd
) == NULL
)
855 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
856 * sizeof (*elf_local_got_ents (ibfd
)));
857 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
858 if (elf_local_got_ents (ibfd
) == NULL
)
861 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
866 addend
= irela
->r_addend
;
870 struct got_entry
*gnext
;
872 for (g
= *head
; g
!= NULL
; g
= g
->next
)
873 if (g
->addend
== addend
&& g
->ovl
== 0)
878 /* Need a new non-overlay area stub. Zap other stubs. */
879 for (g
= *head
; g
!= NULL
; g
= gnext
)
882 if (g
->addend
== addend
)
884 htab
->stub_count
[g
->ovl
] -= 1;
892 for (g
= *head
; g
!= NULL
; g
= g
->next
)
893 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
899 g
= bfd_malloc (sizeof *g
);
904 g
->stub_addr
= (bfd_vma
) -1;
908 htab
->stub_count
[ovl
] += 1;
914 /* Two instruction overlay stubs look like:
917 .word target_ovl_and_address
919 ovl_and_address is a word with the overlay number in the top 14 bits
920 and local store address in the bottom 18 bits.
922 Four instruction overlay stubs look like:
926 ila $79,target_address
930 build_stub (struct spu_link_hash_table
*htab
,
933 enum _stub_type stub_type
,
934 struct elf_link_hash_entry
*h
,
935 const Elf_Internal_Rela
*irela
,
940 struct got_entry
*g
, **head
;
942 bfd_vma addend
, val
, from
, to
;
945 if (stub_type
!= nonovl_stub
)
946 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
949 head
= &h
->got
.glist
;
951 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
955 addend
= irela
->r_addend
;
957 for (g
= *head
; g
!= NULL
; g
= g
->next
)
958 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
963 if (g
->ovl
== 0 && ovl
!= 0)
966 if (g
->stub_addr
!= (bfd_vma
) -1)
969 sec
= htab
->stub_sec
[ovl
];
970 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
971 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
973 to
= (htab
->ovly_load
->root
.u
.def
.value
974 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
975 + htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
);
977 if (OVL_STUB_SIZE
== 16)
979 if (((dest
| to
| from
) & 3) != 0
980 || val
+ 0x20000 >= 0x40000)
985 ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
987 if (OVL_STUB_SIZE
== 16)
989 bfd_put_32 (sec
->owner
, ILA
+ ((ovl
<< 7) & 0x01ffff80) + 78,
990 sec
->contents
+ sec
->size
);
991 bfd_put_32 (sec
->owner
, LNOP
,
992 sec
->contents
+ sec
->size
+ 4);
993 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
994 sec
->contents
+ sec
->size
+ 8);
995 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
996 sec
->contents
+ sec
->size
+ 12);
998 else if (OVL_STUB_SIZE
== 8)
1000 bfd_put_32 (sec
->owner
, BRSL
+ ((val
<< 5) & 0x007fff80) + 75,
1001 sec
->contents
+ sec
->size
);
1003 val
= (dest
& 0x3ffff) | (ovl
<< 18);
1004 bfd_put_32 (sec
->owner
, val
,
1005 sec
->contents
+ sec
->size
+ 4);
1009 sec
->size
+= OVL_STUB_SIZE
;
1011 if (htab
->emit_stub_syms
)
1017 len
= 8 + sizeof (".ovl_call.") - 1;
1019 len
+= strlen (h
->root
.root
.string
);
1024 add
= (int) irela
->r_addend
& 0xffffffff;
1027 name
= bfd_malloc (len
);
1031 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1033 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1035 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1036 dest_sec
->id
& 0xffffffff,
1037 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1039 sprintf (name
+ len
- 9, "+%x", add
);
1041 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1045 if (h
->root
.type
== bfd_link_hash_new
)
1047 h
->root
.type
= bfd_link_hash_defined
;
1048 h
->root
.u
.def
.section
= sec
;
1049 h
->root
.u
.def
.value
= sec
->size
- OVL_STUB_SIZE
;
1050 h
->size
= OVL_STUB_SIZE
;
1054 h
->ref_regular_nonweak
= 1;
1055 h
->forced_local
= 1;
1063 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1067 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1069 /* Symbols starting with _SPUEAR_ need a stub because they may be
1070 invoked by the PPU. */
1071 struct bfd_link_info
*info
= inf
;
1072 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1075 if ((h
->root
.type
== bfd_link_hash_defined
1076 || h
->root
.type
== bfd_link_hash_defweak
)
1078 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1079 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1080 && sym_sec
->output_section
!= NULL
1081 && sym_sec
->output_section
->owner
== info
->output_bfd
1082 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1083 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1084 || htab
->non_overlay_stubs
))
1086 count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1093 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1095 /* Symbols starting with _SPUEAR_ need a stub because they may be
1096 invoked by the PPU. */
1097 struct bfd_link_info
*info
= inf
;
1098 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1101 if ((h
->root
.type
== bfd_link_hash_defined
1102 || h
->root
.type
== bfd_link_hash_defweak
)
1104 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1105 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1106 && sym_sec
->output_section
!= NULL
1107 && sym_sec
->output_section
->owner
== info
->output_bfd
1108 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1109 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1110 || htab
->non_overlay_stubs
))
1112 build_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1113 h
->root
.u
.def
.value
, sym_sec
);
1119 /* Size or build stubs. */
1122 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1124 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1127 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1129 extern const bfd_target bfd_elf32_spu_vec
;
1130 Elf_Internal_Shdr
*symtab_hdr
;
1132 Elf_Internal_Sym
*local_syms
= NULL
;
1134 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1137 /* We'll need the symbol table in a second. */
1138 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1139 if (symtab_hdr
->sh_info
== 0)
1142 /* Walk over each section attached to the input bfd. */
1143 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1145 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1147 /* If there aren't any relocs, then there's nothing more to do. */
1148 if ((isec
->flags
& SEC_RELOC
) == 0
1149 || isec
->reloc_count
== 0)
1152 if (!maybe_needs_stubs (isec
, info
->output_bfd
))
1155 /* Get the relocs. */
1156 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1158 if (internal_relocs
== NULL
)
1159 goto error_ret_free_local
;
1161 /* Now examine each relocation. */
1162 irela
= internal_relocs
;
1163 irelaend
= irela
+ isec
->reloc_count
;
1164 for (; irela
< irelaend
; irela
++)
1166 enum elf_spu_reloc_type r_type
;
1167 unsigned int r_indx
;
1169 Elf_Internal_Sym
*sym
;
1170 struct elf_link_hash_entry
*h
;
1171 enum _stub_type stub_type
;
1173 r_type
= ELF32_R_TYPE (irela
->r_info
);
1174 r_indx
= ELF32_R_SYM (irela
->r_info
);
1176 if (r_type
>= R_SPU_max
)
1178 bfd_set_error (bfd_error_bad_value
);
1179 error_ret_free_internal
:
1180 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1181 free (internal_relocs
);
1182 error_ret_free_local
:
1183 if (local_syms
!= NULL
1184 && (symtab_hdr
->contents
1185 != (unsigned char *) local_syms
))
1190 /* Determine the reloc target section. */
1191 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1192 goto error_ret_free_internal
;
1194 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1196 if (stub_type
== no_stub
)
1198 else if (stub_type
== stub_error
)
1199 goto error_ret_free_internal
;
1201 if (htab
->stub_count
== NULL
)
1204 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1205 htab
->stub_count
= bfd_zmalloc (amt
);
1206 if (htab
->stub_count
== NULL
)
1207 goto error_ret_free_internal
;
1212 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1213 goto error_ret_free_internal
;
1220 dest
= h
->root
.u
.def
.value
;
1222 dest
= sym
->st_value
;
1223 dest
+= irela
->r_addend
;
1224 if (!build_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
,
1226 goto error_ret_free_internal
;
1230 /* We're done with the internal relocs, free them. */
1231 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1232 free (internal_relocs
);
1235 if (local_syms
!= NULL
1236 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1238 if (!info
->keep_memory
)
1241 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1248 /* Allocate space for overlay call and return stubs. */
1251 spu_elf_size_stubs (struct bfd_link_info
*info
,
1252 void (*place_spu_section
) (asection
*, asection
*,
1254 int non_overlay_stubs
)
1256 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1263 htab
->non_overlay_stubs
= non_overlay_stubs
;
1264 if (!process_stubs (info
, FALSE
))
1267 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1271 if (htab
->stub_count
== NULL
)
1274 ibfd
= info
->input_bfds
;
1275 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1276 htab
->stub_sec
= bfd_zmalloc (amt
);
1277 if (htab
->stub_sec
== NULL
)
1280 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1281 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1282 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1283 htab
->stub_sec
[0] = stub
;
1285 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1287 stub
->size
= htab
->stub_count
[0] * OVL_STUB_SIZE
;
1288 (*place_spu_section
) (stub
, NULL
, ".text");
1290 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1292 asection
*osec
= htab
->ovl_sec
[i
];
1293 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1294 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1295 htab
->stub_sec
[ovl
] = stub
;
1297 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1299 stub
->size
= htab
->stub_count
[ovl
] * OVL_STUB_SIZE
;
1300 (*place_spu_section
) (stub
, osec
, NULL
);
1303 /* htab->ovtab consists of two arrays.
1313 . } _ovly_buf_table[];
1316 flags
= (SEC_ALLOC
| SEC_LOAD
1317 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1318 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1319 if (htab
->ovtab
== NULL
1320 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1323 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1324 (*place_spu_section
) (htab
->ovtab
, NULL
, ".data");
1326 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1327 if (htab
->toe
== NULL
1328 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1330 htab
->toe
->size
= 16;
1331 (*place_spu_section
) (htab
->toe
, NULL
, ".toe");
1336 /* Functions to handle embedded spu_ovl.o object. */
1339 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1345 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1351 struct _ovl_stream
*os
;
1355 os
= (struct _ovl_stream
*) stream
;
1356 max
= (const char *) os
->end
- (const char *) os
->start
;
1358 if ((ufile_ptr
) offset
>= max
)
1362 if (count
> max
- offset
)
1363 count
= max
- offset
;
1365 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1370 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1372 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1379 return *ovl_bfd
!= NULL
;
1382 /* Define an STT_OBJECT symbol. */
1384 static struct elf_link_hash_entry
*
1385 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1387 struct elf_link_hash_entry
*h
;
1389 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1393 if (h
->root
.type
!= bfd_link_hash_defined
1396 h
->root
.type
= bfd_link_hash_defined
;
1397 h
->root
.u
.def
.section
= htab
->ovtab
;
1398 h
->type
= STT_OBJECT
;
1401 h
->ref_regular_nonweak
= 1;
1406 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1407 h
->root
.u
.def
.section
->owner
,
1408 h
->root
.root
.string
);
1409 bfd_set_error (bfd_error_bad_value
);
1416 /* Fill in all stubs and the overlay tables. */
1419 spu_elf_build_stubs (struct bfd_link_info
*info
, int emit_syms
)
1421 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1422 struct elf_link_hash_entry
*h
;
1428 htab
->emit_stub_syms
= emit_syms
;
1429 if (htab
->stub_count
== NULL
)
1432 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1433 if (htab
->stub_sec
[i
]->size
!= 0)
1435 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1436 htab
->stub_sec
[i
]->size
);
1437 if (htab
->stub_sec
[i
]->contents
== NULL
)
1439 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1440 htab
->stub_sec
[i
]->size
= 0;
1443 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1444 htab
->ovly_load
= h
;
1445 BFD_ASSERT (h
!= NULL
1446 && (h
->root
.type
== bfd_link_hash_defined
1447 || h
->root
.type
== bfd_link_hash_defweak
)
1450 s
= h
->root
.u
.def
.section
->output_section
;
1451 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1453 (*_bfd_error_handler
) (_("%s in overlay section"),
1454 h
->root
.root
.string
);
1455 bfd_set_error (bfd_error_bad_value
);
1459 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return", FALSE
, FALSE
, FALSE
);
1460 htab
->ovly_return
= h
;
1462 /* Fill in all the stubs. */
1463 process_stubs (info
, TRUE
);
1465 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1469 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1471 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1473 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1474 bfd_set_error (bfd_error_bad_value
);
1477 htab
->stub_sec
[i
]->rawsize
= 0;
1482 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1483 bfd_set_error (bfd_error_bad_value
);
1487 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1488 if (htab
->ovtab
->contents
== NULL
)
1491 /* Write out _ovly_table. */
1492 p
= htab
->ovtab
->contents
;
1493 /* set low bit of .size to mark non-overlay area as present. */
1495 obfd
= htab
->ovtab
->output_section
->owner
;
1496 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1498 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
1502 unsigned long off
= ovl_index
* 16;
1503 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
1505 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1506 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1507 /* file_off written later in spu_elf_modify_program_headers. */
1508 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
1512 h
= define_ovtab_symbol (htab
, "_ovly_table");
1515 h
->root
.u
.def
.value
= 16;
1516 h
->size
= htab
->num_overlays
* 16;
1518 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1521 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1524 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1527 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1528 h
->size
= htab
->num_buf
* 4;
1530 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1533 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1536 h
= define_ovtab_symbol (htab
, "_EAR_");
1539 h
->root
.u
.def
.section
= htab
->toe
;
1540 h
->root
.u
.def
.value
= 0;
1546 /* Check that all loadable section VMAs lie in the range
1547 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
1550 spu_elf_check_vma (struct bfd_link_info
*info
,
1554 unsigned int overlay_fixed
,
1555 unsigned int reserved
,
1556 int extra_stack_space
,
1557 void (*spu_elf_load_ovl_mgr
) (void),
1558 FILE *(*spu_elf_open_overlay_script
) (void),
1559 void (*spu_elf_relink
) (void))
1561 struct elf_segment_map
*m
;
1563 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1564 bfd
*abfd
= info
->output_bfd
;
1566 if (auto_overlay
& AUTO_OVERLAY
)
1567 htab
->auto_overlay
= auto_overlay
;
1568 htab
->local_store
= hi
+ 1 - lo
;
1569 htab
->overlay_fixed
= overlay_fixed
;
1570 htab
->reserved
= reserved
;
1571 htab
->extra_stack_space
= extra_stack_space
;
1572 htab
->spu_elf_load_ovl_mgr
= spu_elf_load_ovl_mgr
;
1573 htab
->spu_elf_open_overlay_script
= spu_elf_open_overlay_script
;
1574 htab
->spu_elf_relink
= spu_elf_relink
;
1576 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
1577 if (m
->p_type
== PT_LOAD
)
1578 for (i
= 0; i
< m
->count
; i
++)
1579 if (m
->sections
[i
]->size
!= 0
1580 && (m
->sections
[i
]->vma
< lo
1581 || m
->sections
[i
]->vma
> hi
1582 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
1583 return m
->sections
[i
];
1585 /* No need for overlays if it all fits. */
1586 htab
->auto_overlay
= 0;
1590 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1591 Search for stack adjusting insns, and return the sp delta. */
1594 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1599 memset (reg
, 0, sizeof (reg
));
1600 for (unrecog
= 0; offset
+ 4 <= sec
->size
&& unrecog
< 32; offset
+= 4)
1602 unsigned char buf
[4];
1606 /* Assume no relocs on stack adjusing insns. */
1607 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1610 if (buf
[0] == 0x24 /* stqd */)
1614 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1615 /* Partly decoded immediate field. */
1616 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1618 if (buf
[0] == 0x1c /* ai */)
1621 imm
= (imm
^ 0x200) - 0x200;
1622 reg
[rt
] = reg
[ra
] + imm
;
1624 if (rt
== 1 /* sp */)
1631 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1633 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1635 reg
[rt
] = reg
[ra
] + reg
[rb
];
1639 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1641 if (buf
[0] >= 0x42 /* ila */)
1642 imm
|= (buf
[0] & 1) << 17;
1647 if (buf
[0] == 0x40 /* il */)
1649 if ((buf
[1] & 0x80) == 0)
1651 imm
= (imm
^ 0x8000) - 0x8000;
1653 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1659 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1661 reg
[rt
] |= imm
& 0xffff;
1664 else if (buf
[0] == 0x04 /* ori */)
1667 imm
= (imm
^ 0x200) - 0x200;
1668 reg
[rt
] = reg
[ra
] | imm
;
1671 else if ((buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1672 || (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */))
1674 /* Used in pic reg load. Say rt is trashed. */
1678 else if (is_branch (buf
) || is_indirect_branch (buf
))
1679 /* If we hit a branch then we must be out of the prologue. */
1688 /* qsort predicate to sort symbols by section and value. */
1690 static Elf_Internal_Sym
*sort_syms_syms
;
1691 static asection
**sort_syms_psecs
;
1694 sort_syms (const void *a
, const void *b
)
1696 Elf_Internal_Sym
*const *s1
= a
;
1697 Elf_Internal_Sym
*const *s2
= b
;
1698 asection
*sec1
,*sec2
;
1699 bfd_signed_vma delta
;
1701 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1702 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1705 return sec1
->index
- sec2
->index
;
1707 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1709 return delta
< 0 ? -1 : 1;
1711 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1713 return delta
< 0 ? -1 : 1;
1715 return *s1
< *s2
? -1 : 1;
/* One edge in the call graph: a call from some function to FUN.
   NOTE(review): the struct header and the COUNT field were lost in the
   mangled original and reconstructed (COUNT is referenced by
   insert_callee/sort_calls) — verify field order.  */
struct call_info
{
  /* The function called.  */
  struct function_info *fun;
  /* Next entry on the caller's call list.  */
  struct call_info *next;
  /* Number of times this call is made.  */
  unsigned int count;
  /* Call graph depth below this edge.  */
  unsigned int max_depth;
  /* Set if the call is a tail call.  */
  unsigned int is_tail : 1;
  /* Set if this "call" is really .init/.fini section pasting.  */
  unsigned int is_pasted : 1;
};
1728 struct function_info
1730 /* List of functions called. Also branches to hot/cold part of
1732 struct call_info
*call_list
;
1733 /* For hot/cold part of function, point to owner. */
1734 struct function_info
*start
;
1735 /* Symbol at start of function. */
1737 Elf_Internal_Sym
*sym
;
1738 struct elf_link_hash_entry
*h
;
1740 /* Function section. */
1743 /* Where last called from, and number of sections called from. */
1744 asection
*last_caller
;
1745 unsigned int call_count
;
1746 /* Address range of (this part of) function. */
1750 /* Distance from root of call tree. Tail and hot/cold branches
1751 count as one deeper. We aren't counting stack frames here. */
1753 /* Set if global symbol. */
1754 unsigned int global
: 1;
1755 /* Set if known to be start of function (as distinct from a hunk
1756 in hot/cold section. */
1757 unsigned int is_func
: 1;
1758 /* Set if not a root node. */
1759 unsigned int non_root
: 1;
1760 /* Flags used during call tree traversal. It's cheaper to replicate
1761 the visit flags than have one which needs clearing after a traversal. */
1762 unsigned int visit1
: 1;
1763 unsigned int visit2
: 1;
1764 unsigned int marking
: 1;
1765 unsigned int visit3
: 1;
1766 unsigned int visit4
: 1;
1767 unsigned int visit5
: 1;
1768 unsigned int visit6
: 1;
1769 unsigned int visit7
: 1;
1772 struct spu_elf_stack_info
1776 /* Variable size array describing functions, one per contiguous
1777 address range belonging to a function. */
1778 struct function_info fun
[1];
1781 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1782 entries for section SEC. */
1784 static struct spu_elf_stack_info
*
1785 alloc_stack_info (asection
*sec
, int max_fun
)
1787 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1790 amt
= sizeof (struct spu_elf_stack_info
);
1791 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1792 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
1793 if (sec_data
->u
.i
.stack_info
!= NULL
)
1794 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
1795 return sec_data
->u
.i
.stack_info
;
1798 /* Add a new struct function_info describing a (part of a) function
1799 starting at SYM_H. Keep the array sorted by address. */
1801 static struct function_info
*
1802 maybe_insert_function (asection
*sec
,
1805 bfd_boolean is_func
)
1807 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1808 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1814 sinfo
= alloc_stack_info (sec
, 20);
1821 Elf_Internal_Sym
*sym
= sym_h
;
1822 off
= sym
->st_value
;
1823 size
= sym
->st_size
;
1827 struct elf_link_hash_entry
*h
= sym_h
;
1828 off
= h
->root
.u
.def
.value
;
1832 for (i
= sinfo
->num_fun
; --i
>= 0; )
1833 if (sinfo
->fun
[i
].lo
<= off
)
1838 /* Don't add another entry for an alias, but do update some
1840 if (sinfo
->fun
[i
].lo
== off
)
1842 /* Prefer globals over local syms. */
1843 if (global
&& !sinfo
->fun
[i
].global
)
1845 sinfo
->fun
[i
].global
= TRUE
;
1846 sinfo
->fun
[i
].u
.h
= sym_h
;
1849 sinfo
->fun
[i
].is_func
= TRUE
;
1850 return &sinfo
->fun
[i
];
1852 /* Ignore a zero-size symbol inside an existing function. */
1853 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1854 return &sinfo
->fun
[i
];
1857 if (sinfo
->num_fun
>= sinfo
->max_fun
)
1859 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1860 bfd_size_type old
= amt
;
1862 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1863 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1864 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1865 sinfo
= bfd_realloc (sinfo
, amt
);
1868 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1869 sec_data
->u
.i
.stack_info
= sinfo
;
1872 if (++i
< sinfo
->num_fun
)
1873 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1874 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1875 sinfo
->fun
[i
].is_func
= is_func
;
1876 sinfo
->fun
[i
].global
= global
;
1877 sinfo
->fun
[i
].sec
= sec
;
1879 sinfo
->fun
[i
].u
.h
= sym_h
;
1881 sinfo
->fun
[i
].u
.sym
= sym_h
;
1882 sinfo
->fun
[i
].lo
= off
;
1883 sinfo
->fun
[i
].hi
= off
+ size
;
1884 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1885 sinfo
->num_fun
+= 1;
1886 return &sinfo
->fun
[i
];
1889 /* Return the name of FUN. */
1892 func_name (struct function_info
*fun
)
1896 Elf_Internal_Shdr
*symtab_hdr
;
1898 while (fun
->start
!= NULL
)
1902 return fun
->u
.h
->root
.root
.string
;
1905 if (fun
->u
.sym
->st_name
== 0)
1907 size_t len
= strlen (sec
->name
);
1908 char *name
= bfd_malloc (len
+ 10);
1911 sprintf (name
, "%s+%lx", sec
->name
,
1912 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1916 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1917 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1920 /* Read the instruction at OFF in SEC. Return true iff the instruction
1921 is a nop, lnop, or stop 0 (all zero insn). */
1924 is_nop (asection
*sec
, bfd_vma off
)
1926 unsigned char insn
[4];
1928 if (off
+ 4 > sec
->size
1929 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1931 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1933 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1938 /* Extend the range of FUN to cover nop padding up to LIMIT.
1939 Return TRUE iff some instruction other than a NOP was found. */
1942 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1944 bfd_vma off
= (fun
->hi
+ 3) & -4;
1946 while (off
< limit
&& is_nop (fun
->sec
, off
))
1957 /* Check and fix overlapping function ranges. Return TRUE iff there
1958 are gaps in the current info we have about functions in SEC. */
1961 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1963 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1964 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1966 bfd_boolean gaps
= FALSE
;
1971 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1972 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1974 /* Fix overlapping symbols. */
1975 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1976 const char *f2
= func_name (&sinfo
->fun
[i
]);
1978 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1979 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1981 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
1984 if (sinfo
->num_fun
== 0)
1988 if (sinfo
->fun
[0].lo
!= 0)
1990 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
1992 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
1994 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
1995 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
1997 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2003 /* Search current function info for a function that contains address
2004 OFFSET in section SEC. */
2006 static struct function_info
*
2007 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2009 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2010 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2014 hi
= sinfo
->num_fun
;
2017 mid
= (lo
+ hi
) / 2;
2018 if (offset
< sinfo
->fun
[mid
].lo
)
2020 else if (offset
>= sinfo
->fun
[mid
].hi
)
2023 return &sinfo
->fun
[mid
];
2025 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2030 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2031 if CALLEE was new. If this function return FALSE, CALLEE should
2035 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2037 struct call_info
**pp
, *p
;
2039 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2040 if (p
->fun
== callee
->fun
)
2042 /* Tail calls use less stack than normal calls. Retain entry
2043 for normal call over one for tail call. */
2044 p
->is_tail
&= callee
->is_tail
;
2047 p
->fun
->start
= NULL
;
2048 p
->fun
->is_func
= TRUE
;
2051 /* Reorder list so most recent call is first. */
2053 p
->next
= caller
->call_list
;
2054 caller
->call_list
= p
;
2057 callee
->next
= caller
->call_list
;
2059 caller
->call_list
= callee
;
2063 /* Copy CALL and insert the copy into CALLER. */
2066 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2068 struct call_info
*callee
;
2069 callee
= bfd_malloc (sizeof (*callee
));
2073 if (!insert_callee (caller
, callee
))
2078 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2079 overlay stub sections. */
2082 interesting_section (asection
*s
, bfd
*obfd
)
2084 return (s
->output_section
!= NULL
2085 && s
->output_section
->owner
== obfd
2086 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2087 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2091 /* Rummage through the relocs for SEC, looking for function calls.
2092 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2093 mark destination symbols on calls as being functions. Also
2094 look at branches, which may be tail calls or go to hot/cold
2095 section part of same function. */
2098 mark_functions_via_relocs (asection
*sec
,
2099 struct bfd_link_info
*info
,
2102 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2103 Elf_Internal_Shdr
*symtab_hdr
;
2105 static bfd_boolean warned
;
2107 if (!interesting_section (sec
, info
->output_bfd
)
2108 || sec
->reloc_count
== 0)
2111 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2113 if (internal_relocs
== NULL
)
2116 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2117 psyms
= &symtab_hdr
->contents
;
2118 irela
= internal_relocs
;
2119 irelaend
= irela
+ sec
->reloc_count
;
2120 for (; irela
< irelaend
; irela
++)
2122 enum elf_spu_reloc_type r_type
;
2123 unsigned int r_indx
;
2125 Elf_Internal_Sym
*sym
;
2126 struct elf_link_hash_entry
*h
;
2128 bfd_boolean reject
, is_call
;
2129 struct function_info
*caller
;
2130 struct call_info
*callee
;
2133 r_type
= ELF32_R_TYPE (irela
->r_info
);
2134 if (r_type
!= R_SPU_REL16
2135 && r_type
!= R_SPU_ADDR16
)
2138 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
))
2142 r_indx
= ELF32_R_SYM (irela
->r_info
);
2143 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2147 || sym_sec
->output_section
== NULL
2148 || sym_sec
->output_section
->owner
!= info
->output_bfd
)
2154 unsigned char insn
[4];
2156 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2157 irela
->r_offset
, 4))
2159 if (is_branch (insn
))
2161 is_call
= (insn
[0] & 0xfd) == 0x31;
2162 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2163 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2166 info
->callbacks
->einfo
2167 (_("%B(%A+0x%v): call to non-code section"
2168 " %B(%A), analysis incomplete\n"),
2169 sec
->owner
, sec
, irela
->r_offset
,
2170 sym_sec
->owner
, sym_sec
);
2178 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
)
2186 /* For --auto-overlay, count possible stubs we need for
2187 function pointer references. */
2188 unsigned int sym_type
;
2192 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2193 if (sym_type
== STT_FUNC
)
2194 spu_hash_table (info
)->non_ovly_stub
+= 1;
2199 val
= h
->root
.u
.def
.value
;
2201 val
= sym
->st_value
;
2202 val
+= irela
->r_addend
;
2206 struct function_info
*fun
;
2208 if (irela
->r_addend
!= 0)
2210 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2213 fake
->st_value
= val
;
2215 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2219 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2221 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2224 if (irela
->r_addend
!= 0
2225 && fun
->u
.sym
!= sym
)
2230 caller
= find_function (sec
, irela
->r_offset
, info
);
2233 callee
= bfd_malloc (sizeof *callee
);
2237 callee
->fun
= find_function (sym_sec
, val
, info
);
2238 if (callee
->fun
== NULL
)
2240 callee
->is_tail
= !is_call
;
2241 callee
->is_pasted
= FALSE
;
2243 if (callee
->fun
->last_caller
!= sec
)
2245 callee
->fun
->last_caller
= sec
;
2246 callee
->fun
->call_count
+= 1;
2248 if (!insert_callee (caller
, callee
))
2251 && !callee
->fun
->is_func
2252 && callee
->fun
->stack
== 0)
2254 /* This is either a tail call or a branch from one part of
2255 the function to another, ie. hot/cold section. If the
2256 destination has been called by some other function then
2257 it is a separate function. We also assume that functions
2258 are not split across input files. */
2259 if (sec
->owner
!= sym_sec
->owner
)
2261 callee
->fun
->start
= NULL
;
2262 callee
->fun
->is_func
= TRUE
;
2264 else if (callee
->fun
->start
== NULL
)
2265 callee
->fun
->start
= caller
;
2268 struct function_info
*callee_start
;
2269 struct function_info
*caller_start
;
2270 callee_start
= callee
->fun
;
2271 while (callee_start
->start
)
2272 callee_start
= callee_start
->start
;
2273 caller_start
= caller
;
2274 while (caller_start
->start
)
2275 caller_start
= caller_start
->start
;
2276 if (caller_start
!= callee_start
)
2278 callee
->fun
->start
= NULL
;
2279 callee
->fun
->is_func
= TRUE
;
2288 /* Handle something like .init or .fini, which has a piece of a function.
2289 These sections are pasted together to form a single function. */
2292 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2294 struct bfd_link_order
*l
;
2295 struct _spu_elf_section_data
*sec_data
;
2296 struct spu_elf_stack_info
*sinfo
;
2297 Elf_Internal_Sym
*fake
;
2298 struct function_info
*fun
, *fun_start
;
2300 fake
= bfd_zmalloc (sizeof (*fake
));
2304 fake
->st_size
= sec
->size
;
2306 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2307 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2311 /* Find a function immediately preceding this section. */
2313 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2315 if (l
->u
.indirect
.section
== sec
)
2317 if (fun_start
!= NULL
)
2319 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2323 fun
->start
= fun_start
;
2325 callee
->is_tail
= TRUE
;
2326 callee
->is_pasted
= TRUE
;
2328 if (!insert_callee (fun_start
, callee
))
2334 if (l
->type
== bfd_indirect_link_order
2335 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2336 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2337 && sinfo
->num_fun
!= 0)
2338 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2341 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2345 /* Map address ranges in code sections to functions. */
2348 discover_functions (struct bfd_link_info
*info
)
2352 Elf_Internal_Sym
***psym_arr
;
2353 asection
***sec_arr
;
2354 bfd_boolean gaps
= FALSE
;
2357 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2360 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2361 if (psym_arr
== NULL
)
2363 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2364 if (sec_arr
== NULL
)
2368 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2370 ibfd
= ibfd
->link_next
, bfd_idx
++)
2372 extern const bfd_target bfd_elf32_spu_vec
;
2373 Elf_Internal_Shdr
*symtab_hdr
;
2376 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2377 asection
**psecs
, **p
;
2379 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2382 /* Read all the symbols. */
2383 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2384 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2388 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2389 if (interesting_section (sec
, info
->output_bfd
))
2397 if (symtab_hdr
->contents
!= NULL
)
2399 /* Don't use cached symbols since the generic ELF linker
2400 code only reads local symbols, and we need globals too. */
2401 free (symtab_hdr
->contents
);
2402 symtab_hdr
->contents
= NULL
;
2404 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2406 symtab_hdr
->contents
= (void *) syms
;
2410 /* Select defined function symbols that are going to be output. */
2411 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2414 psym_arr
[bfd_idx
] = psyms
;
2415 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2418 sec_arr
[bfd_idx
] = psecs
;
2419 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2420 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2421 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2425 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2426 if (s
!= NULL
&& interesting_section (s
, info
->output_bfd
))
2429 symcount
= psy
- psyms
;
2432 /* Sort them by section and offset within section. */
2433 sort_syms_syms
= syms
;
2434 sort_syms_psecs
= psecs
;
2435 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2437 /* Now inspect the function symbols. */
2438 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2440 asection
*s
= psecs
[*psy
- syms
];
2441 Elf_Internal_Sym
**psy2
;
2443 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2444 if (psecs
[*psy2
- syms
] != s
)
2447 if (!alloc_stack_info (s
, psy2
- psy
))
2452 /* First install info about properly typed and sized functions.
2453 In an ideal world this will cover all code sections, except
2454 when partitioning functions into hot and cold sections,
2455 and the horrible pasted together .init and .fini functions. */
2456 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2459 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2461 asection
*s
= psecs
[sy
- syms
];
2462 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2467 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2468 if (interesting_section (sec
, info
->output_bfd
))
2469 gaps
|= check_function_ranges (sec
, info
);
2474 /* See if we can discover more function symbols by looking at
2476 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2478 ibfd
= ibfd
->link_next
, bfd_idx
++)
2482 if (psym_arr
[bfd_idx
] == NULL
)
2485 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2486 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2490 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2492 ibfd
= ibfd
->link_next
, bfd_idx
++)
2494 Elf_Internal_Shdr
*symtab_hdr
;
2496 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2499 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2502 psecs
= sec_arr
[bfd_idx
];
2504 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2505 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2508 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2509 if (interesting_section (sec
, info
->output_bfd
))
2510 gaps
|= check_function_ranges (sec
, info
);
2514 /* Finally, install all globals. */
2515 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2519 s
= psecs
[sy
- syms
];
2521 /* Global syms might be improperly typed functions. */
2522 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2523 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2525 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2531 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2533 extern const bfd_target bfd_elf32_spu_vec
;
2536 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2539 /* Some of the symbols we've installed as marking the
2540 beginning of functions may have a size of zero. Extend
2541 the range of such functions to the beginning of the
2542 next symbol of interest. */
2543 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2544 if (interesting_section (sec
, info
->output_bfd
))
2546 struct _spu_elf_section_data
*sec_data
;
2547 struct spu_elf_stack_info
*sinfo
;
2549 sec_data
= spu_elf_section_data (sec
);
2550 sinfo
= sec_data
->u
.i
.stack_info
;
2554 bfd_vma hi
= sec
->size
;
2556 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2558 sinfo
->fun
[fun_idx
].hi
= hi
;
2559 hi
= sinfo
->fun
[fun_idx
].lo
;
2562 /* No symbols in this section. Must be .init or .fini
2563 or something similar. */
2564 else if (!pasted_function (sec
, info
))
2570 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2572 ibfd
= ibfd
->link_next
, bfd_idx
++)
2574 if (psym_arr
[bfd_idx
] == NULL
)
2577 free (psym_arr
[bfd_idx
]);
2578 free (sec_arr
[bfd_idx
]);
2587 /* Iterate over all function_info we have collected, calling DOIT on
2588 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2592 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
2593 struct bfd_link_info
*,
2595 struct bfd_link_info
*info
,
2601 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2603 extern const bfd_target bfd_elf32_spu_vec
;
2606 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2609 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2611 struct _spu_elf_section_data
*sec_data
;
2612 struct spu_elf_stack_info
*sinfo
;
2614 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2615 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
2618 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2619 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
2620 if (!doit (&sinfo
->fun
[i
], info
, param
))
2628 /* Transfer call info attached to struct function_info entries for
2629 all of a given function's sections to the first entry. */
2632 transfer_calls (struct function_info
*fun
,
2633 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2634 void *param ATTRIBUTE_UNUSED
)
2636 struct function_info
*start
= fun
->start
;
2640 struct call_info
*call
, *call_next
;
2642 while (start
->start
!= NULL
)
2643 start
= start
->start
;
2644 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
2646 call_next
= call
->next
;
2647 if (!insert_callee (start
, call
))
2650 fun
->call_list
= NULL
;
2655 /* Mark nodes in the call graph that are called by some other node. */
2658 mark_non_root (struct function_info
*fun
,
2659 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2660 void *param ATTRIBUTE_UNUSED
)
2662 struct call_info
*call
;
2667 for (call
= fun
->call_list
; call
; call
= call
->next
)
2669 call
->fun
->non_root
= TRUE
;
2670 mark_non_root (call
->fun
, 0, 0);
2675 /* Remove cycles from the call graph. Set depth of nodes. */
2678 remove_cycles (struct function_info
*fun
,
2679 struct bfd_link_info
*info
,
2682 struct call_info
**callp
, *call
;
2683 unsigned int depth
= *(unsigned int *) param
;
2684 unsigned int max_depth
= depth
;
2688 fun
->marking
= TRUE
;
2690 callp
= &fun
->call_list
;
2691 while ((call
= *callp
) != NULL
)
2693 if (!call
->fun
->visit2
)
2695 call
->max_depth
= depth
+ !call
->is_pasted
;
2696 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
2698 if (max_depth
< call
->max_depth
)
2699 max_depth
= call
->max_depth
;
2701 else if (call
->fun
->marking
)
2703 if (!spu_hash_table (info
)->auto_overlay
)
2705 const char *f1
= func_name (fun
);
2706 const char *f2
= func_name (call
->fun
);
2708 info
->callbacks
->info (_("Stack analysis will ignore the call "
2712 *callp
= call
->next
;
2716 callp
= &call
->next
;
2718 fun
->marking
= FALSE
;
2719 *(unsigned int *) param
= max_depth
;
2723 /* Populate call_list for each function. */
2726 build_call_tree (struct bfd_link_info
*info
)
2731 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2733 extern const bfd_target bfd_elf32_spu_vec
;
2736 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2739 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2740 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
2744 /* Transfer call info from hot/cold section part of function
2746 if (!spu_hash_table (info
)->auto_overlay
2747 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
2750 /* Find the call graph root(s). */
2751 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
2754 /* Remove cycles from the call graph. We start from the root node(s)
2755 so that we break cycles in a reasonable place. */
2757 return for_each_node (remove_cycles
, info
, &depth
, TRUE
);
2760 /* qsort predicate to sort calls by max_depth then count. */
2763 sort_calls (const void *a
, const void *b
)
2765 struct call_info
*const *c1
= a
;
2766 struct call_info
*const *c2
= b
;
2769 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
2773 delta
= (*c2
)->count
- (*c1
)->count
;
2781 unsigned int max_overlay_size
;
2784 /* Set linker_mark and gc_mark on any sections that we will put in
2785 overlays. These flags are used by the generic ELF linker, but we
2786 won't be continuing on to bfd_elf_final_link so it is OK to use
2787 them. linker_mark is clear before we get here. Set segment_mark
2788 on sections that are part of a pasted function (excluding the last
2791 Set up function rodata section if --overlay-rodata. We don't
2792 currently include merged string constant rodata sections since
2794 Sort the call graph so that the deepest nodes will be visited
2798 mark_overlay_section (struct function_info
*fun
,
2799 struct bfd_link_info
*info
,
2802 struct call_info
*call
;
2804 struct _mos_param
*mos_param
= param
;
2810 if (!fun
->sec
->linker_mark
)
2812 fun
->sec
->linker_mark
= 1;
2813 fun
->sec
->gc_mark
= 1;
2814 fun
->sec
->segment_mark
= 0;
2815 /* Ensure SEC_CODE is set on this text section (it ought to
2816 be!), and SEC_CODE is clear on rodata sections. We use
2817 this flag to differentiate the two overlay section types. */
2818 fun
->sec
->flags
|= SEC_CODE
;
2819 if (spu_hash_table (info
)->auto_overlay
& OVERLAY_RODATA
)
2824 /* Find the rodata section corresponding to this function's
2826 if (strcmp (fun
->sec
->name
, ".text") == 0)
2828 name
= bfd_malloc (sizeof (".rodata"));
2831 memcpy (name
, ".rodata", sizeof (".rodata"));
2833 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
2835 size_t len
= strlen (fun
->sec
->name
);
2836 name
= bfd_malloc (len
+ 3);
2839 memcpy (name
, ".rodata", sizeof (".rodata"));
2840 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
2842 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
2844 size_t len
= strlen (fun
->sec
->name
) + 1;
2845 name
= bfd_malloc (len
);
2848 memcpy (name
, fun
->sec
->name
, len
);
2854 asection
*rodata
= NULL
;
2855 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
2856 if (group_sec
== NULL
)
2857 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
2859 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
2861 if (strcmp (group_sec
->name
, name
) == 0)
2866 group_sec
= elf_section_data (group_sec
)->next_in_group
;
2868 fun
->rodata
= rodata
;
2871 fun
->rodata
->linker_mark
= 1;
2872 fun
->rodata
->gc_mark
= 1;
2873 fun
->rodata
->flags
&= ~SEC_CODE
;
2877 size
= fun
->sec
->size
;
2879 size
+= fun
->rodata
->size
;
2880 if (mos_param
->max_overlay_size
< size
)
2881 mos_param
->max_overlay_size
= size
;
2885 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2890 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
2894 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2895 calls
[count
++] = call
;
2897 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
2899 fun
->call_list
= NULL
;
2903 calls
[count
]->next
= fun
->call_list
;
2904 fun
->call_list
= calls
[count
];
2909 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2911 if (call
->is_pasted
)
2913 /* There can only be one is_pasted call per function_info. */
2914 BFD_ASSERT (!fun
->sec
->segment_mark
);
2915 fun
->sec
->segment_mark
= 1;
2917 if (!mark_overlay_section (call
->fun
, info
, param
))
2921 /* Don't put entry code into an overlay. The overlay manager needs
2923 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
2924 == info
->output_bfd
->start_address
)
2926 fun
->sec
->linker_mark
= 0;
2927 if (fun
->rodata
!= NULL
)
2928 fun
->rodata
->linker_mark
= 0;
2933 /* If non-zero then unmark functions called from those within sections
2934 that we need to unmark. Unfortunately this isn't reliable since the
2935 call graph cannot know the destination of function pointer calls. */
2936 #define RECURSE_UNMARK 0
2939 asection
*exclude_input_section
;
2940 asection
*exclude_output_section
;
2941 unsigned long clearing
;
2944 /* Undo some of mark_overlay_section's work. */
2947 unmark_overlay_section (struct function_info
*fun
,
2948 struct bfd_link_info
*info
,
2951 struct call_info
*call
;
2952 struct _uos_param
*uos_param
= param
;
2953 unsigned int excluded
= 0;
2961 if (fun
->sec
== uos_param
->exclude_input_section
2962 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
2966 uos_param
->clearing
+= excluded
;
2968 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
2970 fun
->sec
->linker_mark
= 0;
2972 fun
->rodata
->linker_mark
= 0;
2975 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2976 if (!unmark_overlay_section (call
->fun
, info
, param
))
2980 uos_param
->clearing
-= excluded
;
2985 unsigned int lib_size
;
2986 asection
**lib_sections
;
2989 /* Add sections we have marked as belonging to overlays to an array
2990 for consideration as non-overlay sections. The array consist of
2991 pairs of sections, (text,rodata), for functions in the call graph. */
2994 collect_lib_sections (struct function_info
*fun
,
2995 struct bfd_link_info
*info
,
2998 struct _cl_param
*lib_param
= param
;
2999 struct call_info
*call
;
3006 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3009 size
= fun
->sec
->size
;
3011 size
+= fun
->rodata
->size
;
3012 if (size
> lib_param
->lib_size
)
3015 *lib_param
->lib_sections
++ = fun
->sec
;
3016 fun
->sec
->gc_mark
= 0;
3017 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3019 *lib_param
->lib_sections
++ = fun
->rodata
;
3020 fun
->rodata
->gc_mark
= 0;
3023 *lib_param
->lib_sections
++ = NULL
;
3025 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3026 collect_lib_sections (call
->fun
, info
, param
);
3031 /* qsort predicate to sort sections by call count. */
3034 sort_lib (const void *a
, const void *b
)
3036 asection
*const *s1
= a
;
3037 asection
*const *s2
= b
;
3038 struct _spu_elf_section_data
*sec_data
;
3039 struct spu_elf_stack_info
*sinfo
;
3043 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3044 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3047 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3048 delta
-= sinfo
->fun
[i
].call_count
;
3051 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3052 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3055 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3056 delta
+= sinfo
->fun
[i
].call_count
;
3065 /* Remove some sections from those marked to be in overlays. Choose
3066 those that are called from many places, likely library functions. */
3069 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3072 asection
**lib_sections
;
3073 unsigned int i
, lib_count
;
3074 struct _cl_param collect_lib_param
;
3075 struct function_info dummy_caller
;
3077 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3079 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3081 extern const bfd_target bfd_elf32_spu_vec
;
3084 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3087 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3088 if (sec
->linker_mark
3089 && sec
->size
< lib_size
3090 && (sec
->flags
& SEC_CODE
) != 0)
3093 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3094 if (lib_sections
== NULL
)
3095 return (unsigned int) -1;
3096 collect_lib_param
.lib_size
= lib_size
;
3097 collect_lib_param
.lib_sections
= lib_sections
;
3098 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3100 return (unsigned int) -1;
3101 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3103 /* Sort sections so that those with the most calls are first. */
3105 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3107 for (i
= 0; i
< lib_count
; i
++)
3109 unsigned int tmp
, stub_size
;
3111 struct _spu_elf_section_data
*sec_data
;
3112 struct spu_elf_stack_info
*sinfo
;
3114 sec
= lib_sections
[2 * i
];
3115 /* If this section is OK, its size must be less than lib_size. */
3117 /* If it has a rodata section, then add that too. */
3118 if (lib_sections
[2 * i
+ 1])
3119 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3120 /* Add any new overlay call stubs needed by the section. */
3123 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3124 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3127 struct call_info
*call
;
3129 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3130 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3131 if (call
->fun
->sec
->linker_mark
)
3133 struct call_info
*p
;
3134 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3135 if (p
->fun
== call
->fun
)
3138 stub_size
+= OVL_STUB_SIZE
;
3141 if (tmp
+ stub_size
< lib_size
)
3143 struct call_info
**pp
, *p
;
3145 /* This section fits. Mark it as non-overlay. */
3146 lib_sections
[2 * i
]->linker_mark
= 0;
3147 if (lib_sections
[2 * i
+ 1])
3148 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3149 lib_size
-= tmp
+ stub_size
;
3150 /* Call stubs to the section we just added are no longer
3152 pp
= &dummy_caller
.call_list
;
3153 while ((p
= *pp
) != NULL
)
3154 if (!p
->fun
->sec
->linker_mark
)
3156 lib_size
+= OVL_STUB_SIZE
;
3162 /* Add new call stubs to dummy_caller. */
3163 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3164 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3167 struct call_info
*call
;
3169 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3170 for (call
= sinfo
->fun
[k
].call_list
;
3173 if (call
->fun
->sec
->linker_mark
)
3175 struct call_info
*callee
;
3176 callee
= bfd_malloc (sizeof (*callee
));
3178 return (unsigned int) -1;
3180 if (!insert_callee (&dummy_caller
, callee
))
3186 while (dummy_caller
.call_list
!= NULL
)
3188 struct call_info
*call
= dummy_caller
.call_list
;
3189 dummy_caller
.call_list
= call
->next
;
3192 for (i
= 0; i
< 2 * lib_count
; i
++)
3193 if (lib_sections
[i
])
3194 lib_sections
[i
]->gc_mark
= 1;
3195 free (lib_sections
);
3199 /* Build an array of overlay sections. The deepest node's section is
3200 added first, then its parent node's section, then everything called
3201 from the parent section. The idea being to group sections to
3202 minimise calls between different overlays. */
3205 collect_overlays (struct function_info
*fun
,
3206 struct bfd_link_info
*info
,
3209 struct call_info
*call
;
3210 bfd_boolean added_fun
;
3211 asection
***ovly_sections
= param
;
3217 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3218 if (!call
->is_pasted
)
3220 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3226 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3228 fun
->sec
->gc_mark
= 0;
3229 *(*ovly_sections
)++ = fun
->sec
;
3230 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3232 fun
->rodata
->gc_mark
= 0;
3233 *(*ovly_sections
)++ = fun
->rodata
;
3236 *(*ovly_sections
)++ = NULL
;
3239 /* Pasted sections must stay with the first section. We don't
3240 put pasted sections in the array, just the first section.
3241 Mark subsequent sections as already considered. */
3242 if (fun
->sec
->segment_mark
)
3244 struct function_info
*call_fun
= fun
;
3247 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3248 if (call
->is_pasted
)
3250 call_fun
= call
->fun
;
3251 call_fun
->sec
->gc_mark
= 0;
3252 if (call_fun
->rodata
)
3253 call_fun
->rodata
->gc_mark
= 0;
3259 while (call_fun
->sec
->segment_mark
);
3263 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3264 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3269 struct _spu_elf_section_data
*sec_data
;
3270 struct spu_elf_stack_info
*sinfo
;
3272 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3273 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3276 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3277 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3285 struct _sum_stack_param
{
3287 size_t overall_stack
;
3288 bfd_boolean emit_stack_syms
;
3291 /* Descend the call graph for FUN, accumulating total stack required. */
3294 sum_stack (struct function_info
*fun
,
3295 struct bfd_link_info
*info
,
3298 struct call_info
*call
;
3299 struct function_info
*max
;
3300 size_t stack
, cum_stack
;
3302 bfd_boolean has_call
;
3303 struct _sum_stack_param
*sum_stack_param
= param
;
3304 struct spu_link_hash_table
*htab
;
3306 cum_stack
= fun
->stack
;
3307 sum_stack_param
->cum_stack
= cum_stack
;
3313 for (call
= fun
->call_list
; call
; call
= call
->next
)
3315 if (!call
->is_pasted
)
3317 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3319 stack
= sum_stack_param
->cum_stack
;
3320 /* Include caller stack for normal calls, don't do so for
3321 tail calls. fun->stack here is local stack usage for
3323 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3324 stack
+= fun
->stack
;
3325 if (cum_stack
< stack
)
3332 sum_stack_param
->cum_stack
= cum_stack
;
3334 /* Now fun->stack holds cumulative stack. */
3335 fun
->stack
= cum_stack
;
3339 && sum_stack_param
->overall_stack
< cum_stack
)
3340 sum_stack_param
->overall_stack
= cum_stack
;
3342 htab
= spu_hash_table (info
);
3343 if (htab
->auto_overlay
)
3346 f1
= func_name (fun
);
3348 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3349 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3350 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3354 info
->callbacks
->minfo (_(" calls:\n"));
3355 for (call
= fun
->call_list
; call
; call
= call
->next
)
3356 if (!call
->is_pasted
)
3358 const char *f2
= func_name (call
->fun
);
3359 const char *ann1
= call
->fun
== max
? "*" : " ";
3360 const char *ann2
= call
->is_tail
? "t" : " ";
3362 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3366 if (sum_stack_param
->emit_stack_syms
)
3368 char *name
= bfd_malloc (18 + strlen (f1
));
3369 struct elf_link_hash_entry
*h
;
3374 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3375 sprintf (name
, "__stack_%s", f1
);
3377 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3379 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3382 && (h
->root
.type
== bfd_link_hash_new
3383 || h
->root
.type
== bfd_link_hash_undefined
3384 || h
->root
.type
== bfd_link_hash_undefweak
))
3386 h
->root
.type
= bfd_link_hash_defined
;
3387 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3388 h
->root
.u
.def
.value
= cum_stack
;
3393 h
->ref_regular_nonweak
= 1;
3394 h
->forced_local
= 1;
3402 /* SEC is part of a pasted function. Return the call_info for the
3403 next section of this function. */
3405 static struct call_info
*
3406 find_pasted_call (asection
*sec
)
3408 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
3409 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
3410 struct call_info
*call
;
3413 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3414 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
3415 if (call
->is_pasted
)
3421 /* qsort predicate to sort bfds by file name. */
3424 sort_bfds (const void *a
, const void *b
)
3426 bfd
*const *abfd1
= a
;
3427 bfd
*const *abfd2
= b
;
3429 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
3432 /* Handle --auto-overlay. */
3434 static void spu_elf_auto_overlay (struct bfd_link_info
*, void (*) (void))
3438 spu_elf_auto_overlay (struct bfd_link_info
*info
,
3439 void (*spu_elf_load_ovl_mgr
) (void))
3443 struct elf_segment_map
*m
;
3444 unsigned int fixed_size
, lo
, hi
;
3445 struct spu_link_hash_table
*htab
;
3446 unsigned int base
, i
, count
, bfd_count
;
3448 asection
**ovly_sections
, **ovly_p
;
3450 unsigned int total_overlay_size
, overlay_size
;
3451 struct elf_link_hash_entry
*h
;
3452 struct _mos_param mos_param
;
3453 struct _uos_param uos_param
;
3454 struct function_info dummy_caller
;
3456 /* Find the extents of our loadable image. */
3457 lo
= (unsigned int) -1;
3459 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
3460 if (m
->p_type
== PT_LOAD
)
3461 for (i
= 0; i
< m
->count
; i
++)
3462 if (m
->sections
[i
]->size
!= 0)
3464 if (m
->sections
[i
]->vma
< lo
)
3465 lo
= m
->sections
[i
]->vma
;
3466 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
3467 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
3469 fixed_size
= hi
+ 1 - lo
;
3471 if (!discover_functions (info
))
3474 if (!build_call_tree (info
))
3477 uos_param
.exclude_input_section
= 0;
3478 uos_param
.exclude_output_section
3479 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
3481 htab
= spu_hash_table (info
);
3482 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
3483 FALSE
, FALSE
, FALSE
);
3485 && (h
->root
.type
== bfd_link_hash_defined
3486 || h
->root
.type
== bfd_link_hash_defweak
)
3489 /* We have a user supplied overlay manager. */
3490 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
3494 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
3495 builtin version to .text, and will adjust .text size. */
3496 asection
*text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3498 fixed_size
-= text
->size
;
3499 spu_elf_load_ovl_mgr ();
3500 text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3502 fixed_size
+= text
->size
;
3505 /* Mark overlay sections, and find max overlay section size. */
3506 mos_param
.max_overlay_size
= 0;
3507 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
3510 /* We can't put the overlay manager or interrupt routines in
3512 uos_param
.clearing
= 0;
3513 if ((uos_param
.exclude_input_section
3514 || uos_param
.exclude_output_section
)
3515 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
3519 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3521 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
3522 if (bfd_arr
== NULL
)
3525 /* Count overlay sections, and subtract their sizes from "fixed_size". */
3528 total_overlay_size
= 0;
3529 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3531 extern const bfd_target bfd_elf32_spu_vec
;
3533 unsigned int old_count
;
3535 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3539 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3540 if (sec
->linker_mark
)
3542 if ((sec
->flags
& SEC_CODE
) != 0)
3544 fixed_size
-= sec
->size
;
3545 total_overlay_size
+= sec
->size
;
3547 if (count
!= old_count
)
3548 bfd_arr
[bfd_count
++] = ibfd
;
3551 /* Since the overlay link script selects sections by file name and
3552 section name, ensure that file names are unique. */
3555 bfd_boolean ok
= TRUE
;
3557 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
3558 for (i
= 1; i
< bfd_count
; ++i
)
3559 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
3561 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
3563 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
3564 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
3565 bfd_arr
[i
]->filename
,
3566 bfd_arr
[i
]->my_archive
->filename
);
3568 info
->callbacks
->einfo (_("%s duplicated\n"),
3569 bfd_arr
[i
]->filename
);
3575 info
->callbacks
->einfo (_("sorry, no support for duplicate "
3576 "object files in auto-overlay script\n"));
3577 bfd_set_error (bfd_error_bad_value
);
3583 if (htab
->reserved
== 0)
3585 struct _sum_stack_param sum_stack_param
;
3587 sum_stack_param
.emit_stack_syms
= 0;
3588 sum_stack_param
.overall_stack
= 0;
3589 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3591 htab
->reserved
= sum_stack_param
.overall_stack
+ htab
->extra_stack_space
;
3593 fixed_size
+= htab
->reserved
;
3594 fixed_size
+= htab
->non_ovly_stub
* OVL_STUB_SIZE
;
3595 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
3597 /* Guess number of overlays. Assuming overlay buffer is on
3598 average only half full should be conservative. */
3599 ovlynum
= total_overlay_size
* 2 / (htab
->local_store
- fixed_size
);
3600 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
3601 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
3604 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
3605 info
->callbacks
->einfo (_("non-overlay plus maximum overlay size "
3606 "of 0x%x exceeds local store\n"),
3607 fixed_size
+ mos_param
.max_overlay_size
);
3609 /* Now see if we should put some functions in the non-overlay area. */
3610 if (fixed_size
< htab
->overlay_fixed
3611 && htab
->overlay_fixed
+ mos_param
.max_overlay_size
< htab
->local_store
)
3613 unsigned int lib_size
= htab
->overlay_fixed
- fixed_size
;
3614 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
3615 if (lib_size
== (unsigned int) -1)
3617 fixed_size
= htab
->overlay_fixed
- lib_size
;
3620 /* Build an array of sections, suitably sorted to place into
3622 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
3623 if (ovly_sections
== NULL
)
3625 ovly_p
= ovly_sections
;
3626 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
3628 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
3630 script
= htab
->spu_elf_open_overlay_script ();
3632 if (fprintf (script
, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
3635 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3636 overlay_size
= htab
->local_store
- fixed_size
;
3639 while (base
< count
)
3641 unsigned int size
= 0;
3644 for (i
= base
; i
< count
; i
++)
3648 unsigned int stub_size
;
3649 struct call_info
*call
, *pasty
;
3650 struct _spu_elf_section_data
*sec_data
;
3651 struct spu_elf_stack_info
*sinfo
;
3654 /* See whether we can add this section to the current
3655 overlay without overflowing our overlay buffer. */
3656 sec
= ovly_sections
[2 * i
];
3657 tmp
= size
+ sec
->size
;
3658 if (ovly_sections
[2 * i
+ 1])
3659 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
3660 if (tmp
> overlay_size
)
3662 if (sec
->segment_mark
)
3664 /* Pasted sections must stay together, so add their
3666 struct call_info
*pasty
= find_pasted_call (sec
);
3667 while (pasty
!= NULL
)
3669 struct function_info
*call_fun
= pasty
->fun
;
3670 tmp
+= call_fun
->sec
->size
;
3671 if (call_fun
->rodata
)
3672 tmp
+= call_fun
->rodata
->size
;
3673 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
3674 if (pasty
->is_pasted
)
3678 if (tmp
> overlay_size
)
3681 /* If we add this section, we might need new overlay call
3682 stubs. Add any overlay section calls to dummy_call. */
3684 sec_data
= spu_elf_section_data (sec
);
3685 sinfo
= sec_data
->u
.i
.stack_info
;
3686 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3687 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3688 if (call
->is_pasted
)
3690 BFD_ASSERT (pasty
== NULL
);
3693 else if (call
->fun
->sec
->linker_mark
)
3695 if (!copy_callee (&dummy_caller
, call
))
3698 while (pasty
!= NULL
)
3700 struct function_info
*call_fun
= pasty
->fun
;
3702 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3703 if (call
->is_pasted
)
3705 BFD_ASSERT (pasty
== NULL
);
3708 else if (!copy_callee (&dummy_caller
, call
))
3712 /* Calculate call stub size. */
3714 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
3718 stub_size
+= OVL_STUB_SIZE
;
3719 /* If the call is within this overlay, we won't need a
3721 for (k
= base
; k
< i
+ 1; k
++)
3722 if (call
->fun
->sec
== ovly_sections
[2 * k
])
3724 stub_size
-= OVL_STUB_SIZE
;
3728 if (tmp
+ stub_size
> overlay_size
)
3736 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
3737 ovly_sections
[2 * i
]->owner
,
3738 ovly_sections
[2 * i
],
3739 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
3740 bfd_set_error (bfd_error_bad_value
);
3744 if (fprintf (script
, " .ovly%d {\n", ++ovlynum
) <= 0)
3746 for (j
= base
; j
< i
; j
++)
3748 asection
*sec
= ovly_sections
[2 * j
];
3750 if (fprintf (script
, " %s%c%s (%s)\n",
3751 (sec
->owner
->my_archive
!= NULL
3752 ? sec
->owner
->my_archive
->filename
: ""),
3753 info
->path_separator
,
3754 sec
->owner
->filename
,
3757 if (sec
->segment_mark
)
3759 struct call_info
*call
= find_pasted_call (sec
);
3760 while (call
!= NULL
)
3762 struct function_info
*call_fun
= call
->fun
;
3763 sec
= call_fun
->sec
;
3764 if (fprintf (script
, " %s%c%s (%s)\n",
3765 (sec
->owner
->my_archive
!= NULL
3766 ? sec
->owner
->my_archive
->filename
: ""),
3767 info
->path_separator
,
3768 sec
->owner
->filename
,
3771 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3772 if (call
->is_pasted
)
3778 for (j
= base
; j
< i
; j
++)
3780 asection
*sec
= ovly_sections
[2 * j
+ 1];
3782 && fprintf (script
, " %s%c%s (%s)\n",
3783 (sec
->owner
->my_archive
!= NULL
3784 ? sec
->owner
->my_archive
->filename
: ""),
3785 info
->path_separator
,
3786 sec
->owner
->filename
,
3790 sec
= ovly_sections
[2 * j
];
3791 if (sec
->segment_mark
)
3793 struct call_info
*call
= find_pasted_call (sec
);
3794 while (call
!= NULL
)
3796 struct function_info
*call_fun
= call
->fun
;
3797 sec
= call_fun
->rodata
;
3799 && fprintf (script
, " %s%c%s (%s)\n",
3800 (sec
->owner
->my_archive
!= NULL
3801 ? sec
->owner
->my_archive
->filename
: ""),
3802 info
->path_separator
,
3803 sec
->owner
->filename
,
3806 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3807 if (call
->is_pasted
)
3813 if (fprintf (script
, " }\n") <= 0)
3816 while (dummy_caller
.call_list
!= NULL
)
3818 struct call_info
*call
= dummy_caller
.call_list
;
3819 dummy_caller
.call_list
= call
->next
;
3825 free (ovly_sections
);
3827 if (fprintf (script
, " }\n}\nINSERT AFTER .text;\n") <= 0)
3829 if (fclose (script
) != 0)
3832 if (htab
->auto_overlay
& AUTO_RELINK
)
3833 htab
->spu_elf_relink ();
3838 bfd_set_error (bfd_error_system_call
);
3840 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
3844 /* Provide an estimate of total stack required. */
3847 spu_elf_stack_analysis (struct bfd_link_info
*info
, int emit_stack_syms
)
3849 struct _sum_stack_param sum_stack_param
;
3851 if (!discover_functions (info
))
3854 if (!build_call_tree (info
))
3857 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
3858 info
->callbacks
->minfo (_("\nStack size for functions. "
3859 "Annotations: '*' max stack, 't' tail call\n"));
3861 sum_stack_param
.emit_stack_syms
= emit_stack_syms
;
3862 sum_stack_param
.overall_stack
= 0;
3863 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3866 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
3867 (bfd_vma
) sum_stack_param
.overall_stack
);
3871 /* Perform a final link. */
3874 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
3876 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3878 if (htab
->auto_overlay
)
3879 spu_elf_auto_overlay (info
, htab
->spu_elf_load_ovl_mgr
);
3881 if (htab
->stack_analysis
3882 && !spu_elf_stack_analysis (info
, htab
->emit_stack_syms
))
3883 info
->callbacks
->einfo ("%X%P: stack analysis error: %E\n");
3885 return bfd_elf_final_link (output_bfd
, info
);
3888 /* Called when not normally emitting relocs, ie. !info->relocatable
3889 and !info->emitrelocations. Returns a count of special relocs
3890 that need to be emitted. */
3893 spu_elf_count_relocs (asection
*sec
, Elf_Internal_Rela
*relocs
)
3895 unsigned int count
= 0;
3896 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
3898 for (; relocs
< relend
; relocs
++)
3900 int r_type
= ELF32_R_TYPE (relocs
->r_info
);
3901 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
3908 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
3911 spu_elf_relocate_section (bfd
*output_bfd
,
3912 struct bfd_link_info
*info
,
3914 asection
*input_section
,
3916 Elf_Internal_Rela
*relocs
,
3917 Elf_Internal_Sym
*local_syms
,
3918 asection
**local_sections
)
3920 Elf_Internal_Shdr
*symtab_hdr
;
3921 struct elf_link_hash_entry
**sym_hashes
;
3922 Elf_Internal_Rela
*rel
, *relend
;
3923 struct spu_link_hash_table
*htab
;
3924 asection
*ea
= bfd_get_section_by_name (output_bfd
, "._ea");
3926 bfd_boolean emit_these_relocs
= FALSE
;
3927 bfd_boolean is_ea_sym
;
3930 htab
= spu_hash_table (info
);
3931 stubs
= (htab
->stub_sec
!= NULL
3932 && maybe_needs_stubs (input_section
, output_bfd
));
3933 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
3934 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
3937 relend
= relocs
+ input_section
->reloc_count
;
3938 for (; rel
< relend
; rel
++)
3941 reloc_howto_type
*howto
;
3942 unsigned int r_symndx
;
3943 Elf_Internal_Sym
*sym
;
3945 struct elf_link_hash_entry
*h
;
3946 const char *sym_name
;
3949 bfd_reloc_status_type r
;
3950 bfd_boolean unresolved_reloc
;
3952 enum _stub_type stub_type
;
3954 r_symndx
= ELF32_R_SYM (rel
->r_info
);
3955 r_type
= ELF32_R_TYPE (rel
->r_info
);
3956 howto
= elf_howto_table
+ r_type
;
3957 unresolved_reloc
= FALSE
;
3962 if (r_symndx
< symtab_hdr
->sh_info
)
3964 sym
= local_syms
+ r_symndx
;
3965 sec
= local_sections
[r_symndx
];
3966 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
3967 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
3971 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
3972 r_symndx
, symtab_hdr
, sym_hashes
,
3974 unresolved_reloc
, warned
);
3975 sym_name
= h
->root
.root
.string
;
3978 if (sec
!= NULL
&& elf_discarded_section (sec
))
3980 /* For relocs against symbols from removed linkonce sections,
3981 or sections discarded by a linker script, we just want the
3982 section contents zeroed. Avoid any special processing. */
3983 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
3989 if (info
->relocatable
)
3992 is_ea_sym
= (ea
!= NULL
3994 && sec
->output_section
== ea
);
3996 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4000 /* ._ea is a special section that isn't allocated in SPU
4001 memory, but rather occupies space in PPU memory as
4002 part of an embedded ELF image. If this reloc is
4003 against a symbol defined in ._ea, then transform the
4004 reloc into an equivalent one without a symbol
4005 relative to the start of the ELF image. */
4006 rel
->r_addend
+= (relocation
4008 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4009 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4011 emit_these_relocs
= TRUE
;
4016 unresolved_reloc
= TRUE
;
4018 if (unresolved_reloc
)
4020 (*_bfd_error_handler
)
4021 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4023 bfd_get_section_name (input_bfd
, input_section
),
4024 (long) rel
->r_offset
,
4030 /* If this symbol is in an overlay area, we may need to relocate
4031 to the overlay stub. */
4032 addend
= rel
->r_addend
;
4034 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4035 contents
, info
)) != no_stub
)
4037 unsigned int ovl
= 0;
4038 struct got_entry
*g
, **head
;
4040 if (stub_type
!= nonovl_stub
)
4041 ovl
= (spu_elf_section_data (input_section
->output_section
)
4045 head
= &h
->got
.glist
;
4047 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4049 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4050 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4055 relocation
= g
->stub_addr
;
4059 r
= _bfd_final_link_relocate (howto
,
4063 rel
->r_offset
, relocation
, addend
);
4065 if (r
!= bfd_reloc_ok
)
4067 const char *msg
= (const char *) 0;
4071 case bfd_reloc_overflow
:
4072 if (!((*info
->callbacks
->reloc_overflow
)
4073 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4074 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4078 case bfd_reloc_undefined
:
4079 if (!((*info
->callbacks
->undefined_symbol
)
4080 (info
, sym_name
, input_bfd
, input_section
,
4081 rel
->r_offset
, TRUE
)))
4085 case bfd_reloc_outofrange
:
4086 msg
= _("internal error: out of range error");
4089 case bfd_reloc_notsupported
:
4090 msg
= _("internal error: unsupported relocation error");
4093 case bfd_reloc_dangerous
:
4094 msg
= _("internal error: dangerous error");
4098 msg
= _("internal error: unknown error");
4103 if (!((*info
->callbacks
->warning
)
4104 (info
, msg
, sym_name
, input_bfd
, input_section
,
4113 && emit_these_relocs
4114 && !info
->emitrelocations
)
4116 Elf_Internal_Rela
*wrel
;
4117 Elf_Internal_Shdr
*rel_hdr
;
4119 wrel
= rel
= relocs
;
4120 relend
= relocs
+ input_section
->reloc_count
;
4121 for (; rel
< relend
; rel
++)
4125 r_type
= ELF32_R_TYPE (rel
->r_info
);
4126 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4129 input_section
->reloc_count
= wrel
- relocs
;
4130 /* Backflips for _bfd_elf_link_output_relocs. */
4131 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
4132 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
4139 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4142 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
4143 const char *sym_name ATTRIBUTE_UNUSED
,
4144 Elf_Internal_Sym
*sym
,
4145 asection
*sym_sec ATTRIBUTE_UNUSED
,
4146 struct elf_link_hash_entry
*h
)
4148 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4150 if (!info
->relocatable
4151 && htab
->stub_sec
!= NULL
4153 && (h
->root
.type
== bfd_link_hash_defined
4154 || h
->root
.type
== bfd_link_hash_defweak
)
4156 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
4158 struct got_entry
*g
;
4160 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
4161 if (g
->addend
== 0 && g
->ovl
== 0)
4163 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
4164 (htab
->stub_sec
[0]->output_section
->owner
,
4165 htab
->stub_sec
[0]->output_section
));
4166 sym
->st_value
= g
->stub_addr
;
/* Non-zero when linking a CELL SPU plugin; set via spu_elf_plugin and
   read by spu_elf_post_process_headers.  */
static int spu_plugin = 0;

/* Record whether we are building a plugin (called from the linker).  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
4182 /* Set ELF header e_type for plugins. */
4185 spu_elf_post_process_headers (bfd
*abfd
,
4186 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
4190 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
4192 i_ehdrp
->e_type
= ET_DYN
;
4196 /* We may add an extra PT_LOAD segment for .toe. We also need extra
4197 segments for overlays. */
4200 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4202 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4203 int extra
= htab
->num_overlays
;
4209 sec
= bfd_get_section_by_name (abfd
, ".toe");
4210 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
4216 /* Remove .toe section from other PT_LOAD segments and put it in
4217 a segment of its own. Put overlays in separate segments too. */
4220 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
4223 struct elf_segment_map
*m
;
4229 toe
= bfd_get_section_by_name (abfd
, ".toe");
4230 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4231 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
4232 for (i
= 0; i
< m
->count
; i
++)
4233 if ((s
= m
->sections
[i
]) == toe
4234 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
4236 struct elf_segment_map
*m2
;
4239 if (i
+ 1 < m
->count
)
4241 amt
= sizeof (struct elf_segment_map
);
4242 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
4243 m2
= bfd_zalloc (abfd
, amt
);
4246 m2
->count
= m
->count
- (i
+ 1);
4247 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
4248 m2
->count
* sizeof (m
->sections
[0]));
4249 m2
->p_type
= PT_LOAD
;
4257 amt
= sizeof (struct elf_segment_map
);
4258 m2
= bfd_zalloc (abfd
, amt
);
4261 m2
->p_type
= PT_LOAD
;
4263 m2
->sections
[0] = s
;
4273 /* Tweak the section type of .note.spu_name. */
4276 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
4277 Elf_Internal_Shdr
*hdr
,
4280 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
4281 hdr
->sh_type
= SHT_NOTE
;
4285 /* Tweak phdrs before writing them out. */
4288 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4290 const struct elf_backend_data
*bed
;
4291 struct elf_obj_tdata
*tdata
;
4292 Elf_Internal_Phdr
*phdr
, *last
;
4293 struct spu_link_hash_table
*htab
;
4300 bed
= get_elf_backend_data (abfd
);
4301 tdata
= elf_tdata (abfd
);
4303 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
4304 htab
= spu_hash_table (info
);
4305 if (htab
->num_overlays
!= 0)
4307 struct elf_segment_map
*m
;
4310 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
4312 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
4314 /* Mark this as an overlay header. */
4315 phdr
[i
].p_flags
|= PF_OVERLAY
;
4317 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0)
4319 bfd_byte
*p
= htab
->ovtab
->contents
;
4320 unsigned int off
= o
* 16 + 8;
4322 /* Write file_off into _ovly_table. */
4323 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
4328 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
4329 of 16. This should always be possible when using the standard
4330 linker scripts, but don't create overlapping segments if
4331 someone is playing games with linker scripts. */
4333 for (i
= count
; i
-- != 0; )
4334 if (phdr
[i
].p_type
== PT_LOAD
)
4338 adjust
= -phdr
[i
].p_filesz
& 15;
4341 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
4344 adjust
= -phdr
[i
].p_memsz
& 15;
4347 && phdr
[i
].p_filesz
!= 0
4348 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
4349 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
4352 if (phdr
[i
].p_filesz
!= 0)
4356 if (i
== (unsigned int) -1)
4357 for (i
= count
; i
-- != 0; )
4358 if (phdr
[i
].p_type
== PT_LOAD
)
4362 adjust
= -phdr
[i
].p_filesz
& 15;
4363 phdr
[i
].p_filesz
+= adjust
;
4365 adjust
= -phdr
[i
].p_memsz
& 15;
4366 phdr
[i
].p_memsz
+= adjust
;
4372 #define TARGET_BIG_SYM bfd_elf32_spu_vec
4373 #define TARGET_BIG_NAME "elf32-spu"
4374 #define ELF_ARCH bfd_arch_spu
4375 #define ELF_MACHINE_CODE EM_SPU
4376 /* This matches the alignment need for DMA. */
4377 #define ELF_MAXPAGESIZE 0x80
4378 #define elf_backend_rela_normal 1
4379 #define elf_backend_can_gc_sections 1
4381 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
4382 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
4383 #define elf_info_to_howto spu_elf_info_to_howto
4384 #define elf_backend_count_relocs spu_elf_count_relocs
4385 #define elf_backend_relocate_section spu_elf_relocate_section
4386 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
4387 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
4388 #define elf_backend_object_p spu_elf_object_p
4389 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
4390 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
4392 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
4393 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
4394 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
4395 #define elf_backend_post_process_headers spu_elf_post_process_headers
4396 #define elf_backend_fake_sections spu_elf_fake_sections
4397 #define elf_backend_special_sections spu_elf_special_sections
4398 #define bfd_elf32_bfd_final_link spu_elf_final_link
4400 #include "elf32-target.h"