1 /* SPU specific support for 32-bit ELF
3 Copyright (C) 2006-2023 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
#include "sysdep.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libiberty.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"
30 /* All users of this file have bfd_octets_per_byte (abfd, sec) == 1. */
31 #define OCTETS_PER_BYTE(ABFD, SEC) 1
33 /* We use RELA style relocs. Don't define USE_REL. */
35 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
39 /* Values of type 'enum elf_spu_reloc_type' are used to index this
40 array, so it must be declared in the order of that type. */
42 static reloc_howto_type elf_howto_table
[] = {
43 HOWTO (R_SPU_NONE
, 0, 0, 0, false, 0, complain_overflow_dont
,
44 bfd_elf_generic_reloc
, "SPU_NONE",
45 false, 0, 0x00000000, false),
46 HOWTO (R_SPU_ADDR10
, 4, 4, 10, false, 14, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR10",
48 false, 0, 0x00ffc000, false),
49 HOWTO (R_SPU_ADDR16
, 2, 4, 16, false, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16",
51 false, 0, 0x007fff80, false),
52 HOWTO (R_SPU_ADDR16_HI
, 16, 4, 16, false, 7, complain_overflow_bitfield
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
54 false, 0, 0x007fff80, false),
55 HOWTO (R_SPU_ADDR16_LO
, 0, 4, 16, false, 7, complain_overflow_dont
,
56 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
57 false, 0, 0x007fff80, false),
58 HOWTO (R_SPU_ADDR18
, 0, 4, 18, false, 7, complain_overflow_bitfield
,
59 bfd_elf_generic_reloc
, "SPU_ADDR18",
60 false, 0, 0x01ffff80, false),
61 HOWTO (R_SPU_ADDR32
, 0, 4, 32, false, 0, complain_overflow_dont
,
62 bfd_elf_generic_reloc
, "SPU_ADDR32",
63 false, 0, 0xffffffff, false),
64 HOWTO (R_SPU_REL16
, 2, 4, 16, true, 7, complain_overflow_bitfield
,
65 bfd_elf_generic_reloc
, "SPU_REL16",
66 false, 0, 0x007fff80, true),
67 HOWTO (R_SPU_ADDR7
, 0, 4, 7, false, 14, complain_overflow_dont
,
68 bfd_elf_generic_reloc
, "SPU_ADDR7",
69 false, 0, 0x001fc000, false),
70 HOWTO (R_SPU_REL9
, 2, 4, 9, true, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9",
72 false, 0, 0x0180007f, true),
73 HOWTO (R_SPU_REL9I
, 2, 4, 9, true, 0, complain_overflow_signed
,
74 spu_elf_rel9
, "SPU_REL9I",
75 false, 0, 0x0000c07f, true),
76 HOWTO (R_SPU_ADDR10I
, 0, 4, 10, false, 14, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR10I",
78 false, 0, 0x00ffc000, false),
79 HOWTO (R_SPU_ADDR16I
, 0, 4, 16, false, 7, complain_overflow_signed
,
80 bfd_elf_generic_reloc
, "SPU_ADDR16I",
81 false, 0, 0x007fff80, false),
82 HOWTO (R_SPU_REL32
, 0, 4, 32, true, 0, complain_overflow_dont
,
83 bfd_elf_generic_reloc
, "SPU_REL32",
84 false, 0, 0xffffffff, true),
85 HOWTO (R_SPU_ADDR16X
, 0, 4, 16, false, 7, complain_overflow_bitfield
,
86 bfd_elf_generic_reloc
, "SPU_ADDR16X",
87 false, 0, 0x007fff80, false),
88 HOWTO (R_SPU_PPU32
, 0, 4, 32, false, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU32",
90 false, 0, 0xffffffff, false),
91 HOWTO (R_SPU_PPU64
, 0, 8, 64, false, 0, complain_overflow_dont
,
92 bfd_elf_generic_reloc
, "SPU_PPU64",
94 HOWTO (R_SPU_ADD_PIC
, 0, 0, 0, false, 0, complain_overflow_dont
,
95 bfd_elf_generic_reloc
, "SPU_ADD_PIC",
96 false, 0, 0x00000000, false),
99 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
100 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
101 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
105 static enum elf_spu_reloc_type
106 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
111 return (enum elf_spu_reloc_type
) -1;
114 case BFD_RELOC_SPU_IMM10W
:
116 case BFD_RELOC_SPU_IMM16W
:
118 case BFD_RELOC_SPU_LO16
:
119 return R_SPU_ADDR16_LO
;
120 case BFD_RELOC_SPU_HI16
:
121 return R_SPU_ADDR16_HI
;
122 case BFD_RELOC_SPU_IMM18
:
124 case BFD_RELOC_SPU_PCREL16
:
126 case BFD_RELOC_SPU_IMM7
:
128 case BFD_RELOC_SPU_IMM8
:
130 case BFD_RELOC_SPU_PCREL9a
:
132 case BFD_RELOC_SPU_PCREL9b
:
134 case BFD_RELOC_SPU_IMM10
:
135 return R_SPU_ADDR10I
;
136 case BFD_RELOC_SPU_IMM16
:
137 return R_SPU_ADDR16I
;
140 case BFD_RELOC_32_PCREL
:
142 case BFD_RELOC_SPU_PPU32
:
144 case BFD_RELOC_SPU_PPU64
:
146 case BFD_RELOC_SPU_ADD_PIC
:
147 return R_SPU_ADD_PIC
;
152 spu_elf_info_to_howto (bfd
*abfd
,
154 Elf_Internal_Rela
*dst
)
156 enum elf_spu_reloc_type r_type
;
158 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
159 /* PR 17512: file: 90c2a92e. */
160 if (r_type
>= R_SPU_max
)
162 /* xgettext:c-format */
163 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
165 bfd_set_error (bfd_error_bad_value
);
168 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
172 static reloc_howto_type
*
173 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
174 bfd_reloc_code_real_type code
)
176 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
178 if (r_type
== (enum elf_spu_reloc_type
) -1)
181 return elf_howto_table
+ r_type
;
184 static reloc_howto_type
*
185 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
190 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
191 if (elf_howto_table
[i
].name
!= NULL
192 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
193 return &elf_howto_table
[i
];
198 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
200 static bfd_reloc_status_type
201 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
202 void *data
, asection
*input_section
,
203 bfd
*output_bfd
, char **error_message
)
205 bfd_size_type octets
;
209 /* If this is a relocatable link (output_bfd test tells us), just
210 call the generic function. Any adjustment will be done at final
212 if (output_bfd
!= NULL
)
213 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
214 input_section
, output_bfd
, error_message
);
216 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
217 return bfd_reloc_outofrange
;
218 octets
= reloc_entry
->address
* OCTETS_PER_BYTE (abfd
, input_section
);
220 /* Get symbol value. */
222 if (!bfd_is_com_section (symbol
->section
))
224 if (symbol
->section
->output_section
)
225 val
+= symbol
->section
->output_section
->vma
;
227 val
+= reloc_entry
->addend
;
229 /* Make it pc-relative. */
230 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
233 if (val
+ 256 >= 512)
234 return bfd_reloc_overflow
;
236 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
238 /* Move two high bits of value to REL9I and REL9 position.
239 The mask will take care of selecting the right field. */
240 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
241 insn
&= ~reloc_entry
->howto
->dst_mask
;
242 insn
|= val
& reloc_entry
->howto
->dst_mask
;
243 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
248 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
250 if (!sec
->used_by_bfd
)
252 struct _spu_elf_section_data
*sdata
;
254 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
257 sec
->used_by_bfd
= sdata
;
260 return _bfd_elf_new_section_hook (abfd
, sec
);
263 /* Set up overlay info for executables. */
266 spu_elf_object_p (bfd
*abfd
)
268 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
270 unsigned int i
, num_ovl
, num_buf
;
271 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
272 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
273 Elf_Internal_Phdr
*last_phdr
= NULL
;
275 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
276 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
281 if (last_phdr
== NULL
282 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
285 for (j
= 1; j
< elf_numsections (abfd
); j
++)
287 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
289 if (shdr
->bfd_section
!= NULL
290 && ELF_SECTION_SIZE (shdr
, phdr
) != 0
291 && ELF_SECTION_IN_SEGMENT (shdr
, phdr
))
293 asection
*sec
= shdr
->bfd_section
;
294 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
295 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
303 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
304 strip --strip-unneeded will not remove them. */
307 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
309 if (sym
->name
!= NULL
310 && sym
->section
!= bfd_abs_section_ptr
311 && startswith (sym
->name
, "_EAR_"))
312 sym
->flags
|= BSF_KEEP
;
315 /* SPU ELF linker hash table. */
317 struct spu_link_hash_table
319 struct elf_link_hash_table elf
;
321 struct spu_elf_params
*params
;
323 /* Shortcuts to overlay sections. */
329 /* Count of stubs in each overlay section. */
330 unsigned int *stub_count
;
332 /* The stub section for each overlay section. */
335 struct elf_link_hash_entry
*ovly_entry
[2];
337 /* Number of overlay buffers. */
338 unsigned int num_buf
;
340 /* Total number of overlays. */
341 unsigned int num_overlays
;
343 /* For soft icache. */
344 unsigned int line_size_log2
;
345 unsigned int num_lines_log2
;
346 unsigned int fromelem_size_log2
;
348 /* How much memory we have. */
349 unsigned int local_store
;
351 /* Count of overlay stubs needed in non-overlay area. */
352 unsigned int non_ovly_stub
;
354 /* Pointer to the fixup section */
358 unsigned int stub_err
: 1;
361 /* Hijack the generic got fields for overlay stub accounting. */
365 struct got_entry
*next
;
374 #define spu_hash_table(p) \
375 ((is_elf_hash_table ((p)->hash) \
376 && elf_hash_table_id (elf_hash_table (p)) == SPU_ELF_DATA) \
377 ? (struct spu_link_hash_table *) (p)->hash : NULL)
/* One entry in a function's list of outgoing calls/branches.  */

struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  /* Number of times this call site was seen.  */
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};
393 /* List of functions called. Also branches to hot/cold part of
395 struct call_info
*call_list
;
396 /* For hot/cold part of function, point to owner. */
397 struct function_info
*start
;
398 /* Symbol at start of function. */
400 Elf_Internal_Sym
*sym
;
401 struct elf_link_hash_entry
*h
;
403 /* Function section. */
406 /* Where last called from, and number of sections called from. */
407 asection
*last_caller
;
408 unsigned int call_count
;
409 /* Address range of (this part of) function. */
411 /* Offset where we found a store of lr, or -1 if none found. */
413 /* Offset where we found the stack adjustment insn. */
417 /* Distance from root of call tree. Tail and hot/cold branches
418 count as one deeper. We aren't counting stack frames here. */
420 /* Set if global symbol. */
421 unsigned int global
: 1;
422 /* Set if known to be start of function (as distinct from a hunk
423 in hot/cold section. */
424 unsigned int is_func
: 1;
425 /* Set if not a root node. */
426 unsigned int non_root
: 1;
427 /* Flags used during call tree traversal. It's cheaper to replicate
428 the visit flags than have one which needs clearing after a traversal. */
429 unsigned int visit1
: 1;
430 unsigned int visit2
: 1;
431 unsigned int marking
: 1;
432 unsigned int visit3
: 1;
433 unsigned int visit4
: 1;
434 unsigned int visit5
: 1;
435 unsigned int visit6
: 1;
436 unsigned int visit7
: 1;
439 struct spu_elf_stack_info
443 /* Variable size array describing functions, one per contiguous
444 address range belonging to a function. */
445 struct function_info fun
[1];
448 static struct function_info
*find_function (asection
*, bfd_vma
,
449 struct bfd_link_info
*);
451 /* Create a spu ELF linker hash table. */
453 static struct bfd_link_hash_table
*
454 spu_elf_link_hash_table_create (bfd
*abfd
)
456 struct spu_link_hash_table
*htab
;
458 htab
= bfd_zmalloc (sizeof (*htab
));
462 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
463 _bfd_elf_link_hash_newfunc
,
464 sizeof (struct elf_link_hash_entry
),
471 htab
->elf
.init_got_refcount
.refcount
= 0;
472 htab
->elf
.init_got_refcount
.glist
= NULL
;
473 htab
->elf
.init_got_offset
.offset
= 0;
474 htab
->elf
.init_got_offset
.glist
= NULL
;
475 return &htab
->elf
.root
;
479 spu_elf_setup (struct bfd_link_info
*info
, struct spu_elf_params
*params
)
481 bfd_vma max_branch_log2
;
483 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
484 htab
->params
= params
;
485 htab
->line_size_log2
= bfd_log2 (htab
->params
->line_size
);
486 htab
->num_lines_log2
= bfd_log2 (htab
->params
->num_lines
);
488 /* For the software i-cache, we provide a "from" list whose size
489 is a power-of-two number of quadwords, big enough to hold one
490 byte per outgoing branch. Compute this number here. */
491 max_branch_log2
= bfd_log2 (htab
->params
->max_branch
);
492 htab
->fromelem_size_log2
= max_branch_log2
> 4 ? max_branch_log2
- 4 : 0;
495 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
496 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
497 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
500 get_sym_h (struct elf_link_hash_entry
**hp
,
501 Elf_Internal_Sym
**symp
,
503 Elf_Internal_Sym
**locsymsp
,
504 unsigned long r_symndx
,
507 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
509 if (r_symndx
>= symtab_hdr
->sh_info
)
511 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
512 struct elf_link_hash_entry
*h
;
514 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
515 while (h
->root
.type
== bfd_link_hash_indirect
516 || h
->root
.type
== bfd_link_hash_warning
)
517 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
527 asection
*symsec
= NULL
;
528 if (h
->root
.type
== bfd_link_hash_defined
529 || h
->root
.type
== bfd_link_hash_defweak
)
530 symsec
= h
->root
.u
.def
.section
;
536 Elf_Internal_Sym
*sym
;
537 Elf_Internal_Sym
*locsyms
= *locsymsp
;
541 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
543 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
545 0, NULL
, NULL
, NULL
);
550 sym
= locsyms
+ r_symndx
;
559 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
565 /* Create the note section if not already present. This is done early so
566 that the linker maps the sections to the right place in the output. */
569 spu_elf_create_sections (struct bfd_link_info
*info
)
571 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
574 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
575 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
580 /* Make SPU_PTNOTE_SPUNAME section. */
587 ibfd
= info
->input_bfds
;
588 /* This should really be SEC_LINKER_CREATED, but then we'd need
589 to write out the section ourselves. */
590 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
591 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
593 || !bfd_set_section_alignment (s
, 4))
595 /* Because we didn't set SEC_LINKER_CREATED we need to set the
596 proper section type. */
597 elf_section_type (s
) = SHT_NOTE
;
599 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
600 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
601 size
+= (name_len
+ 3) & -4;
603 if (!bfd_set_section_size (s
, size
))
606 data
= bfd_zalloc (ibfd
, size
);
610 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
611 bfd_put_32 (ibfd
, name_len
, data
+ 4);
612 bfd_put_32 (ibfd
, 1, data
+ 8);
613 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
614 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
615 bfd_get_filename (info
->output_bfd
), name_len
);
619 if (htab
->params
->emit_fixups
)
624 if (htab
->elf
.dynobj
== NULL
)
625 htab
->elf
.dynobj
= ibfd
;
626 ibfd
= htab
->elf
.dynobj
;
627 flags
= (SEC_LOAD
| SEC_ALLOC
| SEC_READONLY
| SEC_HAS_CONTENTS
628 | SEC_IN_MEMORY
| SEC_LINKER_CREATED
);
629 s
= bfd_make_section_anyway_with_flags (ibfd
, ".fixup", flags
);
630 if (s
== NULL
|| !bfd_set_section_alignment (s
, 2))
638 /* qsort predicate to sort sections by vma. */
641 sort_sections (const void *a
, const void *b
)
643 const asection
*const *s1
= a
;
644 const asection
*const *s2
= b
;
645 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
648 return delta
< 0 ? -1 : 1;
650 return (*s1
)->index
- (*s2
)->index
;
653 /* Identify overlays in the output bfd, and number them.
654 Returns 0 on error, 1 if no overlays, 2 if overlays. */
657 spu_elf_find_overlays (struct bfd_link_info
*info
)
659 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
660 asection
**alloc_sec
;
661 unsigned int i
, n
, ovl_index
, num_buf
;
664 static const char *const entry_names
[2][2] = {
665 { "__ovly_load", "__icache_br_handler" },
666 { "__ovly_return", "__icache_call_handler" }
669 if (info
->output_bfd
->section_count
< 2)
673 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
674 if (alloc_sec
== NULL
)
677 /* Pick out all the alloced sections. */
678 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
679 if ((s
->flags
& SEC_ALLOC
) != 0
680 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
690 /* Sort them by vma. */
691 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
693 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
694 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
696 unsigned int prev_buf
= 0, set_id
= 0;
698 /* Look for an overlapping vma to find the first overlay section. */
699 bfd_vma vma_start
= 0;
701 for (i
= 1; i
< n
; i
++)
704 if (s
->vma
< ovl_end
)
706 asection
*s0
= alloc_sec
[i
- 1];
710 << (htab
->num_lines_log2
+ htab
->line_size_log2
)));
715 ovl_end
= s
->vma
+ s
->size
;
718 /* Now find any sections within the cache area. */
719 for (ovl_index
= 0, num_buf
= 0; i
< n
; i
++)
722 if (s
->vma
>= ovl_end
)
725 /* A section in an overlay area called .ovl.init is not
726 an overlay, in the sense that it might be loaded in
727 by the overlay manager, but rather the initial
728 section contents for the overlay buffer. */
729 if (!startswith (s
->name
, ".ovl.init"))
731 num_buf
= ((s
->vma
- vma_start
) >> htab
->line_size_log2
) + 1;
732 set_id
= (num_buf
== prev_buf
)? set_id
+ 1 : 0;
735 if ((s
->vma
- vma_start
) & (htab
->params
->line_size
- 1))
737 info
->callbacks
->einfo (_("%X%P: overlay section %pA "
738 "does not start on a cache line\n"),
740 bfd_set_error (bfd_error_bad_value
);
743 else if (s
->size
> htab
->params
->line_size
)
745 info
->callbacks
->einfo (_("%X%P: overlay section %pA "
746 "is larger than a cache line\n"),
748 bfd_set_error (bfd_error_bad_value
);
752 alloc_sec
[ovl_index
++] = s
;
753 spu_elf_section_data (s
)->u
.o
.ovl_index
754 = (set_id
<< htab
->num_lines_log2
) + num_buf
;
755 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
759 /* Ensure there are no more overlay sections. */
763 if (s
->vma
< ovl_end
)
765 info
->callbacks
->einfo (_("%X%P: overlay section %pA "
766 "is not in cache area\n"),
768 bfd_set_error (bfd_error_bad_value
);
772 ovl_end
= s
->vma
+ s
->size
;
777 /* Look for overlapping vmas. Any with overlap must be overlays.
778 Count them. Also count the number of overlay regions. */
779 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
782 if (s
->vma
< ovl_end
)
784 asection
*s0
= alloc_sec
[i
- 1];
786 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
789 if (!startswith (s0
->name
, ".ovl.init"))
791 alloc_sec
[ovl_index
] = s0
;
792 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
793 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= num_buf
;
796 ovl_end
= s
->vma
+ s
->size
;
798 if (!startswith (s
->name
, ".ovl.init"))
800 alloc_sec
[ovl_index
] = s
;
801 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
802 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
803 if (s0
->vma
!= s
->vma
)
805 /* xgettext:c-format */
806 info
->callbacks
->einfo (_("%X%P: overlay sections %pA "
807 "and %pA do not start at the "
810 bfd_set_error (bfd_error_bad_value
);
813 if (ovl_end
< s
->vma
+ s
->size
)
814 ovl_end
= s
->vma
+ s
->size
;
818 ovl_end
= s
->vma
+ s
->size
;
822 htab
->num_overlays
= ovl_index
;
823 htab
->num_buf
= num_buf
;
824 htab
->ovl_sec
= alloc_sec
;
829 for (i
= 0; i
< 2; i
++)
832 struct elf_link_hash_entry
*h
;
834 name
= entry_names
[i
][htab
->params
->ovly_flavour
];
835 h
= elf_link_hash_lookup (&htab
->elf
, name
, true, false, false);
839 if (h
->root
.type
== bfd_link_hash_new
)
841 h
->root
.type
= bfd_link_hash_undefined
;
843 h
->ref_regular_nonweak
= 1;
846 htab
->ovly_entry
[i
] = h
;
852 /* Non-zero to use bra in overlay stubs rather than br. */
855 #define BRA 0x30000000
856 #define BRASL 0x31000000
857 #define BR 0x32000000
858 #define BRSL 0x33000000
859 #define NOP 0x40200000
860 #define LNOP 0x00200000
861 #define ILA 0x42000000
863 /* Return true for all relative and absolute branch instructions.
871 brhnz 00100011 0.. */
874 is_branch (const unsigned char *insn
)
876 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
879 /* Return true for all indirect branch instructions.
887 bihnz 00100101 011 */
890 is_indirect_branch (const unsigned char *insn
)
892 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
895 /* Return true for branch hint instructions.
900 is_hint (const unsigned char *insn
)
902 return (insn
[0] & 0xfc) == 0x10;
905 /* True if INPUT_SECTION might need overlay stubs. */
908 maybe_needs_stubs (asection
*input_section
)
910 /* No stubs for debug sections and suchlike. */
911 if ((input_section
->flags
& SEC_ALLOC
) == 0)
914 /* No stubs for link-once sections that will be discarded. */
915 if (input_section
->output_section
== bfd_abs_section_ptr
)
918 /* Don't create stubs for .eh_frame references. */
919 if (strcmp (input_section
->name
, ".eh_frame") == 0)
941 /* Return non-zero if this reloc symbol should go via an overlay stub.
942 Return 2 if the stub must be in non-overlay area. */
944 static enum _stub_type
945 needs_ovl_stub (struct elf_link_hash_entry
*h
,
946 Elf_Internal_Sym
*sym
,
948 asection
*input_section
,
949 Elf_Internal_Rela
*irela
,
951 struct bfd_link_info
*info
)
953 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
954 enum elf_spu_reloc_type r_type
;
955 unsigned int sym_type
;
956 bool branch
, hint
, call
;
957 enum _stub_type ret
= no_stub
;
961 || sym_sec
->output_section
== bfd_abs_section_ptr
962 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
967 /* Ensure no stubs for user supplied overlay manager syms. */
968 if (h
== htab
->ovly_entry
[0] || h
== htab
->ovly_entry
[1])
971 /* setjmp always goes via an overlay stub, because then the return
972 and hence the longjmp goes via __ovly_return. That magically
973 makes setjmp/longjmp between overlays work. */
974 if (startswith (h
->root
.root
.string
, "setjmp")
975 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
982 sym_type
= ELF_ST_TYPE (sym
->st_info
);
984 r_type
= ELF32_R_TYPE (irela
->r_info
);
988 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
990 if (contents
== NULL
)
993 if (!bfd_get_section_contents (input_section
->owner
,
1000 contents
+= irela
->r_offset
;
1002 branch
= is_branch (contents
);
1003 hint
= is_hint (contents
);
1006 call
= (contents
[0] & 0xfd) == 0x31;
1008 && sym_type
!= STT_FUNC
1009 && contents
!= insn
)
1011 /* It's common for people to write assembly and forget
1012 to give function symbols the right type. Handle
1013 calls to such symbols, but warn so that (hopefully)
1014 people will fix their code. We need the symbol
1015 type to be correct to distinguish function pointer
1016 initialisation from other pointer initialisations. */
1017 const char *sym_name
;
1020 sym_name
= h
->root
.root
.string
;
1023 Elf_Internal_Shdr
*symtab_hdr
;
1024 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
1025 sym_name
= bfd_elf_sym_name (input_section
->owner
,
1031 /* xgettext:c-format */
1032 (_("warning: call to non-function symbol %s defined in %pB"),
1033 sym_name
, sym_sec
->owner
);
1039 if ((!branch
&& htab
->params
->ovly_flavour
== ovly_soft_icache
)
1040 || (sym_type
!= STT_FUNC
1041 && !(branch
|| hint
)
1042 && (sym_sec
->flags
& SEC_CODE
) == 0))
1045 /* Usually, symbols in non-overlay sections don't need stubs. */
1046 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
1047 && !htab
->params
->non_overlay_stubs
)
1050 /* A reference from some other section to a symbol in an overlay
1051 section needs a stub. */
1052 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
1053 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
1055 unsigned int lrlive
= 0;
1057 lrlive
= (contents
[1] & 0x70) >> 4;
1059 if (!lrlive
&& (call
|| sym_type
== STT_FUNC
))
1060 ret
= call_ovl_stub
;
1062 ret
= br000_ovl_stub
+ lrlive
;
1065 /* If this insn isn't a branch then we are possibly taking the
1066 address of a function and passing it out somehow. Soft-icache code
1067 always generates inline code to do indirect branches. */
1068 if (!(branch
|| hint
)
1069 && sym_type
== STT_FUNC
1070 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
1077 count_stub (struct spu_link_hash_table
*htab
,
1080 enum _stub_type stub_type
,
1081 struct elf_link_hash_entry
*h
,
1082 const Elf_Internal_Rela
*irela
)
1084 unsigned int ovl
= 0;
1085 struct got_entry
*g
, **head
;
1088 /* If this instruction is a branch or call, we need a stub
1089 for it. One stub per function per overlay.
1090 If it isn't a branch, then we are taking the address of
1091 this function so need a stub in the non-overlay area
1092 for it. One stub per function. */
1093 if (stub_type
!= nonovl_stub
)
1094 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1097 head
= &h
->got
.glist
;
1100 if (elf_local_got_ents (ibfd
) == NULL
)
1102 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
1103 * sizeof (*elf_local_got_ents (ibfd
)));
1104 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
1105 if (elf_local_got_ents (ibfd
) == NULL
)
1108 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1111 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1113 htab
->stub_count
[ovl
] += 1;
1119 addend
= irela
->r_addend
;
1123 struct got_entry
*gnext
;
1125 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1126 if (g
->addend
== addend
&& g
->ovl
== 0)
1131 /* Need a new non-overlay area stub. Zap other stubs. */
1132 for (g
= *head
; g
!= NULL
; g
= gnext
)
1135 if (g
->addend
== addend
)
1137 htab
->stub_count
[g
->ovl
] -= 1;
1145 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1146 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1152 g
= bfd_malloc (sizeof *g
);
1157 g
->stub_addr
= (bfd_vma
) -1;
1161 htab
->stub_count
[ovl
] += 1;
1167 /* Support two sizes of overlay stubs, a slower more compact stub of two
1168 instructions, and a faster stub of four instructions.
1169 Soft-icache stubs are four or eight words. */
1172 ovl_stub_size (struct spu_elf_params
*params
)
1174 return 16 << params
->ovly_flavour
>> params
->compact_stub
;
1178 ovl_stub_size_log2 (struct spu_elf_params
*params
)
1180 return 4 + params
->ovly_flavour
- params
->compact_stub
;
1183 /* Two instruction overlay stubs look like:
1185 brsl $75,__ovly_load
1186 .word target_ovl_and_address
1188 ovl_and_address is a word with the overlay number in the top 14 bits
1189 and local store address in the bottom 18 bits.
1191 Four instruction overlay stubs look like:
1195 ila $79,target_address
1198 Software icache stubs are:
1202 .word lrlive_branchlocalstoreaddr;
1203 brasl $75,__icache_br_handler
1208 build_stub (struct bfd_link_info
*info
,
1211 enum _stub_type stub_type
,
1212 struct elf_link_hash_entry
*h
,
1213 const Elf_Internal_Rela
*irela
,
1217 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1218 unsigned int ovl
, dest_ovl
, set_id
;
1219 struct got_entry
*g
, **head
;
1221 bfd_vma addend
, from
, to
, br_dest
, patt
;
1222 unsigned int lrlive
;
1225 if (stub_type
!= nonovl_stub
)
1226 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1229 head
= &h
->got
.glist
;
1231 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1235 addend
= irela
->r_addend
;
1237 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1239 g
= bfd_malloc (sizeof *g
);
1245 g
->br_addr
= (irela
->r_offset
1246 + isec
->output_offset
1247 + isec
->output_section
->vma
);
1253 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1254 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1259 if (g
->ovl
== 0 && ovl
!= 0)
1262 if (g
->stub_addr
!= (bfd_vma
) -1)
1266 sec
= htab
->stub_sec
[ovl
];
1267 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1268 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1269 g
->stub_addr
= from
;
1270 to
= (htab
->ovly_entry
[0]->root
.u
.def
.value
1271 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_offset
1272 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_section
->vma
);
1274 if (((dest
| to
| from
) & 3) != 0)
1279 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1281 if (htab
->params
->ovly_flavour
== ovly_normal
1282 && !htab
->params
->compact_stub
)
1284 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1285 sec
->contents
+ sec
->size
);
1286 bfd_put_32 (sec
->owner
, LNOP
,
1287 sec
->contents
+ sec
->size
+ 4);
1288 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1289 sec
->contents
+ sec
->size
+ 8);
1291 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1292 sec
->contents
+ sec
->size
+ 12);
1294 bfd_put_32 (sec
->owner
, BRA
+ ((to
<< 5) & 0x007fff80),
1295 sec
->contents
+ sec
->size
+ 12);
1297 else if (htab
->params
->ovly_flavour
== ovly_normal
1298 && htab
->params
->compact_stub
)
1301 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1302 sec
->contents
+ sec
->size
);
1304 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1305 sec
->contents
+ sec
->size
);
1306 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1307 sec
->contents
+ sec
->size
+ 4);
1309 else if (htab
->params
->ovly_flavour
== ovly_soft_icache
1310 && htab
->params
->compact_stub
)
1313 if (stub_type
== nonovl_stub
)
1315 else if (stub_type
== call_ovl_stub
)
1316 /* A brsl makes lr live and *(*sp+16) is live.
1317 Tail calls have the same liveness. */
1319 else if (!htab
->params
->lrlive_analysis
)
1320 /* Assume stack frame and lr save. */
1322 else if (irela
!= NULL
)
1324 /* Analyse branch instructions. */
1325 struct function_info
*caller
;
1328 caller
= find_function (isec
, irela
->r_offset
, info
);
1329 if (caller
->start
== NULL
)
1330 off
= irela
->r_offset
;
1333 struct function_info
*found
= NULL
;
1335 /* Find the earliest piece of this function that
1336 has frame adjusting instructions. We might
1337 see dynamic frame adjustment (eg. for alloca)
1338 in some later piece, but functions using
1339 alloca always set up a frame earlier. Frame
1340 setup instructions are always in one piece. */
1341 if (caller
->lr_store
!= (bfd_vma
) -1
1342 || caller
->sp_adjust
!= (bfd_vma
) -1)
1344 while (caller
->start
!= NULL
)
1346 caller
= caller
->start
;
1347 if (caller
->lr_store
!= (bfd_vma
) -1
1348 || caller
->sp_adjust
!= (bfd_vma
) -1)
1356 if (off
> caller
->sp_adjust
)
1358 if (off
> caller
->lr_store
)
1359 /* Only *(*sp+16) is live. */
1362 /* If no lr save, then we must be in a
1363 leaf function with a frame.
1364 lr is still live. */
1367 else if (off
> caller
->lr_store
)
1369 /* Between lr save and stack adjust. */
1371 /* This should never happen since prologues won't
1376 /* On entry to function. */
1379 if (stub_type
!= br000_ovl_stub
1380 && lrlive
!= stub_type
- br000_ovl_stub
)
1381 /* xgettext:c-format */
1382 info
->callbacks
->einfo (_("%pA:0x%v lrlive .brinfo (%u) differs "
1383 "from analysis (%u)\n"),
1384 isec
, irela
->r_offset
, lrlive
,
1385 stub_type
- br000_ovl_stub
);
1388 /* If given lrlive info via .brinfo, use it. */
1389 if (stub_type
> br000_ovl_stub
)
1390 lrlive
= stub_type
- br000_ovl_stub
;
1393 to
= (htab
->ovly_entry
[1]->root
.u
.def
.value
1394 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_offset
1395 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_section
->vma
);
1397 /* The branch that uses this stub goes to stub_addr + 4. We'll
1398 set up an xor pattern that can be used by the icache manager
1399 to modify this branch to go directly to its destination. */
1401 br_dest
= g
->stub_addr
;
1404 /* Except in the case of _SPUEAR_ stubs, the branch in
1405 question is the one in the stub itself. */
1406 BFD_ASSERT (stub_type
== nonovl_stub
);
1407 g
->br_addr
= g
->stub_addr
;
1411 set_id
= ((dest_ovl
- 1) >> htab
->num_lines_log2
) + 1;
1412 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1413 sec
->contents
+ sec
->size
);
1414 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1415 sec
->contents
+ sec
->size
+ 4);
1416 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1417 sec
->contents
+ sec
->size
+ 8);
1418 patt
= dest
^ br_dest
;
1419 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1420 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1421 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1422 sec
->contents
+ sec
->size
+ 12);
1425 /* Extra space for linked list entries. */
1431 sec
->size
+= ovl_stub_size (htab
->params
);
1433 if (htab
->params
->emit_stub_syms
)
1439 len
= 8 + sizeof (".ovl_call.") - 1;
1441 len
+= strlen (h
->root
.root
.string
);
1446 add
= (int) irela
->r_addend
& 0xffffffff;
1449 name
= bfd_malloc (len
+ 1);
1453 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1455 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1457 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1458 dest_sec
->id
& 0xffffffff,
1459 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1461 sprintf (name
+ len
- 9, "+%x", add
);
1463 h
= elf_link_hash_lookup (&htab
->elf
, name
, true, true, false);
1467 if (h
->root
.type
== bfd_link_hash_new
)
1469 h
->root
.type
= bfd_link_hash_defined
;
1470 h
->root
.u
.def
.section
= sec
;
1471 h
->size
= ovl_stub_size (htab
->params
);
1472 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1476 h
->ref_regular_nonweak
= 1;
1477 h
->forced_local
= 1;
1485 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1489 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1491 /* Symbols starting with _SPUEAR_ need a stub because they may be
1492 invoked by the PPU. */
1493 struct bfd_link_info
*info
= inf
;
1494 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1497 if ((h
->root
.type
== bfd_link_hash_defined
1498 || h
->root
.type
== bfd_link_hash_defweak
)
1500 && startswith (h
->root
.root
.string
, "_SPUEAR_")
1501 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1502 && sym_sec
->output_section
!= bfd_abs_section_ptr
1503 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1504 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1505 || htab
->params
->non_overlay_stubs
))
1507 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1514 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1516 /* Symbols starting with _SPUEAR_ need a stub because they may be
1517 invoked by the PPU. */
1518 struct bfd_link_info
*info
= inf
;
1519 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1522 if ((h
->root
.type
== bfd_link_hash_defined
1523 || h
->root
.type
== bfd_link_hash_defweak
)
1525 && startswith (h
->root
.root
.string
, "_SPUEAR_")
1526 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1527 && sym_sec
->output_section
!= bfd_abs_section_ptr
1528 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1529 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1530 || htab
->params
->non_overlay_stubs
))
1532 return build_stub (info
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1533 h
->root
.u
.def
.value
, sym_sec
);
1539 /* Size or build stubs. */
1542 process_stubs (struct bfd_link_info
*info
, bool build
)
1544 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1547 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
1549 extern const bfd_target spu_elf32_vec
;
1550 Elf_Internal_Shdr
*symtab_hdr
;
1552 Elf_Internal_Sym
*local_syms
= NULL
;
1554 if (ibfd
->xvec
!= &spu_elf32_vec
)
1557 /* We'll need the symbol table in a second. */
1558 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1559 if (symtab_hdr
->sh_info
== 0)
1562 /* Walk over each section attached to the input bfd. */
1563 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1565 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1567 /* If there aren't any relocs, then there's nothing more to do. */
1568 if ((isec
->flags
& SEC_RELOC
) == 0
1569 || isec
->reloc_count
== 0)
1572 if (!maybe_needs_stubs (isec
))
1575 /* Get the relocs. */
1576 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1578 if (internal_relocs
== NULL
)
1579 goto error_ret_free_local
;
1581 /* Now examine each relocation. */
1582 irela
= internal_relocs
;
1583 irelaend
= irela
+ isec
->reloc_count
;
1584 for (; irela
< irelaend
; irela
++)
1586 enum elf_spu_reloc_type r_type
;
1587 unsigned int r_indx
;
1589 Elf_Internal_Sym
*sym
;
1590 struct elf_link_hash_entry
*h
;
1591 enum _stub_type stub_type
;
1593 r_type
= ELF32_R_TYPE (irela
->r_info
);
1594 r_indx
= ELF32_R_SYM (irela
->r_info
);
1596 if (r_type
>= R_SPU_max
)
1598 bfd_set_error (bfd_error_bad_value
);
1599 error_ret_free_internal
:
1600 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1601 free (internal_relocs
);
1602 error_ret_free_local
:
1603 if (symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1608 /* Determine the reloc target section. */
1609 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1610 goto error_ret_free_internal
;
1612 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1614 if (stub_type
== no_stub
)
1616 else if (stub_type
== stub_error
)
1617 goto error_ret_free_internal
;
1619 if (htab
->stub_count
== NULL
)
1622 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1623 htab
->stub_count
= bfd_zmalloc (amt
);
1624 if (htab
->stub_count
== NULL
)
1625 goto error_ret_free_internal
;
1630 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1631 goto error_ret_free_internal
;
1638 dest
= h
->root
.u
.def
.value
;
1640 dest
= sym
->st_value
;
1641 dest
+= irela
->r_addend
;
1642 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1644 goto error_ret_free_internal
;
1648 /* We're done with the internal relocs, free them. */
1649 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1650 free (internal_relocs
);
1653 if (local_syms
!= NULL
1654 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1656 if (!info
->keep_memory
)
1659 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1666 /* Allocate space for overlay call and return stubs.
1667 Return 0 on error, 1 if no overlays, 2 otherwise. */
1670 spu_elf_size_stubs (struct bfd_link_info
*info
)
1672 struct spu_link_hash_table
*htab
;
1679 if (!process_stubs (info
, false))
1682 htab
= spu_hash_table (info
);
1683 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1687 ibfd
= info
->input_bfds
;
1688 if (htab
->stub_count
!= NULL
)
1690 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1691 htab
->stub_sec
= bfd_zmalloc (amt
);
1692 if (htab
->stub_sec
== NULL
)
1695 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1696 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1697 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1698 htab
->stub_sec
[0] = stub
;
1700 || !bfd_set_section_alignment (stub
,
1701 ovl_stub_size_log2 (htab
->params
)))
1703 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
);
1704 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1705 /* Extra space for linked list entries. */
1706 stub
->size
+= htab
->stub_count
[0] * 16;
1708 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1710 asection
*osec
= htab
->ovl_sec
[i
];
1711 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1712 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1713 htab
->stub_sec
[ovl
] = stub
;
1715 || !bfd_set_section_alignment (stub
,
1716 ovl_stub_size_log2 (htab
->params
)))
1718 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
);
1722 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1724 /* Space for icache manager tables.
1725 a) Tag array, one quadword per cache line.
1726 b) Rewrite "to" list, one quadword per cache line.
1727 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1728 a power-of-two number of full quadwords) per cache line. */
1731 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1732 if (htab
->ovtab
== NULL
1733 || !bfd_set_section_alignment (htab
->ovtab
, 4))
1736 htab
->ovtab
->size
= (16 + 16 + (16 << htab
->fromelem_size_log2
))
1737 << htab
->num_lines_log2
;
1739 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1740 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1741 if (htab
->init
== NULL
1742 || !bfd_set_section_alignment (htab
->init
, 4))
1745 htab
->init
->size
= 16;
1747 else if (htab
->stub_count
== NULL
)
1751 /* htab->ovtab consists of two arrays.
1761 . } _ovly_buf_table[];
1764 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1765 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1766 if (htab
->ovtab
== NULL
1767 || !bfd_set_section_alignment (htab
->ovtab
, 4))
1770 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1773 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1774 if (htab
->toe
== NULL
1775 || !bfd_set_section_alignment (htab
->toe
, 4))
1777 htab
->toe
->size
= 16;
1782 /* Called from ld to place overlay manager data sections. This is done
1783 after the overlay manager itself is loaded, mainly so that the
1784 linker's htab->init section is placed after any other .ovl.init
1788 spu_elf_place_overlay_data (struct bfd_link_info
*info
)
1790 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1793 if (htab
->stub_sec
!= NULL
)
1795 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[0], NULL
, ".text");
1797 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1799 asection
*osec
= htab
->ovl_sec
[i
];
1800 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1801 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[ovl
], osec
, NULL
);
1805 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1806 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1808 if (htab
->ovtab
!= NULL
)
1810 const char *ovout
= ".data";
1811 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1813 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1816 if (htab
->toe
!= NULL
)
1817 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1820 /* Functions to handle embedded spu_ovl.o object. */
1823 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1829 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1835 struct _ovl_stream
*os
;
1839 os
= (struct _ovl_stream
*) stream
;
1840 max
= (const char *) os
->end
- (const char *) os
->start
;
1842 if ((ufile_ptr
) offset
>= max
)
1846 if (count
> max
- offset
)
1847 count
= max
- offset
;
1849 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1854 ovl_mgr_stat (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1858 struct _ovl_stream
*os
= (struct _ovl_stream
*) stream
;
1860 memset (sb
, 0, sizeof (*sb
));
1861 sb
->st_size
= (const char *) os
->end
- (const char *) os
->start
;
1866 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1868 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1875 return *ovl_bfd
!= NULL
;
1879 overlay_index (asection
*sec
)
1882 || sec
->output_section
== bfd_abs_section_ptr
)
1884 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1887 /* Define an STT_OBJECT symbol. */
1889 static struct elf_link_hash_entry
*
1890 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1892 struct elf_link_hash_entry
*h
;
1894 h
= elf_link_hash_lookup (&htab
->elf
, name
, true, false, false);
1898 if (h
->root
.type
!= bfd_link_hash_defined
1901 h
->root
.type
= bfd_link_hash_defined
;
1902 h
->root
.u
.def
.section
= htab
->ovtab
;
1903 h
->type
= STT_OBJECT
;
1906 h
->ref_regular_nonweak
= 1;
1909 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1911 /* xgettext:c-format */
1912 _bfd_error_handler (_("%pB is not allowed to define %s"),
1913 h
->root
.u
.def
.section
->owner
,
1914 h
->root
.root
.string
);
1915 bfd_set_error (bfd_error_bad_value
);
1920 _bfd_error_handler (_("you are not allowed to define %s in a script"),
1921 h
->root
.root
.string
);
1922 bfd_set_error (bfd_error_bad_value
);
1929 /* Fill in all stubs and the overlay tables. */
1932 spu_elf_build_stubs (struct bfd_link_info
*info
)
1934 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1935 struct elf_link_hash_entry
*h
;
1941 if (htab
->num_overlays
!= 0)
1943 for (i
= 0; i
< 2; i
++)
1945 h
= htab
->ovly_entry
[i
];
1947 && (h
->root
.type
== bfd_link_hash_defined
1948 || h
->root
.type
== bfd_link_hash_defweak
)
1951 s
= h
->root
.u
.def
.section
->output_section
;
1952 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1954 _bfd_error_handler (_("%s in overlay section"),
1955 h
->root
.root
.string
);
1956 bfd_set_error (bfd_error_bad_value
);
1963 if (htab
->stub_sec
!= NULL
)
1965 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1966 if (htab
->stub_sec
[i
]->size
!= 0)
1968 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1969 htab
->stub_sec
[i
]->size
);
1970 if (htab
->stub_sec
[i
]->contents
== NULL
)
1972 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1973 htab
->stub_sec
[i
]->size
= 0;
1976 /* Fill in all the stubs. */
1977 process_stubs (info
, true);
1978 if (!htab
->stub_err
)
1979 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1983 _bfd_error_handler (_("overlay stub relocation overflow"));
1984 bfd_set_error (bfd_error_bad_value
);
1988 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1990 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1992 _bfd_error_handler (_("stubs don't match calculated size"));
1993 bfd_set_error (bfd_error_bad_value
);
1996 htab
->stub_sec
[i
]->rawsize
= 0;
2000 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
2003 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
2004 if (htab
->ovtab
->contents
== NULL
)
2007 p
= htab
->ovtab
->contents
;
2008 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
2012 h
= define_ovtab_symbol (htab
, "__icache_tag_array");
2015 h
->root
.u
.def
.value
= 0;
2016 h
->size
= 16 << htab
->num_lines_log2
;
2019 h
= define_ovtab_symbol (htab
, "__icache_tag_array_size");
2022 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
2023 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2025 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to");
2028 h
->root
.u
.def
.value
= off
;
2029 h
->size
= 16 << htab
->num_lines_log2
;
2032 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to_size");
2035 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
2036 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2038 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from");
2041 h
->root
.u
.def
.value
= off
;
2042 h
->size
= 16 << (htab
->fromelem_size_log2
+ htab
->num_lines_log2
);
2045 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from_size");
2048 h
->root
.u
.def
.value
= 16 << (htab
->fromelem_size_log2
2049 + htab
->num_lines_log2
);
2050 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2052 h
= define_ovtab_symbol (htab
, "__icache_log2_fromelemsize");
2055 h
->root
.u
.def
.value
= htab
->fromelem_size_log2
;
2056 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2058 h
= define_ovtab_symbol (htab
, "__icache_base");
2061 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
2062 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2063 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
2065 h
= define_ovtab_symbol (htab
, "__icache_linesize");
2068 h
->root
.u
.def
.value
= 1 << htab
->line_size_log2
;
2069 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2071 h
= define_ovtab_symbol (htab
, "__icache_log2_linesize");
2074 h
->root
.u
.def
.value
= htab
->line_size_log2
;
2075 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2077 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
2080 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
2081 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2083 h
= define_ovtab_symbol (htab
, "__icache_cachesize");
2086 h
->root
.u
.def
.value
= 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
);
2087 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2089 h
= define_ovtab_symbol (htab
, "__icache_log2_cachesize");
2092 h
->root
.u
.def
.value
= htab
->num_lines_log2
+ htab
->line_size_log2
;
2093 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2095 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_cachesize");
2098 h
->root
.u
.def
.value
= -(htab
->num_lines_log2
+ htab
->line_size_log2
);
2099 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2101 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
2103 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
2105 if (htab
->init
->contents
== NULL
)
2108 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
2111 h
->root
.u
.def
.value
= 0;
2112 h
->root
.u
.def
.section
= htab
->init
;
2118 /* Write out _ovly_table. */
2119 /* set low bit of .size to mark non-overlay area as present. */
2121 obfd
= htab
->ovtab
->output_section
->owner
;
2122 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
2124 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
2128 unsigned long off
= ovl_index
* 16;
2129 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
2131 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
2132 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
2134 /* file_off written later in spu_elf_modify_headers. */
2135 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2139 h
= define_ovtab_symbol (htab
, "_ovly_table");
2142 h
->root
.u
.def
.value
= 16;
2143 h
->size
= htab
->num_overlays
* 16;
2145 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2148 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2151 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2154 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2155 h
->size
= htab
->num_buf
* 4;
2157 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2160 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2164 h
= define_ovtab_symbol (htab
, "_EAR_");
2167 h
->root
.u
.def
.section
= htab
->toe
;
2168 h
->root
.u
.def
.value
= 0;
2174 /* Check that all loadable section VMAs lie in the range
2175 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2178 spu_elf_check_vma (struct bfd_link_info
*info
)
2180 struct elf_segment_map
*m
;
2182 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2183 bfd
*abfd
= info
->output_bfd
;
2184 bfd_vma hi
= htab
->params
->local_store_hi
;
2185 bfd_vma lo
= htab
->params
->local_store_lo
;
2187 htab
->local_store
= hi
+ 1 - lo
;
2189 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
2190 if (m
->p_type
== PT_LOAD
)
2191 for (i
= 0; i
< m
->count
; i
++)
2192 if (m
->sections
[i
]->size
!= 0
2193 && (m
->sections
[i
]->vma
< lo
2194 || m
->sections
[i
]->vma
> hi
2195 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2196 return m
->sections
[i
];
2201 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2202 Search for stack adjusting insns, and return the sp delta.
2203 If a store of lr is found save the instruction offset to *LR_STORE.
2204 If a stack adjusting instruction is found, save that offset to
2208 find_function_stack_adjust (asection
*sec
,
2215 memset (reg
, 0, sizeof (reg
));
2216 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
2218 unsigned char buf
[4];
2222 /* Assume no relocs on stack adjusing insns. */
2223 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
2227 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
2229 if (buf
[0] == 0x24 /* stqd */)
2231 if (rt
== 0 /* lr */ && ra
== 1 /* sp */)
2236 /* Partly decoded immediate field. */
2237 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
2239 if (buf
[0] == 0x1c /* ai */)
2242 imm
= (imm
^ 0x200) - 0x200;
2243 reg
[rt
] = reg
[ra
] + imm
;
2245 if (rt
== 1 /* sp */)
2249 *sp_adjust
= offset
;
2253 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
2255 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2257 reg
[rt
] = reg
[ra
] + reg
[rb
];
2262 *sp_adjust
= offset
;
2266 else if (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */)
2268 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2270 reg
[rt
] = reg
[rb
] - reg
[ra
];
2275 *sp_adjust
= offset
;
2279 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2281 if (buf
[0] >= 0x42 /* ila */)
2282 imm
|= (buf
[0] & 1) << 17;
2287 if (buf
[0] == 0x40 /* il */)
2289 if ((buf
[1] & 0x80) == 0)
2291 imm
= (imm
^ 0x8000) - 0x8000;
2293 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
2299 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
2301 reg
[rt
] |= imm
& 0xffff;
2304 else if (buf
[0] == 0x04 /* ori */)
2307 imm
= (imm
^ 0x200) - 0x200;
2308 reg
[rt
] = reg
[ra
] | imm
;
2311 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
2313 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
2314 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
2315 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
2316 | ((imm
& 0x1000) ? 0x000000ff : 0));
2319 else if (buf
[0] == 0x16 /* andbi */)
2325 reg
[rt
] = reg
[ra
] & imm
;
2328 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
2330 /* Used in pic reg load. Say rt is trashed. Won't be used
2331 in stack adjust, but we need to continue past this branch. */
2335 else if (is_branch (buf
) || is_indirect_branch (buf
))
2336 /* If we hit a branch then we must be out of the prologue. */
2343 /* qsort predicate to sort symbols by section and value. */
2345 static Elf_Internal_Sym
*sort_syms_syms
;
2346 static asection
**sort_syms_psecs
;
2349 sort_syms (const void *a
, const void *b
)
2351 Elf_Internal_Sym
*const *s1
= a
;
2352 Elf_Internal_Sym
*const *s2
= b
;
2353 asection
*sec1
,*sec2
;
2354 bfd_signed_vma delta
;
2356 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
2357 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
2360 return sec1
->index
- sec2
->index
;
2362 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
2364 return delta
< 0 ? -1 : 1;
2366 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
2368 return delta
< 0 ? -1 : 1;
2370 return *s1
< *s2
? -1 : 1;
2373 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2374 entries for section SEC. */
2376 static struct spu_elf_stack_info
*
2377 alloc_stack_info (asection
*sec
, int max_fun
)
2379 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2382 amt
= sizeof (struct spu_elf_stack_info
);
2383 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
2384 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
2385 if (sec_data
->u
.i
.stack_info
!= NULL
)
2386 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
2387 return sec_data
->u
.i
.stack_info
;
2390 /* Add a new struct function_info describing a (part of a) function
2391 starting at SYM_H. Keep the array sorted by address. */
2393 static struct function_info
*
2394 maybe_insert_function (asection
*sec
,
2399 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2400 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2406 sinfo
= alloc_stack_info (sec
, 20);
2413 Elf_Internal_Sym
*sym
= sym_h
;
2414 off
= sym
->st_value
;
2415 size
= sym
->st_size
;
2419 struct elf_link_hash_entry
*h
= sym_h
;
2420 off
= h
->root
.u
.def
.value
;
2424 for (i
= sinfo
->num_fun
; --i
>= 0; )
2425 if (sinfo
->fun
[i
].lo
<= off
)
2430 /* Don't add another entry for an alias, but do update some
2432 if (sinfo
->fun
[i
].lo
== off
)
2434 /* Prefer globals over local syms. */
2435 if (global
&& !sinfo
->fun
[i
].global
)
2437 sinfo
->fun
[i
].global
= true;
2438 sinfo
->fun
[i
].u
.h
= sym_h
;
2441 sinfo
->fun
[i
].is_func
= true;
2442 return &sinfo
->fun
[i
];
2444 /* Ignore a zero-size symbol inside an existing function. */
2445 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
2446 return &sinfo
->fun
[i
];
2449 if (sinfo
->num_fun
>= sinfo
->max_fun
)
2451 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
2452 bfd_size_type old
= amt
;
2454 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2455 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
2456 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2457 sinfo
= bfd_realloc (sinfo
, amt
);
2460 memset ((char *) sinfo
+ old
, 0, amt
- old
);
2461 sec_data
->u
.i
.stack_info
= sinfo
;
2464 if (++i
< sinfo
->num_fun
)
2465 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
2466 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
2467 sinfo
->fun
[i
].is_func
= is_func
;
2468 sinfo
->fun
[i
].global
= global
;
2469 sinfo
->fun
[i
].sec
= sec
;
2471 sinfo
->fun
[i
].u
.h
= sym_h
;
2473 sinfo
->fun
[i
].u
.sym
= sym_h
;
2474 sinfo
->fun
[i
].lo
= off
;
2475 sinfo
->fun
[i
].hi
= off
+ size
;
2476 sinfo
->fun
[i
].lr_store
= -1;
2477 sinfo
->fun
[i
].sp_adjust
= -1;
2478 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
,
2479 &sinfo
->fun
[i
].lr_store
,
2480 &sinfo
->fun
[i
].sp_adjust
);
2481 sinfo
->num_fun
+= 1;
2482 return &sinfo
->fun
[i
];
2485 /* Return the name of FUN. */
2488 func_name (struct function_info
*fun
)
2492 Elf_Internal_Shdr
*symtab_hdr
;
2494 while (fun
->start
!= NULL
)
2498 return fun
->u
.h
->root
.root
.string
;
2501 if (fun
->u
.sym
->st_name
== 0)
2503 size_t len
= strlen (sec
->name
);
2504 char *name
= bfd_malloc (len
+ 10);
2507 sprintf (name
, "%s+%lx", sec
->name
,
2508 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
2512 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2513 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
2516 /* Read the instruction at OFF in SEC. Return true iff the instruction
2517 is a nop, lnop, or stop 0 (all zero insn). */
2520 is_nop (asection
*sec
, bfd_vma off
)
2522 unsigned char insn
[4];
2524 if (off
+ 4 > sec
->size
2525 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
2527 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
2529 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
2534 /* Extend the range of FUN to cover nop padding up to LIMIT.
2535 Return TRUE iff some instruction other than a NOP was found. */
2538 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
2540 bfd_vma off
= (fun
->hi
+ 3) & -4;
2542 while (off
< limit
&& is_nop (fun
->sec
, off
))
2553 /* Check and fix overlapping function ranges. Return TRUE iff there
2554 are gaps in the current info we have about functions in SEC. */
2557 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
2559 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2560 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2567 for (i
= 1; i
< sinfo
->num_fun
; i
++)
2568 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
2570 /* Fix overlapping symbols. */
2571 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
2572 const char *f2
= func_name (&sinfo
->fun
[i
]);
2574 /* xgettext:c-format */
2575 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
2576 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
2578 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2581 if (sinfo
->num_fun
== 0)
2585 if (sinfo
->fun
[0].lo
!= 0)
2587 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2589 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2591 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2592 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2594 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2600 /* Search current function info for a function that contains address
2601 OFFSET in section SEC. */
2603 static struct function_info
*
2604 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2606 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2607 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2611 hi
= sinfo
->num_fun
;
2614 mid
= (lo
+ hi
) / 2;
2615 if (offset
< sinfo
->fun
[mid
].lo
)
2617 else if (offset
>= sinfo
->fun
[mid
].hi
)
2620 return &sinfo
->fun
[mid
];
2622 /* xgettext:c-format */
2623 info
->callbacks
->einfo (_("%pA:0x%v not found in function table\n"),
2625 bfd_set_error (bfd_error_bad_value
);
2629 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2630 if CALLEE was new. If this function return FALSE, CALLEE should
2634 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2636 struct call_info
**pp
, *p
;
2638 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2639 if (p
->fun
== callee
->fun
)
2641 /* Tail calls use less stack than normal calls. Retain entry
2642 for normal call over one for tail call. */
2643 p
->is_tail
&= callee
->is_tail
;
2646 p
->fun
->start
= NULL
;
2647 p
->fun
->is_func
= true;
2649 p
->count
+= callee
->count
;
2650 /* Reorder list so most recent call is first. */
2652 p
->next
= caller
->call_list
;
2653 caller
->call_list
= p
;
2656 callee
->next
= caller
->call_list
;
2657 caller
->call_list
= callee
;
2661 /* Copy CALL and insert the copy into CALLER. */
2664 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2666 struct call_info
*callee
;
2667 callee
= bfd_malloc (sizeof (*callee
));
2671 if (!insert_callee (caller
, callee
))
2676 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2677 overlay stub sections. */
2680 interesting_section (asection
*s
)
2682 return (s
->output_section
!= bfd_abs_section_ptr
2683 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2684 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2688 /* Rummage through the relocs for SEC, looking for function calls.
2689 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2690 mark destination symbols on calls as being functions. Also
2691 look at branches, which may be tail calls or go to hot/cold
2692 section part of same function. */
2695 mark_functions_via_relocs (asection
*sec
,
2696 struct bfd_link_info
*info
,
2699 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2700 Elf_Internal_Shdr
*symtab_hdr
;
2702 unsigned int priority
= 0;
2705 if (!interesting_section (sec
)
2706 || sec
->reloc_count
== 0)
2709 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2711 if (internal_relocs
== NULL
)
2714 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2715 psyms
= &symtab_hdr
->contents
;
2716 irela
= internal_relocs
;
2717 irelaend
= irela
+ sec
->reloc_count
;
2718 for (; irela
< irelaend
; irela
++)
2720 enum elf_spu_reloc_type r_type
;
2721 unsigned int r_indx
;
2723 Elf_Internal_Sym
*sym
;
2724 struct elf_link_hash_entry
*h
;
2726 bool nonbranch
, is_call
;
2727 struct function_info
*caller
;
2728 struct call_info
*callee
;
2730 r_type
= ELF32_R_TYPE (irela
->r_info
);
2731 nonbranch
= r_type
!= R_SPU_REL16
&& r_type
!= R_SPU_ADDR16
;
2733 r_indx
= ELF32_R_SYM (irela
->r_info
);
2734 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2738 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2744 unsigned char insn
[4];
2746 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2747 irela
->r_offset
, 4))
2749 if (is_branch (insn
))
2751 is_call
= (insn
[0] & 0xfd) == 0x31;
2752 priority
= insn
[1] & 0x0f;
2754 priority
|= insn
[2];
2756 priority
|= insn
[3];
2758 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2759 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2762 info
->callbacks
->einfo
2763 /* xgettext:c-format */
2764 (_("%pB(%pA+0x%v): call to non-code section"
2765 " %pB(%pA), analysis incomplete\n"),
2766 sec
->owner
, sec
, irela
->r_offset
,
2767 sym_sec
->owner
, sym_sec
);
2782 /* For --auto-overlay, count possible stubs we need for
2783 function pointer references. */
2784 unsigned int sym_type
;
2788 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2789 if (sym_type
== STT_FUNC
)
2791 if (call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2792 spu_hash_table (info
)->non_ovly_stub
+= 1;
2793 /* If the symbol type is STT_FUNC then this must be a
2794 function pointer initialisation. */
2797 /* Ignore data references. */
2798 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2799 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2801 /* Otherwise we probably have a jump table reloc for
2802 a switch statement or some other reference to a
2807 val
= h
->root
.u
.def
.value
;
2809 val
= sym
->st_value
;
2810 val
+= irela
->r_addend
;
2814 struct function_info
*fun
;
2816 if (irela
->r_addend
!= 0)
2818 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2821 fake
->st_value
= val
;
2823 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2827 fun
= maybe_insert_function (sym_sec
, sym
, false, is_call
);
2829 fun
= maybe_insert_function (sym_sec
, h
, true, is_call
);
2832 if (irela
->r_addend
!= 0
2833 && fun
->u
.sym
!= sym
)
2838 caller
= find_function (sec
, irela
->r_offset
, info
);
2841 callee
= bfd_malloc (sizeof *callee
);
2845 callee
->fun
= find_function (sym_sec
, val
, info
);
2846 if (callee
->fun
== NULL
)
2848 callee
->is_tail
= !is_call
;
2849 callee
->is_pasted
= false;
2850 callee
->broken_cycle
= false;
2851 callee
->priority
= priority
;
2852 callee
->count
= nonbranch
? 0 : 1;
2853 if (callee
->fun
->last_caller
!= sec
)
2855 callee
->fun
->last_caller
= sec
;
2856 callee
->fun
->call_count
+= 1;
2858 if (!insert_callee (caller
, callee
))
2861 && !callee
->fun
->is_func
2862 && callee
->fun
->stack
== 0)
2864 /* This is either a tail call or a branch from one part of
2865 the function to another, ie. hot/cold section. If the
2866 destination has been called by some other function then
2867 it is a separate function. We also assume that functions
2868 are not split across input files. */
2869 if (sec
->owner
!= sym_sec
->owner
)
2871 callee
->fun
->start
= NULL
;
2872 callee
->fun
->is_func
= true;
2874 else if (callee
->fun
->start
== NULL
)
2876 struct function_info
*caller_start
= caller
;
2877 while (caller_start
->start
)
2878 caller_start
= caller_start
->start
;
2880 if (caller_start
!= callee
->fun
)
2881 callee
->fun
->start
= caller_start
;
2885 struct function_info
*callee_start
;
2886 struct function_info
*caller_start
;
2887 callee_start
= callee
->fun
;
2888 while (callee_start
->start
)
2889 callee_start
= callee_start
->start
;
2890 caller_start
= caller
;
2891 while (caller_start
->start
)
2892 caller_start
= caller_start
->start
;
2893 if (caller_start
!= callee_start
)
2895 callee
->fun
->start
= NULL
;
2896 callee
->fun
->is_func
= true;
2905 /* Handle something like .init or .fini, which has a piece of a function.
2906 These sections are pasted together to form a single function. */
2909 pasted_function (asection
*sec
)
2911 struct bfd_link_order
*l
;
2912 struct _spu_elf_section_data
*sec_data
;
2913 struct spu_elf_stack_info
*sinfo
;
2914 Elf_Internal_Sym
*fake
;
2915 struct function_info
*fun
, *fun_start
;
2917 fake
= bfd_zmalloc (sizeof (*fake
));
2921 fake
->st_size
= sec
->size
;
2923 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2924 fun
= maybe_insert_function (sec
, fake
, false, false);
2928 /* Find a function immediately preceding this section. */
2930 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2932 if (l
->u
.indirect
.section
== sec
)
2934 if (fun_start
!= NULL
)
2936 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2940 fun
->start
= fun_start
;
2942 callee
->is_tail
= true;
2943 callee
->is_pasted
= true;
2944 callee
->broken_cycle
= false;
2945 callee
->priority
= 0;
2947 if (!insert_callee (fun_start
, callee
))
2953 if (l
->type
== bfd_indirect_link_order
2954 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2955 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2956 && sinfo
->num_fun
!= 0)
2957 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2960 /* Don't return an error if we did not find a function preceding this
2961 section. The section may have incorrect flags. */
2965 /* Map address ranges in code sections to functions. */
2968 discover_functions (struct bfd_link_info
*info
)
2972 Elf_Internal_Sym
***psym_arr
;
2973 asection
***sec_arr
;
2977 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
2980 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2981 if (psym_arr
== NULL
)
2983 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2984 if (sec_arr
== NULL
)
2987 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2989 ibfd
= ibfd
->link
.next
, bfd_idx
++)
2991 extern const bfd_target spu_elf32_vec
;
2992 Elf_Internal_Shdr
*symtab_hdr
;
2995 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2996 asection
**psecs
, **p
;
2998 if (ibfd
->xvec
!= &spu_elf32_vec
)
3001 /* Read all the symbols. */
3002 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3003 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
3007 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3008 if (interesting_section (sec
))
3016 /* Don't use cached symbols since the generic ELF linker
3017 code only reads local symbols, and we need globals too. */
3018 free (symtab_hdr
->contents
);
3019 symtab_hdr
->contents
= NULL
;
3020 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
3022 symtab_hdr
->contents
= (void *) syms
;
3026 /* Select defined function symbols that are going to be output. */
3027 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
3030 psym_arr
[bfd_idx
] = psyms
;
3031 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
3034 sec_arr
[bfd_idx
] = psecs
;
3035 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
3036 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
3037 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3041 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
3042 if (s
!= NULL
&& interesting_section (s
))
3045 symcount
= psy
- psyms
;
3048 /* Sort them by section and offset within section. */
3049 sort_syms_syms
= syms
;
3050 sort_syms_psecs
= psecs
;
3051 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
3053 /* Now inspect the function symbols. */
3054 for (psy
= psyms
; psy
< psyms
+ symcount
; )
3056 asection
*s
= psecs
[*psy
- syms
];
3057 Elf_Internal_Sym
**psy2
;
3059 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
3060 if (psecs
[*psy2
- syms
] != s
)
3063 if (!alloc_stack_info (s
, psy2
- psy
))
3068 /* First install info about properly typed and sized functions.
3069 In an ideal world this will cover all code sections, except
3070 when partitioning functions into hot and cold sections,
3071 and the horrible pasted together .init and .fini functions. */
3072 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
3075 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3077 asection
*s
= psecs
[sy
- syms
];
3078 if (!maybe_insert_function (s
, sy
, false, true))
3083 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3084 if (interesting_section (sec
))
3085 gaps
|= check_function_ranges (sec
, info
);
3090 /* See if we can discover more function symbols by looking at
3092 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3094 ibfd
= ibfd
->link
.next
, bfd_idx
++)
3098 if (psym_arr
[bfd_idx
] == NULL
)
3101 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3102 if (!mark_functions_via_relocs (sec
, info
, false))
3106 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3108 ibfd
= ibfd
->link
.next
, bfd_idx
++)
3110 Elf_Internal_Shdr
*symtab_hdr
;
3112 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
3115 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
3118 psecs
= sec_arr
[bfd_idx
];
3120 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3121 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
3124 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3125 if (interesting_section (sec
))
3126 gaps
|= check_function_ranges (sec
, info
);
3130 /* Finally, install all globals. */
3131 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
3135 s
= psecs
[sy
- syms
];
3137 /* Global syms might be improperly typed functions. */
3138 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
3139 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
3141 if (!maybe_insert_function (s
, sy
, false, false))
3147 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3149 extern const bfd_target spu_elf32_vec
;
3152 if (ibfd
->xvec
!= &spu_elf32_vec
)
3155 /* Some of the symbols we've installed as marking the
3156 beginning of functions may have a size of zero. Extend
3157 the range of such functions to the beginning of the
3158 next symbol of interest. */
3159 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3160 if (interesting_section (sec
))
3162 struct _spu_elf_section_data
*sec_data
;
3163 struct spu_elf_stack_info
*sinfo
;
3165 sec_data
= spu_elf_section_data (sec
);
3166 sinfo
= sec_data
->u
.i
.stack_info
;
3167 if (sinfo
!= NULL
&& sinfo
->num_fun
!= 0)
3170 bfd_vma hi
= sec
->size
;
3172 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3174 sinfo
->fun
[fun_idx
].hi
= hi
;
3175 hi
= sinfo
->fun
[fun_idx
].lo
;
3178 sinfo
->fun
[0].lo
= 0;
3180 /* No symbols in this section. Must be .init or .fini
3181 or something similar. */
3182 else if (!pasted_function (sec
))
3188 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3190 ibfd
= ibfd
->link
.next
, bfd_idx
++)
3192 if (psym_arr
[bfd_idx
] == NULL
)
3195 free (psym_arr
[bfd_idx
]);
3196 free (sec_arr
[bfd_idx
]);
3205 /* Iterate over all function_info we have collected, calling DOIT on
3206 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3210 for_each_node (bool (*doit
) (struct function_info
*,
3211 struct bfd_link_info
*,
3213 struct bfd_link_info
*info
,
3219 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3221 extern const bfd_target spu_elf32_vec
;
3224 if (ibfd
->xvec
!= &spu_elf32_vec
)
3227 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3229 struct _spu_elf_section_data
*sec_data
;
3230 struct spu_elf_stack_info
*sinfo
;
3232 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3233 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3236 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3237 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
3238 if (!doit (&sinfo
->fun
[i
], info
, param
))
3246 /* Transfer call info attached to struct function_info entries for
3247 all of a given function's sections to the first entry. */
3250 transfer_calls (struct function_info
*fun
,
3251 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3252 void *param ATTRIBUTE_UNUSED
)
3254 struct function_info
*start
= fun
->start
;
3258 struct call_info
*call
, *call_next
;
3260 while (start
->start
!= NULL
)
3261 start
= start
->start
;
3262 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
3264 call_next
= call
->next
;
3265 if (!insert_callee (start
, call
))
3268 fun
->call_list
= NULL
;
3273 /* Mark nodes in the call graph that are called by some other node. */
3276 mark_non_root (struct function_info
*fun
,
3277 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3278 void *param ATTRIBUTE_UNUSED
)
3280 struct call_info
*call
;
3285 for (call
= fun
->call_list
; call
; call
= call
->next
)
3287 call
->fun
->non_root
= true;
3288 mark_non_root (call
->fun
, 0, 0);
3293 /* Remove cycles from the call graph. Set depth of nodes. */
3296 remove_cycles (struct function_info
*fun
,
3297 struct bfd_link_info
*info
,
3300 struct call_info
**callp
, *call
;
3301 unsigned int depth
= *(unsigned int *) param
;
3302 unsigned int max_depth
= depth
;
3306 fun
->marking
= true;
3308 callp
= &fun
->call_list
;
3309 while ((call
= *callp
) != NULL
)
3311 call
->max_depth
= depth
+ !call
->is_pasted
;
3312 if (!call
->fun
->visit2
)
3314 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
3316 if (max_depth
< call
->max_depth
)
3317 max_depth
= call
->max_depth
;
3319 else if (call
->fun
->marking
)
3321 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3323 if (!htab
->params
->auto_overlay
3324 && htab
->params
->stack_analysis
)
3326 const char *f1
= func_name (fun
);
3327 const char *f2
= func_name (call
->fun
);
3329 /* xgettext:c-format */
3330 info
->callbacks
->info (_("stack analysis will ignore the call "
3335 call
->broken_cycle
= true;
3337 callp
= &call
->next
;
3339 fun
->marking
= false;
3340 *(unsigned int *) param
= max_depth
;
3344 /* Check that we actually visited all nodes in remove_cycles. If we
3345 didn't, then there is some cycle in the call graph not attached to
3346 any root node. Arbitrarily choose a node in the cycle as a new
3347 root and break the cycle. */
3350 mark_detached_root (struct function_info
*fun
,
3351 struct bfd_link_info
*info
,
3356 fun
->non_root
= false;
3357 *(unsigned int *) param
= 0;
3358 return remove_cycles (fun
, info
, param
);
3361 /* Populate call_list for each function. */
3364 build_call_tree (struct bfd_link_info
*info
)
3369 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3371 extern const bfd_target spu_elf32_vec
;
3374 if (ibfd
->xvec
!= &spu_elf32_vec
)
3377 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3378 if (!mark_functions_via_relocs (sec
, info
, true))
3382 /* Transfer call info from hot/cold section part of function
3384 if (!spu_hash_table (info
)->params
->auto_overlay
3385 && !for_each_node (transfer_calls
, info
, 0, false))
3388 /* Find the call graph root(s). */
3389 if (!for_each_node (mark_non_root
, info
, 0, false))
3392 /* Remove cycles from the call graph. We start from the root node(s)
3393 so that we break cycles in a reasonable place. */
3395 if (!for_each_node (remove_cycles
, info
, &depth
, true))
3398 return for_each_node (mark_detached_root
, info
, &depth
, false);
3401 /* qsort predicate to sort calls by priority, max_depth then count. */
3404 sort_calls (const void *a
, const void *b
)
3406 struct call_info
*const *c1
= a
;
3407 struct call_info
*const *c2
= b
;
3410 delta
= (*c2
)->priority
- (*c1
)->priority
;
3414 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
3418 delta
= (*c2
)->count
- (*c1
)->count
;
3422 return (char *) c1
- (char *) c2
;
3426 unsigned int max_overlay_size
;
3429 /* Set linker_mark and gc_mark on any sections that we will put in
3430 overlays. These flags are used by the generic ELF linker, but we
3431 won't be continuing on to bfd_elf_final_link so it is OK to use
3432 them. linker_mark is clear before we get here. Set segment_mark
3433 on sections that are part of a pasted function (excluding the last
3436 Set up function rodata section if --overlay-rodata. We don't
3437 currently include merged string constant rodata sections since
3439 Sort the call graph so that the deepest nodes will be visited
3443 mark_overlay_section (struct function_info
*fun
,
3444 struct bfd_link_info
*info
,
3447 struct call_info
*call
;
3449 struct _mos_param
*mos_param
= param
;
3450 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3456 if (!fun
->sec
->linker_mark
3457 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3458 || htab
->params
->non_ia_text
3459 || startswith (fun
->sec
->name
, ".text.ia.")
3460 || strcmp (fun
->sec
->name
, ".init") == 0
3461 || strcmp (fun
->sec
->name
, ".fini") == 0))
3465 fun
->sec
->linker_mark
= 1;
3466 fun
->sec
->gc_mark
= 1;
3467 fun
->sec
->segment_mark
= 0;
3468 /* Ensure SEC_CODE is set on this text section (it ought to
3469 be!), and SEC_CODE is clear on rodata sections. We use
3470 this flag to differentiate the two overlay section types. */
3471 fun
->sec
->flags
|= SEC_CODE
;
3473 size
= fun
->sec
->size
;
3474 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3478 /* Find the rodata section corresponding to this function's
3480 if (strcmp (fun
->sec
->name
, ".text") == 0)
3482 name
= bfd_malloc (sizeof (".rodata"));
3485 memcpy (name
, ".rodata", sizeof (".rodata"));
3487 else if (startswith (fun
->sec
->name
, ".text."))
3489 size_t len
= strlen (fun
->sec
->name
);
3490 name
= bfd_malloc (len
+ 3);
3493 memcpy (name
, ".rodata", sizeof (".rodata"));
3494 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3496 else if (startswith (fun
->sec
->name
, ".gnu.linkonce.t."))
3498 size_t len
= strlen (fun
->sec
->name
) + 1;
3499 name
= bfd_malloc (len
);
3502 memcpy (name
, fun
->sec
->name
, len
);
3508 asection
*rodata
= NULL
;
3509 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3510 if (group_sec
== NULL
)
3511 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3513 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3515 if (strcmp (group_sec
->name
, name
) == 0)
3520 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3522 fun
->rodata
= rodata
;
3525 size
+= fun
->rodata
->size
;
3526 if (htab
->params
->line_size
!= 0
3527 && size
> htab
->params
->line_size
)
3529 size
-= fun
->rodata
->size
;
3534 fun
->rodata
->linker_mark
= 1;
3535 fun
->rodata
->gc_mark
= 1;
3536 fun
->rodata
->flags
&= ~SEC_CODE
;
3542 if (mos_param
->max_overlay_size
< size
)
3543 mos_param
->max_overlay_size
= size
;
3546 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3551 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3555 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3556 calls
[count
++] = call
;
3558 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3560 fun
->call_list
= NULL
;
3564 calls
[count
]->next
= fun
->call_list
;
3565 fun
->call_list
= calls
[count
];
3570 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3572 if (call
->is_pasted
)
3574 /* There can only be one is_pasted call per function_info. */
3575 BFD_ASSERT (!fun
->sec
->segment_mark
);
3576 fun
->sec
->segment_mark
= 1;
3578 if (!call
->broken_cycle
3579 && !mark_overlay_section (call
->fun
, info
, param
))
3583 /* Don't put entry code into an overlay. The overlay manager needs
3584 a stack! Also, don't mark .ovl.init as an overlay. */
3585 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3586 == info
->output_bfd
->start_address
3587 || startswith (fun
->sec
->output_section
->name
, ".ovl.init"))
3589 fun
->sec
->linker_mark
= 0;
3590 if (fun
->rodata
!= NULL
)
3591 fun
->rodata
->linker_mark
= 0;
3596 /* If non-zero then unmark functions called from those within sections
3597 that we need to unmark. Unfortunately this isn't reliable since the
3598 call graph cannot know the destination of function pointer calls. */
3599 #define RECURSE_UNMARK 0
3602 asection
*exclude_input_section
;
3603 asection
*exclude_output_section
;
3604 unsigned long clearing
;
3607 /* Undo some of mark_overlay_section's work. */
3610 unmark_overlay_section (struct function_info
*fun
,
3611 struct bfd_link_info
*info
,
3614 struct call_info
*call
;
3615 struct _uos_param
*uos_param
= param
;
3616 unsigned int excluded
= 0;
3624 if (fun
->sec
== uos_param
->exclude_input_section
3625 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3629 uos_param
->clearing
+= excluded
;
3631 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3633 fun
->sec
->linker_mark
= 0;
3635 fun
->rodata
->linker_mark
= 0;
3638 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3639 if (!call
->broken_cycle
3640 && !unmark_overlay_section (call
->fun
, info
, param
))
3644 uos_param
->clearing
-= excluded
;
3649 unsigned int lib_size
;
3650 asection
**lib_sections
;
3653 /* Add sections we have marked as belonging to overlays to an array
3654 for consideration as non-overlay sections. The array consist of
3655 pairs of sections, (text,rodata), for functions in the call graph. */
3658 collect_lib_sections (struct function_info
*fun
,
3659 struct bfd_link_info
*info
,
3662 struct _cl_param
*lib_param
= param
;
3663 struct call_info
*call
;
3670 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3673 size
= fun
->sec
->size
;
3675 size
+= fun
->rodata
->size
;
3677 if (size
<= lib_param
->lib_size
)
3679 *lib_param
->lib_sections
++ = fun
->sec
;
3680 fun
->sec
->gc_mark
= 0;
3681 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3683 *lib_param
->lib_sections
++ = fun
->rodata
;
3684 fun
->rodata
->gc_mark
= 0;
3687 *lib_param
->lib_sections
++ = NULL
;
3690 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3691 if (!call
->broken_cycle
)
3692 collect_lib_sections (call
->fun
, info
, param
);
3697 /* qsort predicate to sort sections by call count. */
3700 sort_lib (const void *a
, const void *b
)
3702 asection
*const *s1
= a
;
3703 asection
*const *s2
= b
;
3704 struct _spu_elf_section_data
*sec_data
;
3705 struct spu_elf_stack_info
*sinfo
;
3709 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3710 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3713 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3714 delta
-= sinfo
->fun
[i
].call_count
;
3717 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3718 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3721 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3722 delta
+= sinfo
->fun
[i
].call_count
;
3731 /* Remove some sections from those marked to be in overlays. Choose
3732 those that are called from many places, likely library functions. */
3735 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3738 asection
**lib_sections
;
3739 unsigned int i
, lib_count
;
3740 struct _cl_param collect_lib_param
;
3741 struct function_info dummy_caller
;
3742 struct spu_link_hash_table
*htab
;
3744 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3746 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3748 extern const bfd_target spu_elf32_vec
;
3751 if (ibfd
->xvec
!= &spu_elf32_vec
)
3754 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3755 if (sec
->linker_mark
3756 && sec
->size
< lib_size
3757 && (sec
->flags
& SEC_CODE
) != 0)
3760 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3761 if (lib_sections
== NULL
)
3762 return (unsigned int) -1;
3763 collect_lib_param
.lib_size
= lib_size
;
3764 collect_lib_param
.lib_sections
= lib_sections
;
3765 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3767 return (unsigned int) -1;
3768 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3770 /* Sort sections so that those with the most calls are first. */
3772 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3774 htab
= spu_hash_table (info
);
3775 for (i
= 0; i
< lib_count
; i
++)
3777 unsigned int tmp
, stub_size
;
3779 struct _spu_elf_section_data
*sec_data
;
3780 struct spu_elf_stack_info
*sinfo
;
3782 sec
= lib_sections
[2 * i
];
3783 /* If this section is OK, its size must be less than lib_size. */
3785 /* If it has a rodata section, then add that too. */
3786 if (lib_sections
[2 * i
+ 1])
3787 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3788 /* Add any new overlay call stubs needed by the section. */
3791 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3792 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3795 struct call_info
*call
;
3797 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3798 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3799 if (call
->fun
->sec
->linker_mark
)
3801 struct call_info
*p
;
3802 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3803 if (p
->fun
== call
->fun
)
3806 stub_size
+= ovl_stub_size (htab
->params
);
3809 if (tmp
+ stub_size
< lib_size
)
3811 struct call_info
**pp
, *p
;
3813 /* This section fits. Mark it as non-overlay. */
3814 lib_sections
[2 * i
]->linker_mark
= 0;
3815 if (lib_sections
[2 * i
+ 1])
3816 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3817 lib_size
-= tmp
+ stub_size
;
3818 /* Call stubs to the section we just added are no longer
3820 pp
= &dummy_caller
.call_list
;
3821 while ((p
= *pp
) != NULL
)
3822 if (!p
->fun
->sec
->linker_mark
)
3824 lib_size
+= ovl_stub_size (htab
->params
);
3830 /* Add new call stubs to dummy_caller. */
3831 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3832 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3835 struct call_info
*call
;
3837 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3838 for (call
= sinfo
->fun
[k
].call_list
;
3841 if (call
->fun
->sec
->linker_mark
)
3843 struct call_info
*callee
;
3844 callee
= bfd_malloc (sizeof (*callee
));
3846 return (unsigned int) -1;
3848 if (!insert_callee (&dummy_caller
, callee
))
3854 while (dummy_caller
.call_list
!= NULL
)
3856 struct call_info
*call
= dummy_caller
.call_list
;
3857 dummy_caller
.call_list
= call
->next
;
3860 for (i
= 0; i
< 2 * lib_count
; i
++)
3861 if (lib_sections
[i
])
3862 lib_sections
[i
]->gc_mark
= 1;
3863 free (lib_sections
);
3867 /* Build an array of overlay sections. The deepest node's section is
3868 added first, then its parent node's section, then everything called
3869 from the parent section. The idea being to group sections to
3870 minimise calls between different overlays. */
3873 collect_overlays (struct function_info
*fun
,
3874 struct bfd_link_info
*info
,
3877 struct call_info
*call
;
3879 asection
***ovly_sections
= param
;
3885 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3886 if (!call
->is_pasted
&& !call
->broken_cycle
)
3888 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3894 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3896 fun
->sec
->gc_mark
= 0;
3897 *(*ovly_sections
)++ = fun
->sec
;
3898 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3900 fun
->rodata
->gc_mark
= 0;
3901 *(*ovly_sections
)++ = fun
->rodata
;
3904 *(*ovly_sections
)++ = NULL
;
3907 /* Pasted sections must stay with the first section. We don't
3908 put pasted sections in the array, just the first section.
3909 Mark subsequent sections as already considered. */
3910 if (fun
->sec
->segment_mark
)
3912 struct function_info
*call_fun
= fun
;
3915 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3916 if (call
->is_pasted
)
3918 call_fun
= call
->fun
;
3919 call_fun
->sec
->gc_mark
= 0;
3920 if (call_fun
->rodata
)
3921 call_fun
->rodata
->gc_mark
= 0;
3927 while (call_fun
->sec
->segment_mark
);
3931 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3932 if (!call
->broken_cycle
3933 && !collect_overlays (call
->fun
, info
, ovly_sections
))
3938 struct _spu_elf_section_data
*sec_data
;
3939 struct spu_elf_stack_info
*sinfo
;
3941 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3942 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3945 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3946 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3954 struct _sum_stack_param
{
3956 size_t overall_stack
;
3957 bool emit_stack_syms
;
3960 /* Descend the call graph for FUN, accumulating total stack required. */
3963 sum_stack (struct function_info
*fun
,
3964 struct bfd_link_info
*info
,
3967 struct call_info
*call
;
3968 struct function_info
*max
;
3969 size_t stack
, cum_stack
;
3972 struct _sum_stack_param
*sum_stack_param
= param
;
3973 struct spu_link_hash_table
*htab
;
3975 cum_stack
= fun
->stack
;
3976 sum_stack_param
->cum_stack
= cum_stack
;
3982 for (call
= fun
->call_list
; call
; call
= call
->next
)
3984 if (call
->broken_cycle
)
3986 if (!call
->is_pasted
)
3988 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3990 stack
= sum_stack_param
->cum_stack
;
3991 /* Include caller stack for normal calls, don't do so for
3992 tail calls. fun->stack here is local stack usage for
3994 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3995 stack
+= fun
->stack
;
3996 if (cum_stack
< stack
)
4003 sum_stack_param
->cum_stack
= cum_stack
;
4005 /* Now fun->stack holds cumulative stack. */
4006 fun
->stack
= cum_stack
;
4010 && sum_stack_param
->overall_stack
< cum_stack
)
4011 sum_stack_param
->overall_stack
= cum_stack
;
4013 htab
= spu_hash_table (info
);
4014 if (htab
->params
->auto_overlay
)
4017 f1
= func_name (fun
);
4018 if (htab
->params
->stack_analysis
)
4021 info
->callbacks
->info (" %s: 0x%v\n", f1
, (bfd_vma
) cum_stack
);
4022 info
->callbacks
->minfo ("%s: 0x%v 0x%v\n",
4023 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
4027 info
->callbacks
->minfo (_(" calls:\n"));
4028 for (call
= fun
->call_list
; call
; call
= call
->next
)
4029 if (!call
->is_pasted
&& !call
->broken_cycle
)
4031 const char *f2
= func_name (call
->fun
);
4032 const char *ann1
= call
->fun
== max
? "*" : " ";
4033 const char *ann2
= call
->is_tail
? "t" : " ";
4035 info
->callbacks
->minfo (" %s%s %s\n", ann1
, ann2
, f2
);
4040 if (sum_stack_param
->emit_stack_syms
)
4042 char *name
= bfd_malloc (18 + strlen (f1
));
4043 struct elf_link_hash_entry
*h
;
4048 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
4049 sprintf (name
, "__stack_%s", f1
);
4051 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
4053 h
= elf_link_hash_lookup (&htab
->elf
, name
, true, true, false);
4056 && (h
->root
.type
== bfd_link_hash_new
4057 || h
->root
.type
== bfd_link_hash_undefined
4058 || h
->root
.type
== bfd_link_hash_undefweak
))
4060 h
->root
.type
= bfd_link_hash_defined
;
4061 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
4062 h
->root
.u
.def
.value
= cum_stack
;
4067 h
->ref_regular_nonweak
= 1;
4068 h
->forced_local
= 1;
4076 /* SEC is part of a pasted function. Return the call_info for the
4077 next section of this function. */
4079 static struct call_info
*
4080 find_pasted_call (asection
*sec
)
4082 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
4083 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
4084 struct call_info
*call
;
4087 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4088 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
4089 if (call
->is_pasted
)
4095 /* qsort predicate to sort bfds by file name. */
4098 sort_bfds (const void *a
, const void *b
)
4100 bfd
*const *abfd1
= a
;
4101 bfd
*const *abfd2
= b
;
4103 return filename_cmp (bfd_get_filename (*abfd1
), bfd_get_filename (*abfd2
));
4107 print_one_overlay_section (FILE *script
,
4110 unsigned int ovlynum
,
4111 unsigned int *ovly_map
,
4112 asection
**ovly_sections
,
4113 struct bfd_link_info
*info
)
4117 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4119 asection
*sec
= ovly_sections
[2 * j
];
4121 if (fprintf (script
, " %s%c%s (%s)\n",
4122 (sec
->owner
->my_archive
!= NULL
4123 ? bfd_get_filename (sec
->owner
->my_archive
) : ""),
4124 info
->path_separator
,
4125 bfd_get_filename (sec
->owner
),
4128 if (sec
->segment_mark
)
4130 struct call_info
*call
= find_pasted_call (sec
);
4131 while (call
!= NULL
)
4133 struct function_info
*call_fun
= call
->fun
;
4134 sec
= call_fun
->sec
;
4135 if (fprintf (script
, " %s%c%s (%s)\n",
4136 (sec
->owner
->my_archive
!= NULL
4137 ? bfd_get_filename (sec
->owner
->my_archive
) : ""),
4138 info
->path_separator
,
4139 bfd_get_filename (sec
->owner
),
4142 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4143 if (call
->is_pasted
)
4149 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4151 asection
*sec
= ovly_sections
[2 * j
+ 1];
4153 && fprintf (script
, " %s%c%s (%s)\n",
4154 (sec
->owner
->my_archive
!= NULL
4155 ? bfd_get_filename (sec
->owner
->my_archive
) : ""),
4156 info
->path_separator
,
4157 bfd_get_filename (sec
->owner
),
4161 sec
= ovly_sections
[2 * j
];
4162 if (sec
->segment_mark
)
4164 struct call_info
*call
= find_pasted_call (sec
);
4165 while (call
!= NULL
)
4167 struct function_info
*call_fun
= call
->fun
;
4168 sec
= call_fun
->rodata
;
4170 && fprintf (script
, " %s%c%s (%s)\n",
4171 (sec
->owner
->my_archive
!= NULL
4172 ? bfd_get_filename (sec
->owner
->my_archive
) : ""),
4173 info
->path_separator
,
4174 bfd_get_filename (sec
->owner
),
4177 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4178 if (call
->is_pasted
)
4187 /* Handle --auto-overlay. */
4190 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4194 struct elf_segment_map
*m
;
4195 unsigned int fixed_size
, lo
, hi
;
4196 unsigned int reserved
;
4197 struct spu_link_hash_table
*htab
;
4198 unsigned int base
, i
, count
, bfd_count
;
4199 unsigned int region
, ovlynum
;
4200 asection
**ovly_sections
, **ovly_p
;
4201 unsigned int *ovly_map
;
4203 unsigned int total_overlay_size
, overlay_size
;
4204 const char *ovly_mgr_entry
;
4205 struct elf_link_hash_entry
*h
;
4206 struct _mos_param mos_param
;
4207 struct _uos_param uos_param
;
4208 struct function_info dummy_caller
;
4210 /* Find the extents of our loadable image. */
4211 lo
= (unsigned int) -1;
4213 for (m
= elf_seg_map (info
->output_bfd
); m
!= NULL
; m
= m
->next
)
4214 if (m
->p_type
== PT_LOAD
)
4215 for (i
= 0; i
< m
->count
; i
++)
4216 if (m
->sections
[i
]->size
!= 0)
4218 if (m
->sections
[i
]->vma
< lo
)
4219 lo
= m
->sections
[i
]->vma
;
4220 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4221 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4223 fixed_size
= hi
+ 1 - lo
;
4225 if (!discover_functions (info
))
4228 if (!build_call_tree (info
))
4231 htab
= spu_hash_table (info
);
4232 reserved
= htab
->params
->auto_overlay_reserved
;
4235 struct _sum_stack_param sum_stack_param
;
4237 sum_stack_param
.emit_stack_syms
= 0;
4238 sum_stack_param
.overall_stack
= 0;
4239 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, true))
4241 reserved
= (sum_stack_param
.overall_stack
4242 + htab
->params
->extra_stack_space
);
4245 /* No need for overlays if everything already fits. */
4246 if (fixed_size
+ reserved
<= htab
->local_store
4247 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
4249 htab
->params
->auto_overlay
= 0;
4253 uos_param
.exclude_input_section
= 0;
4254 uos_param
.exclude_output_section
4255 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4257 ovly_mgr_entry
= "__ovly_load";
4258 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4259 ovly_mgr_entry
= "__icache_br_handler";
4260 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4261 false, false, false);
4263 && (h
->root
.type
== bfd_link_hash_defined
4264 || h
->root
.type
== bfd_link_hash_defweak
)
4267 /* We have a user supplied overlay manager. */
4268 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4272 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4273 builtin version to .text, and will adjust .text size. */
4274 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4277 /* Mark overlay sections, and find max overlay section size. */
4278 mos_param
.max_overlay_size
= 0;
4279 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, true))
4282 /* We can't put the overlay manager or interrupt routines in
4284 uos_param
.clearing
= 0;
4285 if ((uos_param
.exclude_input_section
4286 || uos_param
.exclude_output_section
)
4287 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, true))
4291 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
4293 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4294 if (bfd_arr
== NULL
)
4297 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4300 total_overlay_size
= 0;
4301 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
4303 extern const bfd_target spu_elf32_vec
;
4305 unsigned int old_count
;
4307 if (ibfd
->xvec
!= &spu_elf32_vec
)
4311 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4312 if (sec
->linker_mark
)
4314 if ((sec
->flags
& SEC_CODE
) != 0)
4316 fixed_size
-= sec
->size
;
4317 total_overlay_size
+= sec
->size
;
4319 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4320 && sec
->output_section
->owner
== info
->output_bfd
4321 && startswith (sec
->output_section
->name
, ".ovl.init"))
4322 fixed_size
-= sec
->size
;
4323 if (count
!= old_count
)
4324 bfd_arr
[bfd_count
++] = ibfd
;
4327 /* Since the overlay link script selects sections by file name and
4328 section name, ensure that file names are unique. */
4333 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4334 for (i
= 1; i
< bfd_count
; ++i
)
4335 if (filename_cmp (bfd_get_filename (bfd_arr
[i
- 1]),
4336 bfd_get_filename (bfd_arr
[i
])) == 0)
4338 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4340 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4341 /* xgettext:c-format */
4342 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4343 bfd_get_filename (bfd_arr
[i
]),
4344 bfd_get_filename (bfd_arr
[i
]->my_archive
));
4346 info
->callbacks
->einfo (_("%s duplicated\n"),
4347 bfd_get_filename (bfd_arr
[i
]));
4353 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4354 "object files in auto-overlay script\n"));
4355 bfd_set_error (bfd_error_bad_value
);
4361 fixed_size
+= reserved
;
4362 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
);
4363 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4365 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4367 /* Stubs in the non-icache area are bigger. */
4368 fixed_size
+= htab
->non_ovly_stub
* 16;
4369 /* Space for icache manager tables.
4370 a) Tag array, one quadword per cache line.
4371 - word 0: ia address of present line, init to zero. */
4372 fixed_size
+= 16 << htab
->num_lines_log2
;
4373 /* b) Rewrite "to" list, one quadword per cache line. */
4374 fixed_size
+= 16 << htab
->num_lines_log2
;
4375 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4376 to a power-of-two number of full quadwords) per cache line. */
4377 fixed_size
+= 16 << (htab
->fromelem_size_log2
4378 + htab
->num_lines_log2
);
4379 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4384 /* Guess number of overlays. Assuming overlay buffer is on
4385 average only half full should be conservative. */
4386 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4387 / (htab
->local_store
- fixed_size
));
4388 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4389 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4393 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4394 /* xgettext:c-format */
4395 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4396 "size of 0x%v exceeds local store\n"),
4397 (bfd_vma
) fixed_size
,
4398 (bfd_vma
) mos_param
.max_overlay_size
);
4400 /* Now see if we should put some functions in the non-overlay area. */
4401 else if (fixed_size
< htab
->params
->auto_overlay_fixed
)
4403 unsigned int max_fixed
, lib_size
;
4405 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4406 if (max_fixed
> htab
->params
->auto_overlay_fixed
)
4407 max_fixed
= htab
->params
->auto_overlay_fixed
;
4408 lib_size
= max_fixed
- fixed_size
;
4409 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4410 if (lib_size
== (unsigned int) -1)
4412 fixed_size
= max_fixed
- lib_size
;
4415 /* Build an array of sections, suitably sorted to place into
4417 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4418 if (ovly_sections
== NULL
)
4420 ovly_p
= ovly_sections
;
4421 if (!for_each_node (collect_overlays
, info
, &ovly_p
, true))
4423 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4424 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4425 if (ovly_map
== NULL
)
4428 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4429 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4430 if (htab
->params
->line_size
!= 0)
4431 overlay_size
= htab
->params
->line_size
;
4434 while (base
< count
)
4436 unsigned int size
= 0, rosize
= 0, roalign
= 0;
4438 for (i
= base
; i
< count
; i
++)
4440 asection
*sec
, *rosec
;
4441 unsigned int tmp
, rotmp
;
4442 unsigned int num_stubs
;
4443 struct call_info
*call
, *pasty
;
4444 struct _spu_elf_section_data
*sec_data
;
4445 struct spu_elf_stack_info
*sinfo
;
4448 /* See whether we can add this section to the current
4449 overlay without overflowing our overlay buffer. */
4450 sec
= ovly_sections
[2 * i
];
4451 tmp
= align_power (size
, sec
->alignment_power
) + sec
->size
;
4453 rosec
= ovly_sections
[2 * i
+ 1];
4456 rotmp
= align_power (rotmp
, rosec
->alignment_power
) + rosec
->size
;
4457 if (roalign
< rosec
->alignment_power
)
4458 roalign
= rosec
->alignment_power
;
4460 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4462 if (sec
->segment_mark
)
4464 /* Pasted sections must stay together, so add their
4466 pasty
= find_pasted_call (sec
);
4467 while (pasty
!= NULL
)
4469 struct function_info
*call_fun
= pasty
->fun
;
4470 tmp
= (align_power (tmp
, call_fun
->sec
->alignment_power
)
4471 + call_fun
->sec
->size
);
4472 if (call_fun
->rodata
)
4474 rotmp
= (align_power (rotmp
,
4475 call_fun
->rodata
->alignment_power
)
4476 + call_fun
->rodata
->size
);
4477 if (roalign
< rosec
->alignment_power
)
4478 roalign
= rosec
->alignment_power
;
4480 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4481 if (pasty
->is_pasted
)
4485 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4488 /* If we add this section, we might need new overlay call
4489 stubs. Add any overlay section calls to dummy_call. */
4491 sec_data
= spu_elf_section_data (sec
);
4492 sinfo
= sec_data
->u
.i
.stack_info
;
4493 for (k
= 0; k
< (unsigned) sinfo
->num_fun
; ++k
)
4494 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4495 if (call
->is_pasted
)
4497 BFD_ASSERT (pasty
== NULL
);
4500 else if (call
->fun
->sec
->linker_mark
)
4502 if (!copy_callee (&dummy_caller
, call
))
4505 while (pasty
!= NULL
)
4507 struct function_info
*call_fun
= pasty
->fun
;
4509 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4510 if (call
->is_pasted
)
4512 BFD_ASSERT (pasty
== NULL
);
4515 else if (!copy_callee (&dummy_caller
, call
))
4519 /* Calculate call stub size. */
4521 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4523 unsigned int stub_delta
= 1;
4525 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4526 stub_delta
= call
->count
;
4527 num_stubs
+= stub_delta
;
4529 /* If the call is within this overlay, we won't need a
4531 for (k
= base
; k
< i
+ 1; k
++)
4532 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4534 num_stubs
-= stub_delta
;
4538 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4539 && num_stubs
> htab
->params
->max_branch
)
4541 if (align_power (tmp
, roalign
) + rotmp
4542 + num_stubs
* ovl_stub_size (htab
->params
) > overlay_size
)
4550 /* xgettext:c-format */
4551 info
->callbacks
->einfo (_("%pB:%pA%s exceeds overlay size\n"),
4552 ovly_sections
[2 * i
]->owner
,
4553 ovly_sections
[2 * i
],
4554 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4555 bfd_set_error (bfd_error_bad_value
);
4559 while (dummy_caller
.call_list
!= NULL
)
4561 struct call_info
*call
= dummy_caller
.call_list
;
4562 dummy_caller
.call_list
= call
->next
;
4568 ovly_map
[base
++] = ovlynum
;
4571 script
= htab
->params
->spu_elf_open_overlay_script ();
4573 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4575 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4578 if (fprintf (script
,
4579 " . = ALIGN (%u);\n"
4580 " .ovl.init : { *(.ovl.init) }\n"
4581 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4582 htab
->params
->line_size
) <= 0)
4587 while (base
< count
)
4589 unsigned int indx
= ovlynum
- 1;
4590 unsigned int vma
, lma
;
4592 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4593 lma
= vma
+ (((indx
>> htab
->num_lines_log2
) + 1) << 18);
4595 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4596 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4597 ovlynum
, vma
, lma
) <= 0)
4600 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4601 ovly_map
, ovly_sections
, info
);
4602 if (base
== (unsigned) -1)
4605 if (fprintf (script
, " }\n") <= 0)
4611 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4612 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4615 if (fprintf (script
, "}\nINSERT AFTER .toe;\n") <= 0)
4620 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4623 if (fprintf (script
,
4624 " . = ALIGN (16);\n"
4625 " .ovl.init : { *(.ovl.init) }\n"
4626 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4629 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4633 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4641 /* We need to set lma since we are overlaying .ovl.init. */
4642 if (fprintf (script
,
4643 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4648 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4652 while (base
< count
)
4654 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4657 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4658 ovly_map
, ovly_sections
, info
);
4659 if (base
== (unsigned) -1)
4662 if (fprintf (script
, " }\n") <= 0)
4665 ovlynum
+= htab
->params
->num_lines
;
4666 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4670 if (fprintf (script
, " }\n") <= 0)
4674 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4679 free (ovly_sections
);
4681 if (fclose (script
) != 0)
4684 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4685 (*htab
->params
->spu_elf_relink
) ();
4690 bfd_set_error (bfd_error_system_call
);
4692 info
->callbacks
->einfo (_("%F%P: auto overlay error: %E\n"));
4696 /* Provide an estimate of total stack required. */
4699 spu_elf_stack_analysis (struct bfd_link_info
*info
)
4701 struct spu_link_hash_table
*htab
;
4702 struct _sum_stack_param sum_stack_param
;
4704 if (!discover_functions (info
))
4707 if (!build_call_tree (info
))
4710 htab
= spu_hash_table (info
);
4711 if (htab
->params
->stack_analysis
)
4713 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
4714 info
->callbacks
->minfo (_("\nStack size for functions. "
4715 "Annotations: '*' max stack, 't' tail call\n"));
4718 sum_stack_param
.emit_stack_syms
= htab
->params
->emit_stack_syms
;
4719 sum_stack_param
.overall_stack
= 0;
4720 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, true))
4723 if (htab
->params
->stack_analysis
)
4724 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
4725 (bfd_vma
) sum_stack_param
.overall_stack
);
4729 /* Perform a final link. */
4732 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
4734 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4736 if (htab
->params
->auto_overlay
)
4737 spu_elf_auto_overlay (info
);
4739 if ((htab
->params
->stack_analysis
4740 || (htab
->params
->ovly_flavour
== ovly_soft_icache
4741 && htab
->params
->lrlive_analysis
))
4742 && !spu_elf_stack_analysis (info
))
4743 info
->callbacks
->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));
4745 if (!spu_elf_build_stubs (info
))
4746 info
->callbacks
->einfo (_("%F%P: can not build overlay stubs: %E\n"));
4748 return bfd_elf_final_link (output_bfd
, info
);
4751 /* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
4752 and !info->emitrelocations. Returns a count of special relocs
4753 that need to be emitted. */
4756 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
4758 Elf_Internal_Rela
*relocs
;
4759 unsigned int count
= 0;
4761 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
4765 Elf_Internal_Rela
*rel
;
4766 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
4768 for (rel
= relocs
; rel
< relend
; rel
++)
4770 int r_type
= ELF32_R_TYPE (rel
->r_info
);
4771 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4775 if (elf_section_data (sec
)->relocs
!= relocs
)
4782 /* Functions for adding fixup records to .fixup */
4784 #define FIXUP_RECORD_SIZE 4
4786 #define FIXUP_PUT(output_bfd,htab,index,addr) \
4787 bfd_put_32 (output_bfd, addr, \
4788 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4789 #define FIXUP_GET(output_bfd,htab,index) \
4790 bfd_get_32 (output_bfd, \
4791 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4793 /* Store OFFSET in .fixup. This assumes it will be called with an
4794 increasing OFFSET. When this OFFSET fits with the last base offset,
4795 it just sets a bit, otherwise it adds a new fixup record. */
4797 spu_elf_emit_fixup (bfd
* output_bfd
, struct bfd_link_info
*info
,
4800 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4801 asection
*sfixup
= htab
->sfixup
;
4802 bfd_vma qaddr
= offset
& ~(bfd_vma
) 15;
4803 bfd_vma bit
= ((bfd_vma
) 8) >> ((offset
& 15) >> 2);
4804 if (sfixup
->reloc_count
== 0)
4806 FIXUP_PUT (output_bfd
, htab
, 0, qaddr
| bit
);
4807 sfixup
->reloc_count
++;
4811 bfd_vma base
= FIXUP_GET (output_bfd
, htab
, sfixup
->reloc_count
- 1);
4812 if (qaddr
!= (base
& ~(bfd_vma
) 15))
4814 if ((sfixup
->reloc_count
+ 1) * FIXUP_RECORD_SIZE
> sfixup
->size
)
4815 _bfd_error_handler (_("fatal error while creating .fixup"));
4816 FIXUP_PUT (output_bfd
, htab
, sfixup
->reloc_count
, qaddr
| bit
);
4817 sfixup
->reloc_count
++;
4820 FIXUP_PUT (output_bfd
, htab
, sfixup
->reloc_count
- 1, base
| bit
);
4824 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4827 spu_elf_relocate_section (bfd
*output_bfd
,
4828 struct bfd_link_info
*info
,
4830 asection
*input_section
,
4832 Elf_Internal_Rela
*relocs
,
4833 Elf_Internal_Sym
*local_syms
,
4834 asection
**local_sections
)
4836 Elf_Internal_Shdr
*symtab_hdr
;
4837 struct elf_link_hash_entry
**sym_hashes
;
4838 Elf_Internal_Rela
*rel
, *relend
;
4839 struct spu_link_hash_table
*htab
;
4842 bool emit_these_relocs
= false;
4845 unsigned int iovl
= 0;
4847 htab
= spu_hash_table (info
);
4848 stubs
= (htab
->stub_sec
!= NULL
4849 && maybe_needs_stubs (input_section
));
4850 iovl
= overlay_index (input_section
);
4851 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
4852 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
4853 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
4856 relend
= relocs
+ input_section
->reloc_count
;
4857 for (; rel
< relend
; rel
++)
4860 reloc_howto_type
*howto
;
4861 unsigned int r_symndx
;
4862 Elf_Internal_Sym
*sym
;
4864 struct elf_link_hash_entry
*h
;
4865 const char *sym_name
;
4868 bfd_reloc_status_type r
;
4869 bool unresolved_reloc
;
4870 enum _stub_type stub_type
;
4872 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4873 r_type
= ELF32_R_TYPE (rel
->r_info
);
4874 howto
= elf_howto_table
+ r_type
;
4875 unresolved_reloc
= false;
4879 if (r_symndx
< symtab_hdr
->sh_info
)
4881 sym
= local_syms
+ r_symndx
;
4882 sec
= local_sections
[r_symndx
];
4883 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4884 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4888 if (sym_hashes
== NULL
)
4891 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4893 if (info
->wrap_hash
!= NULL
4894 && (input_section
->flags
& SEC_DEBUGGING
) != 0)
4895 h
= ((struct elf_link_hash_entry
*)
4896 unwrap_hash_lookup (info
, input_bfd
, &h
->root
));
4898 while (h
->root
.type
== bfd_link_hash_indirect
4899 || h
->root
.type
== bfd_link_hash_warning
)
4900 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4903 if (h
->root
.type
== bfd_link_hash_defined
4904 || h
->root
.type
== bfd_link_hash_defweak
)
4906 sec
= h
->root
.u
.def
.section
;
4908 || sec
->output_section
== NULL
)
4909 /* Set a flag that will be cleared later if we find a
4910 relocation value for this symbol. output_section
4911 is typically NULL for symbols satisfied by a shared
4913 unresolved_reloc
= true;
4915 relocation
= (h
->root
.u
.def
.value
4916 + sec
->output_section
->vma
4917 + sec
->output_offset
);
4919 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4921 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4922 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4924 else if (!bfd_link_relocatable (info
)
4925 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4929 err
= (info
->unresolved_syms_in_objects
== RM_DIAGNOSE
4930 && !info
->warn_unresolved_syms
)
4931 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
;
4933 info
->callbacks
->undefined_symbol
4934 (info
, h
->root
.root
.string
, input_bfd
,
4935 input_section
, rel
->r_offset
, err
);
4937 sym_name
= h
->root
.root
.string
;
4940 if (sec
!= NULL
&& discarded_section (sec
))
4941 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
4942 rel
, 1, relend
, howto
, 0, contents
);
4944 if (bfd_link_relocatable (info
))
4947 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4948 if (r_type
== R_SPU_ADD_PIC
4950 && !(h
->def_regular
|| ELF_COMMON_DEF_P (h
)))
4952 bfd_byte
*loc
= contents
+ rel
->r_offset
;
4958 is_ea_sym
= (ea
!= NULL
4960 && sec
->output_section
== ea
);
4962 /* If this symbol is in an overlay area, we may need to relocate
4963 to the overlay stub. */
4964 addend
= rel
->r_addend
;
4967 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4968 contents
, info
)) != no_stub
)
4970 unsigned int ovl
= 0;
4971 struct got_entry
*g
, **head
;
4973 if (stub_type
!= nonovl_stub
)
4977 head
= &h
->got
.glist
;
4979 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4981 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4982 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4984 && g
->br_addr
== (rel
->r_offset
4985 + input_section
->output_offset
4986 + input_section
->output_section
->vma
))
4987 : g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4992 relocation
= g
->stub_addr
;
4997 /* For soft icache, encode the overlay index into addresses. */
4998 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4999 && (r_type
== R_SPU_ADDR16_HI
5000 || r_type
== R_SPU_ADDR32
|| r_type
== R_SPU_REL32
)
5003 unsigned int ovl
= overlay_index (sec
);
5006 unsigned int set_id
= ((ovl
- 1) >> htab
->num_lines_log2
) + 1;
5007 relocation
+= set_id
<< 18;
5012 if (htab
->params
->emit_fixups
&& !bfd_link_relocatable (info
)
5013 && (input_section
->flags
& SEC_ALLOC
) != 0
5014 && r_type
== R_SPU_ADDR32
)
5017 offset
= rel
->r_offset
+ input_section
->output_section
->vma
5018 + input_section
->output_offset
;
5019 spu_elf_emit_fixup (output_bfd
, info
, offset
);
5022 if (unresolved_reloc
)
5024 else if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
5028 /* ._ea is a special section that isn't allocated in SPU
5029 memory, but rather occupies space in PPU memory as
5030 part of an embedded ELF image. If this reloc is
5031 against a symbol defined in ._ea, then transform the
5032 reloc into an equivalent one without a symbol
5033 relative to the start of the ELF image. */
5034 rel
->r_addend
+= (relocation
5036 + elf_section_data (ea
)->this_hdr
.sh_offset
);
5037 rel
->r_info
= ELF32_R_INFO (0, r_type
);
5039 emit_these_relocs
= true;
5043 unresolved_reloc
= true;
5045 if (unresolved_reloc
5046 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
5047 rel
->r_offset
) != (bfd_vma
) -1)
5050 /* xgettext:c-format */
5051 (_("%pB(%s+%#" PRIx64
"): "
5052 "unresolvable %s relocation against symbol `%s'"),
5054 bfd_section_name (input_section
),
5055 (uint64_t) rel
->r_offset
,
5061 r
= _bfd_final_link_relocate (howto
,
5065 rel
->r_offset
, relocation
, addend
);
5067 if (r
!= bfd_reloc_ok
)
5069 const char *msg
= (const char *) 0;
5073 case bfd_reloc_overflow
:
5074 (*info
->callbacks
->reloc_overflow
)
5075 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
5076 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
);
5079 case bfd_reloc_undefined
:
5080 (*info
->callbacks
->undefined_symbol
)
5081 (info
, sym_name
, input_bfd
, input_section
, rel
->r_offset
, true);
5084 case bfd_reloc_outofrange
:
5085 msg
= _("internal error: out of range error");
5088 case bfd_reloc_notsupported
:
5089 msg
= _("internal error: unsupported relocation error");
5092 case bfd_reloc_dangerous
:
5093 msg
= _("internal error: dangerous error");
5097 msg
= _("internal error: unknown error");
5102 (*info
->callbacks
->warning
) (info
, msg
, sym_name
, input_bfd
,
5103 input_section
, rel
->r_offset
);
5110 && emit_these_relocs
5111 && !info
->emitrelocations
)
5113 Elf_Internal_Rela
*wrel
;
5114 Elf_Internal_Shdr
*rel_hdr
;
5116 wrel
= rel
= relocs
;
5117 relend
= relocs
+ input_section
->reloc_count
;
5118 for (; rel
< relend
; rel
++)
5122 r_type
= ELF32_R_TYPE (rel
->r_info
);
5123 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
5126 input_section
->reloc_count
= wrel
- relocs
;
5127 /* Backflips for _bfd_elf_link_output_relocs. */
5128 rel_hdr
= _bfd_elf_single_rel_hdr (input_section
);
5129 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
5137 spu_elf_finish_dynamic_sections (bfd
*output_bfd ATTRIBUTE_UNUSED
,
5138 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
5143 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5146 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
5147 const char *sym_name ATTRIBUTE_UNUSED
,
5148 Elf_Internal_Sym
*sym
,
5149 asection
*sym_sec ATTRIBUTE_UNUSED
,
5150 struct elf_link_hash_entry
*h
)
5152 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5154 if (!bfd_link_relocatable (info
)
5155 && htab
->stub_sec
!= NULL
5157 && (h
->root
.type
== bfd_link_hash_defined
5158 || h
->root
.type
== bfd_link_hash_defweak
)
5160 && startswith (h
->root
.root
.string
, "_SPUEAR_"))
5162 struct got_entry
*g
;
5164 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
5165 if (htab
->params
->ovly_flavour
== ovly_soft_icache
5166 ? g
->br_addr
== g
->stub_addr
5167 : g
->addend
== 0 && g
->ovl
== 0)
5169 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
5170 (htab
->stub_sec
[0]->output_section
->owner
,
5171 htab
->stub_sec
[0]->output_section
));
5172 sym
->st_value
= g
->stub_addr
;
/* Non-zero when linking an SPU plugin image; set via spu_elf_plugin.  */
static int spu_plugin = 0;

/* Record whether we are linking a plugin (affects e_type below).  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5188 /* Set ELF header e_type for plugins. */
5191 spu_elf_init_file_header (bfd
*abfd
, struct bfd_link_info
*info
)
5193 if (!_bfd_elf_init_file_header (abfd
, info
))
5198 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
5200 i_ehdrp
->e_type
= ET_DYN
;
5205 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5206 segments for overlays. */
5209 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5216 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5217 extra
= htab
->num_overlays
;
5223 sec
= bfd_get_section_by_name (abfd
, ".toe");
5224 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
5230 /* Remove .toe section from other PT_LOAD segments and put it in
5231 a segment of its own. Put overlays in separate segments too. */
5234 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
5237 struct elf_segment_map
*m
, *m_overlay
;
5238 struct elf_segment_map
**p
, **p_overlay
, **first_load
;
5244 toe
= bfd_get_section_by_name (abfd
, ".toe");
5245 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
5246 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
5247 for (i
= 0; i
< m
->count
; i
++)
5248 if ((s
= m
->sections
[i
]) == toe
5249 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
5251 struct elf_segment_map
*m2
;
5254 if (i
+ 1 < m
->count
)
5256 amt
= sizeof (struct elf_segment_map
);
5257 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
5258 m2
= bfd_zalloc (abfd
, amt
);
5261 m2
->count
= m
->count
- (i
+ 1);
5262 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
5263 m2
->count
* sizeof (m
->sections
[0]));
5264 m2
->p_type
= PT_LOAD
;
5272 amt
= sizeof (struct elf_segment_map
);
5273 m2
= bfd_zalloc (abfd
, amt
);
5276 m2
->p_type
= PT_LOAD
;
5278 m2
->sections
[0] = s
;
5286 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5287 PT_LOAD segments. This can cause the .ovl.init section to be
5288 overwritten with the contents of some overlay segment. To work
5289 around this issue, we ensure that all PF_OVERLAY segments are
5290 sorted first amongst the program headers; this ensures that even
5291 with a broken loader, the .ovl.init section (which is not marked
5292 as PF_OVERLAY) will be placed into SPU local store on startup. */
5294 /* Move all overlay segments onto a separate list. */
5295 p
= &elf_seg_map (abfd
);
5296 p_overlay
= &m_overlay
;
5301 if ((*p
)->p_type
== PT_LOAD
)
5305 if ((*p
)->count
== 1
5306 && spu_elf_section_data ((*p
)->sections
[0])->u
.o
.ovl_index
!= 0)
5312 p_overlay
= &m
->next
;
5319 /* Re-insert overlay segments at the head of the segment map. */
5320 if (m_overlay
!= NULL
)
5323 if (*p
!= NULL
&& (*p
)->p_type
== PT_LOAD
&& (*p
)->includes_filehdr
)
5324 /* It doesn't really make sense for someone to include the ELF
5325 file header into an spu image, but if they do the code that
5326 assigns p_offset needs to see the segment containing the
5336 /* Tweak the section type of .note.spu_name. */
5339 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
5340 Elf_Internal_Shdr
*hdr
,
5343 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
5344 hdr
->sh_type
= SHT_NOTE
;
5348 /* Tweak phdrs before writing them out. */
5351 spu_elf_modify_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5355 const struct elf_backend_data
*bed
;
5356 struct elf_obj_tdata
*tdata
;
5357 Elf_Internal_Phdr
*phdr
, *last
;
5358 struct spu_link_hash_table
*htab
;
5362 bed
= get_elf_backend_data (abfd
);
5363 tdata
= elf_tdata (abfd
);
5365 count
= elf_program_header_size (abfd
) / bed
->s
->sizeof_phdr
;
5366 htab
= spu_hash_table (info
);
5367 if (htab
->num_overlays
!= 0)
5369 struct elf_segment_map
*m
;
5372 for (i
= 0, m
= elf_seg_map (abfd
); m
; ++i
, m
= m
->next
)
5374 && ((o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
)
5377 /* Mark this as an overlay header. */
5378 phdr
[i
].p_flags
|= PF_OVERLAY
;
5380 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0
5381 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
5383 bfd_byte
*p
= htab
->ovtab
->contents
;
5384 unsigned int off
= o
* 16 + 8;
5386 /* Write file_off into _ovly_table. */
5387 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
5390 /* Soft-icache has its file offset put in .ovl.init. */
5391 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
5394 = elf_section_data (htab
->ovl_sec
[0])->this_hdr
.sh_offset
;
5396 bfd_put_32 (htab
->init
->owner
, val
, htab
->init
->contents
+ 4);
5400 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5401 of 16. This should always be possible when using the standard
5402 linker scripts, but don't create overlapping segments if
5403 someone is playing games with linker scripts. */
5405 for (i
= count
; i
-- != 0; )
5406 if (phdr
[i
].p_type
== PT_LOAD
)
5410 adjust
= -phdr
[i
].p_filesz
& 15;
5413 && (phdr
[i
].p_offset
+ phdr
[i
].p_filesz
5414 > last
->p_offset
- adjust
))
5417 adjust
= -phdr
[i
].p_memsz
& 15;
5420 && phdr
[i
].p_filesz
!= 0
5421 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
5422 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
5425 if (phdr
[i
].p_filesz
!= 0)
5429 if (i
== (unsigned int) -1)
5430 for (i
= count
; i
-- != 0; )
5431 if (phdr
[i
].p_type
== PT_LOAD
)
5435 adjust
= -phdr
[i
].p_filesz
& 15;
5436 phdr
[i
].p_filesz
+= adjust
;
5438 adjust
= -phdr
[i
].p_memsz
& 15;
5439 phdr
[i
].p_memsz
+= adjust
;
5443 return _bfd_elf_modify_headers (abfd
, info
);
5447 spu_elf_size_sections (bfd
*obfd ATTRIBUTE_UNUSED
, struct bfd_link_info
*info
)
5449 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5450 if (htab
->params
->emit_fixups
)
5452 asection
*sfixup
= htab
->sfixup
;
5453 int fixup_count
= 0;
5457 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
5461 if (bfd_get_flavour (ibfd
) != bfd_target_elf_flavour
)
5464 /* Walk over each section attached to the input bfd. */
5465 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
5467 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
5470 /* If there aren't any relocs, then there's nothing more
5472 if ((isec
->flags
& SEC_ALLOC
) == 0
5473 || (isec
->flags
& SEC_RELOC
) == 0
5474 || isec
->reloc_count
== 0)
5477 /* Get the relocs. */
5479 _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
5481 if (internal_relocs
== NULL
)
5484 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5485 relocations. They are stored in a single word by
5486 saving the upper 28 bits of the address and setting the
5487 lower 4 bits to a bit mask of the words that have the
5488 relocation. BASE_END keeps track of the next quadword. */
5489 irela
= internal_relocs
;
5490 irelaend
= irela
+ isec
->reloc_count
;
5492 for (; irela
< irelaend
; irela
++)
5493 if (ELF32_R_TYPE (irela
->r_info
) == R_SPU_ADDR32
5494 && irela
->r_offset
>= base_end
)
5496 base_end
= (irela
->r_offset
& ~(bfd_vma
) 15) + 16;
5502 /* We always have a NULL fixup as a sentinel */
5503 size
= (fixup_count
+ 1) * FIXUP_RECORD_SIZE
;
5504 if (!bfd_set_section_size (sfixup
, size
))
5506 sfixup
->contents
= (bfd_byte
*) bfd_zalloc (info
->input_bfds
, size
);
5507 if (sfixup
->contents
== NULL
)
5513 #define TARGET_BIG_SYM spu_elf32_vec
5514 #define TARGET_BIG_NAME "elf32-spu"
5515 #define ELF_ARCH bfd_arch_spu
5516 #define ELF_TARGET_ID SPU_ELF_DATA
5517 #define ELF_MACHINE_CODE EM_SPU
5518 /* This matches the alignment need for DMA. */
5519 #define ELF_MAXPAGESIZE 0x80
5520 #define elf_backend_rela_normal 1
5521 #define elf_backend_can_gc_sections 1
5523 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5524 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5525 #define elf_info_to_howto spu_elf_info_to_howto
5526 #define elf_backend_count_relocs spu_elf_count_relocs
5527 #define elf_backend_relocate_section spu_elf_relocate_section
5528 #define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
5529 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5530 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5531 #define elf_backend_object_p spu_elf_object_p
5532 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5533 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5535 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5536 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5537 #define elf_backend_modify_headers spu_elf_modify_headers
5538 #define elf_backend_init_file_header spu_elf_init_file_header
5539 #define elf_backend_fake_sections spu_elf_fake_sections
5540 #define elf_backend_special_sections spu_elf_special_sections
5541 #define bfd_elf32_bfd_final_link spu_elf_final_link
5543 #include "elf32-target.h"