1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
91 HOWTO (R_SPU_ADD_PIC
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
92 bfd_elf_generic_reloc
, "SPU_ADD_PIC",
93 FALSE
, 0, 0x00000000, FALSE
),
96 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
97 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
98 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
109 case BFD_RELOC_SPU_IMM10W
:
111 case BFD_RELOC_SPU_IMM16W
:
113 case BFD_RELOC_SPU_LO16
:
114 return R_SPU_ADDR16_LO
;
115 case BFD_RELOC_SPU_HI16
:
116 return R_SPU_ADDR16_HI
;
117 case BFD_RELOC_SPU_IMM18
:
119 case BFD_RELOC_SPU_PCREL16
:
121 case BFD_RELOC_SPU_IMM7
:
123 case BFD_RELOC_SPU_IMM8
:
125 case BFD_RELOC_SPU_PCREL9a
:
127 case BFD_RELOC_SPU_PCREL9b
:
129 case BFD_RELOC_SPU_IMM10
:
130 return R_SPU_ADDR10I
;
131 case BFD_RELOC_SPU_IMM16
:
132 return R_SPU_ADDR16I
;
135 case BFD_RELOC_32_PCREL
:
137 case BFD_RELOC_SPU_PPU32
:
139 case BFD_RELOC_SPU_PPU64
:
141 case BFD_RELOC_SPU_ADD_PIC
:
142 return R_SPU_ADD_PIC
;
/* Implement the elf_info_to_howto hook: decode the relocation type from
   *DST's r_info field and cache the matching elf_howto_table entry.
   NOTE(review): this extract is missing the function's return type, the
   `cache_ptr' parameter declaration and the surrounding braces -- only
   the statement fragments below survive.  */
147 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
149 Elf_Internal_Rela
*dst
)
/* Relocation type decoded from the r_info field of *DST.  */
151 enum elf_spu_reloc_type r_type
;
153 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
/* Guard against corrupt input: the type must index elf_howto_table.  */
154 BFD_ASSERT (r_type
< R_SPU_max
);
155 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
/* Implement bfd_reloc_type_lookup: map a BFD_RELOC_* code to the SPU
   howto entry via spu_elf_bfd_to_reloc_type.
   NOTE(review): braces and the failure path taken when the code maps to
   R_SPU_NONE are missing from this extract.  */
158 static reloc_howto_type
*
159 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
160 bfd_reloc_code_real_type code
)
162 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
/* R_SPU_NONE here means the BFD reloc code was not recognised.  */
164 if (r_type
== R_SPU_NONE
)
167 return elf_howto_table
+ r_type
;
/* Implement bfd_reloc_name_lookup: find a howto entry by name with a
   case-insensitive linear scan of elf_howto_table.
   NOTE(review): the `r_name' parameter line, the loop index declaration,
   braces, and the NULL return for a failed lookup are missing from this
   extract.  */
170 static reloc_howto_type
*
171 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
/* Scan the whole table, skipping entries without a name.  */
176 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
177 if (elf_howto_table
[i
].name
!= NULL
178 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
179 return &elf_howto_table
[i
];
184 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
/* Special function for the 9-bit pc-relative relocations: compute the
   pc-relative value, range-check it, then scatter it into the split
   bit-fields of the instruction word selected by the howto's dst_mask.
   NOTE(review): several original lines are missing from this extract --
   the declarations of `val' and `insn', the opening brace, the initial
   `val = symbol->value' assignment, the word-scaling of `val' before the
   range check, and the final bfd_reloc_ok return.  */
186 static bfd_reloc_status_type
187 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
188 void *data
, asection
*input_section
,
189 bfd
*output_bfd
, char **error_message
)
/* Byte offset of the relocation within the section contents.  */
191 bfd_size_type octets
;
195 /* If this is a relocatable link (output_bfd test tells us), just
196 call the generic function. Any adjustment will be done at final
198 if (output_bfd
!= NULL
)
199 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
200 input_section
, output_bfd
, error_message
);
/* Reject relocations past the end of the section.  */
202 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
203 return bfd_reloc_outofrange
;
204 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
206 /* Get symbol value. */
208 if (!bfd_is_com_section (symbol
->section
))
210 if (symbol
->section
->output_section
)
211 val
+= symbol
->section
->output_section
->vma
;
213 val
+= reloc_entry
->addend
;
215 /* Make it pc-relative. */
216 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
/* Range check: the displacement must fit in 9 signed bits,
   i.e. -256 .. 255.  */
219 if (val
+ 256 >= 512)
220 return bfd_reloc_overflow
;
222 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
224 /* Move two high bits of value to REL9I and REL9 position.
225 The mask will take care of selecting the right field. */
226 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
227 insn
&= ~reloc_entry
->howto
->dst_mask
;
228 insn
|= val
& reloc_entry
->howto
->dst_mask
;
229 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
/* Implement elf_backend_new_section_hook: allocate the SPU-specific
   per-section data (struct _spu_elf_section_data) for SEC, then chain to
   the generic ELF hook.
   NOTE(review): the return type, braces, and the NULL check on the
   bfd_zalloc result (with its failure return) are missing from this
   extract.  */
234 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
236 if (!sec
->used_by_bfd
)
238 struct _spu_elf_section_data
*sdata
;
/* bfd_zalloc gives zero-initialised backend data owned by ABFD.  */
240 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
243 sec
->used_by_bfd
= sdata
;
246 return _bfd_elf_new_section_hook (abfd
, sec
);
249 /* Set up overlay info for executables. */
252 spu_elf_object_p (bfd
*abfd
)
254 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
256 unsigned int i
, num_ovl
, num_buf
;
257 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
258 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
259 Elf_Internal_Phdr
*last_phdr
= NULL
;
261 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
262 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
267 if (last_phdr
== NULL
268 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
271 for (j
= 1; j
< elf_numsections (abfd
); j
++)
273 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
275 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
277 asection
*sec
= shdr
->bfd_section
;
278 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
279 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
287 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
288 strip --strip-unneeded will not remove them. */
/* NOTE(review): the function's return type line and braces are missing
   from this extract.  */
291 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
/* Only named, non-absolute symbols whose names begin with "_EAR_".  */
293 if (sym
->name
!= NULL
294 && sym
->section
!= bfd_abs_section_ptr
295 && strncmp (sym
->name
, "_EAR_", 5) == 0)
296 sym
->flags
|= BSF_KEEP
;
299 /* SPU ELF linker hash table. */
301 struct spu_link_hash_table
303 struct elf_link_hash_table elf
;
305 struct spu_elf_params
*params
;
307 /* Shortcuts to overlay sections. */
313 /* Count of stubs in each overlay section. */
314 unsigned int *stub_count
;
316 /* The stub section for each overlay section. */
319 struct elf_link_hash_entry
*ovly_entry
[2];
321 /* Number of overlay buffers. */
322 unsigned int num_buf
;
324 /* Total number of overlays. */
325 unsigned int num_overlays
;
327 /* For soft icache. */
328 unsigned int line_size_log2
;
329 unsigned int num_lines_log2
;
330 unsigned int fromelem_size_log2
;
332 /* How much memory we have. */
333 unsigned int local_store
;
335 /* Count of overlay stubs needed in non-overlay area. */
336 unsigned int non_ovly_stub
;
338 /* Pointer to the fixup section */
342 unsigned int stub_err
: 1;
345 /* Hijack the generic got fields for overlay stub accounting. */
349 struct got_entry
*next
;
/* Fetch the SPU ELF linker hash table from a bfd_link_info, verifying
   the hash table id; evaluates to NULL when the hash table is not of
   SPU_ELF_DATA flavour.  */
358 #define spu_hash_table(p) \
359 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
360 == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
364 struct function_info
*fun
;
365 struct call_info
*next
;
367 unsigned int max_depth
;
368 unsigned int is_tail
: 1;
369 unsigned int is_pasted
: 1;
370 unsigned int broken_cycle
: 1;
371 unsigned int priority
: 13;
376 /* List of functions called. Also branches to hot/cold part of
378 struct call_info
*call_list
;
379 /* For hot/cold part of function, point to owner. */
380 struct function_info
*start
;
381 /* Symbol at start of function. */
383 Elf_Internal_Sym
*sym
;
384 struct elf_link_hash_entry
*h
;
386 /* Function section. */
389 /* Where last called from, and number of sections called from. */
390 asection
*last_caller
;
391 unsigned int call_count
;
392 /* Address range of (this part of) function. */
394 /* Offset where we found a store of lr, or -1 if none found. */
396 /* Offset where we found the stack adjustment insn. */
400 /* Distance from root of call tree. Tail and hot/cold branches
401 count as one deeper. We aren't counting stack frames here. */
403 /* Set if global symbol. */
404 unsigned int global
: 1;
405 /* Set if known to be start of function (as distinct from a hunk
406 in hot/cold section. */
407 unsigned int is_func
: 1;
408 /* Set if not a root node. */
409 unsigned int non_root
: 1;
410 /* Flags used during call tree traversal. It's cheaper to replicate
411 the visit flags than have one which needs clearing after a traversal. */
412 unsigned int visit1
: 1;
413 unsigned int visit2
: 1;
414 unsigned int marking
: 1;
415 unsigned int visit3
: 1;
416 unsigned int visit4
: 1;
417 unsigned int visit5
: 1;
418 unsigned int visit6
: 1;
419 unsigned int visit7
: 1;
422 struct spu_elf_stack_info
426 /* Variable size array describing functions, one per contiguous
427 address range belonging to a function. */
428 struct function_info fun
[1];
431 static struct function_info
*find_function (asection
*, bfd_vma
,
432 struct bfd_link_info
*);
434 /* Create a spu ELF linker hash table. */
436 static struct bfd_link_hash_table
*
437 spu_elf_link_hash_table_create (bfd
*abfd
)
439 struct spu_link_hash_table
*htab
;
441 htab
= bfd_malloc (sizeof (*htab
));
445 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
446 _bfd_elf_link_hash_newfunc
,
447 sizeof (struct elf_link_hash_entry
),
454 memset (&htab
->ovtab
, 0,
455 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
457 htab
->elf
.init_got_refcount
.refcount
= 0;
458 htab
->elf
.init_got_refcount
.glist
= NULL
;
459 htab
->elf
.init_got_offset
.offset
= 0;
460 htab
->elf
.init_got_offset
.glist
= NULL
;
461 return &htab
->elf
.root
;
465 spu_elf_setup (struct bfd_link_info
*info
, struct spu_elf_params
*params
)
467 bfd_vma max_branch_log2
;
469 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
470 htab
->params
= params
;
471 htab
->line_size_log2
= bfd_log2 (htab
->params
->line_size
);
472 htab
->num_lines_log2
= bfd_log2 (htab
->params
->num_lines
);
474 /* For the software i-cache, we provide a "from" list whose size
475 is a power-of-two number of quadwords, big enough to hold one
476 byte per outgoing branch. Compute this number here. */
477 max_branch_log2
= bfd_log2 (htab
->params
->max_branch
);
478 htab
->fromelem_size_log2
= max_branch_log2
> 4 ? max_branch_log2
- 4 : 0;
481 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
482 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
483 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
486 get_sym_h (struct elf_link_hash_entry
**hp
,
487 Elf_Internal_Sym
**symp
,
489 Elf_Internal_Sym
**locsymsp
,
490 unsigned long r_symndx
,
493 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
495 if (r_symndx
>= symtab_hdr
->sh_info
)
497 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
498 struct elf_link_hash_entry
*h
;
500 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
501 while (h
->root
.type
== bfd_link_hash_indirect
502 || h
->root
.type
== bfd_link_hash_warning
)
503 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
513 asection
*symsec
= NULL
;
514 if (h
->root
.type
== bfd_link_hash_defined
515 || h
->root
.type
== bfd_link_hash_defweak
)
516 symsec
= h
->root
.u
.def
.section
;
522 Elf_Internal_Sym
*sym
;
523 Elf_Internal_Sym
*locsyms
= *locsymsp
;
527 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
529 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
531 0, NULL
, NULL
, NULL
);
536 sym
= locsyms
+ r_symndx
;
545 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
551 /* Create the note section if not already present. This is done early so
552 that the linker maps the sections to the right place in the output. */
555 spu_elf_create_sections (struct bfd_link_info
*info
)
557 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
560 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
561 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
566 /* Make SPU_PTNOTE_SPUNAME section. */
573 ibfd
= info
->input_bfds
;
574 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
575 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
577 || !bfd_set_section_alignment (ibfd
, s
, 4))
580 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
581 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
582 size
+= (name_len
+ 3) & -4;
584 if (!bfd_set_section_size (ibfd
, s
, size
))
587 data
= bfd_zalloc (ibfd
, size
);
591 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
592 bfd_put_32 (ibfd
, name_len
, data
+ 4);
593 bfd_put_32 (ibfd
, 1, data
+ 8);
594 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
595 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
596 bfd_get_filename (info
->output_bfd
), name_len
);
600 if (htab
->params
->emit_fixups
)
604 ibfd
= info
->input_bfds
;
605 flags
= SEC_LOAD
| SEC_ALLOC
| SEC_READONLY
| SEC_HAS_CONTENTS
607 s
= bfd_make_section_anyway_with_flags (ibfd
, ".fixup", flags
);
608 if (s
== NULL
|| !bfd_set_section_alignment (ibfd
, s
, 2))
616 /* qsort predicate to sort sections by vma. */
619 sort_sections (const void *a
, const void *b
)
621 const asection
*const *s1
= a
;
622 const asection
*const *s2
= b
;
623 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
626 return delta
< 0 ? -1 : 1;
628 return (*s1
)->index
- (*s2
)->index
;
631 /* Identify overlays in the output bfd, and number them.
632 Returns 0 on error, 1 if no overlays, 2 if overlays. */
635 spu_elf_find_overlays (struct bfd_link_info
*info
)
637 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
638 asection
**alloc_sec
;
639 unsigned int i
, n
, ovl_index
, num_buf
;
642 static const char *const entry_names
[2][2] = {
643 { "__ovly_load", "__icache_br_handler" },
644 { "__ovly_return", "__icache_call_handler" }
647 if (info
->output_bfd
->section_count
< 2)
651 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
652 if (alloc_sec
== NULL
)
655 /* Pick out all the alloced sections. */
656 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
657 if ((s
->flags
& SEC_ALLOC
) != 0
658 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
668 /* Sort them by vma. */
669 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
671 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
672 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
674 unsigned int prev_buf
= 0, set_id
= 0;
676 /* Look for an overlapping vma to find the first overlay section. */
677 bfd_vma vma_start
= 0;
679 for (i
= 1; i
< n
; i
++)
682 if (s
->vma
< ovl_end
)
684 asection
*s0
= alloc_sec
[i
- 1];
688 << (htab
->num_lines_log2
+ htab
->line_size_log2
)));
693 ovl_end
= s
->vma
+ s
->size
;
696 /* Now find any sections within the cache area. */
697 for (ovl_index
= 0, num_buf
= 0; i
< n
; i
++)
700 if (s
->vma
>= ovl_end
)
703 /* A section in an overlay area called .ovl.init is not
704 an overlay, in the sense that it might be loaded in
705 by the overlay manager, but rather the initial
706 section contents for the overlay buffer. */
707 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
709 num_buf
= ((s
->vma
- vma_start
) >> htab
->line_size_log2
) + 1;
710 set_id
= (num_buf
== prev_buf
)? set_id
+ 1 : 0;
713 if ((s
->vma
- vma_start
) & (htab
->params
->line_size
- 1))
715 info
->callbacks
->einfo (_("%X%P: overlay section %A "
716 "does not start on a cache line.\n"),
718 bfd_set_error (bfd_error_bad_value
);
721 else if (s
->size
> htab
->params
->line_size
)
723 info
->callbacks
->einfo (_("%X%P: overlay section %A "
724 "is larger than a cache line.\n"),
726 bfd_set_error (bfd_error_bad_value
);
730 alloc_sec
[ovl_index
++] = s
;
731 spu_elf_section_data (s
)->u
.o
.ovl_index
732 = (set_id
<< htab
->num_lines_log2
) + num_buf
;
733 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
737 /* Ensure there are no more overlay sections. */
741 if (s
->vma
< ovl_end
)
743 info
->callbacks
->einfo (_("%X%P: overlay section %A "
744 "is not in cache area.\n"),
746 bfd_set_error (bfd_error_bad_value
);
750 ovl_end
= s
->vma
+ s
->size
;
755 /* Look for overlapping vmas. Any with overlap must be overlays.
756 Count them. Also count the number of overlay regions. */
757 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
760 if (s
->vma
< ovl_end
)
762 asection
*s0
= alloc_sec
[i
- 1];
764 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
767 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
769 alloc_sec
[ovl_index
] = s0
;
770 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
771 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= num_buf
;
774 ovl_end
= s
->vma
+ s
->size
;
776 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
778 alloc_sec
[ovl_index
] = s
;
779 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
780 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
781 if (s0
->vma
!= s
->vma
)
783 info
->callbacks
->einfo (_("%X%P: overlay sections %A "
784 "and %A do not start at the "
787 bfd_set_error (bfd_error_bad_value
);
790 if (ovl_end
< s
->vma
+ s
->size
)
791 ovl_end
= s
->vma
+ s
->size
;
795 ovl_end
= s
->vma
+ s
->size
;
799 htab
->num_overlays
= ovl_index
;
800 htab
->num_buf
= num_buf
;
801 htab
->ovl_sec
= alloc_sec
;
806 for (i
= 0; i
< 2; i
++)
809 struct elf_link_hash_entry
*h
;
811 name
= entry_names
[i
][htab
->params
->ovly_flavour
];
812 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
816 if (h
->root
.type
== bfd_link_hash_new
)
818 h
->root
.type
= bfd_link_hash_undefined
;
820 h
->ref_regular_nonweak
= 1;
823 htab
->ovly_entry
[i
] = h
;
829 /* Non-zero to use bra in overlay stubs rather than br. */
832 #define BRA 0x30000000
833 #define BRASL 0x31000000
834 #define BR 0x32000000
835 #define BRSL 0x33000000
836 #define NOP 0x40200000
837 #define LNOP 0x00200000
838 #define ILA 0x42000000
840 /* Return true for all relative and absolute branch instructions.
848 brhnz 00100011 0.. */
851 is_branch (const unsigned char *insn
)
853 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
856 /* Return true for all indirect branch instructions.
864 bihnz 00100101 011 */
867 is_indirect_branch (const unsigned char *insn
)
869 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
872 /* Return true for branch hint instructions.
877 is_hint (const unsigned char *insn
)
879 return (insn
[0] & 0xfc) == 0x10;
882 /* True if INPUT_SECTION might need overlay stubs. */
/* NOTE(review): the return type, braces, and the return statements
   taken by each of the three early-out tests below are missing from
   this extract.  */
885 maybe_needs_stubs (asection
*input_section
)
887 /* No stubs for debug sections and suchlike. */
888 if ((input_section
->flags
& SEC_ALLOC
) == 0)
891 /* No stubs for link-once sections that will be discarded. */
892 if (input_section
->output_section
== bfd_abs_section_ptr
)
895 /* Don't create stubs for .eh_frame references. */
896 if (strcmp (input_section
->name
, ".eh_frame") == 0)
918 /* Return non-zero if this reloc symbol should go via an overlay stub.
919 Return 2 if the stub must be in non-overlay area. */
921 static enum _stub_type
922 needs_ovl_stub (struct elf_link_hash_entry
*h
,
923 Elf_Internal_Sym
*sym
,
925 asection
*input_section
,
926 Elf_Internal_Rela
*irela
,
928 struct bfd_link_info
*info
)
930 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
931 enum elf_spu_reloc_type r_type
;
932 unsigned int sym_type
;
933 bfd_boolean branch
, hint
, call
;
934 enum _stub_type ret
= no_stub
;
938 || sym_sec
->output_section
== bfd_abs_section_ptr
939 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
944 /* Ensure no stubs for user supplied overlay manager syms. */
945 if (h
== htab
->ovly_entry
[0] || h
== htab
->ovly_entry
[1])
948 /* setjmp always goes via an overlay stub, because then the return
949 and hence the longjmp goes via __ovly_return. That magically
950 makes setjmp/longjmp between overlays work. */
951 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
952 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
959 sym_type
= ELF_ST_TYPE (sym
->st_info
);
961 r_type
= ELF32_R_TYPE (irela
->r_info
);
965 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
967 if (contents
== NULL
)
970 if (!bfd_get_section_contents (input_section
->owner
,
977 contents
+= irela
->r_offset
;
979 branch
= is_branch (contents
);
980 hint
= is_hint (contents
);
983 call
= (contents
[0] & 0xfd) == 0x31;
985 && sym_type
!= STT_FUNC
988 /* It's common for people to write assembly and forget
989 to give function symbols the right type. Handle
990 calls to such symbols, but warn so that (hopefully)
991 people will fix their code. We need the symbol
992 type to be correct to distinguish function pointer
993 initialisation from other pointer initialisations. */
994 const char *sym_name
;
997 sym_name
= h
->root
.root
.string
;
1000 Elf_Internal_Shdr
*symtab_hdr
;
1001 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
1002 sym_name
= bfd_elf_sym_name (input_section
->owner
,
1007 (*_bfd_error_handler
) (_("warning: call to non-function"
1008 " symbol %s defined in %B"),
1009 sym_sec
->owner
, sym_name
);
1015 if ((!branch
&& htab
->params
->ovly_flavour
== ovly_soft_icache
)
1016 || (sym_type
!= STT_FUNC
1017 && !(branch
|| hint
)
1018 && (sym_sec
->flags
& SEC_CODE
) == 0))
1021 /* Usually, symbols in non-overlay sections don't need stubs. */
1022 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
1023 && !htab
->params
->non_overlay_stubs
)
1026 /* A reference from some other section to a symbol in an overlay
1027 section needs a stub. */
1028 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
1029 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
1031 unsigned int lrlive
= 0;
1033 lrlive
= (contents
[1] & 0x70) >> 4;
1035 if (!lrlive
&& (call
|| sym_type
== STT_FUNC
))
1036 ret
= call_ovl_stub
;
1038 ret
= br000_ovl_stub
+ lrlive
;
1041 /* If this insn isn't a branch then we are possibly taking the
1042 address of a function and passing it out somehow. Soft-icache code
1043 always generates inline code to do indirect branches. */
1044 if (!(branch
|| hint
)
1045 && sym_type
== STT_FUNC
1046 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
1053 count_stub (struct spu_link_hash_table
*htab
,
1056 enum _stub_type stub_type
,
1057 struct elf_link_hash_entry
*h
,
1058 const Elf_Internal_Rela
*irela
)
1060 unsigned int ovl
= 0;
1061 struct got_entry
*g
, **head
;
1064 /* If this instruction is a branch or call, we need a stub
1065 for it. One stub per function per overlay.
1066 If it isn't a branch, then we are taking the address of
1067 this function so need a stub in the non-overlay area
1068 for it. One stub per function. */
1069 if (stub_type
!= nonovl_stub
)
1070 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1073 head
= &h
->got
.glist
;
1076 if (elf_local_got_ents (ibfd
) == NULL
)
1078 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
1079 * sizeof (*elf_local_got_ents (ibfd
)));
1080 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
1081 if (elf_local_got_ents (ibfd
) == NULL
)
1084 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1087 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1089 htab
->stub_count
[ovl
] += 1;
1095 addend
= irela
->r_addend
;
1099 struct got_entry
*gnext
;
1101 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1102 if (g
->addend
== addend
&& g
->ovl
== 0)
1107 /* Need a new non-overlay area stub. Zap other stubs. */
1108 for (g
= *head
; g
!= NULL
; g
= gnext
)
1111 if (g
->addend
== addend
)
1113 htab
->stub_count
[g
->ovl
] -= 1;
1121 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1122 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1128 g
= bfd_malloc (sizeof *g
);
1133 g
->stub_addr
= (bfd_vma
) -1;
1137 htab
->stub_count
[ovl
] += 1;
1143 /* Support two sizes of overlay stubs, a slower more compact stub of two
1144 intructions, and a faster stub of four instructions.
1145 Soft-icache stubs are four or eight words. */
1148 ovl_stub_size (struct spu_elf_params
*params
)
1150 return 16 << params
->ovly_flavour
>> params
->compact_stub
;
1154 ovl_stub_size_log2 (struct spu_elf_params
*params
)
1156 return 4 + params
->ovly_flavour
- params
->compact_stub
;
1159 /* Two instruction overlay stubs look like:
1161 brsl $75,__ovly_load
1162 .word target_ovl_and_address
1164 ovl_and_address is a word with the overlay number in the top 14 bits
1165 and local store address in the bottom 18 bits.
1167 Four instruction overlay stubs look like:
1171 ila $79,target_address
1174 Software icache stubs are:
1178 .word lrlive_branchlocalstoreaddr;
1179 brasl $75,__icache_br_handler
1184 build_stub (struct bfd_link_info
*info
,
1187 enum _stub_type stub_type
,
1188 struct elf_link_hash_entry
*h
,
1189 const Elf_Internal_Rela
*irela
,
1193 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1194 unsigned int ovl
, dest_ovl
, set_id
;
1195 struct got_entry
*g
, **head
;
1197 bfd_vma addend
, from
, to
, br_dest
, patt
;
1198 unsigned int lrlive
;
1201 if (stub_type
!= nonovl_stub
)
1202 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1205 head
= &h
->got
.glist
;
1207 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1211 addend
= irela
->r_addend
;
1213 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1215 g
= bfd_malloc (sizeof *g
);
1221 g
->br_addr
= (irela
->r_offset
1222 + isec
->output_offset
1223 + isec
->output_section
->vma
);
1229 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1230 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1235 if (g
->ovl
== 0 && ovl
!= 0)
1238 if (g
->stub_addr
!= (bfd_vma
) -1)
1242 sec
= htab
->stub_sec
[ovl
];
1243 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1244 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1245 g
->stub_addr
= from
;
1246 to
= (htab
->ovly_entry
[0]->root
.u
.def
.value
1247 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_offset
1248 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_section
->vma
);
1250 if (((dest
| to
| from
) & 3) != 0)
1255 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1257 if (htab
->params
->ovly_flavour
== ovly_normal
1258 && !htab
->params
->compact_stub
)
1260 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1261 sec
->contents
+ sec
->size
);
1262 bfd_put_32 (sec
->owner
, LNOP
,
1263 sec
->contents
+ sec
->size
+ 4);
1264 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1265 sec
->contents
+ sec
->size
+ 8);
1267 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1268 sec
->contents
+ sec
->size
+ 12);
1270 bfd_put_32 (sec
->owner
, BRA
+ ((to
<< 5) & 0x007fff80),
1271 sec
->contents
+ sec
->size
+ 12);
1273 else if (htab
->params
->ovly_flavour
== ovly_normal
1274 && htab
->params
->compact_stub
)
1277 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1278 sec
->contents
+ sec
->size
);
1280 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1281 sec
->contents
+ sec
->size
);
1282 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1283 sec
->contents
+ sec
->size
+ 4);
1285 else if (htab
->params
->ovly_flavour
== ovly_soft_icache
1286 && htab
->params
->compact_stub
)
1289 if (stub_type
== nonovl_stub
)
1291 else if (stub_type
== call_ovl_stub
)
1292 /* A brsl makes lr live and *(*sp+16) is live.
1293 Tail calls have the same liveness. */
1295 else if (!htab
->params
->lrlive_analysis
)
1296 /* Assume stack frame and lr save. */
1298 else if (irela
!= NULL
)
1300 /* Analyse branch instructions. */
1301 struct function_info
*caller
;
1304 caller
= find_function (isec
, irela
->r_offset
, info
);
1305 if (caller
->start
== NULL
)
1306 off
= irela
->r_offset
;
1309 struct function_info
*found
= NULL
;
1311 /* Find the earliest piece of this function that
1312 has frame adjusting instructions. We might
1313 see dynamic frame adjustment (eg. for alloca)
1314 in some later piece, but functions using
1315 alloca always set up a frame earlier. Frame
1316 setup instructions are always in one piece. */
1317 if (caller
->lr_store
!= (bfd_vma
) -1
1318 || caller
->sp_adjust
!= (bfd_vma
) -1)
1320 while (caller
->start
!= NULL
)
1322 caller
= caller
->start
;
1323 if (caller
->lr_store
!= (bfd_vma
) -1
1324 || caller
->sp_adjust
!= (bfd_vma
) -1)
1332 if (off
> caller
->sp_adjust
)
1334 if (off
> caller
->lr_store
)
1335 /* Only *(*sp+16) is live. */
1338 /* If no lr save, then we must be in a
1339 leaf function with a frame.
1340 lr is still live. */
1343 else if (off
> caller
->lr_store
)
1345 /* Between lr save and stack adjust. */
1347 /* This should never happen since prologues won't
1352 /* On entry to function. */
1355 if (stub_type
!= br000_ovl_stub
1356 && lrlive
!= stub_type
- br000_ovl_stub
)
1357 info
->callbacks
->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1358 "from analysis (%u)\n"),
1359 isec
, irela
->r_offset
, lrlive
,
1360 stub_type
- br000_ovl_stub
);
1363 /* If given lrlive info via .brinfo, use it. */
1364 if (stub_type
> br000_ovl_stub
)
1365 lrlive
= stub_type
- br000_ovl_stub
;
1368 to
= (htab
->ovly_entry
[1]->root
.u
.def
.value
1369 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_offset
1370 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_section
->vma
);
1372 /* The branch that uses this stub goes to stub_addr + 4. We'll
1373 set up an xor pattern that can be used by the icache manager
1374 to modify this branch to go directly to its destination. */
1376 br_dest
= g
->stub_addr
;
1379 /* Except in the case of _SPUEAR_ stubs, the branch in
1380 question is the one in the stub itself. */
1381 BFD_ASSERT (stub_type
== nonovl_stub
);
1382 g
->br_addr
= g
->stub_addr
;
1386 set_id
= ((dest_ovl
- 1) >> htab
->num_lines_log2
) + 1;
1387 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1388 sec
->contents
+ sec
->size
);
1389 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1390 sec
->contents
+ sec
->size
+ 4);
1391 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1392 sec
->contents
+ sec
->size
+ 8);
1393 patt
= dest
^ br_dest
;
1394 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1395 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1396 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1397 sec
->contents
+ sec
->size
+ 12);
1400 /* Extra space for linked list entries. */
1406 sec
->size
+= ovl_stub_size (htab
->params
);
1408 if (htab
->params
->emit_stub_syms
)
1414 len
= 8 + sizeof (".ovl_call.") - 1;
1416 len
+= strlen (h
->root
.root
.string
);
1421 add
= (int) irela
->r_addend
& 0xffffffff;
1424 name
= bfd_malloc (len
);
1428 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1430 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1432 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1433 dest_sec
->id
& 0xffffffff,
1434 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1436 sprintf (name
+ len
- 9, "+%x", add
);
1438 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1442 if (h
->root
.type
== bfd_link_hash_new
)
1444 h
->root
.type
= bfd_link_hash_defined
;
1445 h
->root
.u
.def
.section
= sec
;
1446 h
->size
= ovl_stub_size (htab
->params
);
1447 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1451 h
->ref_regular_nonweak
= 1;
1452 h
->forced_local
= 1;
1460 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1464 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1466 /* Symbols starting with _SPUEAR_ need a stub because they may be
1467 invoked by the PPU. */
1468 struct bfd_link_info
*info
= inf
;
1469 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1472 if ((h
->root
.type
== bfd_link_hash_defined
1473 || h
->root
.type
== bfd_link_hash_defweak
)
1475 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1476 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1477 && sym_sec
->output_section
!= bfd_abs_section_ptr
1478 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1479 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1480 || htab
->params
->non_overlay_stubs
))
1482 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1489 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1491 /* Symbols starting with _SPUEAR_ need a stub because they may be
1492 invoked by the PPU. */
1493 struct bfd_link_info
*info
= inf
;
1494 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1497 if ((h
->root
.type
== bfd_link_hash_defined
1498 || h
->root
.type
== bfd_link_hash_defweak
)
1500 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1501 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1502 && sym_sec
->output_section
!= bfd_abs_section_ptr
1503 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1504 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1505 || htab
->params
->non_overlay_stubs
))
1507 return build_stub (info
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1508 h
->root
.u
.def
.value
, sym_sec
);
1514 /* Size or build stubs. */
1517 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1519 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1522 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1524 extern const bfd_target bfd_elf32_spu_vec
;
1525 Elf_Internal_Shdr
*symtab_hdr
;
1527 Elf_Internal_Sym
*local_syms
= NULL
;
1529 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1532 /* We'll need the symbol table in a second. */
1533 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1534 if (symtab_hdr
->sh_info
== 0)
1537 /* Walk over each section attached to the input bfd. */
1538 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1540 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1542 /* If there aren't any relocs, then there's nothing more to do. */
1543 if ((isec
->flags
& SEC_RELOC
) == 0
1544 || isec
->reloc_count
== 0)
1547 if (!maybe_needs_stubs (isec
))
1550 /* Get the relocs. */
1551 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1553 if (internal_relocs
== NULL
)
1554 goto error_ret_free_local
;
1556 /* Now examine each relocation. */
1557 irela
= internal_relocs
;
1558 irelaend
= irela
+ isec
->reloc_count
;
1559 for (; irela
< irelaend
; irela
++)
1561 enum elf_spu_reloc_type r_type
;
1562 unsigned int r_indx
;
1564 Elf_Internal_Sym
*sym
;
1565 struct elf_link_hash_entry
*h
;
1566 enum _stub_type stub_type
;
1568 r_type
= ELF32_R_TYPE (irela
->r_info
);
1569 r_indx
= ELF32_R_SYM (irela
->r_info
);
1571 if (r_type
>= R_SPU_max
)
1573 bfd_set_error (bfd_error_bad_value
);
1574 error_ret_free_internal
:
1575 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1576 free (internal_relocs
);
1577 error_ret_free_local
:
1578 if (local_syms
!= NULL
1579 && (symtab_hdr
->contents
1580 != (unsigned char *) local_syms
))
1585 /* Determine the reloc target section. */
1586 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1587 goto error_ret_free_internal
;
1589 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1591 if (stub_type
== no_stub
)
1593 else if (stub_type
== stub_error
)
1594 goto error_ret_free_internal
;
1596 if (htab
->stub_count
== NULL
)
1599 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1600 htab
->stub_count
= bfd_zmalloc (amt
);
1601 if (htab
->stub_count
== NULL
)
1602 goto error_ret_free_internal
;
1607 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1608 goto error_ret_free_internal
;
1615 dest
= h
->root
.u
.def
.value
;
1617 dest
= sym
->st_value
;
1618 dest
+= irela
->r_addend
;
1619 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1621 goto error_ret_free_internal
;
1625 /* We're done with the internal relocs, free them. */
1626 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1627 free (internal_relocs
);
1630 if (local_syms
!= NULL
1631 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1633 if (!info
->keep_memory
)
1636 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1643 /* Allocate space for overlay call and return stubs.
1644 Return 0 on error, 1 if no overlays, 2 otherwise. */
1647 spu_elf_size_stubs (struct bfd_link_info
*info
)
1649 struct spu_link_hash_table
*htab
;
1656 if (!process_stubs (info
, FALSE
))
1659 htab
= spu_hash_table (info
);
1660 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1664 ibfd
= info
->input_bfds
;
1665 if (htab
->stub_count
!= NULL
)
1667 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1668 htab
->stub_sec
= bfd_zmalloc (amt
);
1669 if (htab
->stub_sec
== NULL
)
1672 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1673 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1674 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1675 htab
->stub_sec
[0] = stub
;
1677 || !bfd_set_section_alignment (ibfd
, stub
,
1678 ovl_stub_size_log2 (htab
->params
)))
1680 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
);
1681 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1682 /* Extra space for linked list entries. */
1683 stub
->size
+= htab
->stub_count
[0] * 16;
1685 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1687 asection
*osec
= htab
->ovl_sec
[i
];
1688 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1689 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1690 htab
->stub_sec
[ovl
] = stub
;
1692 || !bfd_set_section_alignment (ibfd
, stub
,
1693 ovl_stub_size_log2 (htab
->params
)))
1695 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
);
1699 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1701 /* Space for icache manager tables.
1702 a) Tag array, one quadword per cache line.
1703 b) Rewrite "to" list, one quadword per cache line.
1704 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1705 a power-of-two number of full quadwords) per cache line. */
1708 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1709 if (htab
->ovtab
== NULL
1710 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1713 htab
->ovtab
->size
= (16 + 16 + (16 << htab
->fromelem_size_log2
))
1714 << htab
->num_lines_log2
;
1716 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1717 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1718 if (htab
->init
== NULL
1719 || !bfd_set_section_alignment (ibfd
, htab
->init
, 4))
1722 htab
->init
->size
= 16;
1724 else if (htab
->stub_count
== NULL
)
1728 /* htab->ovtab consists of two arrays.
1738 . } _ovly_buf_table[];
1741 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1742 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1743 if (htab
->ovtab
== NULL
1744 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1747 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1750 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1751 if (htab
->toe
== NULL
1752 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1754 htab
->toe
->size
= 16;
1759 /* Called from ld to place overlay manager data sections. This is done
1760 after the overlay manager itself is loaded, mainly so that the
1761 linker's htab->init section is placed after any other .ovl.init
1765 spu_elf_place_overlay_data (struct bfd_link_info
*info
)
1767 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1770 if (htab
->stub_sec
!= NULL
)
1772 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[0], NULL
, ".text");
1774 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1776 asection
*osec
= htab
->ovl_sec
[i
];
1777 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1778 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[ovl
], osec
, NULL
);
1782 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1783 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1785 if (htab
->ovtab
!= NULL
)
1787 const char *ovout
= ".data";
1788 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1790 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1793 if (htab
->toe
!= NULL
)
1794 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1797 /* Functions to handle embedded spu_ovl.o object. */
1800 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1806 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1812 struct _ovl_stream
*os
;
1816 os
= (struct _ovl_stream
*) stream
;
1817 max
= (const char *) os
->end
- (const char *) os
->start
;
1819 if ((ufile_ptr
) offset
>= max
)
1823 if (count
> max
- offset
)
1824 count
= max
- offset
;
1826 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1831 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1833 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1840 return *ovl_bfd
!= NULL
;
1844 overlay_index (asection
*sec
)
1847 || sec
->output_section
== bfd_abs_section_ptr
)
1849 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1852 /* Define an STT_OBJECT symbol. */
1854 static struct elf_link_hash_entry
*
1855 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1857 struct elf_link_hash_entry
*h
;
1859 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1863 if (h
->root
.type
!= bfd_link_hash_defined
1866 h
->root
.type
= bfd_link_hash_defined
;
1867 h
->root
.u
.def
.section
= htab
->ovtab
;
1868 h
->type
= STT_OBJECT
;
1871 h
->ref_regular_nonweak
= 1;
1874 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1876 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1877 h
->root
.u
.def
.section
->owner
,
1878 h
->root
.root
.string
);
1879 bfd_set_error (bfd_error_bad_value
);
1884 (*_bfd_error_handler
) (_("you are not allowed to define %s in a script"),
1885 h
->root
.root
.string
);
1886 bfd_set_error (bfd_error_bad_value
);
1893 /* Fill in all stubs and the overlay tables. */
1896 spu_elf_build_stubs (struct bfd_link_info
*info
)
1898 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1899 struct elf_link_hash_entry
*h
;
1905 if (htab
->num_overlays
!= 0)
1907 for (i
= 0; i
< 2; i
++)
1909 h
= htab
->ovly_entry
[i
];
1911 && (h
->root
.type
== bfd_link_hash_defined
1912 || h
->root
.type
== bfd_link_hash_defweak
)
1915 s
= h
->root
.u
.def
.section
->output_section
;
1916 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1918 (*_bfd_error_handler
) (_("%s in overlay section"),
1919 h
->root
.root
.string
);
1920 bfd_set_error (bfd_error_bad_value
);
1927 if (htab
->stub_sec
!= NULL
)
1929 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1930 if (htab
->stub_sec
[i
]->size
!= 0)
1932 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1933 htab
->stub_sec
[i
]->size
);
1934 if (htab
->stub_sec
[i
]->contents
== NULL
)
1936 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1937 htab
->stub_sec
[i
]->size
= 0;
1940 /* Fill in all the stubs. */
1941 process_stubs (info
, TRUE
);
1942 if (!htab
->stub_err
)
1943 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1947 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1948 bfd_set_error (bfd_error_bad_value
);
1952 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1954 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1956 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1957 bfd_set_error (bfd_error_bad_value
);
1960 htab
->stub_sec
[i
]->rawsize
= 0;
1964 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
1967 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1968 if (htab
->ovtab
->contents
== NULL
)
1971 p
= htab
->ovtab
->contents
;
1972 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1976 h
= define_ovtab_symbol (htab
, "__icache_tag_array");
1979 h
->root
.u
.def
.value
= 0;
1980 h
->size
= 16 << htab
->num_lines_log2
;
1983 h
= define_ovtab_symbol (htab
, "__icache_tag_array_size");
1986 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
1987 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1989 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to");
1992 h
->root
.u
.def
.value
= off
;
1993 h
->size
= 16 << htab
->num_lines_log2
;
1996 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to_size");
1999 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
2000 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2002 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from");
2005 h
->root
.u
.def
.value
= off
;
2006 h
->size
= 16 << (htab
->fromelem_size_log2
+ htab
->num_lines_log2
);
2009 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from_size");
2012 h
->root
.u
.def
.value
= 16 << (htab
->fromelem_size_log2
2013 + htab
->num_lines_log2
);
2014 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2016 h
= define_ovtab_symbol (htab
, "__icache_log2_fromelemsize");
2019 h
->root
.u
.def
.value
= htab
->fromelem_size_log2
;
2020 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2022 h
= define_ovtab_symbol (htab
, "__icache_base");
2025 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
2026 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2027 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
2029 h
= define_ovtab_symbol (htab
, "__icache_linesize");
2032 h
->root
.u
.def
.value
= 1 << htab
->line_size_log2
;
2033 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2035 h
= define_ovtab_symbol (htab
, "__icache_log2_linesize");
2038 h
->root
.u
.def
.value
= htab
->line_size_log2
;
2039 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2041 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
2044 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
2045 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2047 h
= define_ovtab_symbol (htab
, "__icache_cachesize");
2050 h
->root
.u
.def
.value
= 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
);
2051 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2053 h
= define_ovtab_symbol (htab
, "__icache_log2_cachesize");
2056 h
->root
.u
.def
.value
= htab
->num_lines_log2
+ htab
->line_size_log2
;
2057 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2059 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_cachesize");
2062 h
->root
.u
.def
.value
= -(htab
->num_lines_log2
+ htab
->line_size_log2
);
2063 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2065 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
2067 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
2069 if (htab
->init
->contents
== NULL
)
2072 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
2075 h
->root
.u
.def
.value
= 0;
2076 h
->root
.u
.def
.section
= htab
->init
;
2082 /* Write out _ovly_table. */
2083 /* set low bit of .size to mark non-overlay area as present. */
2085 obfd
= htab
->ovtab
->output_section
->owner
;
2086 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
2088 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
2092 unsigned long off
= ovl_index
* 16;
2093 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
2095 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
2096 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
2098 /* file_off written later in spu_elf_modify_program_headers. */
2099 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2103 h
= define_ovtab_symbol (htab
, "_ovly_table");
2106 h
->root
.u
.def
.value
= 16;
2107 h
->size
= htab
->num_overlays
* 16;
2109 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2112 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2115 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2118 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2119 h
->size
= htab
->num_buf
* 4;
2121 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2124 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2128 h
= define_ovtab_symbol (htab
, "_EAR_");
2131 h
->root
.u
.def
.section
= htab
->toe
;
2132 h
->root
.u
.def
.value
= 0;
2138 /* Check that all loadable section VMAs lie in the range
2139 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2142 spu_elf_check_vma (struct bfd_link_info
*info
)
2144 struct elf_segment_map
*m
;
2146 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2147 bfd
*abfd
= info
->output_bfd
;
2148 bfd_vma hi
= htab
->params
->local_store_hi
;
2149 bfd_vma lo
= htab
->params
->local_store_lo
;
2151 htab
->local_store
= hi
+ 1 - lo
;
2153 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
2154 if (m
->p_type
== PT_LOAD
)
2155 for (i
= 0; i
< m
->count
; i
++)
2156 if (m
->sections
[i
]->size
!= 0
2157 && (m
->sections
[i
]->vma
< lo
2158 || m
->sections
[i
]->vma
> hi
2159 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2160 return m
->sections
[i
];
2165 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2166 Search for stack adjusting insns, and return the sp delta.
2167 If a store of lr is found save the instruction offset to *LR_STORE.
2168 If a stack adjusting instruction is found, save that offset to
2172 find_function_stack_adjust (asection
*sec
,
2179 memset (reg
, 0, sizeof (reg
));
2180 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
2182 unsigned char buf
[4];
2186 /* Assume no relocs on stack adjusing insns. */
2187 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
2191 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
2193 if (buf
[0] == 0x24 /* stqd */)
2195 if (rt
== 0 /* lr */ && ra
== 1 /* sp */)
2200 /* Partly decoded immediate field. */
2201 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
2203 if (buf
[0] == 0x1c /* ai */)
2206 imm
= (imm
^ 0x200) - 0x200;
2207 reg
[rt
] = reg
[ra
] + imm
;
2209 if (rt
== 1 /* sp */)
2213 *sp_adjust
= offset
;
2217 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
2219 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2221 reg
[rt
] = reg
[ra
] + reg
[rb
];
2226 *sp_adjust
= offset
;
2230 else if (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */)
2232 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2234 reg
[rt
] = reg
[rb
] - reg
[ra
];
2239 *sp_adjust
= offset
;
2243 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2245 if (buf
[0] >= 0x42 /* ila */)
2246 imm
|= (buf
[0] & 1) << 17;
2251 if (buf
[0] == 0x40 /* il */)
2253 if ((buf
[1] & 0x80) == 0)
2255 imm
= (imm
^ 0x8000) - 0x8000;
2257 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
2263 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
2265 reg
[rt
] |= imm
& 0xffff;
2268 else if (buf
[0] == 0x04 /* ori */)
2271 imm
= (imm
^ 0x200) - 0x200;
2272 reg
[rt
] = reg
[ra
] | imm
;
2275 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
2277 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
2278 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
2279 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
2280 | ((imm
& 0x1000) ? 0x000000ff : 0));
2283 else if (buf
[0] == 0x16 /* andbi */)
2289 reg
[rt
] = reg
[ra
] & imm
;
2292 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
2294 /* Used in pic reg load. Say rt is trashed. Won't be used
2295 in stack adjust, but we need to continue past this branch. */
2299 else if (is_branch (buf
) || is_indirect_branch (buf
))
2300 /* If we hit a branch then we must be out of the prologue. */
2307 /* qsort predicate to sort symbols by section and value. */
2309 static Elf_Internal_Sym
*sort_syms_syms
;
2310 static asection
**sort_syms_psecs
;
2313 sort_syms (const void *a
, const void *b
)
2315 Elf_Internal_Sym
*const *s1
= a
;
2316 Elf_Internal_Sym
*const *s2
= b
;
2317 asection
*sec1
,*sec2
;
2318 bfd_signed_vma delta
;
2320 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
2321 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
2324 return sec1
->index
- sec2
->index
;
2326 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
2328 return delta
< 0 ? -1 : 1;
2330 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
2332 return delta
< 0 ? -1 : 1;
2334 return *s1
< *s2
? -1 : 1;
2337 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2338 entries for section SEC. */
2340 static struct spu_elf_stack_info
*
2341 alloc_stack_info (asection
*sec
, int max_fun
)
2343 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2346 amt
= sizeof (struct spu_elf_stack_info
);
2347 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
2348 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
2349 if (sec_data
->u
.i
.stack_info
!= NULL
)
2350 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
2351 return sec_data
->u
.i
.stack_info
;
2354 /* Add a new struct function_info describing a (part of a) function
2355 starting at SYM_H. Keep the array sorted by address. */
2357 static struct function_info
*
2358 maybe_insert_function (asection
*sec
,
2361 bfd_boolean is_func
)
2363 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2364 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2370 sinfo
= alloc_stack_info (sec
, 20);
2377 Elf_Internal_Sym
*sym
= sym_h
;
2378 off
= sym
->st_value
;
2379 size
= sym
->st_size
;
2383 struct elf_link_hash_entry
*h
= sym_h
;
2384 off
= h
->root
.u
.def
.value
;
2388 for (i
= sinfo
->num_fun
; --i
>= 0; )
2389 if (sinfo
->fun
[i
].lo
<= off
)
2394 /* Don't add another entry for an alias, but do update some
2396 if (sinfo
->fun
[i
].lo
== off
)
2398 /* Prefer globals over local syms. */
2399 if (global
&& !sinfo
->fun
[i
].global
)
2401 sinfo
->fun
[i
].global
= TRUE
;
2402 sinfo
->fun
[i
].u
.h
= sym_h
;
2405 sinfo
->fun
[i
].is_func
= TRUE
;
2406 return &sinfo
->fun
[i
];
2408 /* Ignore a zero-size symbol inside an existing function. */
2409 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
2410 return &sinfo
->fun
[i
];
2413 if (sinfo
->num_fun
>= sinfo
->max_fun
)
2415 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
2416 bfd_size_type old
= amt
;
2418 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2419 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
2420 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2421 sinfo
= bfd_realloc (sinfo
, amt
);
2424 memset ((char *) sinfo
+ old
, 0, amt
- old
);
2425 sec_data
->u
.i
.stack_info
= sinfo
;
2428 if (++i
< sinfo
->num_fun
)
2429 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
2430 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
2431 sinfo
->fun
[i
].is_func
= is_func
;
2432 sinfo
->fun
[i
].global
= global
;
2433 sinfo
->fun
[i
].sec
= sec
;
2435 sinfo
->fun
[i
].u
.h
= sym_h
;
2437 sinfo
->fun
[i
].u
.sym
= sym_h
;
2438 sinfo
->fun
[i
].lo
= off
;
2439 sinfo
->fun
[i
].hi
= off
+ size
;
2440 sinfo
->fun
[i
].lr_store
= -1;
2441 sinfo
->fun
[i
].sp_adjust
= -1;
2442 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
,
2443 &sinfo
->fun
[i
].lr_store
,
2444 &sinfo
->fun
[i
].sp_adjust
);
2445 sinfo
->num_fun
+= 1;
2446 return &sinfo
->fun
[i
];
2449 /* Return the name of FUN. */
2452 func_name (struct function_info
*fun
)
2456 Elf_Internal_Shdr
*symtab_hdr
;
2458 while (fun
->start
!= NULL
)
2462 return fun
->u
.h
->root
.root
.string
;
2465 if (fun
->u
.sym
->st_name
== 0)
2467 size_t len
= strlen (sec
->name
);
2468 char *name
= bfd_malloc (len
+ 10);
2471 sprintf (name
, "%s+%lx", sec
->name
,
2472 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
2476 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2477 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
2480 /* Read the instruction at OFF in SEC. Return true iff the instruction
2481 is a nop, lnop, or stop 0 (all zero insn). */
2484 is_nop (asection
*sec
, bfd_vma off
)
2486 unsigned char insn
[4];
2488 if (off
+ 4 > sec
->size
2489 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
2491 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
2493 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
2498 /* Extend the range of FUN to cover nop padding up to LIMIT.
2499 Return TRUE iff some instruction other than a NOP was found. */
2502 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
2504 bfd_vma off
= (fun
->hi
+ 3) & -4;
2506 while (off
< limit
&& is_nop (fun
->sec
, off
))
2517 /* Check and fix overlapping function ranges. Return TRUE iff there
2518 are gaps in the current info we have about functions in SEC. */
2521 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
2523 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2524 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2526 bfd_boolean gaps
= FALSE
;
2531 for (i
= 1; i
< sinfo
->num_fun
; i
++)
2532 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
2534 /* Fix overlapping symbols. */
2535 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
2536 const char *f2
= func_name (&sinfo
->fun
[i
]);
2538 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
2539 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
2541 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2544 if (sinfo
->num_fun
== 0)
2548 if (sinfo
->fun
[0].lo
!= 0)
2550 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2552 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2554 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2555 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2557 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2563 /* Search current function info for a function that contains address
2564 OFFSET in section SEC. */
2566 static struct function_info
*
2567 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2569 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2570 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2574 hi
= sinfo
->num_fun
;
2577 mid
= (lo
+ hi
) / 2;
2578 if (offset
< sinfo
->fun
[mid
].lo
)
2580 else if (offset
>= sinfo
->fun
[mid
].hi
)
2583 return &sinfo
->fun
[mid
];
2585 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2587 bfd_set_error (bfd_error_bad_value
);
2591 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2592 if CALLEE was new. If this function return FALSE, CALLEE should
2596 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2598 struct call_info
**pp
, *p
;
2600 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2601 if (p
->fun
== callee
->fun
)
2603 /* Tail calls use less stack than normal calls. Retain entry
2604 for normal call over one for tail call. */
2605 p
->is_tail
&= callee
->is_tail
;
2608 p
->fun
->start
= NULL
;
2609 p
->fun
->is_func
= TRUE
;
2611 p
->count
+= callee
->count
;
2612 /* Reorder list so most recent call is first. */
2614 p
->next
= caller
->call_list
;
2615 caller
->call_list
= p
;
2618 callee
->next
= caller
->call_list
;
2619 caller
->call_list
= callee
;
2623 /* Copy CALL and insert the copy into CALLER. */
2626 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2628 struct call_info
*callee
;
2629 callee
= bfd_malloc (sizeof (*callee
));
2633 if (!insert_callee (caller
, callee
))
2638 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2639 overlay stub sections. */
2642 interesting_section (asection
*s
)
2644 return (s
->output_section
!= bfd_abs_section_ptr
2645 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2646 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2650 /* Rummage through the relocs for SEC, looking for function calls.
2651 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2652 mark destination symbols on calls as being functions. Also
2653 look at branches, which may be tail calls or go to hot/cold
2654 section part of same function. */
2657 mark_functions_via_relocs (asection
*sec
,
2658 struct bfd_link_info
*info
,
2661 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2662 Elf_Internal_Shdr
*symtab_hdr
;
2664 unsigned int priority
= 0;
2665 static bfd_boolean warned
;
2667 if (!interesting_section (sec
)
2668 || sec
->reloc_count
== 0)
2671 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2673 if (internal_relocs
== NULL
)
2676 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2677 psyms
= &symtab_hdr
->contents
;
2678 irela
= internal_relocs
;
2679 irelaend
= irela
+ sec
->reloc_count
;
2680 for (; irela
< irelaend
; irela
++)
2682 enum elf_spu_reloc_type r_type
;
2683 unsigned int r_indx
;
2685 Elf_Internal_Sym
*sym
;
2686 struct elf_link_hash_entry
*h
;
2688 bfd_boolean nonbranch
, is_call
;
2689 struct function_info
*caller
;
2690 struct call_info
*callee
;
2692 r_type
= ELF32_R_TYPE (irela
->r_info
);
2693 nonbranch
= r_type
!= R_SPU_REL16
&& r_type
!= R_SPU_ADDR16
;
2695 r_indx
= ELF32_R_SYM (irela
->r_info
);
2696 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2700 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2706 unsigned char insn
[4];
2708 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2709 irela
->r_offset
, 4))
2711 if (is_branch (insn
))
2713 is_call
= (insn
[0] & 0xfd) == 0x31;
2714 priority
= insn
[1] & 0x0f;
2716 priority
|= insn
[2];
2718 priority
|= insn
[3];
2720 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2721 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2724 info
->callbacks
->einfo
2725 (_("%B(%A+0x%v): call to non-code section"
2726 " %B(%A), analysis incomplete\n"),
2727 sec
->owner
, sec
, irela
->r_offset
,
2728 sym_sec
->owner
, sym_sec
);
2743 /* For --auto-overlay, count possible stubs we need for
2744 function pointer references. */
2745 unsigned int sym_type
;
2749 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2750 if (sym_type
== STT_FUNC
)
2752 if (call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2753 spu_hash_table (info
)->non_ovly_stub
+= 1;
2754 /* If the symbol type is STT_FUNC then this must be a
2755 function pointer initialisation. */
2758 /* Ignore data references. */
2759 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2760 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2762 /* Otherwise we probably have a jump table reloc for
2763 a switch statement or some other reference to a
2768 val
= h
->root
.u
.def
.value
;
2770 val
= sym
->st_value
;
2771 val
+= irela
->r_addend
;
2775 struct function_info
*fun
;
2777 if (irela
->r_addend
!= 0)
2779 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2782 fake
->st_value
= val
;
2784 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2788 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2790 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2793 if (irela
->r_addend
!= 0
2794 && fun
->u
.sym
!= sym
)
2799 caller
= find_function (sec
, irela
->r_offset
, info
);
2802 callee
= bfd_malloc (sizeof *callee
);
2806 callee
->fun
= find_function (sym_sec
, val
, info
);
2807 if (callee
->fun
== NULL
)
2809 callee
->is_tail
= !is_call
;
2810 callee
->is_pasted
= FALSE
;
2811 callee
->broken_cycle
= FALSE
;
2812 callee
->priority
= priority
;
2813 callee
->count
= nonbranch
? 0 : 1;
2814 if (callee
->fun
->last_caller
!= sec
)
2816 callee
->fun
->last_caller
= sec
;
2817 callee
->fun
->call_count
+= 1;
2819 if (!insert_callee (caller
, callee
))
2822 && !callee
->fun
->is_func
2823 && callee
->fun
->stack
== 0)
2825 /* This is either a tail call or a branch from one part of
2826 the function to another, ie. hot/cold section. If the
2827 destination has been called by some other function then
2828 it is a separate function. We also assume that functions
2829 are not split across input files. */
2830 if (sec
->owner
!= sym_sec
->owner
)
2832 callee
->fun
->start
= NULL
;
2833 callee
->fun
->is_func
= TRUE
;
2835 else if (callee
->fun
->start
== NULL
)
2837 struct function_info
*caller_start
= caller
;
2838 while (caller_start
->start
)
2839 caller_start
= caller_start
->start
;
2841 if (caller_start
!= callee
->fun
)
2842 callee
->fun
->start
= caller_start
;
2846 struct function_info
*callee_start
;
2847 struct function_info
*caller_start
;
2848 callee_start
= callee
->fun
;
2849 while (callee_start
->start
)
2850 callee_start
= callee_start
->start
;
2851 caller_start
= caller
;
2852 while (caller_start
->start
)
2853 caller_start
= caller_start
->start
;
2854 if (caller_start
!= callee_start
)
2856 callee
->fun
->start
= NULL
;
2857 callee
->fun
->is_func
= TRUE
;
2866 /* Handle something like .init or .fini, which has a piece of a function.
2867 These sections are pasted together to form a single function. */
2870 pasted_function (asection
*sec
)
2872 struct bfd_link_order
*l
;
2873 struct _spu_elf_section_data
*sec_data
;
2874 struct spu_elf_stack_info
*sinfo
;
2875 Elf_Internal_Sym
*fake
;
2876 struct function_info
*fun
, *fun_start
;
2878 fake
= bfd_zmalloc (sizeof (*fake
));
2882 fake
->st_size
= sec
->size
;
2884 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2885 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2889 /* Find a function immediately preceding this section. */
2891 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2893 if (l
->u
.indirect
.section
== sec
)
2895 if (fun_start
!= NULL
)
2897 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2901 fun
->start
= fun_start
;
2903 callee
->is_tail
= TRUE
;
2904 callee
->is_pasted
= TRUE
;
2905 callee
->broken_cycle
= FALSE
;
2906 callee
->priority
= 0;
2908 if (!insert_callee (fun_start
, callee
))
2914 if (l
->type
== bfd_indirect_link_order
2915 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2916 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2917 && sinfo
->num_fun
!= 0)
2918 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2921 /* Don't return an error if we did not find a function preceding this
2922 section. The section may have incorrect flags. */
2926 /* Map address ranges in code sections to functions. */
2929 discover_functions (struct bfd_link_info
*info
)
2933 Elf_Internal_Sym
***psym_arr
;
2934 asection
***sec_arr
;
2935 bfd_boolean gaps
= FALSE
;
2938 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2941 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2942 if (psym_arr
== NULL
)
2944 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2945 if (sec_arr
== NULL
)
2948 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2950 ibfd
= ibfd
->link_next
, bfd_idx
++)
2952 extern const bfd_target bfd_elf32_spu_vec
;
2953 Elf_Internal_Shdr
*symtab_hdr
;
2956 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2957 asection
**psecs
, **p
;
2959 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2962 /* Read all the symbols. */
2963 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2964 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2968 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2969 if (interesting_section (sec
))
2977 if (symtab_hdr
->contents
!= NULL
)
2979 /* Don't use cached symbols since the generic ELF linker
2980 code only reads local symbols, and we need globals too. */
2981 free (symtab_hdr
->contents
);
2982 symtab_hdr
->contents
= NULL
;
2984 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2986 symtab_hdr
->contents
= (void *) syms
;
2990 /* Select defined function symbols that are going to be output. */
2991 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2994 psym_arr
[bfd_idx
] = psyms
;
2995 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2998 sec_arr
[bfd_idx
] = psecs
;
2999 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
3000 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
3001 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3005 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
3006 if (s
!= NULL
&& interesting_section (s
))
3009 symcount
= psy
- psyms
;
3012 /* Sort them by section and offset within section. */
3013 sort_syms_syms
= syms
;
3014 sort_syms_psecs
= psecs
;
3015 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
3017 /* Now inspect the function symbols. */
3018 for (psy
= psyms
; psy
< psyms
+ symcount
; )
3020 asection
*s
= psecs
[*psy
- syms
];
3021 Elf_Internal_Sym
**psy2
;
3023 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
3024 if (psecs
[*psy2
- syms
] != s
)
3027 if (!alloc_stack_info (s
, psy2
- psy
))
3032 /* First install info about properly typed and sized functions.
3033 In an ideal world this will cover all code sections, except
3034 when partitioning functions into hot and cold sections,
3035 and the horrible pasted together .init and .fini functions. */
3036 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
3039 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3041 asection
*s
= psecs
[sy
- syms
];
3042 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
3047 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3048 if (interesting_section (sec
))
3049 gaps
|= check_function_ranges (sec
, info
);
3054 /* See if we can discover more function symbols by looking at
3056 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3058 ibfd
= ibfd
->link_next
, bfd_idx
++)
3062 if (psym_arr
[bfd_idx
] == NULL
)
3065 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3066 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
3070 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3072 ibfd
= ibfd
->link_next
, bfd_idx
++)
3074 Elf_Internal_Shdr
*symtab_hdr
;
3076 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
3079 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
3082 psecs
= sec_arr
[bfd_idx
];
3084 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3085 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
3088 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3089 if (interesting_section (sec
))
3090 gaps
|= check_function_ranges (sec
, info
);
3094 /* Finally, install all globals. */
3095 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
3099 s
= psecs
[sy
- syms
];
3101 /* Global syms might be improperly typed functions. */
3102 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
3103 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
3105 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
3111 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3113 extern const bfd_target bfd_elf32_spu_vec
;
3116 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3119 /* Some of the symbols we've installed as marking the
3120 beginning of functions may have a size of zero. Extend
3121 the range of such functions to the beginning of the
3122 next symbol of interest. */
3123 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3124 if (interesting_section (sec
))
3126 struct _spu_elf_section_data
*sec_data
;
3127 struct spu_elf_stack_info
*sinfo
;
3129 sec_data
= spu_elf_section_data (sec
);
3130 sinfo
= sec_data
->u
.i
.stack_info
;
3131 if (sinfo
!= NULL
&& sinfo
->num_fun
!= 0)
3134 bfd_vma hi
= sec
->size
;
3136 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3138 sinfo
->fun
[fun_idx
].hi
= hi
;
3139 hi
= sinfo
->fun
[fun_idx
].lo
;
3142 sinfo
->fun
[0].lo
= 0;
3144 /* No symbols in this section. Must be .init or .fini
3145 or something similar. */
3146 else if (!pasted_function (sec
))
3152 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3154 ibfd
= ibfd
->link_next
, bfd_idx
++)
3156 if (psym_arr
[bfd_idx
] == NULL
)
3159 free (psym_arr
[bfd_idx
]);
3160 free (sec_arr
[bfd_idx
]);
3169 /* Iterate over all function_info we have collected, calling DOIT on
3170 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3174 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
3175 struct bfd_link_info
*,
3177 struct bfd_link_info
*info
,
3183 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3185 extern const bfd_target bfd_elf32_spu_vec
;
3188 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3191 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3193 struct _spu_elf_section_data
*sec_data
;
3194 struct spu_elf_stack_info
*sinfo
;
3196 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3197 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3200 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3201 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
3202 if (!doit (&sinfo
->fun
[i
], info
, param
))
3210 /* Transfer call info attached to struct function_info entries for
3211 all of a given function's sections to the first entry. */
3214 transfer_calls (struct function_info
*fun
,
3215 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3216 void *param ATTRIBUTE_UNUSED
)
3218 struct function_info
*start
= fun
->start
;
3222 struct call_info
*call
, *call_next
;
3224 while (start
->start
!= NULL
)
3225 start
= start
->start
;
3226 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
3228 call_next
= call
->next
;
3229 if (!insert_callee (start
, call
))
3232 fun
->call_list
= NULL
;
3237 /* Mark nodes in the call graph that are called by some other node. */
3240 mark_non_root (struct function_info
*fun
,
3241 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3242 void *param ATTRIBUTE_UNUSED
)
3244 struct call_info
*call
;
3249 for (call
= fun
->call_list
; call
; call
= call
->next
)
3251 call
->fun
->non_root
= TRUE
;
3252 mark_non_root (call
->fun
, 0, 0);
3257 /* Remove cycles from the call graph. Set depth of nodes. */
3260 remove_cycles (struct function_info
*fun
,
3261 struct bfd_link_info
*info
,
3264 struct call_info
**callp
, *call
;
3265 unsigned int depth
= *(unsigned int *) param
;
3266 unsigned int max_depth
= depth
;
3270 fun
->marking
= TRUE
;
3272 callp
= &fun
->call_list
;
3273 while ((call
= *callp
) != NULL
)
3275 call
->max_depth
= depth
+ !call
->is_pasted
;
3276 if (!call
->fun
->visit2
)
3278 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
3280 if (max_depth
< call
->max_depth
)
3281 max_depth
= call
->max_depth
;
3283 else if (call
->fun
->marking
)
3285 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3287 if (!htab
->params
->auto_overlay
3288 && htab
->params
->stack_analysis
)
3290 const char *f1
= func_name (fun
);
3291 const char *f2
= func_name (call
->fun
);
3293 info
->callbacks
->info (_("Stack analysis will ignore the call "
3298 call
->broken_cycle
= TRUE
;
3300 callp
= &call
->next
;
3302 fun
->marking
= FALSE
;
3303 *(unsigned int *) param
= max_depth
;
3307 /* Check that we actually visited all nodes in remove_cycles. If we
3308 didn't, then there is some cycle in the call graph not attached to
3309 any root node. Arbitrarily choose a node in the cycle as a new
3310 root and break the cycle. */
3313 mark_detached_root (struct function_info
*fun
,
3314 struct bfd_link_info
*info
,
3319 fun
->non_root
= FALSE
;
3320 *(unsigned int *) param
= 0;
3321 return remove_cycles (fun
, info
, param
);
3324 /* Populate call_list for each function. */
3327 build_call_tree (struct bfd_link_info
*info
)
3332 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3334 extern const bfd_target bfd_elf32_spu_vec
;
3337 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3340 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3341 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
3345 /* Transfer call info from hot/cold section part of function
3347 if (!spu_hash_table (info
)->params
->auto_overlay
3348 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
3351 /* Find the call graph root(s). */
3352 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
3355 /* Remove cycles from the call graph. We start from the root node(s)
3356 so that we break cycles in a reasonable place. */
3358 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
3361 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
3364 /* qsort predicate to sort calls by priority, max_depth then count. */
3367 sort_calls (const void *a
, const void *b
)
3369 struct call_info
*const *c1
= a
;
3370 struct call_info
*const *c2
= b
;
3373 delta
= (*c2
)->priority
- (*c1
)->priority
;
3377 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
3381 delta
= (*c2
)->count
- (*c1
)->count
;
3385 return (char *) c1
- (char *) c2
;
3389 unsigned int max_overlay_size
;
3392 /* Set linker_mark and gc_mark on any sections that we will put in
3393 overlays. These flags are used by the generic ELF linker, but we
3394 won't be continuing on to bfd_elf_final_link so it is OK to use
3395 them. linker_mark is clear before we get here. Set segment_mark
3396 on sections that are part of a pasted function (excluding the last
3399 Set up function rodata section if --overlay-rodata. We don't
3400 currently include merged string constant rodata sections since
3402 Sort the call graph so that the deepest nodes will be visited
3406 mark_overlay_section (struct function_info
*fun
,
3407 struct bfd_link_info
*info
,
3410 struct call_info
*call
;
3412 struct _mos_param
*mos_param
= param
;
3413 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3419 if (!fun
->sec
->linker_mark
3420 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3421 || htab
->params
->non_ia_text
3422 || strncmp (fun
->sec
->name
, ".text.ia.", 9) == 0
3423 || strcmp (fun
->sec
->name
, ".init") == 0
3424 || strcmp (fun
->sec
->name
, ".fini") == 0))
3428 fun
->sec
->linker_mark
= 1;
3429 fun
->sec
->gc_mark
= 1;
3430 fun
->sec
->segment_mark
= 0;
3431 /* Ensure SEC_CODE is set on this text section (it ought to
3432 be!), and SEC_CODE is clear on rodata sections. We use
3433 this flag to differentiate the two overlay section types. */
3434 fun
->sec
->flags
|= SEC_CODE
;
3436 size
= fun
->sec
->size
;
3437 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3441 /* Find the rodata section corresponding to this function's
3443 if (strcmp (fun
->sec
->name
, ".text") == 0)
3445 name
= bfd_malloc (sizeof (".rodata"));
3448 memcpy (name
, ".rodata", sizeof (".rodata"));
3450 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
3452 size_t len
= strlen (fun
->sec
->name
);
3453 name
= bfd_malloc (len
+ 3);
3456 memcpy (name
, ".rodata", sizeof (".rodata"));
3457 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3459 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
3461 size_t len
= strlen (fun
->sec
->name
) + 1;
3462 name
= bfd_malloc (len
);
3465 memcpy (name
, fun
->sec
->name
, len
);
3471 asection
*rodata
= NULL
;
3472 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3473 if (group_sec
== NULL
)
3474 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3476 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3478 if (strcmp (group_sec
->name
, name
) == 0)
3483 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3485 fun
->rodata
= rodata
;
3488 size
+= fun
->rodata
->size
;
3489 if (htab
->params
->line_size
!= 0
3490 && size
> htab
->params
->line_size
)
3492 size
-= fun
->rodata
->size
;
3497 fun
->rodata
->linker_mark
= 1;
3498 fun
->rodata
->gc_mark
= 1;
3499 fun
->rodata
->flags
&= ~SEC_CODE
;
3505 if (mos_param
->max_overlay_size
< size
)
3506 mos_param
->max_overlay_size
= size
;
3509 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3514 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3518 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3519 calls
[count
++] = call
;
3521 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3523 fun
->call_list
= NULL
;
3527 calls
[count
]->next
= fun
->call_list
;
3528 fun
->call_list
= calls
[count
];
3533 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3535 if (call
->is_pasted
)
3537 /* There can only be one is_pasted call per function_info. */
3538 BFD_ASSERT (!fun
->sec
->segment_mark
);
3539 fun
->sec
->segment_mark
= 1;
3541 if (!call
->broken_cycle
3542 && !mark_overlay_section (call
->fun
, info
, param
))
3546 /* Don't put entry code into an overlay. The overlay manager needs
3547 a stack! Also, don't mark .ovl.init as an overlay. */
3548 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3549 == info
->output_bfd
->start_address
3550 || strncmp (fun
->sec
->output_section
->name
, ".ovl.init", 9) == 0)
3552 fun
->sec
->linker_mark
= 0;
3553 if (fun
->rodata
!= NULL
)
3554 fun
->rodata
->linker_mark
= 0;
3559 /* If non-zero then unmark functions called from those within sections
3560 that we need to unmark. Unfortunately this isn't reliable since the
3561 call graph cannot know the destination of function pointer calls. */
3562 #define RECURSE_UNMARK 0
3565 asection
*exclude_input_section
;
3566 asection
*exclude_output_section
;
3567 unsigned long clearing
;
3570 /* Undo some of mark_overlay_section's work. */
3573 unmark_overlay_section (struct function_info
*fun
,
3574 struct bfd_link_info
*info
,
3577 struct call_info
*call
;
3578 struct _uos_param
*uos_param
= param
;
3579 unsigned int excluded
= 0;
3587 if (fun
->sec
== uos_param
->exclude_input_section
3588 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3592 uos_param
->clearing
+= excluded
;
3594 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3596 fun
->sec
->linker_mark
= 0;
3598 fun
->rodata
->linker_mark
= 0;
3601 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3602 if (!call
->broken_cycle
3603 && !unmark_overlay_section (call
->fun
, info
, param
))
3607 uos_param
->clearing
-= excluded
;
3612 unsigned int lib_size
;
3613 asection
**lib_sections
;
3616 /* Add sections we have marked as belonging to overlays to an array
3617 for consideration as non-overlay sections. The array consist of
3618 pairs of sections, (text,rodata), for functions in the call graph. */
3621 collect_lib_sections (struct function_info
*fun
,
3622 struct bfd_link_info
*info
,
3625 struct _cl_param
*lib_param
= param
;
3626 struct call_info
*call
;
3633 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3636 size
= fun
->sec
->size
;
3638 size
+= fun
->rodata
->size
;
3640 if (size
<= lib_param
->lib_size
)
3642 *lib_param
->lib_sections
++ = fun
->sec
;
3643 fun
->sec
->gc_mark
= 0;
3644 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3646 *lib_param
->lib_sections
++ = fun
->rodata
;
3647 fun
->rodata
->gc_mark
= 0;
3650 *lib_param
->lib_sections
++ = NULL
;
3653 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3654 if (!call
->broken_cycle
)
3655 collect_lib_sections (call
->fun
, info
, param
);
3660 /* qsort predicate to sort sections by call count. */
3663 sort_lib (const void *a
, const void *b
)
3665 asection
*const *s1
= a
;
3666 asection
*const *s2
= b
;
3667 struct _spu_elf_section_data
*sec_data
;
3668 struct spu_elf_stack_info
*sinfo
;
3672 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3673 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3676 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3677 delta
-= sinfo
->fun
[i
].call_count
;
3680 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3681 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3684 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3685 delta
+= sinfo
->fun
[i
].call_count
;
3694 /* Remove some sections from those marked to be in overlays. Choose
3695 those that are called from many places, likely library functions. */
3698 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3701 asection
**lib_sections
;
3702 unsigned int i
, lib_count
;
3703 struct _cl_param collect_lib_param
;
3704 struct function_info dummy_caller
;
3705 struct spu_link_hash_table
*htab
;
3707 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3709 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3711 extern const bfd_target bfd_elf32_spu_vec
;
3714 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3717 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3718 if (sec
->linker_mark
3719 && sec
->size
< lib_size
3720 && (sec
->flags
& SEC_CODE
) != 0)
3723 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3724 if (lib_sections
== NULL
)
3725 return (unsigned int) -1;
3726 collect_lib_param
.lib_size
= lib_size
;
3727 collect_lib_param
.lib_sections
= lib_sections
;
3728 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3730 return (unsigned int) -1;
3731 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3733 /* Sort sections so that those with the most calls are first. */
3735 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3737 htab
= spu_hash_table (info
);
3738 for (i
= 0; i
< lib_count
; i
++)
3740 unsigned int tmp
, stub_size
;
3742 struct _spu_elf_section_data
*sec_data
;
3743 struct spu_elf_stack_info
*sinfo
;
3745 sec
= lib_sections
[2 * i
];
3746 /* If this section is OK, its size must be less than lib_size. */
3748 /* If it has a rodata section, then add that too. */
3749 if (lib_sections
[2 * i
+ 1])
3750 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3751 /* Add any new overlay call stubs needed by the section. */
3754 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3755 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3758 struct call_info
*call
;
3760 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3761 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3762 if (call
->fun
->sec
->linker_mark
)
3764 struct call_info
*p
;
3765 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3766 if (p
->fun
== call
->fun
)
3769 stub_size
+= ovl_stub_size (htab
->params
);
3772 if (tmp
+ stub_size
< lib_size
)
3774 struct call_info
**pp
, *p
;
3776 /* This section fits. Mark it as non-overlay. */
3777 lib_sections
[2 * i
]->linker_mark
= 0;
3778 if (lib_sections
[2 * i
+ 1])
3779 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3780 lib_size
-= tmp
+ stub_size
;
3781 /* Call stubs to the section we just added are no longer
3783 pp
= &dummy_caller
.call_list
;
3784 while ((p
= *pp
) != NULL
)
3785 if (!p
->fun
->sec
->linker_mark
)
3787 lib_size
+= ovl_stub_size (htab
->params
);
3793 /* Add new call stubs to dummy_caller. */
3794 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3795 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3798 struct call_info
*call
;
3800 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3801 for (call
= sinfo
->fun
[k
].call_list
;
3804 if (call
->fun
->sec
->linker_mark
)
3806 struct call_info
*callee
;
3807 callee
= bfd_malloc (sizeof (*callee
));
3809 return (unsigned int) -1;
3811 if (!insert_callee (&dummy_caller
, callee
))
3817 while (dummy_caller
.call_list
!= NULL
)
3819 struct call_info
*call
= dummy_caller
.call_list
;
3820 dummy_caller
.call_list
= call
->next
;
3823 for (i
= 0; i
< 2 * lib_count
; i
++)
3824 if (lib_sections
[i
])
3825 lib_sections
[i
]->gc_mark
= 1;
3826 free (lib_sections
);
3830 /* Build an array of overlay sections. The deepest node's section is
3831 added first, then its parent node's section, then everything called
3832 from the parent section. The idea being to group sections to
3833 minimise calls between different overlays. */
3836 collect_overlays (struct function_info
*fun
,
3837 struct bfd_link_info
*info
,
3840 struct call_info
*call
;
3841 bfd_boolean added_fun
;
3842 asection
***ovly_sections
= param
;
3848 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3849 if (!call
->is_pasted
&& !call
->broken_cycle
)
3851 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3857 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3859 fun
->sec
->gc_mark
= 0;
3860 *(*ovly_sections
)++ = fun
->sec
;
3861 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3863 fun
->rodata
->gc_mark
= 0;
3864 *(*ovly_sections
)++ = fun
->rodata
;
3867 *(*ovly_sections
)++ = NULL
;
3870 /* Pasted sections must stay with the first section. We don't
3871 put pasted sections in the array, just the first section.
3872 Mark subsequent sections as already considered. */
3873 if (fun
->sec
->segment_mark
)
3875 struct function_info
*call_fun
= fun
;
3878 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3879 if (call
->is_pasted
)
3881 call_fun
= call
->fun
;
3882 call_fun
->sec
->gc_mark
= 0;
3883 if (call_fun
->rodata
)
3884 call_fun
->rodata
->gc_mark
= 0;
3890 while (call_fun
->sec
->segment_mark
);
3894 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3895 if (!call
->broken_cycle
3896 && !collect_overlays (call
->fun
, info
, ovly_sections
))
3901 struct _spu_elf_section_data
*sec_data
;
3902 struct spu_elf_stack_info
*sinfo
;
3904 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3905 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3908 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3909 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3917 struct _sum_stack_param
{
3919 size_t overall_stack
;
3920 bfd_boolean emit_stack_syms
;
3923 /* Descend the call graph for FUN, accumulating total stack required. */
3926 sum_stack (struct function_info
*fun
,
3927 struct bfd_link_info
*info
,
3930 struct call_info
*call
;
3931 struct function_info
*max
;
3932 size_t stack
, cum_stack
;
3934 bfd_boolean has_call
;
3935 struct _sum_stack_param
*sum_stack_param
= param
;
3936 struct spu_link_hash_table
*htab
;
3938 cum_stack
= fun
->stack
;
3939 sum_stack_param
->cum_stack
= cum_stack
;
3945 for (call
= fun
->call_list
; call
; call
= call
->next
)
3947 if (call
->broken_cycle
)
3949 if (!call
->is_pasted
)
3951 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3953 stack
= sum_stack_param
->cum_stack
;
3954 /* Include caller stack for normal calls, don't do so for
3955 tail calls. fun->stack here is local stack usage for
3957 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3958 stack
+= fun
->stack
;
3959 if (cum_stack
< stack
)
3966 sum_stack_param
->cum_stack
= cum_stack
;
3968 /* Now fun->stack holds cumulative stack. */
3969 fun
->stack
= cum_stack
;
3973 && sum_stack_param
->overall_stack
< cum_stack
)
3974 sum_stack_param
->overall_stack
= cum_stack
;
3976 htab
= spu_hash_table (info
);
3977 if (htab
->params
->auto_overlay
)
3980 f1
= func_name (fun
);
3981 if (htab
->params
->stack_analysis
)
3984 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3985 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3986 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3990 info
->callbacks
->minfo (_(" calls:\n"));
3991 for (call
= fun
->call_list
; call
; call
= call
->next
)
3992 if (!call
->is_pasted
&& !call
->broken_cycle
)
3994 const char *f2
= func_name (call
->fun
);
3995 const char *ann1
= call
->fun
== max
? "*" : " ";
3996 const char *ann2
= call
->is_tail
? "t" : " ";
3998 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
4003 if (sum_stack_param
->emit_stack_syms
)
4005 char *name
= bfd_malloc (18 + strlen (f1
));
4006 struct elf_link_hash_entry
*h
;
4011 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
4012 sprintf (name
, "__stack_%s", f1
);
4014 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
4016 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
4019 && (h
->root
.type
== bfd_link_hash_new
4020 || h
->root
.type
== bfd_link_hash_undefined
4021 || h
->root
.type
== bfd_link_hash_undefweak
))
4023 h
->root
.type
= bfd_link_hash_defined
;
4024 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
4025 h
->root
.u
.def
.value
= cum_stack
;
4030 h
->ref_regular_nonweak
= 1;
4031 h
->forced_local
= 1;
4039 /* SEC is part of a pasted function. Return the call_info for the
4040 next section of this function. */
4042 static struct call_info
*
4043 find_pasted_call (asection
*sec
)
4045 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
4046 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
4047 struct call_info
*call
;
4050 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4051 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
4052 if (call
->is_pasted
)
4058 /* qsort predicate to sort bfds by file name. */
4061 sort_bfds (const void *a
, const void *b
)
4063 bfd
*const *abfd1
= a
;
4064 bfd
*const *abfd2
= b
;
4066 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
4070 print_one_overlay_section (FILE *script
,
4073 unsigned int ovlynum
,
4074 unsigned int *ovly_map
,
4075 asection
**ovly_sections
,
4076 struct bfd_link_info
*info
)
4080 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4082 asection
*sec
= ovly_sections
[2 * j
];
4084 if (fprintf (script
, " %s%c%s (%s)\n",
4085 (sec
->owner
->my_archive
!= NULL
4086 ? sec
->owner
->my_archive
->filename
: ""),
4087 info
->path_separator
,
4088 sec
->owner
->filename
,
4091 if (sec
->segment_mark
)
4093 struct call_info
*call
= find_pasted_call (sec
);
4094 while (call
!= NULL
)
4096 struct function_info
*call_fun
= call
->fun
;
4097 sec
= call_fun
->sec
;
4098 if (fprintf (script
, " %s%c%s (%s)\n",
4099 (sec
->owner
->my_archive
!= NULL
4100 ? sec
->owner
->my_archive
->filename
: ""),
4101 info
->path_separator
,
4102 sec
->owner
->filename
,
4105 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4106 if (call
->is_pasted
)
4112 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4114 asection
*sec
= ovly_sections
[2 * j
+ 1];
4116 && fprintf (script
, " %s%c%s (%s)\n",
4117 (sec
->owner
->my_archive
!= NULL
4118 ? sec
->owner
->my_archive
->filename
: ""),
4119 info
->path_separator
,
4120 sec
->owner
->filename
,
4124 sec
= ovly_sections
[2 * j
];
4125 if (sec
->segment_mark
)
4127 struct call_info
*call
= find_pasted_call (sec
);
4128 while (call
!= NULL
)
4130 struct function_info
*call_fun
= call
->fun
;
4131 sec
= call_fun
->rodata
;
4133 && fprintf (script
, " %s%c%s (%s)\n",
4134 (sec
->owner
->my_archive
!= NULL
4135 ? sec
->owner
->my_archive
->filename
: ""),
4136 info
->path_separator
,
4137 sec
->owner
->filename
,
4140 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4141 if (call
->is_pasted
)
4150 /* Handle --auto-overlay. */
4153 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4157 struct elf_segment_map
*m
;
4158 unsigned int fixed_size
, lo
, hi
;
4159 unsigned int reserved
;
4160 struct spu_link_hash_table
*htab
;
4161 unsigned int base
, i
, count
, bfd_count
;
4162 unsigned int region
, ovlynum
;
4163 asection
**ovly_sections
, **ovly_p
;
4164 unsigned int *ovly_map
;
4166 unsigned int total_overlay_size
, overlay_size
;
4167 const char *ovly_mgr_entry
;
4168 struct elf_link_hash_entry
*h
;
4169 struct _mos_param mos_param
;
4170 struct _uos_param uos_param
;
4171 struct function_info dummy_caller
;
4173 /* Find the extents of our loadable image. */
4174 lo
= (unsigned int) -1;
4176 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4177 if (m
->p_type
== PT_LOAD
)
4178 for (i
= 0; i
< m
->count
; i
++)
4179 if (m
->sections
[i
]->size
!= 0)
4181 if (m
->sections
[i
]->vma
< lo
)
4182 lo
= m
->sections
[i
]->vma
;
4183 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4184 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4186 fixed_size
= hi
+ 1 - lo
;
4188 if (!discover_functions (info
))
4191 if (!build_call_tree (info
))
4194 htab
= spu_hash_table (info
);
4195 reserved
= htab
->params
->auto_overlay_reserved
;
4198 struct _sum_stack_param sum_stack_param
;
4200 sum_stack_param
.emit_stack_syms
= 0;
4201 sum_stack_param
.overall_stack
= 0;
4202 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4204 reserved
= (sum_stack_param
.overall_stack
4205 + htab
->params
->extra_stack_space
);
4208 /* No need for overlays if everything already fits. */
4209 if (fixed_size
+ reserved
<= htab
->local_store
4210 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
4212 htab
->params
->auto_overlay
= 0;
4216 uos_param
.exclude_input_section
= 0;
4217 uos_param
.exclude_output_section
4218 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4220 ovly_mgr_entry
= "__ovly_load";
4221 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4222 ovly_mgr_entry
= "__icache_br_handler";
4223 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4224 FALSE
, FALSE
, FALSE
);
4226 && (h
->root
.type
== bfd_link_hash_defined
4227 || h
->root
.type
== bfd_link_hash_defweak
)
4230 /* We have a user supplied overlay manager. */
4231 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4235 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4236 builtin version to .text, and will adjust .text size. */
4237 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4240 /* Mark overlay sections, and find max overlay section size. */
4241 mos_param
.max_overlay_size
= 0;
4242 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
4245 /* We can't put the overlay manager or interrupt routines in
4247 uos_param
.clearing
= 0;
4248 if ((uos_param
.exclude_input_section
4249 || uos_param
.exclude_output_section
)
4250 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
4254 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4256 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4257 if (bfd_arr
== NULL
)
4260 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4263 total_overlay_size
= 0;
4264 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4266 extern const bfd_target bfd_elf32_spu_vec
;
4268 unsigned int old_count
;
4270 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
4274 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4275 if (sec
->linker_mark
)
4277 if ((sec
->flags
& SEC_CODE
) != 0)
4279 fixed_size
-= sec
->size
;
4280 total_overlay_size
+= sec
->size
;
4282 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4283 && sec
->output_section
->owner
== info
->output_bfd
4284 && strncmp (sec
->output_section
->name
, ".ovl.init", 9) == 0)
4285 fixed_size
-= sec
->size
;
4286 if (count
!= old_count
)
4287 bfd_arr
[bfd_count
++] = ibfd
;
4290 /* Since the overlay link script selects sections by file name and
4291 section name, ensure that file names are unique. */
4294 bfd_boolean ok
= TRUE
;
4296 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4297 for (i
= 1; i
< bfd_count
; ++i
)
4298 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
4300 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4302 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4303 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4304 bfd_arr
[i
]->filename
,
4305 bfd_arr
[i
]->my_archive
->filename
);
4307 info
->callbacks
->einfo (_("%s duplicated\n"),
4308 bfd_arr
[i
]->filename
);
4314 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4315 "object files in auto-overlay script\n"));
4316 bfd_set_error (bfd_error_bad_value
);
4322 fixed_size
+= reserved
;
4323 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
);
4324 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4326 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4328 /* Stubs in the non-icache area are bigger. */
4329 fixed_size
+= htab
->non_ovly_stub
* 16;
4330 /* Space for icache manager tables.
4331 a) Tag array, one quadword per cache line.
4332 - word 0: ia address of present line, init to zero. */
4333 fixed_size
+= 16 << htab
->num_lines_log2
;
4334 /* b) Rewrite "to" list, one quadword per cache line. */
4335 fixed_size
+= 16 << htab
->num_lines_log2
;
4336 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4337 to a power-of-two number of full quadwords) per cache line. */
4338 fixed_size
+= 16 << (htab
->fromelem_size_log2
4339 + htab
->num_lines_log2
);
4340 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4345 /* Guess number of overlays. Assuming overlay buffer is on
4346 average only half full should be conservative. */
4347 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4348 / (htab
->local_store
- fixed_size
));
4349 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4350 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4354 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4355 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4356 "size of 0x%v exceeds local store\n"),
4357 (bfd_vma
) fixed_size
,
4358 (bfd_vma
) mos_param
.max_overlay_size
);
4360 /* Now see if we should put some functions in the non-overlay area. */
4361 else if (fixed_size
< htab
->params
->auto_overlay_fixed
)
4363 unsigned int max_fixed
, lib_size
;
4365 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4366 if (max_fixed
> htab
->params
->auto_overlay_fixed
)
4367 max_fixed
= htab
->params
->auto_overlay_fixed
;
4368 lib_size
= max_fixed
- fixed_size
;
4369 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4370 if (lib_size
== (unsigned int) -1)
4372 fixed_size
= max_fixed
- lib_size
;
4375 /* Build an array of sections, suitably sorted to place into
4377 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4378 if (ovly_sections
== NULL
)
4380 ovly_p
= ovly_sections
;
4381 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
4383 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4384 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4385 if (ovly_map
== NULL
)
4388 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4389 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4390 if (htab
->params
->line_size
!= 0)
4391 overlay_size
= htab
->params
->line_size
;
4394 while (base
< count
)
4396 unsigned int size
= 0, rosize
= 0, roalign
= 0;
4398 for (i
= base
; i
< count
; i
++)
4400 asection
*sec
, *rosec
;
4401 unsigned int tmp
, rotmp
;
4402 unsigned int num_stubs
;
4403 struct call_info
*call
, *pasty
;
4404 struct _spu_elf_section_data
*sec_data
;
4405 struct spu_elf_stack_info
*sinfo
;
4408 /* See whether we can add this section to the current
4409 overlay without overflowing our overlay buffer. */
4410 sec
= ovly_sections
[2 * i
];
4411 tmp
= align_power (size
, sec
->alignment_power
) + sec
->size
;
4413 rosec
= ovly_sections
[2 * i
+ 1];
4416 rotmp
= align_power (rotmp
, rosec
->alignment_power
) + rosec
->size
;
4417 if (roalign
< rosec
->alignment_power
)
4418 roalign
= rosec
->alignment_power
;
4420 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4422 if (sec
->segment_mark
)
4424 /* Pasted sections must stay together, so add their
4426 pasty
= find_pasted_call (sec
);
4427 while (pasty
!= NULL
)
4429 struct function_info
*call_fun
= pasty
->fun
;
4430 tmp
= (align_power (tmp
, call_fun
->sec
->alignment_power
)
4431 + call_fun
->sec
->size
);
4432 if (call_fun
->rodata
)
4434 rotmp
= (align_power (rotmp
,
4435 call_fun
->rodata
->alignment_power
)
4436 + call_fun
->rodata
->size
);
4437 if (roalign
< rosec
->alignment_power
)
4438 roalign
= rosec
->alignment_power
;
4440 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4441 if (pasty
->is_pasted
)
4445 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4448 /* If we add this section, we might need new overlay call
4449 stubs. Add any overlay section calls to dummy_call. */
4451 sec_data
= spu_elf_section_data (sec
);
4452 sinfo
= sec_data
->u
.i
.stack_info
;
4453 for (k
= 0; k
< (unsigned) sinfo
->num_fun
; ++k
)
4454 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4455 if (call
->is_pasted
)
4457 BFD_ASSERT (pasty
== NULL
);
4460 else if (call
->fun
->sec
->linker_mark
)
4462 if (!copy_callee (&dummy_caller
, call
))
4465 while (pasty
!= NULL
)
4467 struct function_info
*call_fun
= pasty
->fun
;
4469 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4470 if (call
->is_pasted
)
4472 BFD_ASSERT (pasty
== NULL
);
4475 else if (!copy_callee (&dummy_caller
, call
))
4479 /* Calculate call stub size. */
4481 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4483 unsigned int stub_delta
= 1;
4485 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4486 stub_delta
= call
->count
;
4487 num_stubs
+= stub_delta
;
4489 /* If the call is within this overlay, we won't need a
4491 for (k
= base
; k
< i
+ 1; k
++)
4492 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4494 num_stubs
-= stub_delta
;
4498 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4499 && num_stubs
> htab
->params
->max_branch
)
4501 if (align_power (tmp
, roalign
) + rotmp
4502 + num_stubs
* ovl_stub_size (htab
->params
) > overlay_size
)
4510 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
4511 ovly_sections
[2 * i
]->owner
,
4512 ovly_sections
[2 * i
],
4513 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4514 bfd_set_error (bfd_error_bad_value
);
4518 while (dummy_caller
.call_list
!= NULL
)
4520 struct call_info
*call
= dummy_caller
.call_list
;
4521 dummy_caller
.call_list
= call
->next
;
4527 ovly_map
[base
++] = ovlynum
;
4530 script
= htab
->params
->spu_elf_open_overlay_script ();
4532 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4534 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4537 if (fprintf (script
,
4538 " . = ALIGN (%u);\n"
4539 " .ovl.init : { *(.ovl.init) }\n"
4540 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4541 htab
->params
->line_size
) <= 0)
4546 while (base
< count
)
4548 unsigned int indx
= ovlynum
- 1;
4549 unsigned int vma
, lma
;
4551 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4552 lma
= vma
+ (((indx
>> htab
->num_lines_log2
) + 1) << 18);
4554 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4555 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4556 ovlynum
, vma
, lma
) <= 0)
4559 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4560 ovly_map
, ovly_sections
, info
);
4561 if (base
== (unsigned) -1)
4564 if (fprintf (script
, " }\n") <= 0)
4570 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4571 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4574 if (fprintf (script
, "}\nINSERT AFTER .toe;\n") <= 0)
4579 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4582 if (fprintf (script
,
4583 " . = ALIGN (16);\n"
4584 " .ovl.init : { *(.ovl.init) }\n"
4585 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4588 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4592 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4600 /* We need to set lma since we are overlaying .ovl.init. */
4601 if (fprintf (script
,
4602 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4607 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4611 while (base
< count
)
4613 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4616 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4617 ovly_map
, ovly_sections
, info
);
4618 if (base
== (unsigned) -1)
4621 if (fprintf (script
, " }\n") <= 0)
4624 ovlynum
+= htab
->params
->num_lines
;
4625 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4629 if (fprintf (script
, " }\n") <= 0)
4633 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4638 free (ovly_sections
);
4640 if (fclose (script
) != 0)
4643 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4644 (*htab
->params
->spu_elf_relink
) ();
4649 bfd_set_error (bfd_error_system_call
);
4651 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
4655 /* Provide an estimate of total stack required. */
4658 spu_elf_stack_analysis (struct bfd_link_info
*info
)
4660 struct spu_link_hash_table
*htab
;
4661 struct _sum_stack_param sum_stack_param
;
4663 if (!discover_functions (info
))
4666 if (!build_call_tree (info
))
4669 htab
= spu_hash_table (info
);
4670 if (htab
->params
->stack_analysis
)
4672 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
4673 info
->callbacks
->minfo (_("\nStack size for functions. "
4674 "Annotations: '*' max stack, 't' tail call\n"));
4677 sum_stack_param
.emit_stack_syms
= htab
->params
->emit_stack_syms
;
4678 sum_stack_param
.overall_stack
= 0;
4679 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4682 if (htab
->params
->stack_analysis
)
4683 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
4684 (bfd_vma
) sum_stack_param
.overall_stack
);
4688 /* Perform a final link. */
4691 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
4693 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4695 if (htab
->params
->auto_overlay
)
4696 spu_elf_auto_overlay (info
);
4698 if ((htab
->params
->stack_analysis
4699 || (htab
->params
->ovly_flavour
== ovly_soft_icache
4700 && htab
->params
->lrlive_analysis
))
4701 && !spu_elf_stack_analysis (info
))
4702 info
->callbacks
->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4704 if (!spu_elf_build_stubs (info
))
4705 info
->callbacks
->einfo ("%F%P: can not build overlay stubs: %E\n");
4707 return bfd_elf_final_link (output_bfd
, info
);
4710 /* Called when not normally emitting relocs, ie. !info->relocatable
4711 and !info->emitrelocations. Returns a count of special relocs
4712 that need to be emitted. */
4715 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
4717 Elf_Internal_Rela
*relocs
;
4718 unsigned int count
= 0;
4720 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
4724 Elf_Internal_Rela
*rel
;
4725 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
4727 for (rel
= relocs
; rel
< relend
; rel
++)
4729 int r_type
= ELF32_R_TYPE (rel
->r_info
);
4730 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4734 if (elf_section_data (sec
)->relocs
!= relocs
)
/* Functions for adding fixup records to .fixup */

#define FIXUP_RECORD_SIZE 4

#define FIXUP_PUT(output_bfd,htab,index,addr) \
	  bfd_put_32 (output_bfd, addr, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
#define FIXUP_GET(output_bfd,htab,index) \
	  bfd_get_32 (output_bfd, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4752 /* Store OFFSET in .fixup. This assumes it will be called with an
4753 increasing OFFSET. When this OFFSET fits with the last base offset,
4754 it just sets a bit, otherwise it adds a new fixup record. */
4756 spu_elf_emit_fixup (bfd
* output_bfd
, struct bfd_link_info
*info
,
4759 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4760 asection
*sfixup
= htab
->sfixup
;
4761 bfd_vma qaddr
= offset
& ~(bfd_vma
) 15;
4762 bfd_vma bit
= ((bfd_vma
) 8) >> ((offset
& 15) >> 2);
4763 if (sfixup
->reloc_count
== 0)
4765 FIXUP_PUT (output_bfd
, htab
, 0, qaddr
| bit
);
4766 sfixup
->reloc_count
++;
4770 bfd_vma base
= FIXUP_GET (output_bfd
, htab
, sfixup
->reloc_count
- 1);
4771 if (qaddr
!= (base
& ~(bfd_vma
) 15))
4773 if ((sfixup
->reloc_count
+ 1) * FIXUP_RECORD_SIZE
> sfixup
->size
)
4774 (*_bfd_error_handler
) (_("fatal error while creating .fixup"));
4775 FIXUP_PUT (output_bfd
, htab
, sfixup
->reloc_count
, qaddr
| bit
);
4776 sfixup
->reloc_count
++;
4779 FIXUP_PUT (output_bfd
, htab
, sfixup
->reloc_count
- 1, base
| bit
);
4783 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4786 spu_elf_relocate_section (bfd
*output_bfd
,
4787 struct bfd_link_info
*info
,
4789 asection
*input_section
,
4791 Elf_Internal_Rela
*relocs
,
4792 Elf_Internal_Sym
*local_syms
,
4793 asection
**local_sections
)
4795 Elf_Internal_Shdr
*symtab_hdr
;
4796 struct elf_link_hash_entry
**sym_hashes
;
4797 Elf_Internal_Rela
*rel
, *relend
;
4798 struct spu_link_hash_table
*htab
;
4801 bfd_boolean emit_these_relocs
= FALSE
;
4802 bfd_boolean is_ea_sym
;
4804 unsigned int iovl
= 0;
4806 htab
= spu_hash_table (info
);
4807 stubs
= (htab
->stub_sec
!= NULL
4808 && maybe_needs_stubs (input_section
));
4809 iovl
= overlay_index (input_section
);
4810 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
4811 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
4812 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
4815 relend
= relocs
+ input_section
->reloc_count
;
4816 for (; rel
< relend
; rel
++)
4819 reloc_howto_type
*howto
;
4820 unsigned int r_symndx
;
4821 Elf_Internal_Sym
*sym
;
4823 struct elf_link_hash_entry
*h
;
4824 const char *sym_name
;
4827 bfd_reloc_status_type r
;
4828 bfd_boolean unresolved_reloc
;
4830 enum _stub_type stub_type
;
4832 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4833 r_type
= ELF32_R_TYPE (rel
->r_info
);
4834 howto
= elf_howto_table
+ r_type
;
4835 unresolved_reloc
= FALSE
;
4840 if (r_symndx
< symtab_hdr
->sh_info
)
4842 sym
= local_syms
+ r_symndx
;
4843 sec
= local_sections
[r_symndx
];
4844 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4845 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4849 if (sym_hashes
== NULL
)
4852 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4854 while (h
->root
.type
== bfd_link_hash_indirect
4855 || h
->root
.type
== bfd_link_hash_warning
)
4856 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4859 if (h
->root
.type
== bfd_link_hash_defined
4860 || h
->root
.type
== bfd_link_hash_defweak
)
4862 sec
= h
->root
.u
.def
.section
;
4864 || sec
->output_section
== NULL
)
4865 /* Set a flag that will be cleared later if we find a
4866 relocation value for this symbol. output_section
4867 is typically NULL for symbols satisfied by a shared
4869 unresolved_reloc
= TRUE
;
4871 relocation
= (h
->root
.u
.def
.value
4872 + sec
->output_section
->vma
4873 + sec
->output_offset
);
4875 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4877 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4878 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4880 else if (!info
->relocatable
4881 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4884 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4885 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4886 if (!info
->callbacks
->undefined_symbol (info
,
4887 h
->root
.root
.string
,
4890 rel
->r_offset
, err
))
4894 sym_name
= h
->root
.root
.string
;
4897 if (sec
!= NULL
&& elf_discarded_section (sec
))
4899 /* For relocs against symbols from removed linkonce sections,
4900 or sections discarded by a linker script, we just want the
4901 section contents zeroed. Avoid any special processing. */
4902 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
4908 if (info
->relocatable
)
4911 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4912 if (r_type
== R_SPU_ADD_PIC
4914 && !(h
->def_regular
|| ELF_COMMON_DEF_P (h
)))
4916 bfd_byte
*loc
= contents
+ rel
->r_offset
;
4922 is_ea_sym
= (ea
!= NULL
4924 && sec
->output_section
== ea
);
4926 /* If this symbol is in an overlay area, we may need to relocate
4927 to the overlay stub. */
4928 addend
= rel
->r_addend
;
4931 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4932 contents
, info
)) != no_stub
)
4934 unsigned int ovl
= 0;
4935 struct got_entry
*g
, **head
;
4937 if (stub_type
!= nonovl_stub
)
4941 head
= &h
->got
.glist
;
4943 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4945 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4946 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4948 && g
->br_addr
== (rel
->r_offset
4949 + input_section
->output_offset
4950 + input_section
->output_section
->vma
))
4951 : g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4956 relocation
= g
->stub_addr
;
4961 /* For soft icache, encode the overlay index into addresses. */
4962 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4963 && (r_type
== R_SPU_ADDR16_HI
4964 || r_type
== R_SPU_ADDR32
|| r_type
== R_SPU_REL32
)
4967 unsigned int ovl
= overlay_index (sec
);
4970 unsigned int set_id
= ((ovl
- 1) >> htab
->num_lines_log2
) + 1;
4971 relocation
+= set_id
<< 18;
4976 if (htab
->params
->emit_fixups
&& !info
->relocatable
4977 && (input_section
->flags
& SEC_ALLOC
) != 0
4978 && r_type
== R_SPU_ADDR32
)
4981 offset
= rel
->r_offset
+ input_section
->output_section
->vma
4982 + input_section
->output_offset
;
4983 spu_elf_emit_fixup (output_bfd
, info
, offset
);
4986 if (unresolved_reloc
)
4988 else if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4992 /* ._ea is a special section that isn't allocated in SPU
4993 memory, but rather occupies space in PPU memory as
4994 part of an embedded ELF image. If this reloc is
4995 against a symbol defined in ._ea, then transform the
4996 reloc into an equivalent one without a symbol
4997 relative to the start of the ELF image. */
4998 rel
->r_addend
+= (relocation
5000 + elf_section_data (ea
)->this_hdr
.sh_offset
);
5001 rel
->r_info
= ELF32_R_INFO (0, r_type
);
5003 emit_these_relocs
= TRUE
;
5007 unresolved_reloc
= TRUE
;
5009 if (unresolved_reloc
)
5011 (*_bfd_error_handler
)
5012 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
5014 bfd_get_section_name (input_bfd
, input_section
),
5015 (long) rel
->r_offset
,
5021 r
= _bfd_final_link_relocate (howto
,
5025 rel
->r_offset
, relocation
, addend
);
5027 if (r
!= bfd_reloc_ok
)
5029 const char *msg
= (const char *) 0;
5033 case bfd_reloc_overflow
:
5034 if (!((*info
->callbacks
->reloc_overflow
)
5035 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
5036 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
5040 case bfd_reloc_undefined
:
5041 if (!((*info
->callbacks
->undefined_symbol
)
5042 (info
, sym_name
, input_bfd
, input_section
,
5043 rel
->r_offset
, TRUE
)))
5047 case bfd_reloc_outofrange
:
5048 msg
= _("internal error: out of range error");
5051 case bfd_reloc_notsupported
:
5052 msg
= _("internal error: unsupported relocation error");
5055 case bfd_reloc_dangerous
:
5056 msg
= _("internal error: dangerous error");
5060 msg
= _("internal error: unknown error");
5065 if (!((*info
->callbacks
->warning
)
5066 (info
, msg
, sym_name
, input_bfd
, input_section
,
5075 && emit_these_relocs
5076 && !info
->emitrelocations
)
5078 Elf_Internal_Rela
*wrel
;
5079 Elf_Internal_Shdr
*rel_hdr
;
5081 wrel
= rel
= relocs
;
5082 relend
= relocs
+ input_section
->reloc_count
;
5083 for (; rel
< relend
; rel
++)
5087 r_type
= ELF32_R_TYPE (rel
->r_info
);
5088 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
5091 input_section
->reloc_count
= wrel
- relocs
;
5092 /* Backflips for _bfd_elf_link_output_relocs. */
5093 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
5094 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
5101 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5104 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
5105 const char *sym_name ATTRIBUTE_UNUSED
,
5106 Elf_Internal_Sym
*sym
,
5107 asection
*sym_sec ATTRIBUTE_UNUSED
,
5108 struct elf_link_hash_entry
*h
)
5110 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5112 if (!info
->relocatable
5113 && htab
->stub_sec
!= NULL
5115 && (h
->root
.type
== bfd_link_hash_defined
5116 || h
->root
.type
== bfd_link_hash_defweak
)
5118 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
5120 struct got_entry
*g
;
5122 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
5123 if (htab
->params
->ovly_flavour
== ovly_soft_icache
5124 ? g
->br_addr
== g
->stub_addr
5125 : g
->addend
== 0 && g
->ovl
== 0)
5127 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
5128 (htab
->stub_sec
[0]->output_section
->owner
,
5129 htab
->stub_sec
[0]->output_section
));
5130 sym
->st_value
= g
->stub_addr
;
/* Non-zero when the output is an SPU plugin (see
   spu_elf_post_process_headers).  */
static int spu_plugin = 0;

/* Record whether we are linking a plugin.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5146 /* Set ELF header e_type for plugins. */
5149 spu_elf_post_process_headers (bfd
*abfd
,
5150 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
5154 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
5156 i_ehdrp
->e_type
= ET_DYN
;
5160 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5161 segments for overlays. */
5164 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5171 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5172 extra
= htab
->num_overlays
;
5178 sec
= bfd_get_section_by_name (abfd
, ".toe");
5179 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
5185 /* Remove .toe section from other PT_LOAD segments and put it in
5186 a segment of its own. Put overlays in separate segments too. */
5189 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
5192 struct elf_segment_map
*m
, *m_overlay
;
5193 struct elf_segment_map
**p
, **p_overlay
;
5199 toe
= bfd_get_section_by_name (abfd
, ".toe");
5200 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
5201 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
5202 for (i
= 0; i
< m
->count
; i
++)
5203 if ((s
= m
->sections
[i
]) == toe
5204 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
5206 struct elf_segment_map
*m2
;
5209 if (i
+ 1 < m
->count
)
5211 amt
= sizeof (struct elf_segment_map
);
5212 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
5213 m2
= bfd_zalloc (abfd
, amt
);
5216 m2
->count
= m
->count
- (i
+ 1);
5217 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
5218 m2
->count
* sizeof (m
->sections
[0]));
5219 m2
->p_type
= PT_LOAD
;
5227 amt
= sizeof (struct elf_segment_map
);
5228 m2
= bfd_zalloc (abfd
, amt
);
5231 m2
->p_type
= PT_LOAD
;
5233 m2
->sections
[0] = s
;
5241 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5242 PT_LOAD segments. This can cause the .ovl.init section to be
5243 overwritten with the contents of some overlay segment. To work
5244 around this issue, we ensure that all PF_OVERLAY segments are
5245 sorted first amongst the program headers; this ensures that even
5246 with a broken loader, the .ovl.init section (which is not marked
5247 as PF_OVERLAY) will be placed into SPU local store on startup. */
5249 /* Move all overlay segments onto a separate list. */
5250 p
= &elf_tdata (abfd
)->segment_map
;
5251 p_overlay
= &m_overlay
;
5254 if ((*p
)->p_type
== PT_LOAD
&& (*p
)->count
== 1
5255 && spu_elf_section_data ((*p
)->sections
[0])->u
.o
.ovl_index
!= 0)
5260 p_overlay
= &m
->next
;
5267 /* Re-insert overlay segments at the head of the segment map. */
5268 *p_overlay
= elf_tdata (abfd
)->segment_map
;
5269 elf_tdata (abfd
)->segment_map
= m_overlay
;
5274 /* Tweak the section type of .note.spu_name. */
5277 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
5278 Elf_Internal_Shdr
*hdr
,
5281 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
5282 hdr
->sh_type
= SHT_NOTE
;
5286 /* Tweak phdrs before writing them out. */
5289 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5291 const struct elf_backend_data
*bed
;
5292 struct elf_obj_tdata
*tdata
;
5293 Elf_Internal_Phdr
*phdr
, *last
;
5294 struct spu_link_hash_table
*htab
;
5301 bed
= get_elf_backend_data (abfd
);
5302 tdata
= elf_tdata (abfd
);
5304 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
5305 htab
= spu_hash_table (info
);
5306 if (htab
->num_overlays
!= 0)
5308 struct elf_segment_map
*m
;
5311 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
5313 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
5315 /* Mark this as an overlay header. */
5316 phdr
[i
].p_flags
|= PF_OVERLAY
;
5318 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0
5319 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
5321 bfd_byte
*p
= htab
->ovtab
->contents
;
5322 unsigned int off
= o
* 16 + 8;
5324 /* Write file_off into _ovly_table. */
5325 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
5328 /* Soft-icache has its file offset put in .ovl.init. */
5329 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
5331 bfd_vma val
= elf_section_data (htab
->ovl_sec
[0])->this_hdr
.sh_offset
;
5333 bfd_put_32 (htab
->init
->owner
, val
, htab
->init
->contents
+ 4);
5337 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5338 of 16. This should always be possible when using the standard
5339 linker scripts, but don't create overlapping segments if
5340 someone is playing games with linker scripts. */
5342 for (i
= count
; i
-- != 0; )
5343 if (phdr
[i
].p_type
== PT_LOAD
)
5347 adjust
= -phdr
[i
].p_filesz
& 15;
5350 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
5353 adjust
= -phdr
[i
].p_memsz
& 15;
5356 && phdr
[i
].p_filesz
!= 0
5357 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
5358 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
5361 if (phdr
[i
].p_filesz
!= 0)
5365 if (i
== (unsigned int) -1)
5366 for (i
= count
; i
-- != 0; )
5367 if (phdr
[i
].p_type
== PT_LOAD
)
5371 adjust
= -phdr
[i
].p_filesz
& 15;
5372 phdr
[i
].p_filesz
+= adjust
;
5374 adjust
= -phdr
[i
].p_memsz
& 15;
5375 phdr
[i
].p_memsz
+= adjust
;
5382 spu_elf_size_sections (bfd
* output_bfd
, struct bfd_link_info
*info
)
5384 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5385 if (htab
->params
->emit_fixups
)
5387 asection
*sfixup
= htab
->sfixup
;
5388 int fixup_count
= 0;
5392 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
5396 if (bfd_get_flavour (ibfd
) != bfd_target_elf_flavour
)
5399 /* Walk over each section attached to the input bfd. */
5400 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
5402 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
5405 /* If there aren't any relocs, then there's nothing more
5407 if ((isec
->flags
& SEC_RELOC
) == 0
5408 || isec
->reloc_count
== 0)
5411 /* Get the relocs. */
5413 _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
5415 if (internal_relocs
== NULL
)
5418 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5419 relocations. They are stored in a single word by
5420 saving the upper 28 bits of the address and setting the
5421 lower 4 bits to a bit mask of the words that have the
5422 relocation. BASE_END keeps track of the next quadword. */
5423 irela
= internal_relocs
;
5424 irelaend
= irela
+ isec
->reloc_count
;
5426 for (; irela
< irelaend
; irela
++)
5427 if (ELF32_R_TYPE (irela
->r_info
) == R_SPU_ADDR32
5428 && irela
->r_offset
>= base_end
)
5430 base_end
= (irela
->r_offset
& ~(bfd_vma
) 15) + 16;
5436 /* We always have a NULL fixup as a sentinel */
5437 size
= (fixup_count
+ 1) * FIXUP_RECORD_SIZE
;
5438 if (!bfd_set_section_size (output_bfd
, sfixup
, size
))
5440 sfixup
->contents
= (bfd_byte
*) bfd_zalloc (info
->input_bfds
, size
);
5441 if (sfixup
->contents
== NULL
)
5447 #define TARGET_BIG_SYM bfd_elf32_spu_vec
5448 #define TARGET_BIG_NAME "elf32-spu"
5449 #define ELF_ARCH bfd_arch_spu
5450 #define ELF_MACHINE_CODE EM_SPU
5451 /* This matches the alignment need for DMA. */
5452 #define ELF_MAXPAGESIZE 0x80
5453 #define elf_backend_rela_normal 1
5454 #define elf_backend_can_gc_sections 1
5456 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5457 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5458 #define elf_info_to_howto spu_elf_info_to_howto
5459 #define elf_backend_count_relocs spu_elf_count_relocs
5460 #define elf_backend_relocate_section spu_elf_relocate_section
5461 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5462 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5463 #define elf_backend_object_p spu_elf_object_p
5464 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5465 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5467 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5468 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5469 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5470 #define elf_backend_post_process_headers spu_elf_post_process_headers
5471 #define elf_backend_fake_sections spu_elf_fake_sections
5472 #define elf_backend_special_sections spu_elf_special_sections
5473 #define bfd_elf32_bfd_final_link spu_elf_final_link
5475 #include "elf32-target.h"