1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
/* Forward declaration of the special R_SPU_REL9/REL9I handler defined
   later in this file.  NOTE(review): the prototype below is truncated in
   this extract (trailing parameter types are missing lines).  */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
/* HOWTO table describing every SPU relocation: rightshift, size, bitsize,
   pc_relative, bitpos, overflow check, apply function, name, and masks.
   NOTE(review): this extract drops interior lines of some entries
   (e.g. R_SPU_PPU64 is visibly truncated) — verify against the original
   file before relying on entry contents.  */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
/* PC-relative relocations below set both pc_relative and pcrel_offset.  */
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
/* The two REL9 variants use the special spu_elf_rel9 handler because
   the 9-bit value is split across non-contiguous instruction fields.  */
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
/* NOTE(review): the tail of the R_SPU_PPU64 entry (final args and
   closing paren) is missing from this extract.  */
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
91 HOWTO (R_SPU_ADD_PIC
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
92 bfd_elf_generic_reloc
, "SPU_ADD_PIC",
93 FALSE
, 0, 0x00000000, FALSE
),
/* Special sections the SPU backend knows about: "._ea" (writable) and
   ".toe" (nobits, allocated).  Used by the generic ELF code to assign
   default section flags.  NOTE(review): the closing of this array is not
   visible in this extract.  */
96 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
97 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
98 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
/* Map a BFD relocation code to the corresponding SPU reloc type.
   NOTE(review): interior lines are missing from this extract (the switch
   header, several return statements, and the default case are not
   visible) — only the surviving case labels are shown.  */
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
109 case BFD_RELOC_SPU_IMM10W
:
111 case BFD_RELOC_SPU_IMM16W
:
113 case BFD_RELOC_SPU_LO16
:
114 return R_SPU_ADDR16_LO
;
115 case BFD_RELOC_SPU_HI16
:
116 return R_SPU_ADDR16_HI
;
117 case BFD_RELOC_SPU_IMM18
:
119 case BFD_RELOC_SPU_PCREL16
:
121 case BFD_RELOC_SPU_IMM7
:
123 case BFD_RELOC_SPU_IMM8
:
125 case BFD_RELOC_SPU_PCREL9a
:
127 case BFD_RELOC_SPU_PCREL9b
:
129 case BFD_RELOC_SPU_IMM10
:
130 return R_SPU_ADDR10I
;
131 case BFD_RELOC_SPU_IMM16
:
132 return R_SPU_ADDR16I
;
135 case BFD_RELOC_32_PCREL
:
137 case BFD_RELOC_SPU_PPU32
:
139 case BFD_RELOC_SPU_PPU64
:
141 case BFD_RELOC_SPU_ADD_PIC
:
142 return R_SPU_ADD_PIC
;
/* elf_info_to_howto hook: point CACHE_PTR->howto at the table entry for
   the reloc type encoded in DST->r_info.  The BFD_ASSERT guards against
   an out-of-range reloc type indexing past elf_howto_table.  */
147 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
149 Elf_Internal_Rela
*dst
)
151 enum elf_spu_reloc_type r_type
;
153 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
154 BFD_ASSERT (r_type
< R_SPU_max
);
155 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
/* bfd_reloc_type_lookup hook: translate a generic BFD reloc code to a
   howto entry.  Returns the table entry, or (per the R_SPU_NONE check,
   whose body is not visible here) presumably NULL for unknown codes —
   TODO confirm against the full source.  */
158 static reloc_howto_type
*
159 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
160 bfd_reloc_code_real_type code
)
162 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
164 if (r_type
== R_SPU_NONE
)
167 return elf_howto_table
+ r_type
;
/* bfd_reloc_name_lookup hook: case-insensitive linear scan of
   elf_howto_table by reloc name (e.g. "SPU_ADDR16").  The fall-through
   return for a failed lookup is not visible in this extract.  */
170 static reloc_howto_type
*
171 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
176 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
177 if (elf_howto_table
[i
].name
!= NULL
178 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
179 return &elf_howto_table
[i
];
184 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
/* Special reloc handler: the 9-bit PC-relative value is split into a
   7-bit low field plus two high bits placed at different positions for
   REL9 vs REL9I; the howto dst_mask selects the right combination.
   NOTE(review): declarations of `val` and `insn` and the final
   bfd_reloc_ok return are missing from this extract.  */
186 static bfd_reloc_status_type
187 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
188 void *data
, asection
*input_section
,
189 bfd
*output_bfd
, char **error_message
)
191 bfd_size_type octets
;
195 /* If this is a relocatable link (output_bfd test tells us), just
196 call the generic function. Any adjustment will be done at final
198 if (output_bfd
!= NULL
)
199 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
200 input_section
, output_bfd
, error_message
);
/* Bounds-check the reloc address against the section size.  */
202 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
203 return bfd_reloc_outofrange
;
204 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
206 /* Get symbol value. */
208 if (!bfd_is_com_section (symbol
->section
))
210 if (symbol
->section
->output_section
)
211 val
+= symbol
->section
->output_section
->vma
;
213 val
+= reloc_entry
->addend
;
215 /* Make it pc-relative. */
216 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
/* Range check: the signed 9-bit displacement must fit in [-256, 255].  */
219 if (val
+ 256 >= 512)
220 return bfd_reloc_overflow
;
222 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
224 /* Move two high bits of value to REL9I and REL9 position.
225 The mask will take care of selecting the right field. */
226 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
227 insn
&= ~reloc_entry
->howto
->dst_mask
;
228 insn
|= val
& reloc_entry
->howto
->dst_mask
;
229 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
/* bfd_new_section_hook: attach a zeroed _spu_elf_section_data to each
   new section (stored in sec->used_by_bfd), then chain to the generic
   ELF hook.  NOTE(review): the bfd_zalloc failure check between lines
   240 and 243 is missing from this extract.  */
234 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
236 if (!sec
->used_by_bfd
)
238 struct _spu_elf_section_data
*sdata
;
240 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
243 sec
->used_by_bfd
= sdata
;
246 return _bfd_elf_new_section_hook (abfd
, sec
);
249 /* Set up overlay info for executables. */
/* object_p hook: for executables/shared objects, scan PT_LOAD program
   headers flagged PF_OVERLAY and number overlays and overlay buffers,
   recording the numbers in each contained section's spu section data.
   A new buffer starts when the 18-bit local-store address (p_vaddr
   masked with 0x3ffff) differs from the previous overlay header.
   NOTE(review): several interior lines (counter increments, the return)
   are missing from this extract.  */
252 spu_elf_object_p (bfd
*abfd
)
254 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
256 unsigned int i
, num_ovl
, num_buf
;
257 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
258 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
259 Elf_Internal_Phdr
*last_phdr
= NULL
;
261 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
262 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
267 if (last_phdr
== NULL
268 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
/* Walk all section headers and tag those inside this segment.  */
271 for (j
= 1; j
< elf_numsections (abfd
); j
++)
273 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
275 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
277 asection
*sec
= shdr
->bfd_section
;
278 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
279 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
287 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
288 strip --strip-unneeded will not remove them. */
291 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
/* Match any symbol whose name starts with "_EAR_" and that is not in
   the absolute section; flag it BSF_KEEP.  */
293 if (sym
->name
!= NULL
294 && sym
->section
!= bfd_abs_section_ptr
295 && strncmp (sym
->name
, "_EAR_", 5) == 0)
296 sym
->flags
|= BSF_KEEP
;
299 /* SPU ELF linker hash table. */
/* Backend-specific link hash table, embedding the generic ELF table as
   its first member so spu_hash_table() can downcast.  NOTE(review):
   several field declarations (ovtab, stub_sec, the got_entry struct
   header, etc.) are missing lines in this extract.  */
301 struct spu_link_hash_table
303 struct elf_link_hash_table elf
;
305 struct spu_elf_params
*params
;
307 /* Shortcuts to overlay sections. */
313 /* Count of stubs in each overlay section. */
314 unsigned int *stub_count
;
316 /* The stub section for each overlay section. */
/* Overlay manager entry symbols: {load/return} or the icache handlers.  */
319 struct elf_link_hash_entry
*ovly_entry
[2];
321 /* Number of overlay buffers. */
322 unsigned int num_buf
;
324 /* Total number of overlays. */
325 unsigned int num_overlays
;
327 /* For soft icache. */
328 unsigned int line_size_log2
;
329 unsigned int num_lines_log2
;
330 unsigned int fromelem_size_log2
;
332 /* How much memory we have. */
333 unsigned int local_store
;
334 /* Local store --auto-overlay should reserve for non-overlay
335 functions and data. */
336 unsigned int overlay_fixed
;
337 /* Local store --auto-overlay should reserve for stack and heap. */
338 unsigned int reserved
;
339 /* If reserved is not specified, stack analysis will calculate a value
340 for the stack. This parameter adjusts that value to allow for
341 negative sp access (the ABI says 2000 bytes below sp are valid,
342 and the overlay manager uses some of this area). */
343 int extra_stack_space
;
344 /* Count of overlay stubs needed in non-overlay area. */
345 unsigned int non_ovly_stub
;
/* Set when stub creation failed; checked after stub generation.  */
348 unsigned int stub_err
: 1;
351 /* Hijack the generic got fields for overlay stub accounting. */
355 struct got_entry
*next
;
/* Downcast from the generic link info's hash table.  */
364 #define spu_hash_table(p) \
365 ((struct spu_link_hash_table *) ((p)->hash))
/* Call-graph node data used by the stack/overlay analysis.
   NOTE(review): the struct headers for call_info and function_info and
   several field declarations are missing lines in this extract; the
   fields below belong to those structures per the original numbering.  */
369 struct function_info
*fun
;
370 struct call_info
*next
;
372 unsigned int max_depth
;
373 unsigned int is_tail
: 1;
374 unsigned int is_pasted
: 1;
375 unsigned int broken_cycle
: 1;
376 unsigned int priority
: 13;
381 /* List of functions called. Also branches to hot/cold part of
383 struct call_info
*call_list
;
384 /* For hot/cold part of function, point to owner. */
385 struct function_info
*start
;
386 /* Symbol at start of function. */
388 Elf_Internal_Sym
*sym
;
389 struct elf_link_hash_entry
*h
;
391 /* Function section. */
394 /* Where last called from, and number of sections called from. */
395 asection
*last_caller
;
396 unsigned int call_count
;
397 /* Address range of (this part of) function. */
399 /* Offset where we found a store of lr, or -1 if none found. */
401 /* Offset where we found the stack adjustment insn. */
405 /* Distance from root of call tree. Tail and hot/cold branches
406 count as one deeper. We aren't counting stack frames here. */
408 /* Set if global symbol. */
409 unsigned int global
: 1;
410 /* Set if known to be start of function (as distinct from a hunk
411 in hot/cold section. */
412 unsigned int is_func
: 1;
413 /* Set if not a root node. */
414 unsigned int non_root
: 1;
415 /* Flags used during call tree traversal. It's cheaper to replicate
416 the visit flags than have one which needs clearing after a traversal. */
417 unsigned int visit1
: 1;
418 unsigned int visit2
: 1;
419 unsigned int marking
: 1;
420 unsigned int visit3
: 1;
421 unsigned int visit4
: 1;
422 unsigned int visit5
: 1;
423 unsigned int visit6
: 1;
424 unsigned int visit7
: 1;
/* Per-section table of function_info records; `fun[1]` is the old-style
   variable-length trailing array idiom (allocated oversized).  */
427 struct spu_elf_stack_info
431 /* Variable size array describing functions, one per contiguous
432 address range belonging to a function. */
433 struct function_info fun
[1];
/* Forward declaration; defined later in the file (not in this extract).  */
436 static struct function_info
*find_function (asection
*, bfd_vma
,
437 struct bfd_link_info
*);
439 /* Create a spu ELF linker hash table. */
/* Allocate and initialize the SPU hash table.  The memset zeroes all
   SPU-specific fields from `ovtab` to the end of the struct, leaving
   the generic elf table (initialized above) intact.  The init_got_*
   fields are repurposed for overlay-stub accounting, so start them at
   zero/NULL rather than the generic defaults.  NOTE(review): the NULL
   checks after bfd_malloc and the init failure path are missing lines
   in this extract.  */
441 static struct bfd_link_hash_table
*
442 spu_elf_link_hash_table_create (bfd
*abfd
)
444 struct spu_link_hash_table
*htab
;
446 htab
= bfd_malloc (sizeof (*htab
));
450 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
451 _bfd_elf_link_hash_newfunc
,
452 sizeof (struct elf_link_hash_entry
)))
458 memset (&htab
->ovtab
, 0,
459 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
461 htab
->elf
.init_got_refcount
.refcount
= 0;
462 htab
->elf
.init_got_refcount
.glist
= NULL
;
463 htab
->elf
.init_got_offset
.offset
= 0;
464 htab
->elf
.init_got_offset
.glist
= NULL
;
465 return &htab
->elf
.root
;
/* Record the linker parameters in the hash table and precompute the
   log2 values used by the soft i-cache support.  fromelem_size_log2 is
   sized so the "from" list holds one byte per outgoing branch, in
   power-of-two quadwords (hence the `- 4`, i.e. /16 bytes).  */
469 spu_elf_setup (struct bfd_link_info
*info
, struct spu_elf_params
*params
)
471 bfd_vma max_branch_log2
;
473 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
474 htab
->params
= params
;
475 htab
->line_size_log2
= bfd_log2 (htab
->params
->line_size
);
476 htab
->num_lines_log2
= bfd_log2 (htab
->params
->num_lines
);
478 /* For the software i-cache, we provide a "from" list whose size
479 is a power-of-two number of quadwords, big enough to hold one
480 byte per outgoing branch. Compute this number here. */
481 max_branch_log2
= bfd_log2 (htab
->params
->max_branch
);
482 htab
->fromelem_size_log2
= max_branch_log2
> 4 ? max_branch_log2
- 4 : 0;
485 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
486 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
487 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
/* NOTE(review): this extract is missing the ibfd parameter line, the
   NULL-guards on output parameters, the error path after
   bfd_elf_get_elf_syms, and the final return — verify against the full
   source.  */
490 get_sym_h (struct elf_link_hash_entry
**hp
,
491 Elf_Internal_Sym
**symp
,
493 Elf_Internal_Sym
**locsymsp
,
494 unsigned long r_symndx
,
497 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
/* Index >= sh_info means a global symbol: look it up in the hash
   table, following indirect/warning links to the real entry.  */
499 if (r_symndx
>= symtab_hdr
->sh_info
)
501 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
502 struct elf_link_hash_entry
*h
;
504 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
505 while (h
->root
.type
== bfd_link_hash_indirect
506 || h
->root
.type
== bfd_link_hash_warning
)
507 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
/* A global's section is only known when defined (weak or strong).  */
517 asection
*symsec
= NULL
;
518 if (h
->root
.type
== bfd_link_hash_defined
519 || h
->root
.type
== bfd_link_hash_defweak
)
520 symsec
= h
->root
.u
.def
.section
;
/* Local symbol: read (and cache via *LOCSYMSP) the local symtab.  */
526 Elf_Internal_Sym
*sym
;
527 Elf_Internal_Sym
*locsyms
= *locsymsp
;
531 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
533 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
535 0, NULL
, NULL
, NULL
);
540 sym
= locsyms
+ r_symndx
;
549 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
555 /* Create the note section if not already present. This is done early so
556 that the linker maps the sections to the right place in the output. */
/* Builds the SPUNAME PT_NOTE section in the first input bfd, containing
   the standard note layout: namesz (sizeof SPU_PLUGIN_NAME), descsz
   (output filename length incl. NUL), type 1, then name and descriptor
   each padded to 4 bytes.  NOTE(review): the early-return when the note
   already exists, the allocation failure paths, and the
   set_section_contents call are missing lines in this extract.  */
559 spu_elf_create_sections (struct bfd_link_info
*info
)
/* If any input already provides the note, nothing to do.  */
563 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
564 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
569 /* Make SPU_PTNOTE_SPUNAME section. */
576 ibfd
= info
->input_bfds
;
577 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
578 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
580 || !bfd_set_section_alignment (ibfd
, s
, 4))
/* Note size: 12-byte header + padded name + padded descriptor.  */
583 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
584 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
585 size
+= (name_len
+ 3) & -4;
587 if (!bfd_set_section_size (ibfd
, s
, size
))
590 data
= bfd_zalloc (ibfd
, size
);
594 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
595 bfd_put_32 (ibfd
, name_len
, data
+ 4);
596 bfd_put_32 (ibfd
, 1, data
+ 8);
597 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
598 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
599 bfd_get_filename (info
->output_bfd
), name_len
);
606 /* qsort predicate to sort sections by vma. */
/* Primary key: vma; tie-break on section index so the sort is stable.
   NOTE(review): the `if (delta != 0)` guard between the delta
   computation and the two returns is missing a line in this extract.  */
609 sort_sections (const void *a
, const void *b
)
611 const asection
*const *s1
= a
;
612 const asection
*const *s2
= b
;
613 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
616 return delta
< 0 ? -1 : 1;
618 return (*s1
)->index
- (*s2
)->index
;
621 /* Identify overlays in the output bfd, and number them.
622 Returns 0 on error, 1 if no overlays, 2 if overlays. */
/* Two strategies: for soft-icache, overlays live in a fixed cache area
   and must be cache-line sized/aligned; otherwise overlays are detected
   by overlapping vmas.  Sections named ".ovl.init" are preloaded buffer
   contents, not overlays.  Also resolves the two overlay-manager entry
   symbols (__ovly_load/__ovly_return or the icache handlers).
   NOTE(review): many interior lines (loop bodies, returns, error exits)
   are missing from this extract.  */
625 spu_elf_find_overlays (struct bfd_link_info
*info
)
627 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
628 asection
**alloc_sec
;
629 unsigned int i
, n
, ovl_index
, num_buf
;
/* Entry-symbol names indexed [purpose][ovly_flavour].  */
632 static const char *const entry_names
[2][2] = {
633 { "__ovly_load", "__icache_br_handler" },
634 { "__ovly_return", "__icache_call_handler" }
637 if (info
->output_bfd
->section_count
< 2)
641 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
642 if (alloc_sec
== NULL
)
645 /* Pick out all the alloced sections. */
646 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
647 if ((s
->flags
& SEC_ALLOC
) != 0
648 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
658 /* Sort them by vma. */
659 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
661 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
662 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
664 /* Look for an overlapping vma to find the first overlay section. */
665 bfd_vma vma_start
= 0;
666 bfd_vma lma_start
= 0;
668 for (i
= 1; i
< n
; i
++)
671 if (s
->vma
< ovl_end
)
673 asection
*s0
= alloc_sec
[i
- 1];
675 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
681 << (htab
->num_lines_log2
+ htab
->line_size_log2
)));
686 ovl_end
= s
->vma
+ s
->size
;
689 /* Now find any sections within the cache area. */
690 for (ovl_index
= 0, num_buf
= 0; i
< n
; i
++)
693 if (s
->vma
>= ovl_end
)
696 /* A section in an overlay area called .ovl.init is not
697 an overlay, in the sense that it might be loaded in
698 by the overlay manager, but rather the initial
699 section contents for the overlay buffer. */
700 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
702 num_buf
= ((s
->vma
- vma_start
) >> htab
->line_size_log2
) + 1;
/* Overlay sections must start on a cache line (both vma and lma)
   and fit within a single line.  */
703 if (((s
->vma
- vma_start
) & (htab
->params
->line_size
- 1))
704 || ((s
->lma
- lma_start
) & (htab
->params
->line_size
- 1)))
706 info
->callbacks
->einfo (_("%X%P: overlay section %A "
707 "does not start on a cache line.\n"),
709 bfd_set_error (bfd_error_bad_value
);
712 else if (s
->size
> htab
->params
->line_size
)
714 info
->callbacks
->einfo (_("%X%P: overlay section %A "
715 "is larger than a cache line.\n"),
717 bfd_set_error (bfd_error_bad_value
);
721 alloc_sec
[ovl_index
++] = s
;
722 spu_elf_section_data (s
)->u
.o
.ovl_index
723 = ((s
->lma
- lma_start
) >> htab
->line_size_log2
) + 1;
724 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
728 /* Ensure there are no more overlay sections. */
732 if (s
->vma
< ovl_end
)
734 info
->callbacks
->einfo (_("%X%P: overlay section %A "
735 "is not in cache area.\n"),
737 bfd_set_error (bfd_error_bad_value
);
741 ovl_end
= s
->vma
+ s
->size
;
/* Normal (non-icache) overlay detection path.  */
746 /* Look for overlapping vmas. Any with overlap must be overlays.
747 Count them. Also count the number of overlay regions. */
748 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
751 if (s
->vma
< ovl_end
)
753 asection
*s0
= alloc_sec
[i
- 1];
/* First overlap with s0: s0 itself becomes the first overlay of a
   new buffer (unless it is .ovl.init).  */
755 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
758 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
760 alloc_sec
[ovl_index
] = s0
;
761 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
762 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= num_buf
;
765 ovl_end
= s
->vma
+ s
->size
;
767 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
769 alloc_sec
[ovl_index
] = s
;
770 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
771 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
/* Overlays sharing a buffer must all start at the same vma.  */
772 if (s0
->vma
!= s
->vma
)
774 info
->callbacks
->einfo (_("%X%P: overlay sections %A "
775 "and %A do not start at the "
778 bfd_set_error (bfd_error_bad_value
);
781 if (ovl_end
< s
->vma
+ s
->size
)
782 ovl_end
= s
->vma
+ s
->size
;
786 ovl_end
= s
->vma
+ s
->size
;
790 htab
->num_overlays
= ovl_index
;
791 htab
->num_buf
= num_buf
;
792 htab
->ovl_sec
= alloc_sec
;
/* Look up (creating if needed) the overlay manager entry symbols.  */
797 for (i
= 0; i
< 2; i
++)
800 struct elf_link_hash_entry
*h
;
802 name
= entry_names
[i
][htab
->params
->ovly_flavour
];
803 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
807 if (h
->root
.type
== bfd_link_hash_new
)
809 h
->root
.type
= bfd_link_hash_undefined
;
811 h
->ref_regular_nonweak
= 1;
814 htab
->ovly_entry
[i
] = h
;
820 /* Non-zero to use bra in overlay stubs rather than br. */
/* SPU instruction opcodes (top bits of the 32-bit insn word) used when
   emitting overlay stubs.  */
823 #define BRA 0x30000000
824 #define BRASL 0x31000000
825 #define BR 0x32000000
826 #define BRSL 0x33000000
827 #define NOP 0x40200000
828 #define LNOP 0x00200000
829 #define ILA 0x42000000
831 /* Return true for all relative and absolute branch instructions.
839 brhnz 00100011 0.. */
/* INSN points at the big-endian instruction bytes; the tests below
   match the opcode bit patterns listed in the comments above.  */
842 is_branch (const unsigned char *insn
)
844 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
847 /* Return true for all indirect branch instructions.
855 bihnz 00100101 011 */
858 is_indirect_branch (const unsigned char *insn
)
860 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
863 /* Return true for branch hint instructions.
868 is_hint (const unsigned char *insn
)
870 return (insn
[0] & 0xfc) == 0x10;
873 /* True if INPUT_SECTION might need overlay stubs. */
/* Filters out sections that can never require stubs: non-alloc (debug),
   discarded link-once, and .eh_frame.  NOTE(review): the return
   statements for each test and the final TRUE return are missing lines
   in this extract.  */
876 maybe_needs_stubs (asection
*input_section
)
878 /* No stubs for debug sections and suchlike. */
879 if ((input_section
->flags
& SEC_ALLOC
) == 0)
882 /* No stubs for link-once sections that will be discarded. */
883 if (input_section
->output_section
== bfd_abs_section_ptr
)
886 /* Don't create stubs for .eh_frame references. */
887 if (strcmp (input_section
->name
, ".eh_frame") == 0)
909 /* Return non-zero if this reloc symbol should go via an overlay stub.
910 Return 2 if the stub must be in non-overlay area. */
/* Decision logic: decode the relocated instruction to classify it as a
   branch, hint, or call, then decide whether the target (in SYM_SEC)
   needs an overlay stub relative to INPUT_SECTION's overlay.
   NOTE(review): several parameter lines, declarations, and return
   statements are missing from this extract.  */
912 static enum _stub_type
913 needs_ovl_stub (struct elf_link_hash_entry
*h
,
914 Elf_Internal_Sym
*sym
,
916 asection
*input_section
,
917 Elf_Internal_Rela
*irela
,
919 struct bfd_link_info
*info
)
921 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
922 enum elf_spu_reloc_type r_type
;
923 unsigned int sym_type
;
924 bfd_boolean branch
, hint
, call
;
925 enum _stub_type ret
= no_stub
;
/* Bail out for symbols without a usable output section.  */
929 || sym_sec
->output_section
== bfd_abs_section_ptr
930 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
935 /* Ensure no stubs for user supplied overlay manager syms. */
936 if (h
== htab
->ovly_entry
[0] || h
== htab
->ovly_entry
[1])
939 /* setjmp always goes via an overlay stub, because then the return
940 and hence the longjmp goes via __ovly_return. That magically
941 makes setjmp/longjmp between overlays work. */
942 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
943 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
950 sym_type
= ELF_ST_TYPE (sym
->st_info
);
952 r_type
= ELF32_R_TYPE (irela
->r_info
);
/* Only 16-bit branch-capable relocs can be branch/hint/call sites;
   inspect the instruction bytes to classify.  */
956 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
958 if (contents
== NULL
)
961 if (!bfd_get_section_contents (input_section
->owner
,
968 contents
+= irela
->r_offset
;
970 branch
= is_branch (contents
);
971 hint
= is_hint (contents
);
/* brsl/brasl (link-setting branches) are calls.  */
974 call
= (contents
[0] & 0xfd) == 0x31;
976 && sym_type
!= STT_FUNC
979 /* It's common for people to write assembly and forget
980 to give function symbols the right type. Handle
981 calls to such symbols, but warn so that (hopefully)
982 people will fix their code. We need the symbol
983 type to be correct to distinguish function pointer
984 initialisation from other pointer initialisations. */
985 const char *sym_name
;
988 sym_name
= h
->root
.root
.string
;
991 Elf_Internal_Shdr
*symtab_hdr
;
992 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
993 sym_name
= bfd_elf_sym_name (input_section
->owner
,
998 (*_bfd_error_handler
) (_("warning: call to non-function"
999 " symbol %s defined in %B"),
1000 sym_sec
->owner
, sym_name
);
/* Soft-icache handles only branches via stubs; plain data refs to
   non-code need no stub.  */
1006 if ((!branch
&& htab
->params
->ovly_flavour
== ovly_soft_icache
)
1007 || (sym_type
!= STT_FUNC
1008 && !(branch
|| hint
)
1009 && (sym_sec
->flags
& SEC_CODE
) == 0))
1012 /* Usually, symbols in non-overlay sections don't need stubs. */
1013 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
1014 && !htab
->params
->non_overlay_stubs
)
1017 /* A reference from some other section to a symbol in an overlay
1018 section needs a stub. */
1019 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
1020 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
1022 unsigned int lrlive
= 0;
/* lrlive bits come from the .brinfo-annotated branch instruction.  */
1024 lrlive
= (contents
[1] & 0x70) >> 4;
1026 if (!lrlive
&& (call
|| sym_type
== STT_FUNC
))
1027 ret
= call_ovl_stub
;
1029 ret
= br000_ovl_stub
+ lrlive
;
1032 /* If this insn isn't a branch then we are possibly taking the
1033 address of a function and passing it out somehow. Soft-icache code
1034 always generates inline code to do indirect branches. */
1035 if (!(branch
|| hint
)
1036 && sym_type
== STT_FUNC
1037 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
/* Record that a stub is needed, bumping the per-overlay stub counts.
   Stub bookkeeping lives in the (hijacked) got_entry lists: globals on
   h->got.glist, locals on elf_local_got_ents indexed by symbol.  A
   non-overlay (ovl==0) stub supersedes per-overlay stubs for the same
   addend.  NOTE(review): multiple declarations, returns, and list
   manipulation lines are missing from this extract.  */
1044 count_stub (struct spu_link_hash_table
*htab
,
1047 enum _stub_type stub_type
,
1048 struct elf_link_hash_entry
*h
,
1049 const Elf_Internal_Rela
*irela
)
1051 unsigned int ovl
= 0;
1052 struct got_entry
*g
, **head
;
1055 /* If this instruction is a branch or call, we need a stub
1056 for it. One stub per function per overlay.
1057 If it isn't a branch, then we are taking the address of
1058 this function so need a stub in the non-overlay area
1059 for it. One stub per function. */
1060 if (stub_type
!= nonovl_stub
)
1061 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
/* Global symbols track stubs on the hash entry; locals on a lazily
   allocated per-bfd array.  */
1064 head
= &h
->got
.glist
;
1067 if (elf_local_got_ents (ibfd
) == NULL
)
1069 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
1070 * sizeof (*elf_local_got_ents (ibfd
)));
1071 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
1072 if (elf_local_got_ents (ibfd
) == NULL
)
1075 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
/* Soft-icache: every branch gets its own stub; just count it.  */
1078 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1080 htab
->stub_count
[ovl
] += 1;
1086 addend
= irela
->r_addend
;
/* ovl == 0: requesting a non-overlay stub.  Reuse an existing one if
   present, else remove per-overlay stubs for this addend (adjusting
   counts) since the non-overlay stub covers them.  */
1090 struct got_entry
*gnext
;
1092 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1093 if (g
->addend
== addend
&& g
->ovl
== 0)
1098 /* Need a new non-overlay area stub. Zap other stubs. */
1099 for (g
= *head
; g
!= NULL
; g
= gnext
)
1102 if (g
->addend
== addend
)
1104 htab
->stub_count
[g
->ovl
] -= 1;
/* Per-overlay stub: reuse a matching per-overlay or non-overlay one.  */
1112 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1113 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
/* No match: allocate and link a fresh got_entry.  */
1119 g
= bfd_malloc (sizeof *g
);
1124 g
->stub_addr
= (bfd_vma
) -1;
1128 htab
->stub_count
[ovl
] += 1;
1134 /* Support two sizes of overlay stubs, a slower more compact stub of two
1135 intructions, and a faster stub of four instructions.
1136 Soft-icache stubs are four or eight words. */
/* Stub size in bytes: base 16, doubled for soft-icache flavour, halved
   for compact stubs.  Note `16 << a >> b` relies on left-to-right
   evaluation of the shifts.  */
1139 ovl_stub_size (struct spu_elf_params
*params
)
1141 return 16 << params
->ovly_flavour
>> params
->compact_stub
;
/* log2 of the above, kept in sync with ovl_stub_size.  */
1145 ovl_stub_size_log2 (struct spu_elf_params
*params
)
1147 return 4 + params
->ovly_flavour
- params
->compact_stub
;
1150 /* Two instruction overlay stubs look like:
1152 brsl $75,__ovly_load
1153 .word target_ovl_and_address
1155 ovl_and_address is a word with the overlay number in the top 14 bits
1156 and local store address in the bottom 18 bits.
1158 Four instruction overlay stubs look like:
1162 ila $79,target_address
1165 Software icache stubs are:
1169 .word lrlive_branchlocalstoreaddr;
1170 brasl $75,__icache_br_handler
1175 build_stub (struct bfd_link_info
*info
,
1178 enum _stub_type stub_type
,
1179 struct elf_link_hash_entry
*h
,
1180 const Elf_Internal_Rela
*irela
,
1184 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1185 unsigned int ovl
, dest_ovl
, set_id
;
1186 struct got_entry
*g
, **head
;
1188 bfd_vma addend
, from
, to
, br_dest
, patt
;
1189 unsigned int lrlive
;
1192 if (stub_type
!= nonovl_stub
)
1193 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1196 head
= &h
->got
.glist
;
1198 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1202 addend
= irela
->r_addend
;
1204 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1206 g
= bfd_malloc (sizeof *g
);
1212 g
->br_addr
= (irela
->r_offset
1213 + isec
->output_offset
1214 + isec
->output_section
->vma
);
1220 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1221 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1226 if (g
->ovl
== 0 && ovl
!= 0)
1229 if (g
->stub_addr
!= (bfd_vma
) -1)
1233 sec
= htab
->stub_sec
[ovl
];
1234 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1235 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1236 g
->stub_addr
= from
;
1237 to
= (htab
->ovly_entry
[0]->root
.u
.def
.value
1238 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_offset
1239 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_section
->vma
);
1241 if (((dest
| to
| from
) & 3) != 0)
1246 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1248 if (htab
->params
->ovly_flavour
== ovly_normal
1249 && !htab
->params
->compact_stub
)
1251 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1252 sec
->contents
+ sec
->size
);
1253 bfd_put_32 (sec
->owner
, LNOP
,
1254 sec
->contents
+ sec
->size
+ 4);
1255 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1256 sec
->contents
+ sec
->size
+ 8);
1258 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1259 sec
->contents
+ sec
->size
+ 12);
1261 bfd_put_32 (sec
->owner
, BRA
+ ((to
<< 5) & 0x007fff80),
1262 sec
->contents
+ sec
->size
+ 12);
1264 else if (htab
->params
->ovly_flavour
== ovly_normal
1265 && htab
->params
->compact_stub
)
1268 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1269 sec
->contents
+ sec
->size
);
1271 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1272 sec
->contents
+ sec
->size
);
1273 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1274 sec
->contents
+ sec
->size
+ 4);
1276 else if (htab
->params
->ovly_flavour
== ovly_soft_icache
1277 && htab
->params
->compact_stub
)
1280 if (stub_type
== nonovl_stub
)
1282 else if (stub_type
== call_ovl_stub
)
1283 /* A brsl makes lr live and *(*sp+16) is live.
1284 Tail calls have the same liveness. */
1286 else if (!htab
->params
->lrlive_analysis
)
1287 /* Assume stack frame and lr save. */
1289 else if (irela
!= NULL
)
1291 /* Analyse branch instructions. */
1292 struct function_info
*caller
;
1295 caller
= find_function (isec
, irela
->r_offset
, info
);
1296 if (caller
->start
== NULL
)
1297 off
= irela
->r_offset
;
1300 struct function_info
*found
= NULL
;
1302 /* Find the earliest piece of this function that
1303 has frame adjusting instructions. We might
1304 see dynamic frame adjustment (eg. for alloca)
1305 in some later piece, but functions using
1306 alloca always set up a frame earlier. Frame
1307 setup instructions are always in one piece. */
1308 if (caller
->lr_store
!= (bfd_vma
) -1
1309 || caller
->sp_adjust
!= (bfd_vma
) -1)
1311 while (caller
->start
!= NULL
)
1313 caller
= caller
->start
;
1314 if (caller
->lr_store
!= (bfd_vma
) -1
1315 || caller
->sp_adjust
!= (bfd_vma
) -1)
1323 if (off
> caller
->sp_adjust
)
1325 if (off
> caller
->lr_store
)
1326 /* Only *(*sp+16) is live. */
1329 /* If no lr save, then we must be in a
1330 leaf function with a frame.
1331 lr is still live. */
1334 else if (off
> caller
->lr_store
)
1336 /* Between lr save and stack adjust. */
1338 /* This should never happen since prologues won't
1343 /* On entry to function. */
1346 if (stub_type
!= br000_ovl_stub
1347 && lrlive
!= stub_type
- br000_ovl_stub
)
1348 info
->callbacks
->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1349 "from analysis (%u)\n"),
1350 isec
, irela
->r_offset
, lrlive
,
1351 stub_type
- br000_ovl_stub
);
1354 /* If given lrlive info via .brinfo, use it. */
1355 if (stub_type
> br000_ovl_stub
)
1356 lrlive
= stub_type
- br000_ovl_stub
;
1359 to
= (htab
->ovly_entry
[1]->root
.u
.def
.value
1360 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_offset
1361 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_section
->vma
);
1363 /* The branch that uses this stub goes to stub_addr + 4. We'll
1364 set up an xor pattern that can be used by the icache manager
1365 to modify this branch to go directly to its destination. */
1367 br_dest
= g
->stub_addr
;
1370 /* Except in the case of _SPUEAR_ stubs, the branch in
1371 question is the one in the stub itself. */
1372 BFD_ASSERT (stub_type
== nonovl_stub
);
1373 g
->br_addr
= g
->stub_addr
;
1377 set_id
= ((dest_ovl
- 1) >> htab
->num_lines_log2
) + 1;
1378 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1379 sec
->contents
+ sec
->size
);
1380 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1381 sec
->contents
+ sec
->size
+ 4);
1382 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1383 sec
->contents
+ sec
->size
+ 8);
1384 patt
= dest
^ br_dest
;
1385 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1386 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1387 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1388 sec
->contents
+ sec
->size
+ 12);
1391 /* Extra space for linked list entries. */
1397 sec
->size
+= ovl_stub_size (htab
->params
);
1399 if (htab
->params
->emit_stub_syms
)
1405 len
= 8 + sizeof (".ovl_call.") - 1;
1407 len
+= strlen (h
->root
.root
.string
);
1412 add
= (int) irela
->r_addend
& 0xffffffff;
1415 name
= bfd_malloc (len
);
1419 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1421 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1423 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1424 dest_sec
->id
& 0xffffffff,
1425 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1427 sprintf (name
+ len
- 9, "+%x", add
);
1429 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1433 if (h
->root
.type
== bfd_link_hash_new
)
1435 h
->root
.type
= bfd_link_hash_defined
;
1436 h
->root
.u
.def
.section
= sec
;
1437 h
->size
= ovl_stub_size (htab
->params
);
1438 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1442 h
->ref_regular_nonweak
= 1;
1443 h
->forced_local
= 1;
1451 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1455 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1457 /* Symbols starting with _SPUEAR_ need a stub because they may be
1458 invoked by the PPU. */
1459 struct bfd_link_info
*info
= inf
;
1460 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1463 if ((h
->root
.type
== bfd_link_hash_defined
1464 || h
->root
.type
== bfd_link_hash_defweak
)
1466 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1467 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1468 && sym_sec
->output_section
!= bfd_abs_section_ptr
1469 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1470 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1471 || htab
->params
->non_overlay_stubs
))
1473 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1480 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1482 /* Symbols starting with _SPUEAR_ need a stub because they may be
1483 invoked by the PPU. */
1484 struct bfd_link_info
*info
= inf
;
1485 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1488 if ((h
->root
.type
== bfd_link_hash_defined
1489 || h
->root
.type
== bfd_link_hash_defweak
)
1491 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1492 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1493 && sym_sec
->output_section
!= bfd_abs_section_ptr
1494 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1495 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1496 || htab
->params
->non_overlay_stubs
))
1498 return build_stub (info
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1499 h
->root
.u
.def
.value
, sym_sec
);
1505 /* Size or build stubs. */
1508 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1510 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1513 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1515 extern const bfd_target bfd_elf32_spu_vec
;
1516 Elf_Internal_Shdr
*symtab_hdr
;
1518 Elf_Internal_Sym
*local_syms
= NULL
;
1520 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1523 /* We'll need the symbol table in a second. */
1524 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1525 if (symtab_hdr
->sh_info
== 0)
1528 /* Walk over each section attached to the input bfd. */
1529 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1531 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1533 /* If there aren't any relocs, then there's nothing more to do. */
1534 if ((isec
->flags
& SEC_RELOC
) == 0
1535 || isec
->reloc_count
== 0)
1538 if (!maybe_needs_stubs (isec
))
1541 /* Get the relocs. */
1542 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1544 if (internal_relocs
== NULL
)
1545 goto error_ret_free_local
;
1547 /* Now examine each relocation. */
1548 irela
= internal_relocs
;
1549 irelaend
= irela
+ isec
->reloc_count
;
1550 for (; irela
< irelaend
; irela
++)
1552 enum elf_spu_reloc_type r_type
;
1553 unsigned int r_indx
;
1555 Elf_Internal_Sym
*sym
;
1556 struct elf_link_hash_entry
*h
;
1557 enum _stub_type stub_type
;
1559 r_type
= ELF32_R_TYPE (irela
->r_info
);
1560 r_indx
= ELF32_R_SYM (irela
->r_info
);
1562 if (r_type
>= R_SPU_max
)
1564 bfd_set_error (bfd_error_bad_value
);
1565 error_ret_free_internal
:
1566 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1567 free (internal_relocs
);
1568 error_ret_free_local
:
1569 if (local_syms
!= NULL
1570 && (symtab_hdr
->contents
1571 != (unsigned char *) local_syms
))
1576 /* Determine the reloc target section. */
1577 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1578 goto error_ret_free_internal
;
1580 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1582 if (stub_type
== no_stub
)
1584 else if (stub_type
== stub_error
)
1585 goto error_ret_free_internal
;
1587 if (htab
->stub_count
== NULL
)
1590 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1591 htab
->stub_count
= bfd_zmalloc (amt
);
1592 if (htab
->stub_count
== NULL
)
1593 goto error_ret_free_internal
;
1598 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1599 goto error_ret_free_internal
;
1606 dest
= h
->root
.u
.def
.value
;
1608 dest
= sym
->st_value
;
1609 dest
+= irela
->r_addend
;
1610 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1612 goto error_ret_free_internal
;
1616 /* We're done with the internal relocs, free them. */
1617 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1618 free (internal_relocs
);
1621 if (local_syms
!= NULL
1622 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1624 if (!info
->keep_memory
)
1627 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1634 /* Allocate space for overlay call and return stubs.
1635 Return 0 on error, 1 if no overlays, 2 otherwise. */
1638 spu_elf_size_stubs (struct bfd_link_info
*info
)
1640 struct spu_link_hash_table
*htab
;
1647 if (!process_stubs (info
, FALSE
))
1650 htab
= spu_hash_table (info
);
1651 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1655 ibfd
= info
->input_bfds
;
1656 if (htab
->stub_count
!= NULL
)
1658 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1659 htab
->stub_sec
= bfd_zmalloc (amt
);
1660 if (htab
->stub_sec
== NULL
)
1663 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1664 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1665 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1666 htab
->stub_sec
[0] = stub
;
1668 || !bfd_set_section_alignment (ibfd
, stub
,
1669 ovl_stub_size_log2 (htab
->params
)))
1671 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
);
1672 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1673 /* Extra space for linked list entries. */
1674 stub
->size
+= htab
->stub_count
[0] * 16;
1676 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1678 asection
*osec
= htab
->ovl_sec
[i
];
1679 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1680 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1681 htab
->stub_sec
[ovl
] = stub
;
1683 || !bfd_set_section_alignment (ibfd
, stub
,
1684 ovl_stub_size_log2 (htab
->params
)))
1686 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
);
1690 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1692 /* Space for icache manager tables.
1693 a) Tag array, one quadword per cache line.
1694 b) Rewrite "to" list, one quadword per cache line.
1695 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1696 a power-of-two number of full quadwords) per cache line. */
1699 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1700 if (htab
->ovtab
== NULL
1701 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1704 htab
->ovtab
->size
= (16 + 16 + (16 << htab
->fromelem_size_log2
))
1705 << htab
->num_lines_log2
;
1707 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1708 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1709 if (htab
->init
== NULL
1710 || !bfd_set_section_alignment (ibfd
, htab
->init
, 4))
1713 htab
->init
->size
= 16;
1715 else if (htab
->stub_count
== NULL
)
1719 /* htab->ovtab consists of two arrays.
1729 . } _ovly_buf_table[];
1732 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1733 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1734 if (htab
->ovtab
== NULL
1735 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1738 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1741 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1742 if (htab
->toe
== NULL
1743 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1745 htab
->toe
->size
= 16;
1750 /* Called from ld to place overlay manager data sections. This is done
1751 after the overlay manager itself is loaded, mainly so that the
1752 linker's htab->init section is placed after any other .ovl.init
1756 spu_elf_place_overlay_data (struct bfd_link_info
*info
)
1758 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1761 if (htab
->stub_sec
!= NULL
)
1763 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[0], NULL
, ".text");
1765 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1767 asection
*osec
= htab
->ovl_sec
[i
];
1768 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1769 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[ovl
], osec
, NULL
);
1773 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1774 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1776 if (htab
->ovtab
!= NULL
)
1778 const char *ovout
= ".data";
1779 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1781 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1784 if (htab
->toe
!= NULL
)
1785 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1788 /* Functions to handle embedded spu_ovl.o object. */
1791 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1797 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1803 struct _ovl_stream
*os
;
1807 os
= (struct _ovl_stream
*) stream
;
1808 max
= (const char *) os
->end
- (const char *) os
->start
;
1810 if ((ufile_ptr
) offset
>= max
)
1814 if (count
> max
- offset
)
1815 count
= max
- offset
;
1817 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1822 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1824 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1831 return *ovl_bfd
!= NULL
;
1835 overlay_index (asection
*sec
)
1838 || sec
->output_section
== bfd_abs_section_ptr
)
1840 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1843 /* Define an STT_OBJECT symbol. */
1845 static struct elf_link_hash_entry
*
1846 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1848 struct elf_link_hash_entry
*h
;
1850 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1854 if (h
->root
.type
!= bfd_link_hash_defined
1857 h
->root
.type
= bfd_link_hash_defined
;
1858 h
->root
.u
.def
.section
= htab
->ovtab
;
1859 h
->type
= STT_OBJECT
;
1862 h
->ref_regular_nonweak
= 1;
1865 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1867 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1868 h
->root
.u
.def
.section
->owner
,
1869 h
->root
.root
.string
);
1870 bfd_set_error (bfd_error_bad_value
);
1875 (*_bfd_error_handler
) (_("you are not allowed to define %s in a script"),
1876 h
->root
.root
.string
);
1877 bfd_set_error (bfd_error_bad_value
);
1884 /* Fill in all stubs and the overlay tables. */
1887 spu_elf_build_stubs (struct bfd_link_info
*info
)
1889 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1890 struct elf_link_hash_entry
*h
;
1896 if (htab
->num_overlays
!= 0)
1898 for (i
= 0; i
< 2; i
++)
1900 h
= htab
->ovly_entry
[i
];
1902 && (h
->root
.type
== bfd_link_hash_defined
1903 || h
->root
.type
== bfd_link_hash_defweak
)
1906 s
= h
->root
.u
.def
.section
->output_section
;
1907 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1909 (*_bfd_error_handler
) (_("%s in overlay section"),
1910 h
->root
.root
.string
);
1911 bfd_set_error (bfd_error_bad_value
);
1918 if (htab
->stub_sec
!= NULL
)
1920 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1921 if (htab
->stub_sec
[i
]->size
!= 0)
1923 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1924 htab
->stub_sec
[i
]->size
);
1925 if (htab
->stub_sec
[i
]->contents
== NULL
)
1927 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1928 htab
->stub_sec
[i
]->size
= 0;
1931 /* Fill in all the stubs. */
1932 process_stubs (info
, TRUE
);
1933 if (!htab
->stub_err
)
1934 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1938 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1939 bfd_set_error (bfd_error_bad_value
);
1943 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1945 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1947 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1948 bfd_set_error (bfd_error_bad_value
);
1951 htab
->stub_sec
[i
]->rawsize
= 0;
1955 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
1958 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1959 if (htab
->ovtab
->contents
== NULL
)
1962 p
= htab
->ovtab
->contents
;
1963 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1967 h
= define_ovtab_symbol (htab
, "__icache_tag_array");
1970 h
->root
.u
.def
.value
= 0;
1971 h
->size
= 16 << htab
->num_lines_log2
;
1974 h
= define_ovtab_symbol (htab
, "__icache_tag_array_size");
1977 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
1978 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1980 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to");
1983 h
->root
.u
.def
.value
= off
;
1984 h
->size
= 16 << htab
->num_lines_log2
;
1987 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to_size");
1990 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
1991 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1993 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from");
1996 h
->root
.u
.def
.value
= off
;
1997 h
->size
= 16 << (htab
->fromelem_size_log2
+ htab
->num_lines_log2
);
2000 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from_size");
2003 h
->root
.u
.def
.value
= 16 << (htab
->fromelem_size_log2
2004 + htab
->num_lines_log2
);
2005 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2007 h
= define_ovtab_symbol (htab
, "__icache_log2_fromelemsize");
2010 h
->root
.u
.def
.value
= htab
->fromelem_size_log2
;
2011 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2013 h
= define_ovtab_symbol (htab
, "__icache_base");
2016 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
2017 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2018 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
2020 h
= define_ovtab_symbol (htab
, "__icache_linesize");
2023 h
->root
.u
.def
.value
= 1 << htab
->line_size_log2
;
2024 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2026 h
= define_ovtab_symbol (htab
, "__icache_log2_linesize");
2029 h
->root
.u
.def
.value
= htab
->line_size_log2
;
2030 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2032 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
2035 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
2036 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2038 h
= define_ovtab_symbol (htab
, "__icache_cachesize");
2041 h
->root
.u
.def
.value
= 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
);
2042 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2044 h
= define_ovtab_symbol (htab
, "__icache_log2_cachesize");
2047 h
->root
.u
.def
.value
= htab
->num_lines_log2
+ htab
->line_size_log2
;
2048 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2050 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_cachesize");
2053 h
->root
.u
.def
.value
= -(htab
->num_lines_log2
+ htab
->line_size_log2
);
2054 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2056 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
2058 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
2060 if (htab
->init
->contents
== NULL
)
2063 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
2066 h
->root
.u
.def
.value
= 0;
2067 h
->root
.u
.def
.section
= htab
->init
;
2073 /* Write out _ovly_table. */
2074 /* set low bit of .size to mark non-overlay area as present. */
2076 obfd
= htab
->ovtab
->output_section
->owner
;
2077 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
2079 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
2083 unsigned long off
= ovl_index
* 16;
2084 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
2086 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
2087 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
2089 /* file_off written later in spu_elf_modify_program_headers. */
2090 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2094 h
= define_ovtab_symbol (htab
, "_ovly_table");
2097 h
->root
.u
.def
.value
= 16;
2098 h
->size
= htab
->num_overlays
* 16;
2100 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2103 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2106 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2109 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2110 h
->size
= htab
->num_buf
* 4;
2112 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2115 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2119 h
= define_ovtab_symbol (htab
, "_EAR_");
2122 h
->root
.u
.def
.section
= htab
->toe
;
2123 h
->root
.u
.def
.value
= 0;
2129 /* Check that all loadable section VMAs lie in the range
2130 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2133 spu_elf_check_vma (struct bfd_link_info
*info
)
2135 struct elf_segment_map
*m
;
2137 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2138 bfd
*abfd
= info
->output_bfd
;
2139 bfd_vma hi
= htab
->params
->local_store_hi
;
2140 bfd_vma lo
= htab
->params
->local_store_lo
;
2142 htab
->local_store
= hi
+ 1 - lo
;
2144 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
2145 if (m
->p_type
== PT_LOAD
)
2146 for (i
= 0; i
< m
->count
; i
++)
2147 if (m
->sections
[i
]->size
!= 0
2148 && (m
->sections
[i
]->vma
< lo
2149 || m
->sections
[i
]->vma
> hi
2150 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2151 return m
->sections
[i
];
2156 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2157 Search for stack adjusting insns, and return the sp delta.
2158 If a store of lr is found save the instruction offset to *LR_STORE.
2159 If a stack adjusting instruction is found, save that offset to
2163 find_function_stack_adjust (asection
*sec
,
2170 memset (reg
, 0, sizeof (reg
));
2171 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
2173 unsigned char buf
[4];
2177 /* Assume no relocs on stack adjusing insns. */
2178 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
2182 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
2184 if (buf
[0] == 0x24 /* stqd */)
2186 if (rt
== 0 /* lr */ && ra
== 1 /* sp */)
2191 /* Partly decoded immediate field. */
2192 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
2194 if (buf
[0] == 0x1c /* ai */)
2197 imm
= (imm
^ 0x200) - 0x200;
2198 reg
[rt
] = reg
[ra
] + imm
;
2200 if (rt
== 1 /* sp */)
2204 *sp_adjust
= offset
;
2208 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
2210 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2212 reg
[rt
] = reg
[ra
] + reg
[rb
];
2217 *sp_adjust
= offset
;
2221 else if (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */)
2223 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2225 reg
[rt
] = reg
[rb
] - reg
[ra
];
2230 *sp_adjust
= offset
;
2234 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2236 if (buf
[0] >= 0x42 /* ila */)
2237 imm
|= (buf
[0] & 1) << 17;
2242 if (buf
[0] == 0x40 /* il */)
2244 if ((buf
[1] & 0x80) == 0)
2246 imm
= (imm
^ 0x8000) - 0x8000;
2248 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
2254 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
2256 reg
[rt
] |= imm
& 0xffff;
2259 else if (buf
[0] == 0x04 /* ori */)
2262 imm
= (imm
^ 0x200) - 0x200;
2263 reg
[rt
] = reg
[ra
] | imm
;
2266 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
2268 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
2269 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
2270 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
2271 | ((imm
& 0x1000) ? 0x000000ff : 0));
2274 else if (buf
[0] == 0x16 /* andbi */)
2280 reg
[rt
] = reg
[ra
] & imm
;
2283 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
2285 /* Used in pic reg load. Say rt is trashed. Won't be used
2286 in stack adjust, but we need to continue past this branch. */
2290 else if (is_branch (buf
) || is_indirect_branch (buf
))
2291 /* If we hit a branch then we must be out of the prologue. */
2298 /* qsort predicate to sort symbols by section and value. */
2300 static Elf_Internal_Sym
*sort_syms_syms
;
2301 static asection
**sort_syms_psecs
;
2304 sort_syms (const void *a
, const void *b
)
2306 Elf_Internal_Sym
*const *s1
= a
;
2307 Elf_Internal_Sym
*const *s2
= b
;
2308 asection
*sec1
,*sec2
;
2309 bfd_signed_vma delta
;
2311 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
2312 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
2315 return sec1
->index
- sec2
->index
;
2317 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
2319 return delta
< 0 ? -1 : 1;
2321 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
2323 return delta
< 0 ? -1 : 1;
2325 return *s1
< *s2
? -1 : 1;
2328 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2329 entries for section SEC. */
2331 static struct spu_elf_stack_info
*
2332 alloc_stack_info (asection
*sec
, int max_fun
)
2334 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2337 amt
= sizeof (struct spu_elf_stack_info
);
2338 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
2339 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
2340 if (sec_data
->u
.i
.stack_info
!= NULL
)
2341 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
2342 return sec_data
->u
.i
.stack_info
;
2345 /* Add a new struct function_info describing a (part of a) function
2346 starting at SYM_H. Keep the array sorted by address. */
2348 static struct function_info
*
2349 maybe_insert_function (asection
*sec
,
2352 bfd_boolean is_func
)
2354 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2355 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2361 sinfo
= alloc_stack_info (sec
, 20);
2368 Elf_Internal_Sym
*sym
= sym_h
;
2369 off
= sym
->st_value
;
2370 size
= sym
->st_size
;
2374 struct elf_link_hash_entry
*h
= sym_h
;
2375 off
= h
->root
.u
.def
.value
;
2379 for (i
= sinfo
->num_fun
; --i
>= 0; )
2380 if (sinfo
->fun
[i
].lo
<= off
)
2385 /* Don't add another entry for an alias, but do update some
2387 if (sinfo
->fun
[i
].lo
== off
)
2389 /* Prefer globals over local syms. */
2390 if (global
&& !sinfo
->fun
[i
].global
)
2392 sinfo
->fun
[i
].global
= TRUE
;
2393 sinfo
->fun
[i
].u
.h
= sym_h
;
2396 sinfo
->fun
[i
].is_func
= TRUE
;
2397 return &sinfo
->fun
[i
];
2399 /* Ignore a zero-size symbol inside an existing function. */
2400 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
2401 return &sinfo
->fun
[i
];
2404 if (sinfo
->num_fun
>= sinfo
->max_fun
)
2406 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
2407 bfd_size_type old
= amt
;
2409 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2410 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
2411 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2412 sinfo
= bfd_realloc (sinfo
, amt
);
2415 memset ((char *) sinfo
+ old
, 0, amt
- old
);
2416 sec_data
->u
.i
.stack_info
= sinfo
;
2419 if (++i
< sinfo
->num_fun
)
2420 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
2421 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
2422 sinfo
->fun
[i
].is_func
= is_func
;
2423 sinfo
->fun
[i
].global
= global
;
2424 sinfo
->fun
[i
].sec
= sec
;
2426 sinfo
->fun
[i
].u
.h
= sym_h
;
2428 sinfo
->fun
[i
].u
.sym
= sym_h
;
2429 sinfo
->fun
[i
].lo
= off
;
2430 sinfo
->fun
[i
].hi
= off
+ size
;
2431 sinfo
->fun
[i
].lr_store
= -1;
2432 sinfo
->fun
[i
].sp_adjust
= -1;
2433 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
,
2434 &sinfo
->fun
[i
].lr_store
,
2435 &sinfo
->fun
[i
].sp_adjust
);
2436 sinfo
->num_fun
+= 1;
2437 return &sinfo
->fun
[i
];
2440 /* Return the name of FUN. */
2443 func_name (struct function_info
*fun
)
2447 Elf_Internal_Shdr
*symtab_hdr
;
2449 while (fun
->start
!= NULL
)
2453 return fun
->u
.h
->root
.root
.string
;
2456 if (fun
->u
.sym
->st_name
== 0)
2458 size_t len
= strlen (sec
->name
);
2459 char *name
= bfd_malloc (len
+ 10);
2462 sprintf (name
, "%s+%lx", sec
->name
,
2463 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
2467 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2468 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
2471 /* Read the instruction at OFF in SEC. Return true iff the instruction
2472 is a nop, lnop, or stop 0 (all zero insn). */
2475 is_nop (asection
*sec
, bfd_vma off
)
2477 unsigned char insn
[4];
2479 if (off
+ 4 > sec
->size
2480 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
2482 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
2484 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
2489 /* Extend the range of FUN to cover nop padding up to LIMIT.
2490 Return TRUE iff some instruction other than a NOP was found. */
2493 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
2495 bfd_vma off
= (fun
->hi
+ 3) & -4;
2497 while (off
< limit
&& is_nop (fun
->sec
, off
))
2508 /* Check and fix overlapping function ranges. Return TRUE iff there
2509 are gaps in the current info we have about functions in SEC. */
2512 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
2514 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2515 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2517 bfd_boolean gaps
= FALSE
;
2522 for (i
= 1; i
< sinfo
->num_fun
; i
++)
2523 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
2525 /* Fix overlapping symbols. */
2526 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
2527 const char *f2
= func_name (&sinfo
->fun
[i
]);
2529 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
2530 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
2532 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2535 if (sinfo
->num_fun
== 0)
2539 if (sinfo
->fun
[0].lo
!= 0)
2541 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2543 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2545 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2546 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2548 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2554 /* Search current function info for a function that contains address
2555 OFFSET in section SEC. */
2557 static struct function_info
*
2558 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2560 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2561 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2565 hi
= sinfo
->num_fun
;
2568 mid
= (lo
+ hi
) / 2;
2569 if (offset
< sinfo
->fun
[mid
].lo
)
2571 else if (offset
>= sinfo
->fun
[mid
].hi
)
2574 return &sinfo
->fun
[mid
];
2576 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2578 bfd_set_error (bfd_error_bad_value
);
2582 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2583 if CALLEE was new. If this function return FALSE, CALLEE should
2587 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2589 struct call_info
**pp
, *p
;
2591 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2592 if (p
->fun
== callee
->fun
)
2594 /* Tail calls use less stack than normal calls. Retain entry
2595 for normal call over one for tail call. */
2596 p
->is_tail
&= callee
->is_tail
;
2599 p
->fun
->start
= NULL
;
2600 p
->fun
->is_func
= TRUE
;
2602 p
->count
+= callee
->count
;
2603 /* Reorder list so most recent call is first. */
2605 p
->next
= caller
->call_list
;
2606 caller
->call_list
= p
;
2609 callee
->next
= caller
->call_list
;
2610 caller
->call_list
= callee
;
2614 /* Copy CALL and insert the copy into CALLER. */
2617 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2619 struct call_info
*callee
;
2620 callee
= bfd_malloc (sizeof (*callee
));
2624 if (!insert_callee (caller
, callee
))
2629 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2630 overlay stub sections. */
2633 interesting_section (asection
*s
)
2635 return (s
->output_section
!= bfd_abs_section_ptr
2636 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2637 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2641 /* Rummage through the relocs for SEC, looking for function calls.
2642 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2643 mark destination symbols on calls as being functions. Also
2644 look at branches, which may be tail calls or go to hot/cold
2645 section part of same function. */
2648 mark_functions_via_relocs (asection
*sec
,
2649 struct bfd_link_info
*info
,
2652 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2653 Elf_Internal_Shdr
*symtab_hdr
;
2655 unsigned int priority
= 0;
2656 static bfd_boolean warned
;
2658 if (!interesting_section (sec
)
2659 || sec
->reloc_count
== 0)
2662 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2664 if (internal_relocs
== NULL
)
2667 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2668 psyms
= &symtab_hdr
->contents
;
2669 irela
= internal_relocs
;
2670 irelaend
= irela
+ sec
->reloc_count
;
2671 for (; irela
< irelaend
; irela
++)
2673 enum elf_spu_reloc_type r_type
;
2674 unsigned int r_indx
;
2676 Elf_Internal_Sym
*sym
;
2677 struct elf_link_hash_entry
*h
;
2679 bfd_boolean reject
, is_call
;
2680 struct function_info
*caller
;
2681 struct call_info
*callee
;
2684 r_type
= ELF32_R_TYPE (irela
->r_info
);
2685 if (r_type
!= R_SPU_REL16
2686 && r_type
!= R_SPU_ADDR16
)
2689 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
))
2693 r_indx
= ELF32_R_SYM (irela
->r_info
);
2694 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2698 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2704 unsigned char insn
[4];
2706 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2707 irela
->r_offset
, 4))
2709 if (is_branch (insn
))
2711 is_call
= (insn
[0] & 0xfd) == 0x31;
2712 priority
= insn
[1] & 0x0f;
2714 priority
|= insn
[2];
2716 priority
|= insn
[3];
2718 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2719 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2722 info
->callbacks
->einfo
2723 (_("%B(%A+0x%v): call to non-code section"
2724 " %B(%A), analysis incomplete\n"),
2725 sec
->owner
, sec
, irela
->r_offset
,
2726 sym_sec
->owner
, sym_sec
);
2734 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2742 /* For --auto-overlay, count possible stubs we need for
2743 function pointer references. */
2744 unsigned int sym_type
;
2748 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2749 if (sym_type
== STT_FUNC
)
2750 spu_hash_table (info
)->non_ovly_stub
+= 1;
2755 val
= h
->root
.u
.def
.value
;
2757 val
= sym
->st_value
;
2758 val
+= irela
->r_addend
;
2762 struct function_info
*fun
;
2764 if (irela
->r_addend
!= 0)
2766 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2769 fake
->st_value
= val
;
2771 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2775 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2777 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2780 if (irela
->r_addend
!= 0
2781 && fun
->u
.sym
!= sym
)
2786 caller
= find_function (sec
, irela
->r_offset
, info
);
2789 callee
= bfd_malloc (sizeof *callee
);
2793 callee
->fun
= find_function (sym_sec
, val
, info
);
2794 if (callee
->fun
== NULL
)
2796 callee
->is_tail
= !is_call
;
2797 callee
->is_pasted
= FALSE
;
2798 callee
->broken_cycle
= FALSE
;
2799 callee
->priority
= priority
;
2801 if (callee
->fun
->last_caller
!= sec
)
2803 callee
->fun
->last_caller
= sec
;
2804 callee
->fun
->call_count
+= 1;
2806 if (!insert_callee (caller
, callee
))
2809 && !callee
->fun
->is_func
2810 && callee
->fun
->stack
== 0)
2812 /* This is either a tail call or a branch from one part of
2813 the function to another, ie. hot/cold section. If the
2814 destination has been called by some other function then
2815 it is a separate function. We also assume that functions
2816 are not split across input files. */
2817 if (sec
->owner
!= sym_sec
->owner
)
2819 callee
->fun
->start
= NULL
;
2820 callee
->fun
->is_func
= TRUE
;
2822 else if (callee
->fun
->start
== NULL
)
2824 struct function_info
*caller_start
= caller
;
2825 while (caller_start
->start
)
2826 caller_start
= caller_start
->start
;
2828 if (caller_start
!= callee
->fun
)
2829 callee
->fun
->start
= caller_start
;
2833 struct function_info
*callee_start
;
2834 struct function_info
*caller_start
;
2835 callee_start
= callee
->fun
;
2836 while (callee_start
->start
)
2837 callee_start
= callee_start
->start
;
2838 caller_start
= caller
;
2839 while (caller_start
->start
)
2840 caller_start
= caller_start
->start
;
2841 if (caller_start
!= callee_start
)
2843 callee
->fun
->start
= NULL
;
2844 callee
->fun
->is_func
= TRUE
;
2853 /* Handle something like .init or .fini, which has a piece of a function.
2854 These sections are pasted together to form a single function. */
2857 pasted_function (asection
*sec
)
2859 struct bfd_link_order
*l
;
2860 struct _spu_elf_section_data
*sec_data
;
2861 struct spu_elf_stack_info
*sinfo
;
2862 Elf_Internal_Sym
*fake
;
2863 struct function_info
*fun
, *fun_start
;
2865 fake
= bfd_zmalloc (sizeof (*fake
));
2869 fake
->st_size
= sec
->size
;
2871 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2872 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2876 /* Find a function immediately preceding this section. */
2878 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2880 if (l
->u
.indirect
.section
== sec
)
2882 if (fun_start
!= NULL
)
2884 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2888 fun
->start
= fun_start
;
2890 callee
->is_tail
= TRUE
;
2891 callee
->is_pasted
= TRUE
;
2892 callee
->broken_cycle
= FALSE
;
2893 callee
->priority
= 0;
2895 if (!insert_callee (fun_start
, callee
))
2901 if (l
->type
== bfd_indirect_link_order
2902 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2903 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2904 && sinfo
->num_fun
!= 0)
2905 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2908 /* Don't return an error if we did not find a function preceding this
2909 section. The section may have incorrect flags. */
2913 /* Map address ranges in code sections to functions. */
2916 discover_functions (struct bfd_link_info
*info
)
2920 Elf_Internal_Sym
***psym_arr
;
2921 asection
***sec_arr
;
2922 bfd_boolean gaps
= FALSE
;
2925 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2928 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2929 if (psym_arr
== NULL
)
2931 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2932 if (sec_arr
== NULL
)
2935 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2937 ibfd
= ibfd
->link_next
, bfd_idx
++)
2939 extern const bfd_target bfd_elf32_spu_vec
;
2940 Elf_Internal_Shdr
*symtab_hdr
;
2943 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2944 asection
**psecs
, **p
;
2946 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2949 /* Read all the symbols. */
2950 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2951 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2955 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2956 if (interesting_section (sec
))
2964 if (symtab_hdr
->contents
!= NULL
)
2966 /* Don't use cached symbols since the generic ELF linker
2967 code only reads local symbols, and we need globals too. */
2968 free (symtab_hdr
->contents
);
2969 symtab_hdr
->contents
= NULL
;
2971 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2973 symtab_hdr
->contents
= (void *) syms
;
2977 /* Select defined function symbols that are going to be output. */
2978 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2981 psym_arr
[bfd_idx
] = psyms
;
2982 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2985 sec_arr
[bfd_idx
] = psecs
;
2986 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2987 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2988 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2992 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2993 if (s
!= NULL
&& interesting_section (s
))
2996 symcount
= psy
- psyms
;
2999 /* Sort them by section and offset within section. */
3000 sort_syms_syms
= syms
;
3001 sort_syms_psecs
= psecs
;
3002 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
3004 /* Now inspect the function symbols. */
3005 for (psy
= psyms
; psy
< psyms
+ symcount
; )
3007 asection
*s
= psecs
[*psy
- syms
];
3008 Elf_Internal_Sym
**psy2
;
3010 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
3011 if (psecs
[*psy2
- syms
] != s
)
3014 if (!alloc_stack_info (s
, psy2
- psy
))
3019 /* First install info about properly typed and sized functions.
3020 In an ideal world this will cover all code sections, except
3021 when partitioning functions into hot and cold sections,
3022 and the horrible pasted together .init and .fini functions. */
3023 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
3026 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3028 asection
*s
= psecs
[sy
- syms
];
3029 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
3034 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3035 if (interesting_section (sec
))
3036 gaps
|= check_function_ranges (sec
, info
);
3041 /* See if we can discover more function symbols by looking at
3043 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3045 ibfd
= ibfd
->link_next
, bfd_idx
++)
3049 if (psym_arr
[bfd_idx
] == NULL
)
3052 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3053 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
3057 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3059 ibfd
= ibfd
->link_next
, bfd_idx
++)
3061 Elf_Internal_Shdr
*symtab_hdr
;
3063 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
3066 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
3069 psecs
= sec_arr
[bfd_idx
];
3071 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3072 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
3075 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3076 if (interesting_section (sec
))
3077 gaps
|= check_function_ranges (sec
, info
);
3081 /* Finally, install all globals. */
3082 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
3086 s
= psecs
[sy
- syms
];
3088 /* Global syms might be improperly typed functions. */
3089 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
3090 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
3092 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
3098 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3100 extern const bfd_target bfd_elf32_spu_vec
;
3103 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3106 /* Some of the symbols we've installed as marking the
3107 beginning of functions may have a size of zero. Extend
3108 the range of such functions to the beginning of the
3109 next symbol of interest. */
3110 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3111 if (interesting_section (sec
))
3113 struct _spu_elf_section_data
*sec_data
;
3114 struct spu_elf_stack_info
*sinfo
;
3116 sec_data
= spu_elf_section_data (sec
);
3117 sinfo
= sec_data
->u
.i
.stack_info
;
3118 if (sinfo
!= NULL
&& sinfo
->num_fun
!= 0)
3121 bfd_vma hi
= sec
->size
;
3123 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3125 sinfo
->fun
[fun_idx
].hi
= hi
;
3126 hi
= sinfo
->fun
[fun_idx
].lo
;
3129 sinfo
->fun
[0].lo
= 0;
3131 /* No symbols in this section. Must be .init or .fini
3132 or something similar. */
3133 else if (!pasted_function (sec
))
3139 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3141 ibfd
= ibfd
->link_next
, bfd_idx
++)
3143 if (psym_arr
[bfd_idx
] == NULL
)
3146 free (psym_arr
[bfd_idx
]);
3147 free (sec_arr
[bfd_idx
]);
3156 /* Iterate over all function_info we have collected, calling DOIT on
3157 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3161 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
3162 struct bfd_link_info
*,
3164 struct bfd_link_info
*info
,
3170 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3172 extern const bfd_target bfd_elf32_spu_vec
;
3175 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3178 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3180 struct _spu_elf_section_data
*sec_data
;
3181 struct spu_elf_stack_info
*sinfo
;
3183 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3184 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3187 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3188 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
3189 if (!doit (&sinfo
->fun
[i
], info
, param
))
3197 /* Transfer call info attached to struct function_info entries for
3198 all of a given function's sections to the first entry. */
3201 transfer_calls (struct function_info
*fun
,
3202 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3203 void *param ATTRIBUTE_UNUSED
)
3205 struct function_info
*start
= fun
->start
;
3209 struct call_info
*call
, *call_next
;
3211 while (start
->start
!= NULL
)
3212 start
= start
->start
;
3213 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
3215 call_next
= call
->next
;
3216 if (!insert_callee (start
, call
))
3219 fun
->call_list
= NULL
;
3224 /* Mark nodes in the call graph that are called by some other node. */
3227 mark_non_root (struct function_info
*fun
,
3228 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3229 void *param ATTRIBUTE_UNUSED
)
3231 struct call_info
*call
;
3236 for (call
= fun
->call_list
; call
; call
= call
->next
)
3238 call
->fun
->non_root
= TRUE
;
3239 mark_non_root (call
->fun
, 0, 0);
3244 /* Remove cycles from the call graph. Set depth of nodes. */
3247 remove_cycles (struct function_info
*fun
,
3248 struct bfd_link_info
*info
,
3251 struct call_info
**callp
, *call
;
3252 unsigned int depth
= *(unsigned int *) param
;
3253 unsigned int max_depth
= depth
;
3257 fun
->marking
= TRUE
;
3259 callp
= &fun
->call_list
;
3260 while ((call
= *callp
) != NULL
)
3262 call
->max_depth
= depth
+ !call
->is_pasted
;
3263 if (!call
->fun
->visit2
)
3265 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
3267 if (max_depth
< call
->max_depth
)
3268 max_depth
= call
->max_depth
;
3270 else if (call
->fun
->marking
)
3272 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3274 if (!htab
->params
->auto_overlay
3275 && htab
->params
->stack_analysis
)
3277 const char *f1
= func_name (fun
);
3278 const char *f2
= func_name (call
->fun
);
3280 info
->callbacks
->info (_("Stack analysis will ignore the call "
3285 call
->broken_cycle
= TRUE
;
3287 callp
= &call
->next
;
3289 fun
->marking
= FALSE
;
3290 *(unsigned int *) param
= max_depth
;
3294 /* Check that we actually visited all nodes in remove_cycles. If we
3295 didn't, then there is some cycle in the call graph not attached to
3296 any root node. Arbitrarily choose a node in the cycle as a new
3297 root and break the cycle. */
3300 mark_detached_root (struct function_info
*fun
,
3301 struct bfd_link_info
*info
,
3306 fun
->non_root
= FALSE
;
3307 *(unsigned int *) param
= 0;
3308 return remove_cycles (fun
, info
, param
);
3311 /* Populate call_list for each function. */
3314 build_call_tree (struct bfd_link_info
*info
)
3319 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3321 extern const bfd_target bfd_elf32_spu_vec
;
3324 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3327 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3328 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
3332 /* Transfer call info from hot/cold section part of function
3334 if (!spu_hash_table (info
)->params
->auto_overlay
3335 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
3338 /* Find the call graph root(s). */
3339 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
3342 /* Remove cycles from the call graph. We start from the root node(s)
3343 so that we break cycles in a reasonable place. */
3345 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
3348 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
3351 /* qsort predicate to sort calls by priority, max_depth then count. */
3354 sort_calls (const void *a
, const void *b
)
3356 struct call_info
*const *c1
= a
;
3357 struct call_info
*const *c2
= b
;
3360 delta
= (*c2
)->priority
- (*c1
)->priority
;
3364 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
3368 delta
= (*c2
)->count
- (*c1
)->count
;
3372 return (char *) c1
- (char *) c2
;
3376 unsigned int max_overlay_size
;
3379 /* Set linker_mark and gc_mark on any sections that we will put in
3380 overlays. These flags are used by the generic ELF linker, but we
3381 won't be continuing on to bfd_elf_final_link so it is OK to use
3382 them. linker_mark is clear before we get here. Set segment_mark
3383 on sections that are part of a pasted function (excluding the last
3386 Set up function rodata section if --overlay-rodata. We don't
3387 currently include merged string constant rodata sections since
3389 Sort the call graph so that the deepest nodes will be visited
3393 mark_overlay_section (struct function_info
*fun
,
3394 struct bfd_link_info
*info
,
3397 struct call_info
*call
;
3399 struct _mos_param
*mos_param
= param
;
3400 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3406 if (!fun
->sec
->linker_mark
3407 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3408 || htab
->params
->non_ia_text
3409 || strncmp (fun
->sec
->name
, ".text.ia.", 9) == 0
3410 || strcmp (fun
->sec
->name
, ".init") == 0
3411 || strcmp (fun
->sec
->name
, ".fini") == 0))
3415 fun
->sec
->linker_mark
= 1;
3416 fun
->sec
->gc_mark
= 1;
3417 fun
->sec
->segment_mark
= 0;
3418 /* Ensure SEC_CODE is set on this text section (it ought to
3419 be!), and SEC_CODE is clear on rodata sections. We use
3420 this flag to differentiate the two overlay section types. */
3421 fun
->sec
->flags
|= SEC_CODE
;
3423 size
= fun
->sec
->size
;
3424 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3428 /* Find the rodata section corresponding to this function's
3430 if (strcmp (fun
->sec
->name
, ".text") == 0)
3432 name
= bfd_malloc (sizeof (".rodata"));
3435 memcpy (name
, ".rodata", sizeof (".rodata"));
3437 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
3439 size_t len
= strlen (fun
->sec
->name
);
3440 name
= bfd_malloc (len
+ 3);
3443 memcpy (name
, ".rodata", sizeof (".rodata"));
3444 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3446 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
3448 size_t len
= strlen (fun
->sec
->name
) + 1;
3449 name
= bfd_malloc (len
);
3452 memcpy (name
, fun
->sec
->name
, len
);
3458 asection
*rodata
= NULL
;
3459 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3460 if (group_sec
== NULL
)
3461 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3463 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3465 if (strcmp (group_sec
->name
, name
) == 0)
3470 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3472 fun
->rodata
= rodata
;
3475 size
+= fun
->rodata
->size
;
3476 if (htab
->params
->line_size
!= 0
3477 && size
> htab
->params
->line_size
)
3479 size
-= fun
->rodata
->size
;
3484 fun
->rodata
->linker_mark
= 1;
3485 fun
->rodata
->gc_mark
= 1;
3486 fun
->rodata
->flags
&= ~SEC_CODE
;
3492 if (mos_param
->max_overlay_size
< size
)
3493 mos_param
->max_overlay_size
= size
;
3496 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3501 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3505 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3506 calls
[count
++] = call
;
3508 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3510 fun
->call_list
= NULL
;
3514 calls
[count
]->next
= fun
->call_list
;
3515 fun
->call_list
= calls
[count
];
3520 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3522 if (call
->is_pasted
)
3524 /* There can only be one is_pasted call per function_info. */
3525 BFD_ASSERT (!fun
->sec
->segment_mark
);
3526 fun
->sec
->segment_mark
= 1;
3528 if (!call
->broken_cycle
3529 && !mark_overlay_section (call
->fun
, info
, param
))
3533 /* Don't put entry code into an overlay. The overlay manager needs
3534 a stack! Also, don't mark .ovl.init as an overlay. */
3535 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3536 == info
->output_bfd
->start_address
3537 || strncmp (fun
->sec
->output_section
->name
, ".ovl.init", 9) == 0)
3539 fun
->sec
->linker_mark
= 0;
3540 if (fun
->rodata
!= NULL
)
3541 fun
->rodata
->linker_mark
= 0;
3546 /* If non-zero then unmark functions called from those within sections
3547 that we need to unmark. Unfortunately this isn't reliable since the
3548 call graph cannot know the destination of function pointer calls. */
3549 #define RECURSE_UNMARK 0
3552 asection
*exclude_input_section
;
3553 asection
*exclude_output_section
;
3554 unsigned long clearing
;
3557 /* Undo some of mark_overlay_section's work. */
3560 unmark_overlay_section (struct function_info
*fun
,
3561 struct bfd_link_info
*info
,
3564 struct call_info
*call
;
3565 struct _uos_param
*uos_param
= param
;
3566 unsigned int excluded
= 0;
3574 if (fun
->sec
== uos_param
->exclude_input_section
3575 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3579 uos_param
->clearing
+= excluded
;
3581 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3583 fun
->sec
->linker_mark
= 0;
3585 fun
->rodata
->linker_mark
= 0;
3588 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3589 if (!call
->broken_cycle
3590 && !unmark_overlay_section (call
->fun
, info
, param
))
3594 uos_param
->clearing
-= excluded
;
3599 unsigned int lib_size
;
3600 asection
**lib_sections
;
3603 /* Add sections we have marked as belonging to overlays to an array
3604 for consideration as non-overlay sections. The array consist of
3605 pairs of sections, (text,rodata), for functions in the call graph. */
3608 collect_lib_sections (struct function_info
*fun
,
3609 struct bfd_link_info
*info
,
3612 struct _cl_param
*lib_param
= param
;
3613 struct call_info
*call
;
3620 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3623 size
= fun
->sec
->size
;
3625 size
+= fun
->rodata
->size
;
3627 if (size
<= lib_param
->lib_size
)
3629 *lib_param
->lib_sections
++ = fun
->sec
;
3630 fun
->sec
->gc_mark
= 0;
3631 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3633 *lib_param
->lib_sections
++ = fun
->rodata
;
3634 fun
->rodata
->gc_mark
= 0;
3637 *lib_param
->lib_sections
++ = NULL
;
3640 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3641 if (!call
->broken_cycle
)
3642 collect_lib_sections (call
->fun
, info
, param
);
3647 /* qsort predicate to sort sections by call count. */
3650 sort_lib (const void *a
, const void *b
)
3652 asection
*const *s1
= a
;
3653 asection
*const *s2
= b
;
3654 struct _spu_elf_section_data
*sec_data
;
3655 struct spu_elf_stack_info
*sinfo
;
3659 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3660 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3663 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3664 delta
-= sinfo
->fun
[i
].call_count
;
3667 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3668 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3671 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3672 delta
+= sinfo
->fun
[i
].call_count
;
3681 /* Remove some sections from those marked to be in overlays. Choose
3682 those that are called from many places, likely library functions. */
3685 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3688 asection
**lib_sections
;
3689 unsigned int i
, lib_count
;
3690 struct _cl_param collect_lib_param
;
3691 struct function_info dummy_caller
;
3692 struct spu_link_hash_table
*htab
;
3694 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3696 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3698 extern const bfd_target bfd_elf32_spu_vec
;
3701 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3704 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3705 if (sec
->linker_mark
3706 && sec
->size
< lib_size
3707 && (sec
->flags
& SEC_CODE
) != 0)
3710 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3711 if (lib_sections
== NULL
)
3712 return (unsigned int) -1;
3713 collect_lib_param
.lib_size
= lib_size
;
3714 collect_lib_param
.lib_sections
= lib_sections
;
3715 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3717 return (unsigned int) -1;
3718 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3720 /* Sort sections so that those with the most calls are first. */
3722 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3724 htab
= spu_hash_table (info
);
3725 for (i
= 0; i
< lib_count
; i
++)
3727 unsigned int tmp
, stub_size
;
3729 struct _spu_elf_section_data
*sec_data
;
3730 struct spu_elf_stack_info
*sinfo
;
3732 sec
= lib_sections
[2 * i
];
3733 /* If this section is OK, its size must be less than lib_size. */
3735 /* If it has a rodata section, then add that too. */
3736 if (lib_sections
[2 * i
+ 1])
3737 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3738 /* Add any new overlay call stubs needed by the section. */
3741 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3742 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3745 struct call_info
*call
;
3747 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3748 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3749 if (call
->fun
->sec
->linker_mark
)
3751 struct call_info
*p
;
3752 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3753 if (p
->fun
== call
->fun
)
3756 stub_size
+= ovl_stub_size (htab
->params
);
3759 if (tmp
+ stub_size
< lib_size
)
3761 struct call_info
**pp
, *p
;
3763 /* This section fits. Mark it as non-overlay. */
3764 lib_sections
[2 * i
]->linker_mark
= 0;
3765 if (lib_sections
[2 * i
+ 1])
3766 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3767 lib_size
-= tmp
+ stub_size
;
3768 /* Call stubs to the section we just added are no longer
3770 pp
= &dummy_caller
.call_list
;
3771 while ((p
= *pp
) != NULL
)
3772 if (!p
->fun
->sec
->linker_mark
)
3774 lib_size
+= ovl_stub_size (htab
->params
);
3780 /* Add new call stubs to dummy_caller. */
3781 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3782 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3785 struct call_info
*call
;
3787 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3788 for (call
= sinfo
->fun
[k
].call_list
;
3791 if (call
->fun
->sec
->linker_mark
)
3793 struct call_info
*callee
;
3794 callee
= bfd_malloc (sizeof (*callee
));
3796 return (unsigned int) -1;
3798 if (!insert_callee (&dummy_caller
, callee
))
3804 while (dummy_caller
.call_list
!= NULL
)
3806 struct call_info
*call
= dummy_caller
.call_list
;
3807 dummy_caller
.call_list
= call
->next
;
3810 for (i
= 0; i
< 2 * lib_count
; i
++)
3811 if (lib_sections
[i
])
3812 lib_sections
[i
]->gc_mark
= 1;
3813 free (lib_sections
);
3817 /* Build an array of overlay sections. The deepest node's section is
3818 added first, then its parent node's section, then everything called
3819 from the parent section. The idea being to group sections to
3820 minimise calls between different overlays. */
3823 collect_overlays (struct function_info
*fun
,
3824 struct bfd_link_info
*info
,
3827 struct call_info
*call
;
3828 bfd_boolean added_fun
;
3829 asection
***ovly_sections
= param
;
3835 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3836 if (!call
->is_pasted
&& !call
->broken_cycle
)
3838 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3844 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3846 fun
->sec
->gc_mark
= 0;
3847 *(*ovly_sections
)++ = fun
->sec
;
3848 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3850 fun
->rodata
->gc_mark
= 0;
3851 *(*ovly_sections
)++ = fun
->rodata
;
3854 *(*ovly_sections
)++ = NULL
;
3857 /* Pasted sections must stay with the first section. We don't
3858 put pasted sections in the array, just the first section.
3859 Mark subsequent sections as already considered. */
3860 if (fun
->sec
->segment_mark
)
3862 struct function_info
*call_fun
= fun
;
3865 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3866 if (call
->is_pasted
)
3868 call_fun
= call
->fun
;
3869 call_fun
->sec
->gc_mark
= 0;
3870 if (call_fun
->rodata
)
3871 call_fun
->rodata
->gc_mark
= 0;
3877 while (call_fun
->sec
->segment_mark
);
3881 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3882 if (!call
->broken_cycle
3883 && !collect_overlays (call
->fun
, info
, ovly_sections
))
3888 struct _spu_elf_section_data
*sec_data
;
3889 struct spu_elf_stack_info
*sinfo
;
3891 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3892 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3895 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3896 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3904 struct _sum_stack_param
{
3906 size_t overall_stack
;
3907 bfd_boolean emit_stack_syms
;
3910 /* Descend the call graph for FUN, accumulating total stack required. */
3913 sum_stack (struct function_info
*fun
,
3914 struct bfd_link_info
*info
,
3917 struct call_info
*call
;
3918 struct function_info
*max
;
3919 size_t stack
, cum_stack
;
3921 bfd_boolean has_call
;
3922 struct _sum_stack_param
*sum_stack_param
= param
;
3923 struct spu_link_hash_table
*htab
;
3925 cum_stack
= fun
->stack
;
3926 sum_stack_param
->cum_stack
= cum_stack
;
3932 for (call
= fun
->call_list
; call
; call
= call
->next
)
3934 if (call
->broken_cycle
)
3936 if (!call
->is_pasted
)
3938 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3940 stack
= sum_stack_param
->cum_stack
;
3941 /* Include caller stack for normal calls, don't do so for
3942 tail calls. fun->stack here is local stack usage for
3944 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3945 stack
+= fun
->stack
;
3946 if (cum_stack
< stack
)
3953 sum_stack_param
->cum_stack
= cum_stack
;
3955 /* Now fun->stack holds cumulative stack. */
3956 fun
->stack
= cum_stack
;
3960 && sum_stack_param
->overall_stack
< cum_stack
)
3961 sum_stack_param
->overall_stack
= cum_stack
;
3963 htab
= spu_hash_table (info
);
3964 if (htab
->params
->auto_overlay
)
3967 f1
= func_name (fun
);
3968 if (htab
->params
->stack_analysis
)
3971 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3972 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3973 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3977 info
->callbacks
->minfo (_(" calls:\n"));
3978 for (call
= fun
->call_list
; call
; call
= call
->next
)
3979 if (!call
->is_pasted
&& !call
->broken_cycle
)
3981 const char *f2
= func_name (call
->fun
);
3982 const char *ann1
= call
->fun
== max
? "*" : " ";
3983 const char *ann2
= call
->is_tail
? "t" : " ";
3985 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3990 if (sum_stack_param
->emit_stack_syms
)
3992 char *name
= bfd_malloc (18 + strlen (f1
));
3993 struct elf_link_hash_entry
*h
;
3998 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3999 sprintf (name
, "__stack_%s", f1
);
4001 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
4003 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
4006 && (h
->root
.type
== bfd_link_hash_new
4007 || h
->root
.type
== bfd_link_hash_undefined
4008 || h
->root
.type
== bfd_link_hash_undefweak
))
4010 h
->root
.type
= bfd_link_hash_defined
;
4011 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
4012 h
->root
.u
.def
.value
= cum_stack
;
4017 h
->ref_regular_nonweak
= 1;
4018 h
->forced_local
= 1;
4026 /* SEC is part of a pasted function. Return the call_info for the
4027 next section of this function. */
4029 static struct call_info
*
4030 find_pasted_call (asection
*sec
)
4032 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
4033 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
4034 struct call_info
*call
;
4037 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4038 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
4039 if (call
->is_pasted
)
4045 /* qsort predicate to sort bfds by file name. */
4048 sort_bfds (const void *a
, const void *b
)
4050 bfd
*const *abfd1
= a
;
4051 bfd
*const *abfd2
= b
;
4053 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
4057 print_one_overlay_section (FILE *script
,
4060 unsigned int ovlynum
,
4061 unsigned int *ovly_map
,
4062 asection
**ovly_sections
,
4063 struct bfd_link_info
*info
)
4067 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4069 asection
*sec
= ovly_sections
[2 * j
];
4071 if (fprintf (script
, " %s%c%s (%s)\n",
4072 (sec
->owner
->my_archive
!= NULL
4073 ? sec
->owner
->my_archive
->filename
: ""),
4074 info
->path_separator
,
4075 sec
->owner
->filename
,
4078 if (sec
->segment_mark
)
4080 struct call_info
*call
= find_pasted_call (sec
);
4081 while (call
!= NULL
)
4083 struct function_info
*call_fun
= call
->fun
;
4084 sec
= call_fun
->sec
;
4085 if (fprintf (script
, " %s%c%s (%s)\n",
4086 (sec
->owner
->my_archive
!= NULL
4087 ? sec
->owner
->my_archive
->filename
: ""),
4088 info
->path_separator
,
4089 sec
->owner
->filename
,
4092 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4093 if (call
->is_pasted
)
4099 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4101 asection
*sec
= ovly_sections
[2 * j
+ 1];
4103 && fprintf (script
, " %s%c%s (%s)\n",
4104 (sec
->owner
->my_archive
!= NULL
4105 ? sec
->owner
->my_archive
->filename
: ""),
4106 info
->path_separator
,
4107 sec
->owner
->filename
,
4111 sec
= ovly_sections
[2 * j
];
4112 if (sec
->segment_mark
)
4114 struct call_info
*call
= find_pasted_call (sec
);
4115 while (call
!= NULL
)
4117 struct function_info
*call_fun
= call
->fun
;
4118 sec
= call_fun
->rodata
;
4120 && fprintf (script
, " %s%c%s (%s)\n",
4121 (sec
->owner
->my_archive
!= NULL
4122 ? sec
->owner
->my_archive
->filename
: ""),
4123 info
->path_separator
,
4124 sec
->owner
->filename
,
4127 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4128 if (call
->is_pasted
)
4137 /* Handle --auto-overlay. */
4140 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4144 struct elf_segment_map
*m
;
4145 unsigned int fixed_size
, lo
, hi
;
4146 struct spu_link_hash_table
*htab
;
4147 unsigned int base
, i
, count
, bfd_count
;
4148 unsigned int region
, ovlynum
;
4149 asection
**ovly_sections
, **ovly_p
;
4150 unsigned int *ovly_map
;
4152 unsigned int total_overlay_size
, overlay_size
;
4153 const char *ovly_mgr_entry
;
4154 struct elf_link_hash_entry
*h
;
4155 struct _mos_param mos_param
;
4156 struct _uos_param uos_param
;
4157 struct function_info dummy_caller
;
4159 /* Find the extents of our loadable image. */
4160 lo
= (unsigned int) -1;
4162 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4163 if (m
->p_type
== PT_LOAD
)
4164 for (i
= 0; i
< m
->count
; i
++)
4165 if (m
->sections
[i
]->size
!= 0)
4167 if (m
->sections
[i
]->vma
< lo
)
4168 lo
= m
->sections
[i
]->vma
;
4169 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4170 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4172 fixed_size
= hi
+ 1 - lo
;
4174 if (!discover_functions (info
))
4177 if (!build_call_tree (info
))
4180 htab
= spu_hash_table (info
);
4181 if (htab
->reserved
== 0)
4183 struct _sum_stack_param sum_stack_param
;
4185 sum_stack_param
.emit_stack_syms
= 0;
4186 sum_stack_param
.overall_stack
= 0;
4187 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4189 htab
->reserved
= sum_stack_param
.overall_stack
+ htab
->extra_stack_space
;
4192 /* No need for overlays if everything already fits. */
4193 if (fixed_size
+ htab
->reserved
<= htab
->local_store
4194 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
4196 htab
->params
->auto_overlay
= 0;
4200 uos_param
.exclude_input_section
= 0;
4201 uos_param
.exclude_output_section
4202 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4204 ovly_mgr_entry
= "__ovly_load";
4205 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4206 ovly_mgr_entry
= "__icache_br_handler";
4207 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4208 FALSE
, FALSE
, FALSE
);
4210 && (h
->root
.type
== bfd_link_hash_defined
4211 || h
->root
.type
== bfd_link_hash_defweak
)
4214 /* We have a user supplied overlay manager. */
4215 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4219 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4220 builtin version to .text, and will adjust .text size. */
4221 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4224 /* Mark overlay sections, and find max overlay section size. */
4225 mos_param
.max_overlay_size
= 0;
4226 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
4229 /* We can't put the overlay manager or interrupt routines in
4231 uos_param
.clearing
= 0;
4232 if ((uos_param
.exclude_input_section
4233 || uos_param
.exclude_output_section
)
4234 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
4238 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4240 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4241 if (bfd_arr
== NULL
)
4244 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4247 total_overlay_size
= 0;
4248 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4250 extern const bfd_target bfd_elf32_spu_vec
;
4252 unsigned int old_count
;
4254 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
4258 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4259 if (sec
->linker_mark
)
4261 if ((sec
->flags
& SEC_CODE
) != 0)
4263 fixed_size
-= sec
->size
;
4264 total_overlay_size
+= sec
->size
;
4266 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4267 && sec
->output_section
->owner
== info
->output_bfd
4268 && strncmp (sec
->output_section
->name
, ".ovl.init", 9) == 0)
4269 fixed_size
-= sec
->size
;
4270 if (count
!= old_count
)
4271 bfd_arr
[bfd_count
++] = ibfd
;
4274 /* Since the overlay link script selects sections by file name and
4275 section name, ensure that file names are unique. */
4278 bfd_boolean ok
= TRUE
;
4280 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4281 for (i
= 1; i
< bfd_count
; ++i
)
4282 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
4284 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4286 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4287 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4288 bfd_arr
[i
]->filename
,
4289 bfd_arr
[i
]->my_archive
->filename
);
4291 info
->callbacks
->einfo (_("%s duplicated\n"),
4292 bfd_arr
[i
]->filename
);
4298 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4299 "object files in auto-overlay script\n"));
4300 bfd_set_error (bfd_error_bad_value
);
4306 fixed_size
+= htab
->reserved
;
4307 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
);
4308 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4310 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4312 /* Stubs in the non-icache area are bigger. */
4313 fixed_size
+= htab
->non_ovly_stub
* 16;
4314 /* Space for icache manager tables.
4315 a) Tag array, one quadword per cache line.
4316 - word 0: ia address of present line, init to zero. */
4317 fixed_size
+= 16 << htab
->num_lines_log2
;
4318 /* b) Rewrite "to" list, one quadword per cache line. */
4319 fixed_size
+= 16 << htab
->num_lines_log2
;
4320 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4321 to a power-of-two number of full quadwords) per cache line. */
4322 fixed_size
+= 16 << (htab
->fromelem_size_log2
4323 + htab
->num_lines_log2
);
4324 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4329 /* Guess number of overlays. Assuming overlay buffer is on
4330 average only half full should be conservative. */
4331 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4332 / (htab
->local_store
- fixed_size
));
4333 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4334 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4338 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4339 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4340 "size of 0x%v exceeds local store\n"),
4341 (bfd_vma
) fixed_size
,
4342 (bfd_vma
) mos_param
.max_overlay_size
);
4344 /* Now see if we should put some functions in the non-overlay area. */
4345 else if (fixed_size
< htab
->overlay_fixed
)
4347 unsigned int max_fixed
, lib_size
;
4349 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4350 if (max_fixed
> htab
->overlay_fixed
)
4351 max_fixed
= htab
->overlay_fixed
;
4352 lib_size
= max_fixed
- fixed_size
;
4353 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4354 if (lib_size
== (unsigned int) -1)
4356 fixed_size
= max_fixed
- lib_size
;
4359 /* Build an array of sections, suitably sorted to place into
4361 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4362 if (ovly_sections
== NULL
)
4364 ovly_p
= ovly_sections
;
4365 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
4367 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4368 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4369 if (ovly_map
== NULL
)
4372 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4373 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4374 if (htab
->params
->line_size
!= 0)
4375 overlay_size
= htab
->params
->line_size
;
4378 while (base
< count
)
4380 unsigned int size
= 0, rosize
= 0, roalign
= 0;
4382 for (i
= base
; i
< count
; i
++)
4384 asection
*sec
, *rosec
;
4385 unsigned int tmp
, rotmp
;
4386 unsigned int num_stubs
;
4387 struct call_info
*call
, *pasty
;
4388 struct _spu_elf_section_data
*sec_data
;
4389 struct spu_elf_stack_info
*sinfo
;
4392 /* See whether we can add this section to the current
4393 overlay without overflowing our overlay buffer. */
4394 sec
= ovly_sections
[2 * i
];
4395 tmp
= align_power (size
, sec
->alignment_power
) + sec
->size
;
4397 rosec
= ovly_sections
[2 * i
+ 1];
4400 rotmp
= align_power (rotmp
, rosec
->alignment_power
) + rosec
->size
;
4401 if (roalign
< rosec
->alignment_power
)
4402 roalign
= rosec
->alignment_power
;
4404 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4406 if (sec
->segment_mark
)
4408 /* Pasted sections must stay together, so add their
4410 struct call_info
*pasty
= find_pasted_call (sec
);
4411 while (pasty
!= NULL
)
4413 struct function_info
*call_fun
= pasty
->fun
;
4414 tmp
= (align_power (tmp
, call_fun
->sec
->alignment_power
)
4415 + call_fun
->sec
->size
);
4416 if (call_fun
->rodata
)
4418 rotmp
= (align_power (rotmp
,
4419 call_fun
->rodata
->alignment_power
)
4420 + call_fun
->rodata
->size
);
4421 if (roalign
< rosec
->alignment_power
)
4422 roalign
= rosec
->alignment_power
;
4424 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4425 if (pasty
->is_pasted
)
4429 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4432 /* If we add this section, we might need new overlay call
4433 stubs. Add any overlay section calls to dummy_call. */
4435 sec_data
= spu_elf_section_data (sec
);
4436 sinfo
= sec_data
->u
.i
.stack_info
;
4437 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4438 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4439 if (call
->is_pasted
)
4441 BFD_ASSERT (pasty
== NULL
);
4444 else if (call
->fun
->sec
->linker_mark
)
4446 if (!copy_callee (&dummy_caller
, call
))
4449 while (pasty
!= NULL
)
4451 struct function_info
*call_fun
= pasty
->fun
;
4453 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4454 if (call
->is_pasted
)
4456 BFD_ASSERT (pasty
== NULL
);
4459 else if (!copy_callee (&dummy_caller
, call
))
4463 /* Calculate call stub size. */
4465 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4468 unsigned int stub_delta
= 1;
4470 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4471 stub_delta
= call
->count
;
4472 num_stubs
+= stub_delta
;
4474 /* If the call is within this overlay, we won't need a
4476 for (k
= base
; k
< i
+ 1; k
++)
4477 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4479 num_stubs
-= stub_delta
;
4483 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4484 && num_stubs
> htab
->params
->max_branch
)
4486 if (align_power (tmp
, roalign
) + rotmp
4487 + num_stubs
* ovl_stub_size (htab
->params
) > overlay_size
)
4495 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
4496 ovly_sections
[2 * i
]->owner
,
4497 ovly_sections
[2 * i
],
4498 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4499 bfd_set_error (bfd_error_bad_value
);
4503 while (dummy_caller
.call_list
!= NULL
)
4505 struct call_info
*call
= dummy_caller
.call_list
;
4506 dummy_caller
.call_list
= call
->next
;
4512 ovly_map
[base
++] = ovlynum
;
4515 script
= htab
->params
->spu_elf_open_overlay_script ();
4517 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4520 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4522 if (fprintf (script
,
4523 " .data.icache ALIGN (16) : { *(.ovtab) *(.data.icache) }\n"
4524 " . = ALIGN (%u);\n"
4525 " .ovl.init : { *(.ovl.init) }\n"
4526 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4527 htab
->params
->line_size
) <= 0)
4532 while (base
< count
)
4534 unsigned int indx
= ovlynum
- 1;
4535 unsigned int vma
, lma
;
4537 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4538 lma
= indx
<< htab
->line_size_log2
;
4540 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4541 ": AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16) + %u) {\n",
4542 ovlynum
, vma
, lma
) <= 0)
4545 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4546 ovly_map
, ovly_sections
, info
);
4547 if (base
== (unsigned) -1)
4550 if (fprintf (script
, " }\n") <= 0)
4556 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4557 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4562 if (fprintf (script
,
4563 " . = ALIGN (16);\n"
4564 " .ovl.init : { *(.ovl.init) }\n"
4565 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4568 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4572 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4580 /* We need to set lma since we are overlaying .ovl.init. */
4581 if (fprintf (script
,
4582 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4587 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4591 while (base
< count
)
4593 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4596 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4597 ovly_map
, ovly_sections
, info
);
4598 if (base
== (unsigned) -1)
4601 if (fprintf (script
, " }\n") <= 0)
4604 ovlynum
+= htab
->params
->num_lines
;
4605 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4609 if (fprintf (script
, " }\n") <= 0)
4616 free (ovly_sections
);
4618 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4620 if (fclose (script
) != 0)
4623 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4624 (*htab
->params
->spu_elf_relink
) ();
4629 bfd_set_error (bfd_error_system_call
);
4631 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
4635 /* Provide an estimate of total stack required. */
4638 spu_elf_stack_analysis (struct bfd_link_info
*info
)
4640 struct spu_link_hash_table
*htab
;
4641 struct _sum_stack_param sum_stack_param
;
4643 if (!discover_functions (info
))
4646 if (!build_call_tree (info
))
4649 htab
= spu_hash_table (info
);
4650 if (htab
->params
->stack_analysis
)
4652 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
4653 info
->callbacks
->minfo (_("\nStack size for functions. "
4654 "Annotations: '*' max stack, 't' tail call\n"));
4657 sum_stack_param
.emit_stack_syms
= htab
->params
->emit_stack_syms
;
4658 sum_stack_param
.overall_stack
= 0;
4659 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4662 if (htab
->params
->stack_analysis
)
4663 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
4664 (bfd_vma
) sum_stack_param
.overall_stack
);
4668 /* Perform a final link. */
4671 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
4673 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4675 if (htab
->params
->auto_overlay
)
4676 spu_elf_auto_overlay (info
);
4678 if ((htab
->params
->stack_analysis
4679 || (htab
->params
->ovly_flavour
== ovly_soft_icache
4680 && htab
->params
->lrlive_analysis
))
4681 && !spu_elf_stack_analysis (info
))
4682 info
->callbacks
->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4684 if (!spu_elf_build_stubs (info
))
4685 info
->callbacks
->einfo ("%F%P: can not build overlay stubs: %E\n");
4687 return bfd_elf_final_link (output_bfd
, info
);
4690 /* Called when not normally emitting relocs, ie. !info->relocatable
4691 and !info->emitrelocations. Returns a count of special relocs
4692 that need to be emitted. */
4695 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
4697 Elf_Internal_Rela
*relocs
;
4698 unsigned int count
= 0;
4700 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
4704 Elf_Internal_Rela
*rel
;
4705 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
4707 for (rel
= relocs
; rel
< relend
; rel
++)
4709 int r_type
= ELF32_R_TYPE (rel
->r_info
);
4710 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4714 if (elf_section_data (sec
)->relocs
!= relocs
)
4721 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4724 spu_elf_relocate_section (bfd
*output_bfd
,
4725 struct bfd_link_info
*info
,
4727 asection
*input_section
,
4729 Elf_Internal_Rela
*relocs
,
4730 Elf_Internal_Sym
*local_syms
,
4731 asection
**local_sections
)
4733 Elf_Internal_Shdr
*symtab_hdr
;
4734 struct elf_link_hash_entry
**sym_hashes
;
4735 Elf_Internal_Rela
*rel
, *relend
;
4736 struct spu_link_hash_table
*htab
;
4739 bfd_boolean emit_these_relocs
= FALSE
;
4740 bfd_boolean is_ea_sym
;
4742 unsigned int iovl
= 0;
4744 htab
= spu_hash_table (info
);
4745 stubs
= (htab
->stub_sec
!= NULL
4746 && maybe_needs_stubs (input_section
));
4747 iovl
= overlay_index (input_section
);
4748 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
4749 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
4750 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
4753 relend
= relocs
+ input_section
->reloc_count
;
4754 for (; rel
< relend
; rel
++)
4757 reloc_howto_type
*howto
;
4758 unsigned int r_symndx
;
4759 Elf_Internal_Sym
*sym
;
4761 struct elf_link_hash_entry
*h
;
4762 const char *sym_name
;
4765 bfd_reloc_status_type r
;
4766 bfd_boolean unresolved_reloc
;
4768 enum _stub_type stub_type
;
4770 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4771 r_type
= ELF32_R_TYPE (rel
->r_info
);
4772 howto
= elf_howto_table
+ r_type
;
4773 unresolved_reloc
= FALSE
;
4778 if (r_symndx
< symtab_hdr
->sh_info
)
4780 sym
= local_syms
+ r_symndx
;
4781 sec
= local_sections
[r_symndx
];
4782 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4783 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4787 if (sym_hashes
== NULL
)
4790 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4792 while (h
->root
.type
== bfd_link_hash_indirect
4793 || h
->root
.type
== bfd_link_hash_warning
)
4794 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4797 if (h
->root
.type
== bfd_link_hash_defined
4798 || h
->root
.type
== bfd_link_hash_defweak
)
4800 sec
= h
->root
.u
.def
.section
;
4802 || sec
->output_section
== NULL
)
4803 /* Set a flag that will be cleared later if we find a
4804 relocation value for this symbol. output_section
4805 is typically NULL for symbols satisfied by a shared
4807 unresolved_reloc
= TRUE
;
4809 relocation
= (h
->root
.u
.def
.value
4810 + sec
->output_section
->vma
4811 + sec
->output_offset
);
4813 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4815 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4816 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4818 else if (!info
->relocatable
4819 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4822 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4823 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4824 if (!info
->callbacks
->undefined_symbol (info
,
4825 h
->root
.root
.string
,
4828 rel
->r_offset
, err
))
4832 sym_name
= h
->root
.root
.string
;
4835 if (sec
!= NULL
&& elf_discarded_section (sec
))
4837 /* For relocs against symbols from removed linkonce sections,
4838 or sections discarded by a linker script, we just want the
4839 section contents zeroed. Avoid any special processing. */
4840 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
4846 if (info
->relocatable
)
4849 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4850 if (r_type
== R_SPU_ADD_PIC
&& h
!= NULL
4851 && (h
->def_regular
|| ELF_COMMON_DEF_P (h
)))
4853 bfd_byte
*loc
= contents
+ rel
->r_offset
;
4859 is_ea_sym
= (ea
!= NULL
4861 && sec
->output_section
== ea
);
4863 /* If this symbol is in an overlay area, we may need to relocate
4864 to the overlay stub. */
4865 addend
= rel
->r_addend
;
4868 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4869 contents
, info
)) != no_stub
)
4871 unsigned int ovl
= 0;
4872 struct got_entry
*g
, **head
;
4874 if (stub_type
!= nonovl_stub
)
4878 head
= &h
->got
.glist
;
4880 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4882 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4883 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4885 && g
->br_addr
== (rel
->r_offset
4886 + input_section
->output_offset
4887 + input_section
->output_section
->vma
))
4888 : g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4893 relocation
= g
->stub_addr
;
4898 /* For soft icache, encode the overlay index into addresses. */
4899 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4900 && (r_type
== R_SPU_ADDR16_HI
4901 || r_type
== R_SPU_ADDR32
|| r_type
== R_SPU_REL32
)
4904 unsigned int ovl
= overlay_index (sec
);
4907 unsigned int set_id
= ((ovl
- 1) >> htab
->num_lines_log2
) + 1;
4908 relocation
+= set_id
<< 18;
4913 if (unresolved_reloc
)
4915 else if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4919 /* ._ea is a special section that isn't allocated in SPU
4920 memory, but rather occupies space in PPU memory as
4921 part of an embedded ELF image. If this reloc is
4922 against a symbol defined in ._ea, then transform the
4923 reloc into an equivalent one without a symbol
4924 relative to the start of the ELF image. */
4925 rel
->r_addend
+= (relocation
4927 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4928 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4930 emit_these_relocs
= TRUE
;
4934 unresolved_reloc
= TRUE
;
4936 if (unresolved_reloc
)
4938 (*_bfd_error_handler
)
4939 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4941 bfd_get_section_name (input_bfd
, input_section
),
4942 (long) rel
->r_offset
,
4948 r
= _bfd_final_link_relocate (howto
,
4952 rel
->r_offset
, relocation
, addend
);
4954 if (r
!= bfd_reloc_ok
)
4956 const char *msg
= (const char *) 0;
4960 case bfd_reloc_overflow
:
4961 if (!((*info
->callbacks
->reloc_overflow
)
4962 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4963 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4967 case bfd_reloc_undefined
:
4968 if (!((*info
->callbacks
->undefined_symbol
)
4969 (info
, sym_name
, input_bfd
, input_section
,
4970 rel
->r_offset
, TRUE
)))
4974 case bfd_reloc_outofrange
:
4975 msg
= _("internal error: out of range error");
4978 case bfd_reloc_notsupported
:
4979 msg
= _("internal error: unsupported relocation error");
4982 case bfd_reloc_dangerous
:
4983 msg
= _("internal error: dangerous error");
4987 msg
= _("internal error: unknown error");
4992 if (!((*info
->callbacks
->warning
)
4993 (info
, msg
, sym_name
, input_bfd
, input_section
,
5002 && emit_these_relocs
5003 && !info
->emitrelocations
)
5005 Elf_Internal_Rela
*wrel
;
5006 Elf_Internal_Shdr
*rel_hdr
;
5008 wrel
= rel
= relocs
;
5009 relend
= relocs
+ input_section
->reloc_count
;
5010 for (; rel
< relend
; rel
++)
5014 r_type
= ELF32_R_TYPE (rel
->r_info
);
5015 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
5018 input_section
->reloc_count
= wrel
- relocs
;
5019 /* Backflips for _bfd_elf_link_output_relocs. */
5020 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
5021 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
5028 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5031 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
5032 const char *sym_name ATTRIBUTE_UNUSED
,
5033 Elf_Internal_Sym
*sym
,
5034 asection
*sym_sec ATTRIBUTE_UNUSED
,
5035 struct elf_link_hash_entry
*h
)
5037 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5039 if (!info
->relocatable
5040 && htab
->stub_sec
!= NULL
5042 && (h
->root
.type
== bfd_link_hash_defined
5043 || h
->root
.type
== bfd_link_hash_defweak
)
5045 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
5047 struct got_entry
*g
;
5049 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
5050 if (htab
->params
->ovly_flavour
== ovly_soft_icache
5051 ? g
->br_addr
== g
->stub_addr
5052 : g
->addend
== 0 && g
->ovl
== 0)
5054 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
5055 (htab
->stub_sec
[0]->output_section
->owner
,
5056 htab
->stub_sec
[0]->output_section
));
5057 sym
->st_value
= g
->stub_addr
;
5065 static int spu_plugin
= 0;
5068 spu_elf_plugin (int val
)
5073 /* Set ELF header e_type for plugins. */
5076 spu_elf_post_process_headers (bfd
*abfd
,
5077 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
5081 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
5083 i_ehdrp
->e_type
= ET_DYN
;
5087 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5088 segments for overlays. */
5091 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5098 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5099 extra
= htab
->num_overlays
;
5105 sec
= bfd_get_section_by_name (abfd
, ".toe");
5106 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
5112 /* Remove .toe section from other PT_LOAD segments and put it in
5113 a segment of its own. Put overlays in separate segments too. */
5116 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
5119 struct elf_segment_map
*m
, *m_overlay
;
5120 struct elf_segment_map
**p
, **p_overlay
;
5126 toe
= bfd_get_section_by_name (abfd
, ".toe");
5127 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
5128 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
5129 for (i
= 0; i
< m
->count
; i
++)
5130 if ((s
= m
->sections
[i
]) == toe
5131 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
5133 struct elf_segment_map
*m2
;
5136 if (i
+ 1 < m
->count
)
5138 amt
= sizeof (struct elf_segment_map
);
5139 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
5140 m2
= bfd_zalloc (abfd
, amt
);
5143 m2
->count
= m
->count
- (i
+ 1);
5144 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
5145 m2
->count
* sizeof (m
->sections
[0]));
5146 m2
->p_type
= PT_LOAD
;
5154 amt
= sizeof (struct elf_segment_map
);
5155 m2
= bfd_zalloc (abfd
, amt
);
5158 m2
->p_type
= PT_LOAD
;
5160 m2
->sections
[0] = s
;
5168 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5169 PT_LOAD segments. This can cause the .ovl.init section to be
5170 overwritten with the contents of some overlay segment. To work
5171 around this issue, we ensure that all PF_OVERLAY segments are
5172 sorted first amongst the program headers; this ensures that even
5173 with a broken loader, the .ovl.init section (which is not marked
5174 as PF_OVERLAY) will be placed into SPU local store on startup. */
5176 /* Move all overlay segments onto a separate list. */
5177 p
= &elf_tdata (abfd
)->segment_map
;
5178 p_overlay
= &m_overlay
;
5181 if ((*p
)->p_type
== PT_LOAD
&& (*p
)->count
== 1
5182 && spu_elf_section_data ((*p
)->sections
[0])->u
.o
.ovl_index
!= 0)
5184 struct elf_segment_map
*m
= *p
;
5187 p_overlay
= &m
->next
;
5194 /* Re-insert overlay segments at the head of the segment map. */
5195 *p_overlay
= elf_tdata (abfd
)->segment_map
;
5196 elf_tdata (abfd
)->segment_map
= m_overlay
;
5201 /* Tweak the section type of .note.spu_name. */
5204 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
5205 Elf_Internal_Shdr
*hdr
,
5208 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
5209 hdr
->sh_type
= SHT_NOTE
;
5213 /* Tweak phdrs before writing them out. */
5216 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5218 const struct elf_backend_data
*bed
;
5219 struct elf_obj_tdata
*tdata
;
5220 Elf_Internal_Phdr
*phdr
, *last
;
5221 struct spu_link_hash_table
*htab
;
5228 bed
= get_elf_backend_data (abfd
);
5229 tdata
= elf_tdata (abfd
);
5231 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
5232 htab
= spu_hash_table (info
);
5233 if (htab
->num_overlays
!= 0)
5235 struct elf_segment_map
*m
;
5238 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
5240 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
5242 /* Mark this as an overlay header. */
5243 phdr
[i
].p_flags
|= PF_OVERLAY
;
5245 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0
5246 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
5248 bfd_byte
*p
= htab
->ovtab
->contents
;
5249 unsigned int off
= o
* 16 + 8;
5251 /* Write file_off into _ovly_table. */
5252 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
5255 /* Soft-icache has its file offset put in .ovl.init. */
5256 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
5258 bfd_vma val
= elf_section_data (htab
->ovl_sec
[0])->this_hdr
.sh_offset
;
5260 bfd_put_32 (htab
->init
->owner
, val
, htab
->init
->contents
+ 4);
5264 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5265 of 16. This should always be possible when using the standard
5266 linker scripts, but don't create overlapping segments if
5267 someone is playing games with linker scripts. */
5269 for (i
= count
; i
-- != 0; )
5270 if (phdr
[i
].p_type
== PT_LOAD
)
5274 adjust
= -phdr
[i
].p_filesz
& 15;
5277 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
5280 adjust
= -phdr
[i
].p_memsz
& 15;
5283 && phdr
[i
].p_filesz
!= 0
5284 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
5285 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
5288 if (phdr
[i
].p_filesz
!= 0)
5292 if (i
== (unsigned int) -1)
5293 for (i
= count
; i
-- != 0; )
5294 if (phdr
[i
].p_type
== PT_LOAD
)
5298 adjust
= -phdr
[i
].p_filesz
& 15;
5299 phdr
[i
].p_filesz
+= adjust
;
5301 adjust
= -phdr
[i
].p_memsz
& 15;
5302 phdr
[i
].p_memsz
+= adjust
;
5308 #define TARGET_BIG_SYM bfd_elf32_spu_vec
5309 #define TARGET_BIG_NAME "elf32-spu"
5310 #define ELF_ARCH bfd_arch_spu
5311 #define ELF_MACHINE_CODE EM_SPU
5312 /* This matches the alignment need for DMA. */
5313 #define ELF_MAXPAGESIZE 0x80
5314 #define elf_backend_rela_normal 1
5315 #define elf_backend_can_gc_sections 1
5317 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5318 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5319 #define elf_info_to_howto spu_elf_info_to_howto
5320 #define elf_backend_count_relocs spu_elf_count_relocs
5321 #define elf_backend_relocate_section spu_elf_relocate_section
5322 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5323 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5324 #define elf_backend_object_p spu_elf_object_p
5325 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5326 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5328 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5329 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5330 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5331 #define elf_backend_post_process_headers spu_elf_post_process_headers
5332 #define elf_backend_fake_sections spu_elf_fake_sections
5333 #define elf_backend_special_sections spu_elf_special_sections
5334 #define bfd_elf32_bfd_final_link spu_elf_final_link
5336 #include "elf32-target.h"