Set SEC_KEEP on section XXX for undefined __start_XXX/__stop_XXX
[binutils.git] / bfd / elf32-spu.c
blobc6139c9dabff0162a832ef8470f8a9eb9052b4ee
1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
21 #include "sysdep.h"
22 #include "libiberty.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf/spu.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
33 void *, asection *,
34 bfd *, char **);
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table[] = {
40 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
41 bfd_elf_generic_reloc, "SPU_NONE",
42 FALSE, 0, 0x00000000, FALSE),
43 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
44 bfd_elf_generic_reloc, "SPU_ADDR10",
45 FALSE, 0, 0x00ffc000, FALSE),
46 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
47 bfd_elf_generic_reloc, "SPU_ADDR16",
48 FALSE, 0, 0x007fff80, FALSE),
49 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
50 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
51 FALSE, 0, 0x007fff80, FALSE),
52 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
53 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
54 FALSE, 0, 0x007fff80, FALSE),
55 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
56 bfd_elf_generic_reloc, "SPU_ADDR18",
57 FALSE, 0, 0x01ffff80, FALSE),
58 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "SPU_ADDR32",
60 FALSE, 0, 0xffffffff, FALSE),
61 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "SPU_REL16",
63 FALSE, 0, 0x007fff80, TRUE),
64 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
65 bfd_elf_generic_reloc, "SPU_ADDR7",
66 FALSE, 0, 0x001fc000, FALSE),
67 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
68 spu_elf_rel9, "SPU_REL9",
69 FALSE, 0, 0x0180007f, TRUE),
70 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
71 spu_elf_rel9, "SPU_REL9I",
72 FALSE, 0, 0x0000c07f, TRUE),
73 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
74 bfd_elf_generic_reloc, "SPU_ADDR10I",
75 FALSE, 0, 0x00ffc000, FALSE),
76 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
77 bfd_elf_generic_reloc, "SPU_ADDR16I",
78 FALSE, 0, 0x007fff80, FALSE),
79 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
80 bfd_elf_generic_reloc, "SPU_REL32",
81 FALSE, 0, 0xffffffff, TRUE),
82 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "SPU_ADDR16X",
84 FALSE, 0, 0x007fff80, FALSE),
85 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
86 bfd_elf_generic_reloc, "SPU_PPU32",
87 FALSE, 0, 0xffffffff, FALSE),
88 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
89 bfd_elf_generic_reloc, "SPU_PPU64",
90 FALSE, 0, -1, FALSE),
91 HOWTO (R_SPU_ADD_PIC, 0, 0, 0, FALSE, 0, complain_overflow_dont,
92 bfd_elf_generic_reloc, "SPU_ADD_PIC",
93 FALSE, 0, 0x00000000, FALSE),
96 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
97 { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
98 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
99 { NULL, 0, 0, 0, 0 }
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
105 switch (code)
107 default:
108 return R_SPU_NONE;
109 case BFD_RELOC_SPU_IMM10W:
110 return R_SPU_ADDR10;
111 case BFD_RELOC_SPU_IMM16W:
112 return R_SPU_ADDR16;
113 case BFD_RELOC_SPU_LO16:
114 return R_SPU_ADDR16_LO;
115 case BFD_RELOC_SPU_HI16:
116 return R_SPU_ADDR16_HI;
117 case BFD_RELOC_SPU_IMM18:
118 return R_SPU_ADDR18;
119 case BFD_RELOC_SPU_PCREL16:
120 return R_SPU_REL16;
121 case BFD_RELOC_SPU_IMM7:
122 return R_SPU_ADDR7;
123 case BFD_RELOC_SPU_IMM8:
124 return R_SPU_NONE;
125 case BFD_RELOC_SPU_PCREL9a:
126 return R_SPU_REL9;
127 case BFD_RELOC_SPU_PCREL9b:
128 return R_SPU_REL9I;
129 case BFD_RELOC_SPU_IMM10:
130 return R_SPU_ADDR10I;
131 case BFD_RELOC_SPU_IMM16:
132 return R_SPU_ADDR16I;
133 case BFD_RELOC_32:
134 return R_SPU_ADDR32;
135 case BFD_RELOC_32_PCREL:
136 return R_SPU_REL32;
137 case BFD_RELOC_SPU_PPU32:
138 return R_SPU_PPU32;
139 case BFD_RELOC_SPU_PPU64:
140 return R_SPU_PPU64;
141 case BFD_RELOC_SPU_ADD_PIC:
142 return R_SPU_ADD_PIC;
146 static void
147 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
148 arelent *cache_ptr,
149 Elf_Internal_Rela *dst)
151 enum elf_spu_reloc_type r_type;
153 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
154 BFD_ASSERT (r_type < R_SPU_max);
155 cache_ptr->howto = &elf_howto_table[(int) r_type];
158 static reloc_howto_type *
159 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
160 bfd_reloc_code_real_type code)
162 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
164 if (r_type == R_SPU_NONE)
165 return NULL;
167 return elf_howto_table + r_type;
170 static reloc_howto_type *
171 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
172 const char *r_name)
174 unsigned int i;
176 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
177 if (elf_howto_table[i].name != NULL
178 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
179 return &elf_howto_table[i];
181 return NULL;
184 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
186 static bfd_reloc_status_type
187 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
188 void *data, asection *input_section,
189 bfd *output_bfd, char **error_message)
191 bfd_size_type octets;
192 bfd_vma val;
193 long insn;
195 /* If this is a relocatable link (output_bfd test tells us), just
196 call the generic function. Any adjustment will be done at final
197 link time. */
198 if (output_bfd != NULL)
199 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
200 input_section, output_bfd, error_message);
202 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
203 return bfd_reloc_outofrange;
204 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
206 /* Get symbol value. */
207 val = 0;
208 if (!bfd_is_com_section (symbol->section))
209 val = symbol->value;
210 if (symbol->section->output_section)
211 val += symbol->section->output_section->vma;
213 val += reloc_entry->addend;
215 /* Make it pc-relative. */
216 val -= input_section->output_section->vma + input_section->output_offset;
218 val >>= 2;
219 if (val + 256 >= 512)
220 return bfd_reloc_overflow;
222 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
224 /* Move two high bits of value to REL9I and REL9 position.
225 The mask will take care of selecting the right field. */
226 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
227 insn &= ~reloc_entry->howto->dst_mask;
228 insn |= val & reloc_entry->howto->dst_mask;
229 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
230 return bfd_reloc_ok;
233 static bfd_boolean
234 spu_elf_new_section_hook (bfd *abfd, asection *sec)
236 if (!sec->used_by_bfd)
238 struct _spu_elf_section_data *sdata;
240 sdata = bfd_zalloc (abfd, sizeof (*sdata));
241 if (sdata == NULL)
242 return FALSE;
243 sec->used_by_bfd = sdata;
246 return _bfd_elf_new_section_hook (abfd, sec);
249 /* Set up overlay info for executables. */
251 static bfd_boolean
252 spu_elf_object_p (bfd *abfd)
254 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
256 unsigned int i, num_ovl, num_buf;
257 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
258 Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
259 Elf_Internal_Phdr *last_phdr = NULL;
261 for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
262 if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
264 unsigned int j;
266 ++num_ovl;
267 if (last_phdr == NULL
268 || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
269 ++num_buf;
270 last_phdr = phdr;
271 for (j = 1; j < elf_numsections (abfd); j++)
273 Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
275 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
277 asection *sec = shdr->bfd_section;
278 spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
279 spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
284 return TRUE;
287 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
288 strip --strip-unneeded will not remove them. */
290 static void
291 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
293 if (sym->name != NULL
294 && sym->section != bfd_abs_section_ptr
295 && strncmp (sym->name, "_EAR_", 5) == 0)
296 sym->flags |= BSF_KEEP;
299 /* SPU ELF linker hash table. */
301 struct spu_link_hash_table
303 struct elf_link_hash_table elf;
305 struct spu_elf_params *params;
307 /* Shortcuts to overlay sections. */
308 asection *ovtab;
309 asection *init;
310 asection *toe;
311 asection **ovl_sec;
313 /* Count of stubs in each overlay section. */
314 unsigned int *stub_count;
316 /* The stub section for each overlay section. */
317 asection **stub_sec;
319 struct elf_link_hash_entry *ovly_entry[2];
321 /* Number of overlay buffers. */
322 unsigned int num_buf;
324 /* Total number of overlays. */
325 unsigned int num_overlays;
327 /* For soft icache. */
328 unsigned int line_size_log2;
329 unsigned int num_lines_log2;
330 unsigned int fromelem_size_log2;
332 /* How much memory we have. */
333 unsigned int local_store;
335 /* Count of overlay stubs needed in non-overlay area. */
336 unsigned int non_ovly_stub;
338 /* Pointer to the fixup section */
339 asection *sfixup;
341 /* Set on error. */
342 unsigned int stub_err : 1;
345 /* Hijack the generic got fields for overlay stub accounting. */
347 struct got_entry
349 struct got_entry *next;
350 unsigned int ovl;
351 union {
352 bfd_vma addend;
353 bfd_vma br_addr;
355 bfd_vma stub_addr;
358 #define spu_hash_table(p) \
359 ((struct spu_link_hash_table *) ((p)->hash))
/* One edge in the call graph: a call from somewhere to FUN.  */
struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};
373 struct function_info
375 /* List of functions called. Also branches to hot/cold part of
376 function. */
377 struct call_info *call_list;
378 /* For hot/cold part of function, point to owner. */
379 struct function_info *start;
380 /* Symbol at start of function. */
381 union {
382 Elf_Internal_Sym *sym;
383 struct elf_link_hash_entry *h;
384 } u;
385 /* Function section. */
386 asection *sec;
387 asection *rodata;
388 /* Where last called from, and number of sections called from. */
389 asection *last_caller;
390 unsigned int call_count;
391 /* Address range of (this part of) function. */
392 bfd_vma lo, hi;
393 /* Offset where we found a store of lr, or -1 if none found. */
394 bfd_vma lr_store;
395 /* Offset where we found the stack adjustment insn. */
396 bfd_vma sp_adjust;
397 /* Stack usage. */
398 int stack;
399 /* Distance from root of call tree. Tail and hot/cold branches
400 count as one deeper. We aren't counting stack frames here. */
401 unsigned int depth;
402 /* Set if global symbol. */
403 unsigned int global : 1;
404 /* Set if known to be start of function (as distinct from a hunk
405 in hot/cold section. */
406 unsigned int is_func : 1;
407 /* Set if not a root node. */
408 unsigned int non_root : 1;
409 /* Flags used during call tree traversal. It's cheaper to replicate
410 the visit flags than have one which needs clearing after a traversal. */
411 unsigned int visit1 : 1;
412 unsigned int visit2 : 1;
413 unsigned int marking : 1;
414 unsigned int visit3 : 1;
415 unsigned int visit4 : 1;
416 unsigned int visit5 : 1;
417 unsigned int visit6 : 1;
418 unsigned int visit7 : 1;
421 struct spu_elf_stack_info
423 int num_fun;
424 int max_fun;
425 /* Variable size array describing functions, one per contiguous
426 address range belonging to a function. */
427 struct function_info fun[1];
430 static struct function_info *find_function (asection *, bfd_vma,
431 struct bfd_link_info *);
433 /* Create a spu ELF linker hash table. */
435 static struct bfd_link_hash_table *
436 spu_elf_link_hash_table_create (bfd *abfd)
438 struct spu_link_hash_table *htab;
440 htab = bfd_malloc (sizeof (*htab));
441 if (htab == NULL)
442 return NULL;
444 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
445 _bfd_elf_link_hash_newfunc,
446 sizeof (struct elf_link_hash_entry)))
448 free (htab);
449 return NULL;
452 memset (&htab->ovtab, 0,
453 sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));
455 htab->elf.init_got_refcount.refcount = 0;
456 htab->elf.init_got_refcount.glist = NULL;
457 htab->elf.init_got_offset.offset = 0;
458 htab->elf.init_got_offset.glist = NULL;
459 return &htab->elf.root;
462 void
463 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
465 bfd_vma max_branch_log2;
467 struct spu_link_hash_table *htab = spu_hash_table (info);
468 htab->params = params;
469 htab->line_size_log2 = bfd_log2 (htab->params->line_size);
470 htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
472 /* For the software i-cache, we provide a "from" list whose size
473 is a power-of-two number of quadwords, big enough to hold one
474 byte per outgoing branch. Compute this number here. */
475 max_branch_log2 = bfd_log2 (htab->params->max_branch);
476 htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
479 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
480 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
481 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
483 static bfd_boolean
484 get_sym_h (struct elf_link_hash_entry **hp,
485 Elf_Internal_Sym **symp,
486 asection **symsecp,
487 Elf_Internal_Sym **locsymsp,
488 unsigned long r_symndx,
489 bfd *ibfd)
491 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
493 if (r_symndx >= symtab_hdr->sh_info)
495 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
496 struct elf_link_hash_entry *h;
498 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
499 while (h->root.type == bfd_link_hash_indirect
500 || h->root.type == bfd_link_hash_warning)
501 h = (struct elf_link_hash_entry *) h->root.u.i.link;
503 if (hp != NULL)
504 *hp = h;
506 if (symp != NULL)
507 *symp = NULL;
509 if (symsecp != NULL)
511 asection *symsec = NULL;
512 if (h->root.type == bfd_link_hash_defined
513 || h->root.type == bfd_link_hash_defweak)
514 symsec = h->root.u.def.section;
515 *symsecp = symsec;
518 else
520 Elf_Internal_Sym *sym;
521 Elf_Internal_Sym *locsyms = *locsymsp;
523 if (locsyms == NULL)
525 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
526 if (locsyms == NULL)
527 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
528 symtab_hdr->sh_info,
529 0, NULL, NULL, NULL);
530 if (locsyms == NULL)
531 return FALSE;
532 *locsymsp = locsyms;
534 sym = locsyms + r_symndx;
536 if (hp != NULL)
537 *hp = NULL;
539 if (symp != NULL)
540 *symp = sym;
542 if (symsecp != NULL)
543 *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
546 return TRUE;
549 /* Create the note section if not already present. This is done early so
550 that the linker maps the sections to the right place in the output. */
552 bfd_boolean
553 spu_elf_create_sections (struct bfd_link_info *info)
555 struct spu_link_hash_table *htab = spu_hash_table (info);
556 bfd *ibfd;
558 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
559 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
560 break;
562 if (ibfd == NULL)
564 /* Make SPU_PTNOTE_SPUNAME section. */
565 asection *s;
566 size_t name_len;
567 size_t size;
568 bfd_byte *data;
569 flagword flags;
571 ibfd = info->input_bfds;
572 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
573 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
574 if (s == NULL
575 || !bfd_set_section_alignment (ibfd, s, 4))
576 return FALSE;
578 name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
579 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
580 size += (name_len + 3) & -4;
582 if (!bfd_set_section_size (ibfd, s, size))
583 return FALSE;
585 data = bfd_zalloc (ibfd, size);
586 if (data == NULL)
587 return FALSE;
589 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
590 bfd_put_32 (ibfd, name_len, data + 4);
591 bfd_put_32 (ibfd, 1, data + 8);
592 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
593 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
594 bfd_get_filename (info->output_bfd), name_len);
595 s->contents = data;
598 if (htab->params->emit_fixups)
600 asection *s;
601 flagword flags;
602 ibfd = info->input_bfds;
603 flags = SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
604 | SEC_IN_MEMORY;
605 s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
606 if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
607 return FALSE;
608 htab->sfixup = s;
611 return TRUE;
614 /* qsort predicate to sort sections by vma. */
616 static int
617 sort_sections (const void *a, const void *b)
619 const asection *const *s1 = a;
620 const asection *const *s2 = b;
621 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
623 if (delta != 0)
624 return delta < 0 ? -1 : 1;
626 return (*s1)->index - (*s2)->index;
629 /* Identify overlays in the output bfd, and number them.
630 Returns 0 on error, 1 if no overlays, 2 if overlays. */
633 spu_elf_find_overlays (struct bfd_link_info *info)
635 struct spu_link_hash_table *htab = spu_hash_table (info);
636 asection **alloc_sec;
637 unsigned int i, n, ovl_index, num_buf;
638 asection *s;
639 bfd_vma ovl_end;
640 static const char *const entry_names[2][2] = {
641 { "__ovly_load", "__icache_br_handler" },
642 { "__ovly_return", "__icache_call_handler" }
645 if (info->output_bfd->section_count < 2)
646 return 1;
648 alloc_sec
649 = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
650 if (alloc_sec == NULL)
651 return 0;
653 /* Pick out all the alloced sections. */
654 for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
655 if ((s->flags & SEC_ALLOC) != 0
656 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
657 && s->size != 0)
658 alloc_sec[n++] = s;
660 if (n == 0)
662 free (alloc_sec);
663 return 1;
666 /* Sort them by vma. */
667 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
669 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
670 if (htab->params->ovly_flavour == ovly_soft_icache)
672 unsigned int prev_buf = 0, set_id = 0;
674 /* Look for an overlapping vma to find the first overlay section. */
675 bfd_vma vma_start = 0;
677 for (i = 1; i < n; i++)
679 s = alloc_sec[i];
680 if (s->vma < ovl_end)
682 asection *s0 = alloc_sec[i - 1];
683 vma_start = s0->vma;
684 ovl_end = (s0->vma
685 + ((bfd_vma) 1
686 << (htab->num_lines_log2 + htab->line_size_log2)));
687 --i;
688 break;
690 else
691 ovl_end = s->vma + s->size;
694 /* Now find any sections within the cache area. */
695 for (ovl_index = 0, num_buf = 0; i < n; i++)
697 s = alloc_sec[i];
698 if (s->vma >= ovl_end)
699 break;
701 /* A section in an overlay area called .ovl.init is not
702 an overlay, in the sense that it might be loaded in
703 by the overlay manager, but rather the initial
704 section contents for the overlay buffer. */
705 if (strncmp (s->name, ".ovl.init", 9) != 0)
707 num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
708 set_id = (num_buf == prev_buf)? set_id + 1 : 0;
709 prev_buf = num_buf;
711 if ((s->vma - vma_start) & (htab->params->line_size - 1))
713 info->callbacks->einfo (_("%X%P: overlay section %A "
714 "does not start on a cache line.\n"),
716 bfd_set_error (bfd_error_bad_value);
717 return 0;
719 else if (s->size > htab->params->line_size)
721 info->callbacks->einfo (_("%X%P: overlay section %A "
722 "is larger than a cache line.\n"),
724 bfd_set_error (bfd_error_bad_value);
725 return 0;
728 alloc_sec[ovl_index++] = s;
729 spu_elf_section_data (s)->u.o.ovl_index
730 = (set_id << htab->num_lines_log2) + num_buf;
731 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
735 /* Ensure there are no more overlay sections. */
736 for ( ; i < n; i++)
738 s = alloc_sec[i];
739 if (s->vma < ovl_end)
741 info->callbacks->einfo (_("%X%P: overlay section %A "
742 "is not in cache area.\n"),
743 alloc_sec[i-1]);
744 bfd_set_error (bfd_error_bad_value);
745 return 0;
747 else
748 ovl_end = s->vma + s->size;
751 else
753 /* Look for overlapping vmas. Any with overlap must be overlays.
754 Count them. Also count the number of overlay regions. */
755 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
757 s = alloc_sec[i];
758 if (s->vma < ovl_end)
760 asection *s0 = alloc_sec[i - 1];
762 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
764 ++num_buf;
765 if (strncmp (s0->name, ".ovl.init", 9) != 0)
767 alloc_sec[ovl_index] = s0;
768 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
769 spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
771 else
772 ovl_end = s->vma + s->size;
774 if (strncmp (s->name, ".ovl.init", 9) != 0)
776 alloc_sec[ovl_index] = s;
777 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
778 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
779 if (s0->vma != s->vma)
781 info->callbacks->einfo (_("%X%P: overlay sections %A "
782 "and %A do not start at the "
783 "same address.\n"),
784 s0, s);
785 bfd_set_error (bfd_error_bad_value);
786 return 0;
788 if (ovl_end < s->vma + s->size)
789 ovl_end = s->vma + s->size;
792 else
793 ovl_end = s->vma + s->size;
797 htab->num_overlays = ovl_index;
798 htab->num_buf = num_buf;
799 htab->ovl_sec = alloc_sec;
801 if (ovl_index == 0)
802 return 1;
804 for (i = 0; i < 2; i++)
806 const char *name;
807 struct elf_link_hash_entry *h;
809 name = entry_names[i][htab->params->ovly_flavour];
810 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
811 if (h == NULL)
812 return 0;
814 if (h->root.type == bfd_link_hash_new)
816 h->root.type = bfd_link_hash_undefined;
817 h->ref_regular = 1;
818 h->ref_regular_nonweak = 1;
819 h->non_elf = 0;
821 htab->ovly_entry[i] = h;
824 return 2;
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

/* SPU instruction words (opcode in the high bits) used when
   generating overlay stubs.  */
#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
838 /* Return true for all relative and absolute branch instructions.
839 bra 00110000 0..
840 brasl 00110001 0..
841 br 00110010 0..
842 brsl 00110011 0..
843 brz 00100000 0..
844 brnz 00100001 0..
845 brhz 00100010 0..
846 brhnz 00100011 0.. */
848 static bfd_boolean
849 is_branch (const unsigned char *insn)
851 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
854 /* Return true for all indirect branch instructions.
855 bi 00110101 000
856 bisl 00110101 001
857 iret 00110101 010
858 bisled 00110101 011
859 biz 00100101 000
860 binz 00100101 001
861 bihz 00100101 010
862 bihnz 00100101 011 */
864 static bfd_boolean
865 is_indirect_branch (const unsigned char *insn)
867 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
870 /* Return true for branch hint instructions.
871 hbra 0001000..
872 hbrr 0001001.. */
874 static bfd_boolean
875 is_hint (const unsigned char *insn)
877 return (insn[0] & 0xfc) == 0x10;
880 /* True if INPUT_SECTION might need overlay stubs. */
882 static bfd_boolean
883 maybe_needs_stubs (asection *input_section)
885 /* No stubs for debug sections and suchlike. */
886 if ((input_section->flags & SEC_ALLOC) == 0)
887 return FALSE;
889 /* No stubs for link-once sections that will be discarded. */
890 if (input_section->output_section == bfd_abs_section_ptr)
891 return FALSE;
893 /* Don't create stubs for .eh_frame references. */
894 if (strcmp (input_section->name, ".eh_frame") == 0)
895 return FALSE;
897 return TRUE;
/* Kind of overlay stub required for a reloc.  The brNNN variants
   encode the three "lr live" bits taken from a branch hint.  */
enum _stub_type
{
  no_stub,
  call_ovl_stub,
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,
  stub_error
};
916 /* Return non-zero if this reloc symbol should go via an overlay stub.
917 Return 2 if the stub must be in non-overlay area. */
919 static enum _stub_type
920 needs_ovl_stub (struct elf_link_hash_entry *h,
921 Elf_Internal_Sym *sym,
922 asection *sym_sec,
923 asection *input_section,
924 Elf_Internal_Rela *irela,
925 bfd_byte *contents,
926 struct bfd_link_info *info)
928 struct spu_link_hash_table *htab = spu_hash_table (info);
929 enum elf_spu_reloc_type r_type;
930 unsigned int sym_type;
931 bfd_boolean branch, hint, call;
932 enum _stub_type ret = no_stub;
933 bfd_byte insn[4];
935 if (sym_sec == NULL
936 || sym_sec->output_section == bfd_abs_section_ptr
937 || spu_elf_section_data (sym_sec->output_section) == NULL)
938 return ret;
940 if (h != NULL)
942 /* Ensure no stubs for user supplied overlay manager syms. */
943 if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
944 return ret;
946 /* setjmp always goes via an overlay stub, because then the return
947 and hence the longjmp goes via __ovly_return. That magically
948 makes setjmp/longjmp between overlays work. */
949 if (strncmp (h->root.root.string, "setjmp", 6) == 0
950 && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
951 ret = call_ovl_stub;
954 if (h != NULL)
955 sym_type = h->type;
956 else
957 sym_type = ELF_ST_TYPE (sym->st_info);
959 r_type = ELF32_R_TYPE (irela->r_info);
960 branch = FALSE;
961 hint = FALSE;
962 call = FALSE;
963 if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
965 if (contents == NULL)
967 contents = insn;
968 if (!bfd_get_section_contents (input_section->owner,
969 input_section,
970 contents,
971 irela->r_offset, 4))
972 return stub_error;
974 else
975 contents += irela->r_offset;
977 branch = is_branch (contents);
978 hint = is_hint (contents);
979 if (branch || hint)
981 call = (contents[0] & 0xfd) == 0x31;
982 if (call
983 && sym_type != STT_FUNC
984 && contents != insn)
986 /* It's common for people to write assembly and forget
987 to give function symbols the right type. Handle
988 calls to such symbols, but warn so that (hopefully)
989 people will fix their code. We need the symbol
990 type to be correct to distinguish function pointer
991 initialisation from other pointer initialisations. */
992 const char *sym_name;
994 if (h != NULL)
995 sym_name = h->root.root.string;
996 else
998 Elf_Internal_Shdr *symtab_hdr;
999 symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
1000 sym_name = bfd_elf_sym_name (input_section->owner,
1001 symtab_hdr,
1002 sym,
1003 sym_sec);
1005 (*_bfd_error_handler) (_("warning: call to non-function"
1006 " symbol %s defined in %B"),
1007 sym_sec->owner, sym_name);
1013 if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
1014 || (sym_type != STT_FUNC
1015 && !(branch || hint)
1016 && (sym_sec->flags & SEC_CODE) == 0))
1017 return no_stub;
1019 /* Usually, symbols in non-overlay sections don't need stubs. */
1020 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
1021 && !htab->params->non_overlay_stubs)
1022 return ret;
1024 /* A reference from some other section to a symbol in an overlay
1025 section needs a stub. */
1026 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
1027 != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
1029 unsigned int lrlive = 0;
1030 if (branch)
1031 lrlive = (contents[1] & 0x70) >> 4;
1033 if (!lrlive && (call || sym_type == STT_FUNC))
1034 ret = call_ovl_stub;
1035 else
1036 ret = br000_ovl_stub + lrlive;
1039 /* If this insn isn't a branch then we are possibly taking the
1040 address of a function and passing it out somehow. Soft-icache code
1041 always generates inline code to do indirect branches. */
1042 if (!(branch || hint)
1043 && sym_type == STT_FUNC
1044 && htab->params->ovly_flavour != ovly_soft_icache)
1045 ret = nonovl_stub;
1047 return ret;
1050 static bfd_boolean
1051 count_stub (struct spu_link_hash_table *htab,
1052 bfd *ibfd,
1053 asection *isec,
1054 enum _stub_type stub_type,
1055 struct elf_link_hash_entry *h,
1056 const Elf_Internal_Rela *irela)
1058 unsigned int ovl = 0;
1059 struct got_entry *g, **head;
1060 bfd_vma addend;
1062 /* If this instruction is a branch or call, we need a stub
1063 for it. One stub per function per overlay.
1064 If it isn't a branch, then we are taking the address of
1065 this function so need a stub in the non-overlay area
1066 for it. One stub per function. */
1067 if (stub_type != nonovl_stub)
1068 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1070 if (h != NULL)
1071 head = &h->got.glist;
1072 else
1074 if (elf_local_got_ents (ibfd) == NULL)
1076 bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
1077 * sizeof (*elf_local_got_ents (ibfd)));
1078 elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
1079 if (elf_local_got_ents (ibfd) == NULL)
1080 return FALSE;
1082 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1085 if (htab->params->ovly_flavour == ovly_soft_icache)
1087 htab->stub_count[ovl] += 1;
1088 return TRUE;
1091 addend = 0;
1092 if (irela != NULL)
1093 addend = irela->r_addend;
1095 if (ovl == 0)
1097 struct got_entry *gnext;
1099 for (g = *head; g != NULL; g = g->next)
1100 if (g->addend == addend && g->ovl == 0)
1101 break;
1103 if (g == NULL)
1105 /* Need a new non-overlay area stub. Zap other stubs. */
1106 for (g = *head; g != NULL; g = gnext)
1108 gnext = g->next;
1109 if (g->addend == addend)
1111 htab->stub_count[g->ovl] -= 1;
1112 free (g);
1117 else
1119 for (g = *head; g != NULL; g = g->next)
1120 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1121 break;
1124 if (g == NULL)
1126 g = bfd_malloc (sizeof *g);
1127 if (g == NULL)
1128 return FALSE;
1129 g->ovl = ovl;
1130 g->addend = addend;
1131 g->stub_addr = (bfd_vma) -1;
1132 g->next = *head;
1133 *head = g;
1135 htab->stub_count[ovl] += 1;
1138 return TRUE;
1141 /* Support two sizes of overlay stubs, a slower more compact stub of two
1142 intructions, and a faster stub of four instructions.
1143 Soft-icache stubs are four or eight words. */
1145 static unsigned int
1146 ovl_stub_size (struct spu_elf_params *params)
1148 return 16 << params->ovly_flavour >> params->compact_stub;
1151 static unsigned int
1152 ovl_stub_size_log2 (struct spu_elf_params *params)
1154 return 4 + params->ovly_flavour - params->compact_stub;
1157 /* Two instruction overlay stubs look like:
1159 brsl $75,__ovly_load
1160 .word target_ovl_and_address
1162 ovl_and_address is a word with the overlay number in the top 14 bits
1163 and local store address in the bottom 18 bits.
1165 Four instruction overlay stubs look like:
1167 ila $78,ovl_number
1168 lnop
1169 ila $79,target_address
1170 br __ovly_load
1172 Software icache stubs are:
1174 .word target_index
1175 .word target_ia;
1176 .word lrlive_branchlocalstoreaddr;
1177 brasl $75,__icache_br_handler
1178 .quad xor_pattern
/* Emit one overlay (or soft-icache) stub into htab->stub_sec[ovl] for
   the call described by H (global symbol) or IRELA (local reloc),
   whose destination is DEST within DEST_SEC.  Advances the stub
   section's size, and when params->emit_stub_syms is set also defines
   a "NNNNNNNN.ovl_call.<sym>" label on the stub.  Returns FALSE on
   allocation failure, or on misaligned addresses (htab->stub_err is
   set in that case).  */
1181 static bfd_boolean
1182 build_stub (struct bfd_link_info *info,
1183 bfd *ibfd,
1184 asection *isec,
1185 enum _stub_type stub_type,
1186 struct elf_link_hash_entry *h,
1187 const Elf_Internal_Rela *irela,
1188 bfd_vma dest,
1189 asection *dest_sec)
1191 struct spu_link_hash_table *htab = spu_hash_table (info);
1192 unsigned int ovl, dest_ovl, set_id;
1193 struct got_entry *g, **head;
1194 asection *sec;
1195 bfd_vma addend, from, to, br_dest, patt;
1196 unsigned int lrlive;
/* ovl 0 denotes the non-overlay area.  */
1198 ovl = 0;
1199 if (stub_type != nonovl_stub)
1200 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1202 if (h != NULL)
1203 head = &h->got.glist;
1204 else
1205 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1207 addend = 0;
1208 if (irela != NULL)
1209 addend = irela->r_addend;
/* Soft-icache: allocate a fresh got_entry recording the address of
   the branch this stub serves; entries are not shared.  */
1211 if (htab->params->ovly_flavour == ovly_soft_icache)
1213 g = bfd_malloc (sizeof *g);
1214 if (g == NULL)
1215 return FALSE;
1216 g->ovl = ovl;
1217 g->br_addr = 0;
1218 if (irela != NULL)
1219 g->br_addr = (irela->r_offset
1220 + isec->output_offset
1221 + isec->output_section->vma);
1222 g->next = *head;
1223 *head = g;
/* Other flavours reuse the entry recorded by the sizing pass.  */
1225 else
1227 for (g = *head; g != NULL; g = g->next)
1228 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1229 break;
1230 if (g == NULL)
1231 abort ();
/* A non-overlay-area stub (g->ovl == 0) also serves calls made from
   overlay OVL; don't build a second per-overlay copy.  */
1233 if (g->ovl == 0 && ovl != 0)
1234 return TRUE;
/* Already built on a previous visit.  */
1236 if (g->stub_addr != (bfd_vma) -1)
1237 return TRUE;
1240 sec = htab->stub_sec[ovl];
1241 dest += dest_sec->output_offset + dest_sec->output_section->vma;
1242 from = sec->size + sec->output_offset + sec->output_section->vma;
1243 g->stub_addr = from;
1244 to = (htab->ovly_entry[0]->root.u.def.value
1245 + htab->ovly_entry[0]->root.u.def.section->output_offset
1246 + htab->ovly_entry[0]->root.u.def.section->output_section->vma);
/* All three addresses feed word-sized branch/ila fields, so they
   must be 4-byte aligned.  */
1248 if (((dest | to | from) & 3) != 0)
1250 htab->stub_err = 1;
1251 return FALSE;
1253 dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
1255 if (htab->params->ovly_flavour == ovly_normal
1256 && !htab->params->compact_stub)
1258 bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
1259 sec->contents + sec->size);
1260 bfd_put_32 (sec->owner, LNOP,
1261 sec->contents + sec->size + 4);
1262 bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
1263 sec->contents + sec->size + 8);
1264 if (!BRA_STUBS)
1265 bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
1266 sec->contents + sec->size + 12);
1267 else
1268 bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
1269 sec->contents + sec->size + 12);
1271 else if (htab->params->ovly_flavour == ovly_normal
1272 && htab->params->compact_stub)
1274 if (!BRA_STUBS)
1275 bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
1276 sec->contents + sec->size);
1277 else
1278 bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1279 sec->contents + sec->size);
1280 bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
1281 sec->contents + sec->size + 4);
1283 else if (htab->params->ovly_flavour == ovly_soft_icache
1284 && htab->params->compact_stub)
/* Determine LRLIVE, the link-register liveness code stored in the
   top three bits of the third stub word.  */
1286 lrlive = 0;
1287 if (stub_type == nonovl_stub)
1289 else if (stub_type == call_ovl_stub)
1290 /* A brsl makes lr live and *(*sp+16) is live.
1291 Tail calls have the same liveness. */
1292 lrlive = 5;
1293 else if (!htab->params->lrlive_analysis)
1294 /* Assume stack frame and lr save. */
1295 lrlive = 1;
1296 else if (irela != NULL)
1298 /* Analyse branch instructions. */
1299 struct function_info *caller;
1300 bfd_vma off;
1302 caller = find_function (isec, irela->r_offset, info);
1303 if (caller->start == NULL)
1304 off = irela->r_offset;
1305 else
1307 struct function_info *found = NULL;
1309 /* Find the earliest piece of this function that
1310 has frame adjusting instructions. We might
1311 see dynamic frame adjustment (eg. for alloca)
1312 in some later piece, but functions using
1313 alloca always set up a frame earlier. Frame
1314 setup instructions are always in one piece. */
1315 if (caller->lr_store != (bfd_vma) -1
1316 || caller->sp_adjust != (bfd_vma) -1)
1317 found = caller;
1318 while (caller->start != NULL)
1320 caller = caller->start;
1321 if (caller->lr_store != (bfd_vma) -1
1322 || caller->sp_adjust != (bfd_vma) -1)
1323 found = caller;
1325 if (found != NULL)
1326 caller = found;
1327 off = (bfd_vma) -1;
1330 if (off > caller->sp_adjust)
1332 if (off > caller->lr_store)
1333 /* Only *(*sp+16) is live. */
1334 lrlive = 1;
1335 else
1336 /* If no lr save, then we must be in a
1337 leaf function with a frame.
1338 lr is still live. */
1339 lrlive = 4;
1341 else if (off > caller->lr_store)
1343 /* Between lr save and stack adjust. */
1344 lrlive = 3;
1345 /* This should never happen since prologues won't
1346 be split here. */
1347 BFD_ASSERT (0);
1349 else
1350 /* On entry to function. */
1351 lrlive = 5;
1353 if (stub_type != br000_ovl_stub
1354 && lrlive != stub_type - br000_ovl_stub)
1355 info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1356 "from analysis (%u)\n"),
1357 isec, irela->r_offset, lrlive,
1358 stub_type - br000_ovl_stub);
1361 /* If given lrlive info via .brinfo, use it. */
1362 if (stub_type > br000_ovl_stub)
1363 lrlive = stub_type - br000_ovl_stub;
/* Non-overlay callers use the second icache entry point.  */
1365 if (ovl == 0)
1366 to = (htab->ovly_entry[1]->root.u.def.value
1367 + htab->ovly_entry[1]->root.u.def.section->output_offset
1368 + htab->ovly_entry[1]->root.u.def.section->output_section->vma);
1370 /* The branch that uses this stub goes to stub_addr + 4. We'll
1371 set up an xor pattern that can be used by the icache manager
1372 to modify this branch to go directly to its destination. */
1373 g->stub_addr += 4;
1374 br_dest = g->stub_addr;
1375 if (irela == NULL)
1377 /* Except in the case of _SPUEAR_ stubs, the branch in
1378 question is the one in the stub itself. */
1379 BFD_ASSERT (stub_type == nonovl_stub);
1380 g->br_addr = g->stub_addr;
1381 br_dest = to;
/* Derive the icache set id from the destination overlay number.  */
1384 set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
1385 bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
1386 sec->contents + sec->size);
1387 bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1388 sec->contents + sec->size + 4);
1389 bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
1390 sec->contents + sec->size + 8);
1391 patt = dest ^ br_dest;
1392 if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
1393 patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
1394 bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
1395 sec->contents + sec->size + 12);
1397 if (ovl == 0)
1398 /* Extra space for linked list entries. */
1399 sec->size += 16;
1401 else
1402 abort ();
1404 sec->size += ovl_stub_size (htab->params);
1406 if (htab->params->emit_stub_syms)
1408 size_t len;
1409 char *name;
1410 int add;
/* Name is "%08x.ovl_call." + symbol (or "secid:symidx") + "+addend".  */
1412 len = 8 + sizeof (".ovl_call.") - 1;
1413 if (h != NULL)
1414 len += strlen (h->root.root.string);
1415 else
1416 len += 8 + 1 + 8;
1417 add = 0;
1418 if (irela != NULL)
1419 add = (int) irela->r_addend & 0xffffffff;
1420 if (add != 0)
1421 len += 1 + 8;
1422 name = bfd_malloc (len);
1423 if (name == NULL)
1424 return FALSE;
1426 sprintf (name, "%08x.ovl_call.", g->ovl);
1427 if (h != NULL)
1428 strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
1429 else
1430 sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1431 dest_sec->id & 0xffffffff,
1432 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
1433 if (add != 0)
1434 sprintf (name + len - 9, "+%x", add);
1436 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1437 free (name);
1438 if (h == NULL)
1439 return FALSE;
1440 if (h->root.type == bfd_link_hash_new)
1442 h->root.type = bfd_link_hash_defined;
1443 h->root.u.def.section = sec;
1444 h->size = ovl_stub_size (htab->params);
1445 h->root.u.def.value = sec->size - h->size;
1446 h->type = STT_FUNC;
1447 h->ref_regular = 1;
1448 h->def_regular = 1;
1449 h->ref_regular_nonweak = 1;
1450 h->forced_local = 1;
1451 h->non_elf = 0;
1455 return TRUE;
1458 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1459 symbols. */
1461 static bfd_boolean
1462 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1464 /* Symbols starting with _SPUEAR_ need a stub because they may be
1465 invoked by the PPU. */
1466 struct bfd_link_info *info = inf;
1467 struct spu_link_hash_table *htab = spu_hash_table (info);
1468 asection *sym_sec;
1470 if ((h->root.type == bfd_link_hash_defined
1471 || h->root.type == bfd_link_hash_defweak)
1472 && h->def_regular
1473 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1474 && (sym_sec = h->root.u.def.section) != NULL
1475 && sym_sec->output_section != bfd_abs_section_ptr
1476 && spu_elf_section_data (sym_sec->output_section) != NULL
1477 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1478 || htab->params->non_overlay_stubs))
1480 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1483 return TRUE;
1486 static bfd_boolean
1487 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1489 /* Symbols starting with _SPUEAR_ need a stub because they may be
1490 invoked by the PPU. */
1491 struct bfd_link_info *info = inf;
1492 struct spu_link_hash_table *htab = spu_hash_table (info);
1493 asection *sym_sec;
1495 if ((h->root.type == bfd_link_hash_defined
1496 || h->root.type == bfd_link_hash_defweak)
1497 && h->def_regular
1498 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1499 && (sym_sec = h->root.u.def.section) != NULL
1500 && sym_sec->output_section != bfd_abs_section_ptr
1501 && spu_elf_section_data (sym_sec->output_section) != NULL
1502 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1503 || htab->params->non_overlay_stubs))
1505 return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1506 h->root.u.def.value, sym_sec);
1509 return TRUE;
1512 /* Size or build stubs. */
/* Scan every reloc of every section of each SPU input bfd.  For each
   branch or reference that needs an overlay stub, either tally it via
   count_stub (BUILD is FALSE, the sizing pass) or emit it via
   build_stub (BUILD is TRUE).  Returns FALSE on error.  */
1514 static bfd_boolean
1515 process_stubs (struct bfd_link_info *info, bfd_boolean build)
1517 struct spu_link_hash_table *htab = spu_hash_table (info);
1518 bfd *ibfd;
1520 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
1522 extern const bfd_target bfd_elf32_spu_vec;
1523 Elf_Internal_Shdr *symtab_hdr;
1524 asection *isec;
1525 Elf_Internal_Sym *local_syms = NULL;
/* Only SPU ELF objects are of interest.  */
1527 if (ibfd->xvec != &bfd_elf32_spu_vec)
1528 continue;
1530 /* We'll need the symbol table in a second. */
1531 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1532 if (symtab_hdr->sh_info == 0)
1533 continue;
1535 /* Walk over each section attached to the input bfd. */
1536 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
1538 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1540 /* If there aren't any relocs, then there's nothing more to do. */
1541 if ((isec->flags & SEC_RELOC) == 0
1542 || isec->reloc_count == 0)
1543 continue;
1545 if (!maybe_needs_stubs (isec))
1546 continue;
1548 /* Get the relocs. */
1549 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1550 info->keep_memory)
1551 if (internal_relocs == NULL)
1552 goto error_ret_free_local;
1554 /* Now examine each relocation. */
1555 irela = internal_relocs;
1556 irelaend = irela + isec->reloc_count;
1557 for (; irela < irelaend; irela++)
1559 enum elf_spu_reloc_type r_type;
1560 unsigned int r_indx;
1561 asection *sym_sec;
1562 Elf_Internal_Sym *sym;
1563 struct elf_link_hash_entry *h;
1564 enum _stub_type stub_type;
1566 r_type = ELF32_R_TYPE (irela->r_info);
1567 r_indx = ELF32_R_SYM (irela->r_info);
/* Bogus reloc type: fail.  The two labels below are the shared
   cleanup paths for all later errors in this function.  */
1569 if (r_type >= R_SPU_max)
1571 bfd_set_error (bfd_error_bad_value);
1572 error_ret_free_internal:
1573 if (elf_section_data (isec)->relocs != internal_relocs)
1574 free (internal_relocs);
1575 error_ret_free_local:
1576 if (local_syms != NULL
1577 && (symtab_hdr->contents
1578 != (unsigned char *) local_syms))
1579 free (local_syms);
1580 return FALSE;
1583 /* Determine the reloc target section. */
1584 if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
1585 goto error_ret_free_internal;
1587 stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1588 NULL, info);
1589 if (stub_type == no_stub)
1590 continue;
1591 else if (stub_type == stub_error)
1592 goto error_ret_free_internal;
/* Lazily allocate one stub counter per overlay plus one for the
   non-overlay area.  */
1594 if (htab->stub_count == NULL)
1596 bfd_size_type amt;
1597 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1598 htab->stub_count = bfd_zmalloc (amt);
1599 if (htab->stub_count == NULL)
1600 goto error_ret_free_internal;
1603 if (!build)
1605 if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1606 goto error_ret_free_internal;
1608 else
1610 bfd_vma dest;
1612 if (h != NULL)
1613 dest = h->root.u.def.value;
1614 else
1615 dest = sym->st_value;
1616 dest += irela->r_addend;
1617 if (!build_stub (info, ibfd, isec, stub_type, h, irela,
1618 dest, sym_sec))
1619 goto error_ret_free_internal;
1623 /* We're done with the internal relocs, free them. */
1624 if (elf_section_data (isec)->relocs != internal_relocs)
1625 free (internal_relocs);
/* Keep or free the local symbols depending on --keep-memory.  */
1628 if (local_syms != NULL
1629 && symtab_hdr->contents != (unsigned char *) local_syms)
1631 if (!info->keep_memory)
1632 free (local_syms);
1633 else
1634 symtab_hdr->contents = (unsigned char *) local_syms;
1638 return TRUE;
1641 /* Allocate space for overlay call and return stubs.
1642 Return 0 on error, 1 if no overlays, 2 otherwise. */
1645 spu_elf_size_stubs (struct bfd_link_info *info)
1647 struct spu_link_hash_table *htab;
1648 bfd *ibfd;
1649 bfd_size_type amt;
1650 flagword flags;
1651 unsigned int i;
1652 asection *stub;
/* Sizing pass: count the stubs needed (build == FALSE).  */
1654 if (!process_stubs (info, FALSE))
1655 return 0;
1657 htab = spu_hash_table (info);
1658 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
1659 if (htab->stub_err)
1660 return 0;
/* All generated sections are owned by the first input bfd.  */
1662 ibfd = info->input_bfds;
1663 if (htab->stub_count != NULL)
/* One .stub section for the non-overlay area, plus one per overlay.  */
1665 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1666 htab->stub_sec = bfd_zmalloc (amt);
1667 if (htab->stub_sec == NULL)
1668 return 0;
1670 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1671 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1672 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1673 htab->stub_sec[0] = stub;
1674 if (stub == NULL
1675 || !bfd_set_section_alignment (ibfd, stub,
1676 ovl_stub_size_log2 (htab->params)))
1677 return 0;
1678 stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
1679 if (htab->params->ovly_flavour == ovly_soft_icache)
1680 /* Extra space for linked list entries. */
1681 stub->size += htab->stub_count[0] * 16;
1683 for (i = 0; i < htab->num_overlays; ++i)
1685 asection *osec = htab->ovl_sec[i];
1686 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1687 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1688 htab->stub_sec[ovl] = stub;
1689 if (stub == NULL
1690 || !bfd_set_section_alignment (ibfd, stub,
1691 ovl_stub_size_log2 (htab->params)))
1692 return 0;
1693 stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
1697 if (htab->params->ovly_flavour == ovly_soft_icache)
1699 /* Space for icache manager tables.
1700 a) Tag array, one quadword per cache line.
1701 b) Rewrite "to" list, one quadword per cache line.
1702 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1703 a power-of-two number of full quadwords) per cache line. */
1705 flags = SEC_ALLOC;
1706 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1707 if (htab->ovtab == NULL
1708 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1709 return 0;
1711 htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
1712 << htab->num_lines_log2;
1714 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1715 htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
1716 if (htab->init == NULL
1717 || !bfd_set_section_alignment (ibfd, htab->init, 4))
1718 return 0;
1720 htab->init->size = 16;
/* No stubs needed at all means no overlays.  */
1722 else if (htab->stub_count == NULL)
1723 return 1;
1724 else
1726 /* htab->ovtab consists of two arrays.
1727 . struct {
1728 . u32 vma;
1729 . u32 size;
1730 . u32 file_off;
1731 . u32 buf;
1732 . } _ovly_table[];
1734 . struct {
1735 . u32 mapped;
1736 . } _ovly_buf_table[];
1737 . */
1739 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1740 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1741 if (htab->ovtab == NULL
1742 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1743 return 0;
1745 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
/* .toe holds one 16-byte entry; see the _EAR_ symbol defined in
   spu_elf_build_stubs.  */
1748 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1749 if (htab->toe == NULL
1750 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1751 return 0;
1752 htab->toe->size = 16;
1754 return 2;
1757 /* Called from ld to place overlay manager data sections. This is done
1758 after the overlay manager itself is loaded, mainly so that the
1759 linker's htab->init section is placed after any other .ovl.init
1760 sections. */
1762 void
1763 spu_elf_place_overlay_data (struct bfd_link_info *info)
1765 struct spu_link_hash_table *htab = spu_hash_table (info);
1766 unsigned int i;
1768 if (htab->stub_sec != NULL)
1770 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1772 for (i = 0; i < htab->num_overlays; ++i)
1774 asection *osec = htab->ovl_sec[i];
1775 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1776 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1780 if (htab->params->ovly_flavour == ovly_soft_icache)
1781 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1783 if (htab->ovtab != NULL)
1785 const char *ovout = ".data";
1786 if (htab->params->ovly_flavour == ovly_soft_icache)
1787 ovout = ".bss";
1788 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1791 if (htab->toe != NULL)
1792 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1795 /* Functions to handle embedded spu_ovl.o object. */
1797 static void *
1798 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1800 return stream;
1803 static file_ptr
1804 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1805 void *stream,
1806 void *buf,
1807 file_ptr nbytes,
1808 file_ptr offset)
1810 struct _ovl_stream *os;
1811 size_t count;
1812 size_t max;
1814 os = (struct _ovl_stream *) stream;
1815 max = (const char *) os->end - (const char *) os->start;
1817 if ((ufile_ptr) offset >= max)
1818 return 0;
1820 count = nbytes;
1821 if (count > max - offset)
1822 count = max - offset;
1824 memcpy (buf, (const char *) os->start + offset, count);
1825 return count;
1828 bfd_boolean
1829 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1831 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1832 "elf32-spu",
1833 ovl_mgr_open,
1834 (void *) stream,
1835 ovl_mgr_pread,
1836 NULL,
1837 NULL);
1838 return *ovl_bfd != NULL;
1841 static unsigned int
1842 overlay_index (asection *sec)
1844 if (sec == NULL
1845 || sec->output_section == bfd_abs_section_ptr)
1846 return 0;
1847 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1850 /* Define an STT_OBJECT symbol. */
1852 static struct elf_link_hash_entry *
1853 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1855 struct elf_link_hash_entry *h;
1857 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1858 if (h == NULL)
1859 return NULL;
1861 if (h->root.type != bfd_link_hash_defined
1862 || !h->def_regular)
1864 h->root.type = bfd_link_hash_defined;
1865 h->root.u.def.section = htab->ovtab;
1866 h->type = STT_OBJECT;
1867 h->ref_regular = 1;
1868 h->def_regular = 1;
1869 h->ref_regular_nonweak = 1;
1870 h->non_elf = 0;
1872 else if (h->root.u.def.section->owner != NULL)
1874 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1875 h->root.u.def.section->owner,
1876 h->root.root.string);
1877 bfd_set_error (bfd_error_bad_value);
1878 return NULL;
1880 else
1882 (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
1883 h->root.root.string);
1884 bfd_set_error (bfd_error_bad_value);
1885 return NULL;
1888 return h;
1891 /* Fill in all stubs and the overlay tables. */
/* Build pass: sanity-check the overlay manager entry points, allocate
   and fill the stub sections (process_stubs with build == TRUE), then
   write the overlay/soft-icache tables into .ovtab and define their
   guard symbols.  Returns FALSE on any error.  */
1893 static bfd_boolean
1894 spu_elf_build_stubs (struct bfd_link_info *info)
1896 struct spu_link_hash_table *htab = spu_hash_table (info);
1897 struct elf_link_hash_entry *h;
1898 bfd_byte *p;
1899 asection *s;
1900 bfd *obfd;
1901 unsigned int i;
/* The overlay manager entry points must live outside any overlay.  */
1903 if (htab->num_overlays != 0)
1905 for (i = 0; i < 2; i++)
1907 h = htab->ovly_entry[i];
1908 if (h != NULL
1909 && (h->root.type == bfd_link_hash_defined
1910 || h->root.type == bfd_link_hash_defweak)
1911 && h->def_regular)
1913 s = h->root.u.def.section->output_section;
1914 if (spu_elf_section_data (s)->u.o.ovl_index)
1916 (*_bfd_error_handler) (_("%s in overlay section"),
1917 h->root.root.string);
1918 bfd_set_error (bfd_error_bad_value);
1919 return FALSE;
/* Allocate zeroed contents; size is reset to 0 and rebuilt while the
   stubs are emitted, then checked against rawsize below.  */
1925 if (htab->stub_sec != NULL)
1927 for (i = 0; i <= htab->num_overlays; i++)
1928 if (htab->stub_sec[i]->size != 0)
1930 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1931 htab->stub_sec[i]->size);
1932 if (htab->stub_sec[i]->contents == NULL)
1933 return FALSE;
1934 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1935 htab->stub_sec[i]->size = 0;
1938 /* Fill in all the stubs. */
1939 process_stubs (info, TRUE);
1940 if (!htab->stub_err)
1941 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1943 if (htab->stub_err)
1945 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1946 bfd_set_error (bfd_error_bad_value);
1947 return FALSE;
/* Sizes produced by the build pass must match the sizing pass.  */
1950 for (i = 0; i <= htab->num_overlays; i++)
1952 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1954 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1955 bfd_set_error (bfd_error_bad_value);
1956 return FALSE;
1958 htab->stub_sec[i]->rawsize = 0;
1962 if (htab->ovtab == NULL || htab->ovtab->size == 0)
1963 return TRUE;
1965 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1966 if (htab->ovtab->contents == NULL)
1967 return FALSE;
1969 p = htab->ovtab->contents;
/* Soft-icache layout: tag array, rewrite-to list, rewrite-from list,
   each exposed to the icache manager through the symbols below.
   Values placed in bfd_abs_section_ptr are plain constants.  */
1970 if (htab->params->ovly_flavour == ovly_soft_icache)
1972 bfd_vma off;
1974 h = define_ovtab_symbol (htab, "__icache_tag_array");
1975 if (h == NULL)
1976 return FALSE;
1977 h->root.u.def.value = 0;
1978 h->size = 16 << htab->num_lines_log2;
1979 off = h->size;
1981 h = define_ovtab_symbol (htab, "__icache_tag_array_size");
1982 if (h == NULL)
1983 return FALSE;
1984 h->root.u.def.value = 16 << htab->num_lines_log2;
1985 h->root.u.def.section = bfd_abs_section_ptr;
1987 h = define_ovtab_symbol (htab, "__icache_rewrite_to");
1988 if (h == NULL)
1989 return FALSE;
1990 h->root.u.def.value = off;
1991 h->size = 16 << htab->num_lines_log2;
1992 off += h->size;
1994 h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
1995 if (h == NULL)
1996 return FALSE;
1997 h->root.u.def.value = 16 << htab->num_lines_log2;
1998 h->root.u.def.section = bfd_abs_section_ptr;
2000 h = define_ovtab_symbol (htab, "__icache_rewrite_from");
2001 if (h == NULL)
2002 return FALSE;
2003 h->root.u.def.value = off;
2004 h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
2005 off += h->size;
2007 h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
2008 if (h == NULL)
2009 return FALSE;
2010 h->root.u.def.value = 16 << (htab->fromelem_size_log2
2011 + htab->num_lines_log2);
2012 h->root.u.def.section = bfd_abs_section_ptr;
2014 h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2015 if (h == NULL)
2016 return FALSE;
2017 h->root.u.def.value = htab->fromelem_size_log2;
2018 h->root.u.def.section = bfd_abs_section_ptr;
2020 h = define_ovtab_symbol (htab, "__icache_base");
2021 if (h == NULL)
2022 return FALSE;
2023 h->root.u.def.value = htab->ovl_sec[0]->vma;
2024 h->root.u.def.section = bfd_abs_section_ptr;
2025 h->size = htab->num_buf << htab->line_size_log2;
2027 h = define_ovtab_symbol (htab, "__icache_linesize");
2028 if (h == NULL)
2029 return FALSE;
2030 h->root.u.def.value = 1 << htab->line_size_log2;
2031 h->root.u.def.section = bfd_abs_section_ptr;
2033 h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2034 if (h == NULL)
2035 return FALSE;
2036 h->root.u.def.value = htab->line_size_log2;
2037 h->root.u.def.section = bfd_abs_section_ptr;
2039 h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2040 if (h == NULL)
2041 return FALSE;
2042 h->root.u.def.value = -htab->line_size_log2;
2043 h->root.u.def.section = bfd_abs_section_ptr;
2045 h = define_ovtab_symbol (htab, "__icache_cachesize");
2046 if (h == NULL)
2047 return FALSE;
2048 h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2049 h->root.u.def.section = bfd_abs_section_ptr;
2051 h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2052 if (h == NULL)
2053 return FALSE;
2054 h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2055 h->root.u.def.section = bfd_abs_section_ptr;
2057 h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2058 if (h == NULL)
2059 return FALSE;
2060 h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2061 h->root.u.def.section = bfd_abs_section_ptr;
2063 if (htab->init != NULL && htab->init->size != 0)
2065 htab->init->contents = bfd_zalloc (htab->init->owner,
2066 htab->init->size);
2067 if (htab->init->contents == NULL)
2068 return FALSE;
2070 h = define_ovtab_symbol (htab, "__icache_fileoff");
2071 if (h == NULL)
2072 return FALSE;
2073 h->root.u.def.value = 0;
2074 h->root.u.def.section = htab->init;
2075 h->size = 8;
2078 else
2080 /* Write out _ovly_table. */
2081 /* set low bit of .size to mark non-overlay area as present. */
2082 p[7] = 1;
2083 obfd = htab->ovtab->output_section->owner;
2084 for (s = obfd->sections; s != NULL; s = s->next)
2086 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2088 if (ovl_index != 0)
2090 unsigned long off = ovl_index * 16;
2091 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2093 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2094 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2095 p + off + 4);
2096 /* file_off written later in spu_elf_modify_program_headers. */
2097 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2101 h = define_ovtab_symbol (htab, "_ovly_table");
2102 if (h == NULL)
2103 return FALSE;
2104 h->root.u.def.value = 16;
2105 h->size = htab->num_overlays * 16;
2107 h = define_ovtab_symbol (htab, "_ovly_table_end");
2108 if (h == NULL)
2109 return FALSE;
2110 h->root.u.def.value = htab->num_overlays * 16 + 16;
2111 h->size = 0;
2113 h = define_ovtab_symbol (htab, "_ovly_buf_table");
2114 if (h == NULL)
2115 return FALSE;
2116 h->root.u.def.value = htab->num_overlays * 16 + 16;
2117 h->size = htab->num_buf * 4;
2119 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2120 if (h == NULL)
2121 return FALSE;
2122 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2123 h->size = 0;
2126 h = define_ovtab_symbol (htab, "_EAR_");
2127 if (h == NULL)
2128 return FALSE;
2129 h->root.u.def.section = htab->toe;
2130 h->root.u.def.value = 0;
2131 h->size = 16;
2133 return TRUE;
2136 /* Check that all loadable section VMAs lie in the range
2137 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2139 asection *
2140 spu_elf_check_vma (struct bfd_link_info *info)
2142 struct elf_segment_map *m;
2143 unsigned int i;
2144 struct spu_link_hash_table *htab = spu_hash_table (info);
2145 bfd *abfd = info->output_bfd;
2146 bfd_vma hi = htab->params->local_store_hi;
2147 bfd_vma lo = htab->params->local_store_lo;
2149 htab->local_store = hi + 1 - lo;
2151 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2152 if (m->p_type == PT_LOAD)
2153 for (i = 0; i < m->count; i++)
2154 if (m->sections[i]->size != 0
2155 && (m->sections[i]->vma < lo
2156 || m->sections[i]->vma > hi
2157 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2158 return m->sections[i];
2160 return NULL;
2163 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2164 Search for stack adjusting insns, and return the sp delta.
2165 If a store of lr is found save the instruction offset to *LR_STORE.
2166 If a stack adjusting instruction is found, save that offset to
2167 *SP_ADJUST. */
2169 static int
2170 find_function_stack_adjust (asection *sec,
2171 bfd_vma offset,
2172 bfd_vma *lr_store,
2173 bfd_vma *sp_adjust)
/* Tracked value of each of the 128 SPU registers, built up by
   partially interpreting prologue instructions; lr is register 0
   and sp is register 1 (see the stqd case below).  */
2175 int reg[128];
2177 memset (reg, 0, sizeof (reg));
2178 for ( ; offset + 4 <= sec->size; offset += 4)
2180 unsigned char buf[4];
2181 int rt, ra;
2182 int imm;
2184 /* Assume no relocs on stack adjusing insns. */
2185 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2186 break;
/* rt/ra fields straddle the last two big-endian bytes.  */
2188 rt = buf[3] & 0x7f;
2189 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2191 if (buf[0] == 0x24 /* stqd */)
2193 if (rt == 0 /* lr */ && ra == 1 /* sp */)
2194 *lr_store = offset;
2195 continue;
2198 /* Partly decoded immediate field. */
2199 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2201 if (buf[0] == 0x1c /* ai */)
2203 imm >>= 7;
2204 imm = (imm ^ 0x200) - 0x200;
2205 reg[rt] = reg[ra] + imm;
/* A positive adjustment of sp would not be frame setup; a
   negative one is the frame allocation we are after.  */
2207 if (rt == 1 /* sp */)
2209 if (reg[rt] > 0)
2210 break;
2211 *sp_adjust = offset;
2212 return reg[rt];
2215 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2217 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2219 reg[rt] = reg[ra] + reg[rb];
2220 if (rt == 1)
2222 if (reg[rt] > 0)
2223 break;
2224 *sp_adjust = offset;
2225 return reg[rt];
2228 else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2230 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2232 reg[rt] = reg[rb] - reg[ra];
2233 if (rt == 1)
2235 if (reg[rt] > 0)
2236 break;
2237 *sp_adjust = offset;
2238 return reg[rt];
/* Immediate-load forms: track the constants so that a later
   a/sf on sp computes the right delta.  */
2241 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2243 if (buf[0] >= 0x42 /* ila */)
2244 imm |= (buf[0] & 1) << 17;
2245 else
2247 imm &= 0xffff;
2249 if (buf[0] == 0x40 /* il */)
2251 if ((buf[1] & 0x80) == 0)
2252 continue;
2253 imm = (imm ^ 0x8000) - 0x8000;
2255 else if ((buf[1] & 0x80) == 0 /* ilhu */)
2256 imm <<= 16;
2258 reg[rt] = imm;
2259 continue;
2261 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2263 reg[rt] |= imm & 0xffff;
2264 continue;
2266 else if (buf[0] == 0x04 /* ori */)
2268 imm >>= 7;
2269 imm = (imm ^ 0x200) - 0x200;
2270 reg[rt] = reg[ra] | imm;
2271 continue;
2273 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
2275 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
2276 | ((imm & 0x4000) ? 0x00ff0000 : 0)
2277 | ((imm & 0x2000) ? 0x0000ff00 : 0)
2278 | ((imm & 0x1000) ? 0x000000ff : 0));
2279 continue;
2281 else if (buf[0] == 0x16 /* andbi */)
2283 imm >>= 7;
2284 imm &= 0xff;
2285 imm |= imm << 8;
2286 imm |= imm << 16;
2287 reg[rt] = reg[ra] & imm;
2288 continue;
2290 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2292 /* Used in pic reg load. Say rt is trashed. Won't be used
2293 in stack adjust, but we need to continue past this branch. */
2294 reg[rt] = 0;
2295 continue;
2297 else if (is_branch (buf) || is_indirect_branch (buf))
2298 /* If we hit a branch then we must be out of the prologue. */
2299 break;
/* Fell off the end of the section, or hit a branch, without seeing
   a stack adjustment: report no frame.  */
2302 return 0;
2305 /* qsort predicate to sort symbols by section and value. */
2307 static Elf_Internal_Sym *sort_syms_syms;
2308 static asection **sort_syms_psecs;
2310 static int
2311 sort_syms (const void *a, const void *b)
2313 Elf_Internal_Sym *const *s1 = a;
2314 Elf_Internal_Sym *const *s2 = b;
2315 asection *sec1,*sec2;
2316 bfd_signed_vma delta;
2318 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2319 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2321 if (sec1 != sec2)
2322 return sec1->index - sec2->index;
2324 delta = (*s1)->st_value - (*s2)->st_value;
2325 if (delta != 0)
2326 return delta < 0 ? -1 : 1;
2328 delta = (*s2)->st_size - (*s1)->st_size;
2329 if (delta != 0)
2330 return delta < 0 ? -1 : 1;
2332 return *s1 < *s2 ? -1 : 1;
2335 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2336 entries for section SEC. */
2338 static struct spu_elf_stack_info *
2339 alloc_stack_info (asection *sec, int max_fun)
2341 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2342 bfd_size_type amt;
2344 amt = sizeof (struct spu_elf_stack_info);
2345 amt += (max_fun - 1) * sizeof (struct function_info);
2346 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2347 if (sec_data->u.i.stack_info != NULL)
2348 sec_data->u.i.stack_info->max_fun = max_fun;
2349 return sec_data->u.i.stack_info;
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.
   SYM_H is an Elf_Internal_Sym * when GLOBAL is false, otherwise an
   elf_link_hash_entry *.  Returns the (possibly pre-existing) entry,
   or NULL on allocation failure.  */

static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section function table.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* Extract offset and size from whichever symbol flavour we got.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last entry at or below OFF; the array is kept sorted.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Grow the array if full: new capacity is 1.5x plus 20.  The
     zeroing of the tail matters -- new entries rely on zero init.  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Insert at position i+1, shifting any later entries up.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  /* find_function_stack_adjust returns a negative delta; negate to
     record stack usage as a positive byte count.  */
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
2447 /* Return the name of FUN. */
2449 static const char *
2450 func_name (struct function_info *fun)
2452 asection *sec;
2453 bfd *ibfd;
2454 Elf_Internal_Shdr *symtab_hdr;
2456 while (fun->start != NULL)
2457 fun = fun->start;
2459 if (fun->global)
2460 return fun->u.h->root.root.string;
2462 sec = fun->sec;
2463 if (fun->u.sym->st_name == 0)
2465 size_t len = strlen (sec->name);
2466 char *name = bfd_malloc (len + 10);
2467 if (name == NULL)
2468 return "(null)";
2469 sprintf (name, "%s+%lx", sec->name,
2470 (unsigned long) fun->u.sym->st_value & 0xffffffff);
2471 return name;
2473 ibfd = sec->owner;
2474 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2475 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2478 /* Read the instruction at OFF in SEC. Return true iff the instruction
2479 is a nop, lnop, or stop 0 (all zero insn). */
2481 static bfd_boolean
2482 is_nop (asection *sec, bfd_vma off)
2484 unsigned char insn[4];
2486 if (off + 4 > sec->size
2487 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2488 return FALSE;
2489 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2490 return TRUE;
2491 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2492 return TRUE;
2493 return FALSE;
2496 /* Extend the range of FUN to cover nop padding up to LIMIT.
2497 Return TRUE iff some instruction other than a NOP was found. */
2499 static bfd_boolean
2500 insns_at_end (struct function_info *fun, bfd_vma limit)
2502 bfd_vma off = (fun->hi + 3) & -4;
2504 while (off < limit && is_nop (fun->sec, off))
2505 off += 4;
2506 if (off < limit)
2508 fun->hi = off;
2509 return TRUE;
2511 fun->hi = limit;
2512 return FALSE;
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC, i.e.
   address ranges in the section not covered by any function_info.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  if (sinfo == NULL)
    return FALSE;

  /* Entries are sorted by lo; clamp any entry that runs into its
     successor, and probe non-nop gaps between consecutive entries.  */
  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      /* Uncovered space before the first entry is a gap too.  */
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  /* Last function claims to extend past the section.  */
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
2561 /* Search current function info for a function that contains address
2562 OFFSET in section SEC. */
2564 static struct function_info *
2565 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2567 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2568 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2569 int lo, hi, mid;
2571 lo = 0;
2572 hi = sinfo->num_fun;
2573 while (lo < hi)
2575 mid = (lo + hi) / 2;
2576 if (offset < sinfo->fun[mid].lo)
2577 hi = mid;
2578 else if (offset >= sinfo->fun[mid].hi)
2579 lo = mid + 1;
2580 else
2581 return &sinfo->fun[mid];
2583 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2584 sec, offset);
2585 bfd_set_error (bfd_error_bad_value);
2586 return NULL;
2589 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2590 if CALLEE was new. If this function return FALSE, CALLEE should
2591 be freed. */
2593 static bfd_boolean
2594 insert_callee (struct function_info *caller, struct call_info *callee)
2596 struct call_info **pp, *p;
2598 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2599 if (p->fun == callee->fun)
2601 /* Tail calls use less stack than normal calls. Retain entry
2602 for normal call over one for tail call. */
2603 p->is_tail &= callee->is_tail;
2604 if (!p->is_tail)
2606 p->fun->start = NULL;
2607 p->fun->is_func = TRUE;
2609 p->count += callee->count;
2610 /* Reorder list so most recent call is first. */
2611 *pp = p->next;
2612 p->next = caller->call_list;
2613 caller->call_list = p;
2614 return FALSE;
2616 callee->next = caller->call_list;
2617 caller->call_list = callee;
2618 return TRUE;
2621 /* Copy CALL and insert the copy into CALLER. */
2623 static bfd_boolean
2624 copy_callee (struct function_info *caller, const struct call_info *call)
2626 struct call_info *callee;
2627 callee = bfd_malloc (sizeof (*callee));
2628 if (callee == NULL)
2629 return FALSE;
2630 *callee = *call;
2631 if (!insert_callee (caller, callee))
2632 free (callee);
2633 return TRUE;
2636 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2637 overlay stub sections. */
2639 static bfd_boolean
2640 interesting_section (asection *s)
2642 return (s->output_section != bfd_abs_section_ptr
2643 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2644 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2645 && s->size != 0);
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */

static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  /* Static so the "call to non-code section" diagnostic is only
     emitted once per link, not once per offending reloc.  */
  static bfd_boolean warned;

  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return TRUE;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_boolean nonbranch, is_call;
      struct function_info *caller;
      struct call_info *callee;

      r_type = ELF32_R_TYPE (irela->r_info);
      /* Only REL16/ADDR16 relocs can appear in branch instructions.  */
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      if (sym_sec == NULL
	  || sym_sec->output_section == bfd_abs_section_ptr)
	continue;

      is_call = FALSE;
      if (!nonbranch)
	{
	  unsigned char insn[4];

	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return FALSE;
	  if (is_branch (insn))
	    {
	      /* 0x31/0x33 are brasl/brsl, i.e. branch-and-set-link.  */
	      is_call = (insn[0] & 0xfd) == 0x31;
	      /* Extract the 13-bit overlay priority hint encoded in
		 the low bytes of the branch instruction.  */
	      priority = insn[1] & 0x0f;
	      priority <<= 8;
	      priority |= insn[2];
	      priority <<= 8;
	      priority |= insn[3];
	      priority >>= 7;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      (_("%B(%A+0x%v): call to non-code section"
			 " %B(%A), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = TRUE;
		  continue;
		}
	    }
	  else
	    {
	      /* A REL16/ADDR16 reloc on a non-branch insn; treat it
		 like any other data reference below, except that
		 branch hints are ignored entirely.  */
	      nonbranch = TRUE;
	      if (is_hint (insn))
		continue;
	    }
	}

      if (nonbranch)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    {
	      if (call_tree && spu_hash_table (info)->params->auto_overlay)
		spu_hash_table (info)->non_ovly_stub += 1;
	      /* If the symbol type is STT_FUNC then this must be a
		 function pointer initialisation.  */
	      continue;
	    }
	  /* Ignore data references.  */
	  if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	      != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	    continue;
	  /* Otherwise we probably have a jump table reloc for
	     a switch statement or some other reference to a
	     code label.  */
	}

      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  struct function_info *fun;

	  /* A non-zero addend means the target is not the symbol
	     itself; fabricate a local symbol at the target address.  */
	  if (irela->r_addend != 0)
	    {
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake symbol unless maybe_insert_function kept
	     a reference to it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      callee->is_pasted = FALSE;
      callee->broken_cycle = FALSE;
      callee->priority = priority;
      /* Non-branch references don't contribute to the call count.  */
      callee->count = nonbranch? 0 : 1;
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else if (callee->fun->start == NULL)
	    {
	      /* Link the branch target back to the entry part of the
		 caller, unless that would create a self-reference.  */
	      struct function_info *caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;

	      if (caller_start != callee->fun)
		callee->fun->start = caller_start;
	    }
	  else
	    {
	      /* Both already belong to chains; if they resolve to
		 different entry parts, the target must be a separate
		 function after all.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = TRUE;
		}
	    }
	}
    }

  return TRUE;
}
/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.
   Creates a fake symbol covering all of SEC and links it, as a
   "pasted" tail call, onto the last function of the section laid out
   immediately before SEC in the output.  */

static bfd_boolean
pasted_function (asection *sec)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Fabricate a local symbol spanning the whole section.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      struct call_info *callee = bfd_malloc (sizeof *callee);
	      if (callee == NULL)
		return FALSE;

	      /* This piece continues the preceding function.  */
	      fun->start = fun_start;
	      callee->fun = fun;
	      callee->is_tail = TRUE;
	      callee->is_pasted = TRUE;
	      callee->broken_cycle = FALSE;
	      callee->priority = 0;
	      callee->count = 1;
	      if (!insert_callee (fun_start, callee))
		free (callee);
	      return TRUE;
	    }
	  break;
	}
      /* Remember the last function of each preceding input section
	 so we know what to paste onto when we reach SEC.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Don't return an error if we did not find a function preceding this
     section.  The section may have incorrect flags.  */
  return TRUE;
}
/* Map address ranges in code sections to functions.
   Pass 1: record properly typed function symbols per input bfd.
   Pass 2 (only if gaps remain): infer more function starts from
   relocations and global symbols, then stretch zero-size entries and
   handle symbol-less sections (.init/.fini pieces).  */

static bfd_boolean
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  /* Per-bfd arrays of selected symbol pointers and their sections,
     kept so the second pass can reuse the first pass's selection.  */
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  bfd_boolean gaps = FALSE;

  /* Count the input bfds to size the per-bfd arrays.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    bfd_idx++;

  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols at all: any interesting section is a gap.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec))
		{
		  gaps = TRUE;
		  break;
		}
	  continue;
	}

      if (symtab_hdr->contents != NULL)
	{
	  /* Don't use cached symbols since the generic ELF linker
	     code only reads local symbols, and we need globals too.  */
	  free (symtab_hdr->contents);
	  symtab_hdr->contents = NULL;
	}
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      /* Cache the full symbol table; later passes index into it.  */
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return FALSE;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return FALSE;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      /* NULL-terminate for the pass-2 scan below.  */
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  /* Count symbols in the same section to size its table.  */
	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, FALSE))
	      return FALSE;
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check: the reloc scan may have filled the gaps.  */
	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
	{
	  extern const bfd_target bfd_elf32_spu_vec;
	  asection *sec;

	  if (ibfd->xvec != &bfd_elf32_spu_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL && sinfo->num_fun != 0)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk backwards, making each entry extend to
		       its successor's start.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }

		    sinfo->fun[0].lo = 0;
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec))
		  return FALSE;
	      }
	}
    }

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
3167 /* Iterate over all function_info we have collected, calling DOIT on
3168 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3169 if ROOT_ONLY. */
3171 static bfd_boolean
3172 for_each_node (bfd_boolean (*doit) (struct function_info *,
3173 struct bfd_link_info *,
3174 void *),
3175 struct bfd_link_info *info,
3176 void *param,
3177 int root_only)
3179 bfd *ibfd;
3181 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3183 extern const bfd_target bfd_elf32_spu_vec;
3184 asection *sec;
3186 if (ibfd->xvec != &bfd_elf32_spu_vec)
3187 continue;
3189 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3191 struct _spu_elf_section_data *sec_data;
3192 struct spu_elf_stack_info *sinfo;
3194 if ((sec_data = spu_elf_section_data (sec)) != NULL
3195 && (sinfo = sec_data->u.i.stack_info) != NULL)
3197 int i;
3198 for (i = 0; i < sinfo->num_fun; ++i)
3199 if (!root_only || !sinfo->fun[i].non_root)
3200 if (!doit (&sinfo->fun[i], info, param))
3201 return FALSE;
3205 return TRUE;
3208 /* Transfer call info attached to struct function_info entries for
3209 all of a given function's sections to the first entry. */
3211 static bfd_boolean
3212 transfer_calls (struct function_info *fun,
3213 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3214 void *param ATTRIBUTE_UNUSED)
3216 struct function_info *start = fun->start;
3218 if (start != NULL)
3220 struct call_info *call, *call_next;
3222 while (start->start != NULL)
3223 start = start->start;
3224 for (call = fun->call_list; call != NULL; call = call_next)
3226 call_next = call->next;
3227 if (!insert_callee (start, call))
3228 free (call);
3230 fun->call_list = NULL;
3232 return TRUE;
3235 /* Mark nodes in the call graph that are called by some other node. */
3237 static bfd_boolean
3238 mark_non_root (struct function_info *fun,
3239 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3240 void *param ATTRIBUTE_UNUSED)
3242 struct call_info *call;
3244 if (fun->visit1)
3245 return TRUE;
3246 fun->visit1 = TRUE;
3247 for (call = fun->call_list; call; call = call->next)
3249 call->fun->non_root = TRUE;
3250 mark_non_root (call->fun, 0, 0);
3252 return TRUE;
/* Remove cycles from the call graph.  Set depth of nodes.
   DFS from FUN with PARAM holding the current depth on entry and the
   maximum depth reached on return.  Back edges to a node still on the
   current DFS path are flagged broken_cycle.  */

static bfd_boolean
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  fun->visit2 = TRUE;
  /* "marking" flags nodes on the active DFS path, used below to
     recognise back edges.  */
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      /* Pasted continuation pieces stay at the same depth.  */
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
	{
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return FALSE;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  /* Back edge: CALL closes a cycle.  */
	  struct spu_link_hash_table *htab = spu_hash_table (info);

	  if (!htab->params->auto_overlay
	      && htab->params->stack_analysis)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      info->callbacks->info (_("Stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }

	  call->broken_cycle = TRUE;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
  *(unsigned int *) param = max_depth;
  return TRUE;
}
3305 /* Check that we actually visited all nodes in remove_cycles. If we
3306 didn't, then there is some cycle in the call graph not attached to
3307 any root node. Arbitrarily choose a node in the cycle as a new
3308 root and break the cycle. */
3310 static bfd_boolean
3311 mark_detached_root (struct function_info *fun,
3312 struct bfd_link_info *info,
3313 void *param)
3315 if (fun->visit2)
3316 return TRUE;
3317 fun->non_root = FALSE;
3318 *(unsigned int *) param = 0;
3319 return remove_cycles (fun, info, param);
3322 /* Populate call_list for each function. */
3324 static bfd_boolean
3325 build_call_tree (struct bfd_link_info *info)
3327 bfd *ibfd;
3328 unsigned int depth;
3330 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3332 extern const bfd_target bfd_elf32_spu_vec;
3333 asection *sec;
3335 if (ibfd->xvec != &bfd_elf32_spu_vec)
3336 continue;
3338 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3339 if (!mark_functions_via_relocs (sec, info, TRUE))
3340 return FALSE;
3343 /* Transfer call info from hot/cold section part of function
3344 to main entry. */
3345 if (!spu_hash_table (info)->params->auto_overlay
3346 && !for_each_node (transfer_calls, info, 0, FALSE))
3347 return FALSE;
3349 /* Find the call graph root(s). */
3350 if (!for_each_node (mark_non_root, info, 0, FALSE))
3351 return FALSE;
3353 /* Remove cycles from the call graph. We start from the root node(s)
3354 so that we break cycles in a reasonable place. */
3355 depth = 0;
3356 if (!for_each_node (remove_cycles, info, &depth, TRUE))
3357 return FALSE;
3359 return for_each_node (mark_detached_root, info, &depth, FALSE);
3362 /* qsort predicate to sort calls by priority, max_depth then count. */
3364 static int
3365 sort_calls (const void *a, const void *b)
3367 struct call_info *const *c1 = a;
3368 struct call_info *const *c2 = b;
3369 int delta;
3371 delta = (*c2)->priority - (*c1)->priority;
3372 if (delta != 0)
3373 return delta;
3375 delta = (*c2)->max_depth - (*c1)->max_depth;
3376 if (delta != 0)
3377 return delta;
3379 delta = (*c2)->count - (*c1)->count;
3380 if (delta != 0)
3381 return delta;
3383 return (char *) c1 - (char *) c2;
/* Result parameter for mark_overlay_section: accumulates the largest
   text-plus-rodata size seen, i.e. the minimum overlay region size.  */
struct _mos_param {
  unsigned int max_overlay_size;
};
/* Set linker_mark and gc_mark on any sections that we will put in
   overlays.  These flags are used by the generic ELF linker, but we
   won't be continuing on to bfd_elf_final_link so it is OK to use
   them.  linker_mark is clear before we get here.  Set segment_mark
   on sections that are part of a pasted function (excluding the last
   section).

   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections since

   Sort the call graph so that the deepest nodes will be visited
   first.  */

static bfd_boolean
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || strncmp (fun->sec->name, ".text.ia.", 9) == 0
	  || strcmp (fun->sec->name, ".init") == 0
	  || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (strncmp (fun->sec->name, ".text.", 6) == 0)
	    {
	      /* ".text.FOO" -> ".rodata.FOO": 7 bytes of ".rodata"
		 plus the ".FOO" tail including its NUL (len - 4).  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
	    {
	      /* ".gnu.linkonce.t.FOO" -> ".gnu.linkonce.r.FOO":
		 just flip the 't' at index 14.  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		/* For grouped sections, only accept a rodata section
		   that is a member of the same group.  */
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      /* Too big for a cache line; drop the rodata.  */
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  /* Sort the call list so the deepest/highest-priority calls are
     visited first.  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      /* Rebuild the linked list in sorted order.  */
      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!call->broken_cycle
	  && !mark_overlay_section (call->fun, info, param))
	return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
/* If non-zero then unmark functions called from those within sections
   that we need to unmark.  Unfortunately this isn't reliable since the
   call graph cannot know the destination of function pointer calls.  */
#define RECURSE_UNMARK 0

/* Parameter block for unmark_overlay_section.  */
struct _uos_param {
  /* Input section holding a user-supplied overlay manager, if any.  */
  asection *exclude_input_section;
  /* Output section (".interrupt") whose code must stay non-overlay.  */
  asection *exclude_output_section;
  /* Non-zero while visiting nodes below an excluded one; only
     consulted when RECURSE_UNMARK is non-zero.  */
  unsigned long clearing;
};
3568 /* Undo some of mark_overlay_section's work. */
3570 static bfd_boolean
3571 unmark_overlay_section (struct function_info *fun,
3572 struct bfd_link_info *info,
3573 void *param)
3575 struct call_info *call;
3576 struct _uos_param *uos_param = param;
3577 unsigned int excluded = 0;
3579 if (fun->visit5)
3580 return TRUE;
3582 fun->visit5 = TRUE;
3584 excluded = 0;
3585 if (fun->sec == uos_param->exclude_input_section
3586 || fun->sec->output_section == uos_param->exclude_output_section)
3587 excluded = 1;
3589 if (RECURSE_UNMARK)
3590 uos_param->clearing += excluded;
3592 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3594 fun->sec->linker_mark = 0;
3595 if (fun->rodata)
3596 fun->rodata->linker_mark = 0;
3599 for (call = fun->call_list; call != NULL; call = call->next)
3600 if (!call->broken_cycle
3601 && !unmark_overlay_section (call->fun, info, param))
3602 return FALSE;
3604 if (RECURSE_UNMARK)
3605 uos_param->clearing -= excluded;
3606 return TRUE;
/* Parameter block for collect_lib_sections.  */
struct _cl_param {
  /* Space still available for non-overlay "library" functions.  */
  unsigned int lib_size;
  /* Output cursor: (text, rodata) section pairs are appended here.  */
  asection **lib_sections;
};
3614 /* Add sections we have marked as belonging to overlays to an array
3615 for consideration as non-overlay sections. The array consist of
3616 pairs of sections, (text,rodata), for functions in the call graph. */
3618 static bfd_boolean
3619 collect_lib_sections (struct function_info *fun,
3620 struct bfd_link_info *info,
3621 void *param)
3623 struct _cl_param *lib_param = param;
3624 struct call_info *call;
3625 unsigned int size;
3627 if (fun->visit6)
3628 return TRUE;
3630 fun->visit6 = TRUE;
3631 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3632 return TRUE;
3634 size = fun->sec->size;
3635 if (fun->rodata)
3636 size += fun->rodata->size;
3638 if (size <= lib_param->lib_size)
3640 *lib_param->lib_sections++ = fun->sec;
3641 fun->sec->gc_mark = 0;
3642 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3644 *lib_param->lib_sections++ = fun->rodata;
3645 fun->rodata->gc_mark = 0;
3647 else
3648 *lib_param->lib_sections++ = NULL;
3651 for (call = fun->call_list; call != NULL; call = call->next)
3652 if (!call->broken_cycle)
3653 collect_lib_sections (call->fun, info, param);
3655 return TRUE;
3658 /* qsort predicate to sort sections by call count. */
3660 static int
3661 sort_lib (const void *a, const void *b)
3663 asection *const *s1 = a;
3664 asection *const *s2 = b;
3665 struct _spu_elf_section_data *sec_data;
3666 struct spu_elf_stack_info *sinfo;
3667 int delta;
3669 delta = 0;
3670 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3671 && (sinfo = sec_data->u.i.stack_info) != NULL)
3673 int i;
3674 for (i = 0; i < sinfo->num_fun; ++i)
3675 delta -= sinfo->fun[i].call_count;
3678 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3679 && (sinfo = sec_data->u.i.stack_info) != NULL)
3681 int i;
3682 for (i = 0; i < sinfo->num_fun; ++i)
3683 delta += sinfo->fun[i].call_count;
3686 if (delta != 0)
3687 return delta;
3689 return s1 - s2;
/* Remove some sections from those marked to be in overlays.  Choose
   those that are called from many places, likely library functions.
   LIB_SIZE is the budget available for such non-overlay functions;
   returns the unused remainder of that budget, or (unsigned int) -1
   on error.  */

static unsigned int
auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
{
  bfd *ibfd;
  asection **lib_sections;
  unsigned int i, lib_count;
  struct _cl_param collect_lib_param;
  struct function_info dummy_caller;
  struct spu_link_hash_table *htab;

  memset (&dummy_caller, 0, sizeof (dummy_caller));

  /* Count candidate sections to size the array below.  */
  lib_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (sec->linker_mark
	    && sec->size < lib_size
	    && (sec->flags & SEC_CODE) != 0)
	  lib_count += 1;
    }

  /* NOTE(review): lib_sections is not freed on the early error
     returns below; the caller treats -1 as fatal and exits, so the
     leak appears benign — confirm.  */
  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
  if (lib_sections == NULL)
    return (unsigned int) -1;
  collect_lib_param.lib_size = lib_size;
  collect_lib_param.lib_sections = lib_sections;
  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
		      TRUE))
    return (unsigned int) -1;
  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;

  /* Sort sections so that those with the most calls are first.  */
  if (lib_count > 1)
    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);

  htab = spu_hash_table (info);
  for (i = 0; i < lib_count; i++)
    {
      unsigned int tmp, stub_size;
      asection *sec;
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      sec = lib_sections[2 * i];
      /* If this section is OK, its size must be less than lib_size.  */
      tmp = sec->size;
      /* If it has a rodata section, then add that too.  */
      if (lib_sections[2 * i + 1])
	tmp += lib_sections[2 * i + 1]->size;
      /* Add any new overlay call stubs needed by the section.
	 dummy_caller tracks stubs needed by sections already moved
	 out of overlays, so only count calls not seen before.  */
      stub_size = 0;
      if (tmp < lib_size
	  && (sec_data = spu_elf_section_data (sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int k;
	  struct call_info *call;

	  for (k = 0; k < sinfo->num_fun; ++k)
	    for (call = sinfo->fun[k].call_list; call; call = call->next)
	      if (call->fun->sec->linker_mark)
		{
		  struct call_info *p;
		  for (p = dummy_caller.call_list; p; p = p->next)
		    if (p->fun == call->fun)
		      break;
		  if (!p)
		    stub_size += ovl_stub_size (htab->params);
		}
	}
      if (tmp + stub_size < lib_size)
	{
	  struct call_info **pp, *p;

	  /* This section fits.  Mark it as non-overlay.  */
	  lib_sections[2 * i]->linker_mark = 0;
	  if (lib_sections[2 * i + 1])
	    lib_sections[2 * i + 1]->linker_mark = 0;
	  lib_size -= tmp + stub_size;
	  /* Call stubs to the section we just added are no longer
	     needed.  */
	  pp = &dummy_caller.call_list;
	  while ((p = *pp) != NULL)
	    if (!p->fun->sec->linker_mark)
	      {
		lib_size += ovl_stub_size (htab->params);
		*pp = p->next;
		free (p);
	      }
	    else
	      pp = &p->next;
	  /* Add new call stubs to dummy_caller.  */
	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->u.i.stack_info) != NULL)
	    {
	      int k;
	      struct call_info *call;

	      for (k = 0; k < sinfo->num_fun; ++k)
		for (call = sinfo->fun[k].call_list;
		     call;
		     call = call->next)
		  if (call->fun->sec->linker_mark)
		    {
		      struct call_info *callee;
		      callee = bfd_malloc (sizeof (*callee));
		      if (callee == NULL)
			return (unsigned int) -1;
		      *callee = *call;
		      if (!insert_callee (&dummy_caller, callee))
			free (callee);
		    }
	    }
	}
    }
  /* Release the scratch call list.  */
  while (dummy_caller.call_list != NULL)
    {
      struct call_info *call = dummy_caller.call_list;
      dummy_caller.call_list = call->next;
      free (call);
    }
  /* Re-set gc_mark, cleared by collect_lib_sections, on everything
     we gathered.  */
  for (i = 0; i < 2 * lib_count; i++)
    if (lib_sections[i])
      lib_sections[i]->gc_mark = 1;
  free (lib_sections);
  return lib_size;
}
/* Build an array of overlay sections.  The deepest node's section is
   added first, then its parent node's section, then everything called
   from the parent section.  The idea being to group sections to
   minimise calls between different overlays.  */

static bfd_boolean
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  asection ***ovly_sections = param;

  /* Visit each call graph node at most once.  */
  if (fun->visit7)
    return TRUE;
  fun->visit7 = TRUE;

  /* Descend into the first non-pasted callee before emitting this
     function, so the deepest section is placed first.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return FALSE;
	break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	*(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* A segment_mark section must have a pasted callee.  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now visit the remaining callees.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
	&& !collect_overlays (call->fun, info, ovly_sections))
      return FALSE;

  /* Also visit any other functions that live in this section.  */
  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return FALSE;
	}
    }

  return TRUE;
}
/* Parameter block for sum_stack.  */
struct _sum_stack_param {
  /* Cumulative stack requirement of the node just visited.  */
  size_t cum_stack;
  /* Maximum cumulative stack over all call graph root nodes.  */
  size_t overall_stack;
  /* Whether to define a __stack_* symbol for each function.  */
  bfd_boolean emit_stack_syms;
};
/* Descend the call graph for FUN, accumulating total stack required.  */

static bfd_boolean
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  /* Once visited, fun->stack already holds the cumulative value set
     below, so just report it.  */
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  max = NULL;
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
	continue;
      if (!call->is_pasted)
	has_call = TRUE;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* When auto-overlaying, only the totals are wanted; skip the
     reporting and symbol emission below.  */
  if (htab->params->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
	info->callbacks->info (_(" %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
      info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  info->callbacks->minfo (_(" calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted && !call->broken_cycle)
	      {
		const char *f2 = func_name (call->fun);
		/* '*' marks the callee on the max-stack path, 't' a
		   tail call.  */
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* Define an absolute __stack_<func> (or __stack_<secid>_<func>
	 for local symbols) giving the cumulative stack.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return FALSE;

      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
4037 /* SEC is part of a pasted function. Return the call_info for the
4038 next section of this function. */
4040 static struct call_info *
4041 find_pasted_call (asection *sec)
4043 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4044 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4045 struct call_info *call;
4046 int k;
4048 for (k = 0; k < sinfo->num_fun; ++k)
4049 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4050 if (call->is_pasted)
4051 return call;
4052 abort ();
4053 return 0;
4056 /* qsort predicate to sort bfds by file name. */
4058 static int
4059 sort_bfds (const void *a, const void *b)
4061 bfd *const *abfd1 = a;
4062 bfd *const *abfd2 = b;
4064 return strcmp ((*abfd1)->filename, (*abfd2)->filename);
4067 static unsigned int
4068 print_one_overlay_section (FILE *script,
4069 unsigned int base,
4070 unsigned int count,
4071 unsigned int ovlynum,
4072 unsigned int *ovly_map,
4073 asection **ovly_sections,
4074 struct bfd_link_info *info)
4076 unsigned int j;
4078 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4080 asection *sec = ovly_sections[2 * j];
4082 if (fprintf (script, " %s%c%s (%s)\n",
4083 (sec->owner->my_archive != NULL
4084 ? sec->owner->my_archive->filename : ""),
4085 info->path_separator,
4086 sec->owner->filename,
4087 sec->name) <= 0)
4088 return -1;
4089 if (sec->segment_mark)
4091 struct call_info *call = find_pasted_call (sec);
4092 while (call != NULL)
4094 struct function_info *call_fun = call->fun;
4095 sec = call_fun->sec;
4096 if (fprintf (script, " %s%c%s (%s)\n",
4097 (sec->owner->my_archive != NULL
4098 ? sec->owner->my_archive->filename : ""),
4099 info->path_separator,
4100 sec->owner->filename,
4101 sec->name) <= 0)
4102 return -1;
4103 for (call = call_fun->call_list; call; call = call->next)
4104 if (call->is_pasted)
4105 break;
4110 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4112 asection *sec = ovly_sections[2 * j + 1];
4113 if (sec != NULL
4114 && fprintf (script, " %s%c%s (%s)\n",
4115 (sec->owner->my_archive != NULL
4116 ? sec->owner->my_archive->filename : ""),
4117 info->path_separator,
4118 sec->owner->filename,
4119 sec->name) <= 0)
4120 return -1;
4122 sec = ovly_sections[2 * j];
4123 if (sec->segment_mark)
4125 struct call_info *call = find_pasted_call (sec);
4126 while (call != NULL)
4128 struct function_info *call_fun = call->fun;
4129 sec = call_fun->rodata;
4130 if (sec != NULL
4131 && fprintf (script, " %s%c%s (%s)\n",
4132 (sec->owner->my_archive != NULL
4133 ? sec->owner->my_archive->filename : ""),
4134 info->path_separator,
4135 sec->owner->filename,
4136 sec->name) <= 0)
4137 return -1;
4138 for (call = call_fun->call_list; call; call = call->next)
4139 if (call->is_pasted)
4140 break;
4145 return j;
/* Handle --auto-overlay.  Decide which sections go into overlays,
   write a linker script describing the overlay layout, then either
   relink or exit so the user can relink with that script.  */

static void
spu_elf_auto_overlay (struct bfd_link_info *info)
{
  bfd *ibfd;
  bfd **bfd_arr;
  struct elf_segment_map *m;
  unsigned int fixed_size, lo, hi;
  unsigned int reserved;
  struct spu_link_hash_table *htab;
  unsigned int base, i, count, bfd_count;
  unsigned int region, ovlynum;
  asection **ovly_sections, **ovly_p;
  unsigned int *ovly_map;
  FILE *script;
  unsigned int total_overlay_size, overlay_size;
  const char *ovly_mgr_entry;
  struct elf_link_hash_entry *h;
  struct _mos_param mos_param;
  struct _uos_param uos_param;
  struct function_info dummy_caller;

  /* Find the extents of our loadable image.  */
  lo = (unsigned int) -1;
  hi = 0;
  for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0)
	  {
	    if (m->sections[i]->vma < lo)
	      lo = m->sections[i]->vma;
	    if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
	      hi = m->sections[i]->vma + m->sections[i]->size - 1;
	  }
  fixed_size = hi + 1 - lo;

  if (!discover_functions (info))
    goto err_exit;

  if (!build_call_tree (info))
    goto err_exit;

  htab = spu_hash_table (info);
  reserved = htab->params->auto_overlay_reserved;
  if (reserved == 0)
    {
      /* No explicit reservation: reserve the estimated maximum stack
	 plus any extra the user asked for.  */
      struct _sum_stack_param sum_stack_param;

      sum_stack_param.emit_stack_syms = 0;
      sum_stack_param.overall_stack = 0;
      if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
	goto err_exit;
      reserved = (sum_stack_param.overall_stack
		  + htab->params->extra_stack_space);
    }

  /* No need for overlays if everything already fits.  */
  if (fixed_size + reserved <= htab->local_store
      && htab->params->ovly_flavour != ovly_soft_icache)
    {
      htab->params->auto_overlay = 0;
      return;
    }

  uos_param.exclude_input_section = 0;
  uos_param.exclude_output_section
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");

  ovly_mgr_entry = "__ovly_load";
  if (htab->params->ovly_flavour == ovly_soft_icache)
    ovly_mgr_entry = "__icache_br_handler";
  h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
			    FALSE, FALSE, FALSE);
  if (h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular)
    {
      /* We have a user supplied overlay manager.  */
      uos_param.exclude_input_section = h->root.u.def.section;
    }
  else
    {
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
	 builtin version to .text, and will adjust .text size.  */
      fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
    }

  /* Mark overlay sections, and find max overlay section size.  */
  mos_param.max_overlay_size = 0;
  if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
    goto err_exit;

  /* We can't put the overlay manager or interrupt routines in
     overlays.  */
  uos_param.clearing = 0;
  if ((uos_param.exclude_input_section
       || uos_param.exclude_output_section)
      && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
    goto err_exit;

  bfd_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    ++bfd_count;
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
  if (bfd_arr == NULL)
    goto err_exit;

  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
  count = 0;
  bfd_count = 0;
  total_overlay_size = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;
      unsigned int old_count;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      old_count = count;
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (sec->linker_mark)
	  {
	    if ((sec->flags & SEC_CODE) != 0)
	      count += 1;
	    fixed_size -= sec->size;
	    total_overlay_size += sec->size;
	  }
	else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
		 && sec->output_section->owner == info->output_bfd
		 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
	  fixed_size -= sec->size;
      if (count != old_count)
	bfd_arr[bfd_count++] = ibfd;
    }

  /* Since the overlay link script selects sections by file name and
     section name, ensure that file names are unique.  */
  if (bfd_count > 1)
    {
      bfd_boolean ok = TRUE;

      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
      for (i = 1; i < bfd_count; ++i)
	if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
	  {
	    if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
	      {
		if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
		  info->callbacks->einfo (_("%s duplicated in %s\n"),
					  bfd_arr[i]->filename,
					  bfd_arr[i]->my_archive->filename);
		else
		  info->callbacks->einfo (_("%s duplicated\n"),
					  bfd_arr[i]->filename);
		ok = FALSE;
	      }
	  }
      if (!ok)
	{
	  info->callbacks->einfo (_("sorry, no support for duplicate "
				    "object files in auto-overlay script\n"));
	  bfd_set_error (bfd_error_bad_value);
	  goto err_exit;
	}
    }
  free (bfd_arr);

  fixed_size += reserved;
  fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
    {
      if (htab->params->ovly_flavour == ovly_soft_icache)
	{
	  /* Stubs in the non-icache area are bigger.  */
	  fixed_size += htab->non_ovly_stub * 16;
	  /* Space for icache manager tables.
	     a) Tag array, one quadword per cache line.
	     - word 0: ia address of present line, init to zero.  */
	  fixed_size += 16 << htab->num_lines_log2;
	  /* b) Rewrite "to" list, one quadword per cache line.  */
	  fixed_size += 16 << htab->num_lines_log2;
	  /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
	     to a power-of-two number of full quadwords) per cache line.  */
	  fixed_size += 16 << (htab->fromelem_size_log2
			       + htab->num_lines_log2);
	  /* d) Pointer to __ea backing store (toe), 1 quadword.  */
	  fixed_size += 16;
	}
      else
	{
	  /* Guess number of overlays.  Assuming overlay buffer is on
	     average only half full should be conservative.  */
	  ovlynum = (total_overlay_size * 2 * htab->params->num_lines
		     / (htab->local_store - fixed_size));
	  /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
	  fixed_size += ovlynum * 16 + 16 + 4 + 16;
	}
    }

  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
			      "size of 0x%v exceeds local store\n"),
			    (bfd_vma) fixed_size,
			    (bfd_vma) mos_param.max_overlay_size);

  /* Now see if we should put some functions in the non-overlay area.  */
  else if (fixed_size < htab->params->auto_overlay_fixed)
    {
      unsigned int max_fixed, lib_size;

      max_fixed = htab->local_store - mos_param.max_overlay_size;
      if (max_fixed > htab->params->auto_overlay_fixed)
	max_fixed = htab->params->auto_overlay_fixed;
      lib_size = max_fixed - fixed_size;
      lib_size = auto_ovl_lib_functions (info, lib_size);
      if (lib_size == (unsigned int) -1)
	goto err_exit;
      fixed_size = max_fixed - lib_size;
    }

  /* Build an array of sections, suitably sorted to place into
     overlays.  */
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
  if (ovly_sections == NULL)
    goto err_exit;
  ovly_p = ovly_sections;
  if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
    goto err_exit;
  count = (size_t) (ovly_p - ovly_sections) / 2;
  ovly_map = bfd_malloc (count * sizeof (*ovly_map));
  if (ovly_map == NULL)
    goto err_exit;

  memset (&dummy_caller, 0, sizeof (dummy_caller));
  overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
  if (htab->params->line_size != 0)
    overlay_size = htab->params->line_size;

  /* Greedily pack consecutive sections into overlays, accounting for
     alignment, pasted continuations, and the call stubs each overlay
     will need.  */
  base = 0;
  ovlynum = 0;
  while (base < count)
    {
      unsigned int size = 0, rosize = 0, roalign = 0;

      for (i = base; i < count; i++)
	{
	  asection *sec, *rosec;
	  unsigned int tmp, rotmp;
	  unsigned int num_stubs;
	  struct call_info *call, *pasty;
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;
	  unsigned int k;

	  /* See whether we can add this section to the current
	     overlay without overflowing our overlay buffer.  */
	  sec = ovly_sections[2 * i];
	  tmp = align_power (size, sec->alignment_power) + sec->size;
	  rotmp = rosize;
	  rosec = ovly_sections[2 * i + 1];
	  if (rosec != NULL)
	    {
	      rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
	      if (roalign < rosec->alignment_power)
		roalign = rosec->alignment_power;
	    }
	  if (align_power (tmp, roalign) + rotmp > overlay_size)
	    break;
	  if (sec->segment_mark)
	    {
	      /* Pasted sections must stay together, so add their
		 sizes too.  */
	      pasty = find_pasted_call (sec);
	      while (pasty != NULL)
		{
		  struct function_info *call_fun = pasty->fun;
		  tmp = (align_power (tmp, call_fun->sec->alignment_power)
			 + call_fun->sec->size);
		  if (call_fun->rodata)
		    {
		      rotmp = (align_power (rotmp,
					    call_fun->rodata->alignment_power)
			       + call_fun->rodata->size);
		      if (roalign < rosec->alignment_power)
			roalign = rosec->alignment_power;
		    }
		  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
		    if (pasty->is_pasted)
		      break;
		}
	    }
	  if (align_power (tmp, roalign) + rotmp > overlay_size)
	    break;

	  /* If we add this section, we might need new overlay call
	     stubs.  Add any overlay section calls to dummy_call.  */
	  pasty = NULL;
	  sec_data = spu_elf_section_data (sec);
	  sinfo = sec_data->u.i.stack_info;
	  for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
	    for (call = sinfo->fun[k].call_list; call; call = call->next)
	      if (call->is_pasted)
		{
		  BFD_ASSERT (pasty == NULL);
		  pasty = call;
		}
	      else if (call->fun->sec->linker_mark)
		{
		  if (!copy_callee (&dummy_caller, call))
		    goto err_exit;
		}
	  while (pasty != NULL)
	    {
	      struct function_info *call_fun = pasty->fun;
	      pasty = NULL;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  {
		    BFD_ASSERT (pasty == NULL);
		    pasty = call;
		  }
		else if (!copy_callee (&dummy_caller, call))
		  goto err_exit;
	    }

	  /* Calculate call stub size.  */
	  num_stubs = 0;
	  for (call = dummy_caller.call_list; call; call = call->next)
	    {
	      unsigned int stub_delta = 1;

	      if (htab->params->ovly_flavour == ovly_soft_icache)
		stub_delta = call->count;
	      num_stubs += stub_delta;

	      /* If the call is within this overlay, we won't need a
		 stub.  */
	      for (k = base; k < i + 1; k++)
		if (call->fun->sec == ovly_sections[2 * k])
		  {
		    num_stubs -= stub_delta;
		    break;
		  }
	    }
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && num_stubs > htab->params->max_branch)
	    break;
	  if (align_power (tmp, roalign) + rotmp
	      + num_stubs * ovl_stub_size (htab->params) > overlay_size)
	    break;
	  size = tmp;
	  rosize = rotmp;
	}

      if (i == base)
	{
	  /* Not even one section fits: it is simply too big.  */
	  info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
				  ovly_sections[2 * i]->owner,
				  ovly_sections[2 * i],
				  ovly_sections[2 * i + 1] ? " + rodata" : "");
	  bfd_set_error (bfd_error_bad_value);
	  goto err_exit;
	}

      while (dummy_caller.call_list != NULL)
	{
	  struct call_info *call = dummy_caller.call_list;
	  dummy_caller.call_list = call->next;
	  free (call);
	}

      ++ovlynum;
      while (base < i)
	ovly_map[base++] = ovlynum;
    }

  script = htab->params->spu_elf_open_overlay_script ();

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Emit one output section per cache line slot.  */
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
	goto file_err;

      if (fprintf (script,
		   " . = ALIGN (%u);\n"
		   " .ovl.init : { *(.ovl.init) }\n"
		   " . = ABSOLUTE (ADDR (.ovl.init));\n",
		   htab->params->line_size) <= 0)
	goto file_err;

      base = 0;
      ovlynum = 1;
      while (base < count)
	{
	  unsigned int indx = ovlynum - 1;
	  unsigned int vma, lma;

	  vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
	  lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);

	  if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
		       ": AT (LOADADDR (.ovl.init) + %u) {\n",
		       ovlynum, vma, lma) <= 0)
	    goto file_err;

	  base = print_one_overlay_section (script, base, count, ovlynum,
					    ovly_map, ovly_sections, info);
	  if (base == (unsigned) -1)
	    goto file_err;

	  if (fprintf (script, " }\n") <= 0)
	    goto file_err;

	  ovlynum++;
	}

      if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
		   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
	goto file_err;

      if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
	goto file_err;
    }
  else
    {
      /* Emit one OVERLAY command per buffer region.  */
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
	goto file_err;

      if (fprintf (script,
		   " . = ALIGN (16);\n"
		   " .ovl.init : { *(.ovl.init) }\n"
		   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
	goto file_err;

      for (region = 1; region <= htab->params->num_lines; region++)
	{
	  ovlynum = region;
	  base = 0;
	  while (base < count && ovly_map[base] < ovlynum)
	    base++;

	  if (base == count)
	    break;

	  if (region == 1)
	    {
	      /* We need to set lma since we are overlaying .ovl.init.  */
	      if (fprintf (script,
			   " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
		goto file_err;
	    }
	  else
	    {
	      if (fprintf (script, " OVERLAY :\n {\n") <= 0)
		goto file_err;
	    }

	  while (base < count)
	    {
	      if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
		goto file_err;

	      base = print_one_overlay_section (script, base, count, ovlynum,
						ovly_map, ovly_sections, info);
	      if (base == (unsigned) -1)
		goto file_err;

	      if (fprintf (script, " }\n") <= 0)
		goto file_err;

	      ovlynum += htab->params->num_lines;
	      while (base < count && ovly_map[base] < ovlynum)
		base++;
	    }

	  if (fprintf (script, " }\n") <= 0)
	    goto file_err;
	}

      if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
	goto file_err;
    }

  free (ovly_map);
  free (ovly_sections);

  if (fclose (script) != 0)
    goto file_err;

  if (htab->params->auto_overlay & AUTO_RELINK)
    (*htab->params->spu_elf_relink) ();

  xexit (0);

 file_err:
  bfd_set_error (bfd_error_system_call);
 err_exit:
  info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
  xexit (1);
}
4653 /* Provide an estimate of total stack required. */
4655 static bfd_boolean
4656 spu_elf_stack_analysis (struct bfd_link_info *info)
4658 struct spu_link_hash_table *htab;
4659 struct _sum_stack_param sum_stack_param;
4661 if (!discover_functions (info))
4662 return FALSE;
4664 if (!build_call_tree (info))
4665 return FALSE;
4667 htab = spu_hash_table (info);
4668 if (htab->params->stack_analysis)
4670 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4671 info->callbacks->minfo (_("\nStack size for functions. "
4672 "Annotations: '*' max stack, 't' tail call\n"));
4675 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4676 sum_stack_param.overall_stack = 0;
4677 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4678 return FALSE;
4680 if (htab->params->stack_analysis)
4681 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4682 (bfd_vma) sum_stack_param.overall_stack);
4683 return TRUE;
4686 /* Perform a final link. */
4688 static bfd_boolean
4689 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4691 struct spu_link_hash_table *htab = spu_hash_table (info);
4693 if (htab->params->auto_overlay)
4694 spu_elf_auto_overlay (info);
4696 if ((htab->params->stack_analysis
4697 || (htab->params->ovly_flavour == ovly_soft_icache
4698 && htab->params->lrlive_analysis))
4699 && !spu_elf_stack_analysis (info))
4700 info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4702 if (!spu_elf_build_stubs (info))
4703 info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4705 return bfd_elf_final_link (output_bfd, info);
4708 /* Called when not normally emitting relocs, ie. !info->relocatable
4709 and !info->emitrelocations. Returns a count of special relocs
4710 that need to be emitted. */
4712 static unsigned int
4713 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4715 Elf_Internal_Rela *relocs;
4716 unsigned int count = 0;
4718 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4719 info->keep_memory);
4720 if (relocs != NULL)
4722 Elf_Internal_Rela *rel;
4723 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4725 for (rel = relocs; rel < relend; rel++)
4727 int r_type = ELF32_R_TYPE (rel->r_info);
4728 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4729 ++count;
4732 if (elf_section_data (sec)->relocs != relocs)
4733 free (relocs);
4736 return count;
4739 /* Functions for adding fixup records to .fixup */
/* Each fixup record is one 32-bit big-endian word.  */
4741 #define FIXUP_RECORD_SIZE 4
/* Store 32-bit value ADDR as fixup record INDEX in .fixup.  */
4743 #define FIXUP_PUT(output_bfd,htab,index,addr) \
4744 bfd_put_32 (output_bfd, addr, \
4745 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
/* Read back fixup record INDEX from .fixup.  */
4746 #define FIXUP_GET(output_bfd,htab,index) \
4747 bfd_get_32 (output_bfd, \
4748 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4750 /* Store OFFSET in .fixup. This assumes it will be called with an
4751 increasing OFFSET. When this OFFSET fits with the last base offset,
4752 it just sets a bit, otherwise it adds a new fixup record. */
4753 static void
4754 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4755 bfd_vma offset)
4757 struct spu_link_hash_table *htab = spu_hash_table (info);
4758 asection *sfixup = htab->sfixup;
4759 bfd_vma qaddr = offset & ~(bfd_vma) 15;
4760 bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4761 if (sfixup->reloc_count == 0)
4763 FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4764 sfixup->reloc_count++;
4766 else
4768 bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4769 if (qaddr != (base & ~(bfd_vma) 15))
4771 if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4772 (*_bfd_error_handler) (_("fatal error while creating .fixup"));
4773 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4774 sfixup->reloc_count++;
4776 else
4777 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4781 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
/* Returns FALSE on hard error, TRUE on success, and 2 when
   R_SPU_PPU32/R_SPU_PPU64 relocs were kept for emission and the
   reloc array was compacted in place (see the tail of this
   function).  */
4783 static int
4784 spu_elf_relocate_section (bfd *output_bfd,
4785 struct bfd_link_info *info,
4786 bfd *input_bfd,
4787 asection *input_section,
4788 bfd_byte *contents,
4789 Elf_Internal_Rela *relocs,
4790 Elf_Internal_Sym *local_syms,
4791 asection **local_sections)
4793 Elf_Internal_Shdr *symtab_hdr;
4794 struct elf_link_hash_entry **sym_hashes;
4795 Elf_Internal_Rela *rel, *relend;
4796 struct spu_link_hash_table *htab;
4797 asection *ea;
4798 int ret = TRUE;
4799 bfd_boolean emit_these_relocs = FALSE;
4800 bfd_boolean is_ea_sym;
4801 bfd_boolean stubs;
4802 unsigned int iovl = 0;
/* Decide up front whether this section may need overlay call stubs,
   and note which overlay (if any) it lives in.  */
4804 htab = spu_hash_table (info);
4805 stubs = (htab->stub_sec != NULL
4806 && maybe_needs_stubs (input_section));
4807 iovl = overlay_index (input_section);
4808 ea = bfd_get_section_by_name (output_bfd, "._ea");
4809 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4810 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4812 rel = relocs;
4813 relend = relocs + input_section->reloc_count;
4814 for (; rel < relend; rel++)
4816 int r_type;
4817 reloc_howto_type *howto;
4818 unsigned int r_symndx;
4819 Elf_Internal_Sym *sym;
4820 asection *sec;
4821 struct elf_link_hash_entry *h;
4822 const char *sym_name;
4823 bfd_vma relocation;
4824 bfd_vma addend;
4825 bfd_reloc_status_type r;
4826 bfd_boolean unresolved_reloc;
4827 bfd_boolean warned;
4828 enum _stub_type stub_type;
4830 r_symndx = ELF32_R_SYM (rel->r_info);
4831 r_type = ELF32_R_TYPE (rel->r_info);
4832 howto = elf_howto_table + r_type;
4833 unresolved_reloc = FALSE;
4834 warned = FALSE;
4835 h = NULL;
4836 sym = NULL;
4837 sec = NULL;
/* Resolve the symbol: local symbols directly from the symbol table,
   globals via the hash chain (following indirect/warning links).  */
4838 if (r_symndx < symtab_hdr->sh_info)
4840 sym = local_syms + r_symndx;
4841 sec = local_sections[r_symndx];
4842 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4843 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4845 else
4847 if (sym_hashes == NULL)
4848 return FALSE;
4850 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4852 while (h->root.type == bfd_link_hash_indirect
4853 || h->root.type == bfd_link_hash_warning)
4854 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4856 relocation = 0;
4857 if (h->root.type == bfd_link_hash_defined
4858 || h->root.type == bfd_link_hash_defweak)
4860 sec = h->root.u.def.section;
4861 if (sec == NULL
4862 || sec->output_section == NULL)
4863 /* Set a flag that will be cleared later if we find a
4864 relocation value for this symbol. output_section
4865 is typically NULL for symbols satisfied by a shared
4866 library. */
4867 unresolved_reloc = TRUE;
4868 else
4869 relocation = (h->root.u.def.value
4870 + sec->output_section->vma
4871 + sec->output_offset);
4873 else if (h->root.type == bfd_link_hash_undefweak)
4875 else if (info->unresolved_syms_in_objects == RM_IGNORE
4876 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4878 else if (!info->relocatable
4879 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4881 bfd_boolean err;
4882 err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4883 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4884 if (!info->callbacks->undefined_symbol (info,
4885 h->root.root.string,
4886 input_bfd,
4887 input_section,
4888 rel->r_offset, err))
4889 return FALSE;
4890 warned = TRUE;
4892 sym_name = h->root.root.string;
4895 if (sec != NULL && elf_discarded_section (sec))
4897 /* For relocs against symbols from removed linkonce sections,
4898 or sections discarded by a linker script, we just want the
4899 section contents zeroed. Avoid any special processing. */
4900 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
4901 rel->r_info = 0;
4902 rel->r_addend = 0;
4903 continue;
4906 if (info->relocatable)
4907 continue;
4909 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4910 if (r_type == R_SPU_ADD_PIC
4911 && h != NULL
4912 && !(h->def_regular || ELF_COMMON_DEF_P (h)))
4914 bfd_byte *loc = contents + rel->r_offset;
4915 loc[0] = 0x1c;
4916 loc[1] = 0x00;
4917 loc[2] &= 0x3f;
/* Relocs against symbols placed in ._ea get PPU-side treatment
   further below rather than stub/icache handling.  */
4920 is_ea_sym = (ea != NULL
4921 && sec != NULL
4922 && sec->output_section == ea);
4924 /* If this symbol is in an overlay area, we may need to relocate
4925 to the overlay stub. */
4926 addend = rel->r_addend;
4927 if (stubs
4928 && !is_ea_sym
4929 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4930 contents, info)) != no_stub)
4932 unsigned int ovl = 0;
4933 struct got_entry *g, **head;
4935 if (stub_type != nonovl_stub)
4936 ovl = iovl;
4938 if (h != NULL)
4939 head = &h->got.glist;
4940 else
4941 head = elf_local_got_ents (input_bfd) + r_symndx;
/* Find the got_entry built earlier for this call site; a missing
   entry is a linker internal error, hence the abort.  */
4943 for (g = *head; g != NULL; g = g->next)
4944 if (htab->params->ovly_flavour == ovly_soft_icache
4945 ? (g->ovl == ovl
4946 && g->br_addr == (rel->r_offset
4947 + input_section->output_offset
4948 + input_section->output_section->vma))
4949 : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4950 break;
4951 if (g == NULL)
4952 abort ();
4954 relocation = g->stub_addr;
4955 addend = 0;
4957 else
4959 /* For soft icache, encode the overlay index into addresses. */
4960 if (htab->params->ovly_flavour == ovly_soft_icache
4961 && (r_type == R_SPU_ADDR16_HI
4962 || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
4963 && !is_ea_sym)
4965 unsigned int ovl = overlay_index (sec);
4966 if (ovl != 0)
4968 unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
4969 relocation += set_id << 18;
/* Record each allocated R_SPU_ADDR32 word in the .fixup section.  */
4974 if (htab->params->emit_fixups && !info->relocatable
4975 && (input_section->flags & SEC_ALLOC) != 0
4976 && r_type == R_SPU_ADDR32)
4978 bfd_vma offset;
4979 offset = rel->r_offset + input_section->output_section->vma
4980 + input_section->output_offset;
4981 spu_elf_emit_fixup (output_bfd, info, offset);
4984 if (unresolved_reloc)
4986 else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4988 if (is_ea_sym)
4990 /* ._ea is a special section that isn't allocated in SPU
4991 memory, but rather occupies space in PPU memory as
4992 part of an embedded ELF image. If this reloc is
4993 against a symbol defined in ._ea, then transform the
4994 reloc into an equivalent one without a symbol
4995 relative to the start of the ELF image. */
4996 rel->r_addend += (relocation
4997 - ea->vma
4998 + elf_section_data (ea)->this_hdr.sh_offset);
4999 rel->r_info = ELF32_R_INFO (0, r_type);
5001 emit_these_relocs = TRUE;
5002 continue;
5004 else if (is_ea_sym)
5005 unresolved_reloc = TRUE;
5007 if (unresolved_reloc)
5009 (*_bfd_error_handler)
5010 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
5011 input_bfd,
5012 bfd_get_section_name (input_bfd, input_section),
5013 (long) rel->r_offset,
5014 howto->name,
5015 sym_name);
5016 ret = FALSE;
5019 r = _bfd_final_link_relocate (howto,
5020 input_bfd,
5021 input_section,
5022 contents,
5023 rel->r_offset, relocation, addend);
5025 if (r != bfd_reloc_ok)
5027 const char *msg = (const char *) 0;
5029 switch (r)
5031 case bfd_reloc_overflow:
5032 if (!((*info->callbacks->reloc_overflow)
5033 (info, (h ? &h->root : NULL), sym_name, howto->name,
5034 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
5035 return FALSE;
5036 break;
5038 case bfd_reloc_undefined:
5039 if (!((*info->callbacks->undefined_symbol)
5040 (info, sym_name, input_bfd, input_section,
5041 rel->r_offset, TRUE)))
5042 return FALSE;
5043 break;
5045 case bfd_reloc_outofrange:
5046 msg = _("internal error: out of range error");
5047 goto common_error;
5049 case bfd_reloc_notsupported:
5050 msg = _("internal error: unsupported relocation error");
5051 goto common_error;
5053 case bfd_reloc_dangerous:
5054 msg = _("internal error: dangerous error");
5055 goto common_error;
5057 default:
5058 msg = _("internal error: unknown error");
5059 /* fall through */
5061 common_error:
5062 ret = FALSE;
5063 if (!((*info->callbacks->warning)
5064 (info, msg, sym_name, input_bfd, input_section,
5065 rel->r_offset)))
5066 return FALSE;
5067 break;
/* When only the special PPU relocs need emitting, compact them to
   the front of the reloc array and shrink the reloc section; the
   return value of 2 tells the linker the array changed.  */
5072 if (ret
5073 && emit_these_relocs
5074 && !info->emitrelocations)
5076 Elf_Internal_Rela *wrel;
5077 Elf_Internal_Shdr *rel_hdr;
5079 wrel = rel = relocs;
5080 relend = relocs + input_section->reloc_count;
5081 for (; rel < relend; rel++)
5083 int r_type;
5085 r_type = ELF32_R_TYPE (rel->r_info);
5086 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5087 *wrel++ = *rel;
5089 input_section->reloc_count = wrel - relocs;
5090 /* Backflips for _bfd_elf_link_output_relocs. */
5091 rel_hdr = &elf_section_data (input_section)->rel_hdr;
5092 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
5093 ret = 2;
5096 return ret;
5099 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5101 static int
5102 spu_elf_output_symbol_hook (struct bfd_link_info *info,
5103 const char *sym_name ATTRIBUTE_UNUSED,
5104 Elf_Internal_Sym *sym,
5105 asection *sym_sec ATTRIBUTE_UNUSED,
5106 struct elf_link_hash_entry *h)
5108 struct spu_link_hash_table *htab = spu_hash_table (info);
5110 if (!info->relocatable
5111 && htab->stub_sec != NULL
5112 && h != NULL
5113 && (h->root.type == bfd_link_hash_defined
5114 || h->root.type == bfd_link_hash_defweak)
5115 && h->def_regular
5116 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5118 struct got_entry *g;
5120 for (g = h->got.glist; g != NULL; g = g->next)
5121 if (htab->params->ovly_flavour == ovly_soft_icache
5122 ? g->br_addr == g->stub_addr
5123 : g->addend == 0 && g->ovl == 0)
5125 sym->st_shndx = (_bfd_elf_section_from_bfd_section
5126 (htab->stub_sec[0]->output_section->owner,
5127 htab->stub_sec[0]->output_section));
5128 sym->st_value = g->stub_addr;
5129 break;
5133 return 1;
/* Non-zero when the output is an SPU plugin; set from ld via
   spu_elf_plugin and tested in spu_elf_post_process_headers.  */
5136 static int spu_plugin = 0;
/* Record whether the output should be marked as a plugin.  */
5138 void
5139 spu_elf_plugin (int val)
5141 spu_plugin = val;
5144 /* Set ELF header e_type for plugins. */
5146 static void
5147 spu_elf_post_process_headers (bfd *abfd,
5148 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5150 if (spu_plugin)
5152 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5154 i_ehdrp->e_type = ET_DYN;
5158 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5159 segments for overlays. */
5161 static int
5162 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5164 int extra = 0;
5165 asection *sec;
5167 if (info != NULL)
5169 struct spu_link_hash_table *htab = spu_hash_table (info);
5170 extra = htab->num_overlays;
5173 if (extra)
5174 ++extra;
5176 sec = bfd_get_section_by_name (abfd, ".toe");
5177 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5178 ++extra;
5180 return extra;
5183 /* Remove .toe section from other PT_LOAD segments and put it in
5184 a segment of its own. Put overlays in separate segments too. */
/* Returns FALSE only on out-of-memory from bfd_zalloc.  */
5186 static bfd_boolean
5187 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
5189 asection *toe, *s;
5190 struct elf_segment_map *m, *m_overlay;
5191 struct elf_segment_map **p, **p_overlay;
5192 unsigned int i;
5194 if (info == NULL)
5195 return TRUE;
/* Split any multi-section PT_LOAD around the first .toe or overlay
   section found in it: sections after it are moved to a new map
   entry, sections before it to another, leaving the section alone
   in the original entry.  */
5197 toe = bfd_get_section_by_name (abfd, ".toe");
5198 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
5199 if (m->p_type == PT_LOAD && m->count > 1)
5200 for (i = 0; i < m->count; i++)
5201 if ((s = m->sections[i]) == toe
5202 || spu_elf_section_data (s)->u.o.ovl_index != 0)
5204 struct elf_segment_map *m2;
5205 bfd_vma amt;
5207 if (i + 1 < m->count)
5209 amt = sizeof (struct elf_segment_map);
5210 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
5211 m2 = bfd_zalloc (abfd, amt);
5212 if (m2 == NULL)
5213 return FALSE;
5214 m2->count = m->count - (i + 1);
5215 memcpy (m2->sections, m->sections + i + 1,
5216 m2->count * sizeof (m->sections[0]));
5217 m2->p_type = PT_LOAD;
5218 m2->next = m->next;
5219 m->next = m2;
5221 m->count = 1;
5222 if (i != 0)
5224 m->count = i;
5225 amt = sizeof (struct elf_segment_map);
5226 m2 = bfd_zalloc (abfd, amt);
5227 if (m2 == NULL)
5228 return FALSE;
5229 m2->p_type = PT_LOAD;
5230 m2->count = 1;
5231 m2->sections[0] = s;
5232 m2->next = m->next;
5233 m->next = m2;
5235 break;
5239 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5240 PT_LOAD segments. This can cause the .ovl.init section to be
5241 overwritten with the contents of some overlay segment. To work
5242 around this issue, we ensure that all PF_OVERLAY segments are
5243 sorted first amongst the program headers; this ensures that even
5244 with a broken loader, the .ovl.init section (which is not marked
5245 as PF_OVERLAY) will be placed into SPU local store on startup. */
5247 /* Move all overlay segments onto a separate list. */
5248 p = &elf_tdata (abfd)->segment_map;
5249 p_overlay = &m_overlay;
5250 while (*p != NULL)
5252 if ((*p)->p_type == PT_LOAD && (*p)->count == 1
5253 && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
5255 m = *p;
5256 *p = m->next;
5257 *p_overlay = m;
5258 p_overlay = &m->next;
5259 continue;
5262 p = &((*p)->next);
5265 /* Re-insert overlay segments at the head of the segment map. */
5266 *p_overlay = elf_tdata (abfd)->segment_map;
5267 elf_tdata (abfd)->segment_map = m_overlay;
5269 return TRUE;
5272 /* Tweak the section type of .note.spu_name. */
5274 static bfd_boolean
5275 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5276 Elf_Internal_Shdr *hdr,
5277 asection *sec)
5279 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5280 hdr->sh_type = SHT_NOTE;
5281 return TRUE;
5284 /* Tweak phdrs before writing them out. */
/* Marks overlay segments PF_OVERLAY, records their file offsets in
   _ovly_table (or .ovl.init for soft-icache), and pads PT_LOAD
   p_filesz/p_memsz to multiples of 16 when that cannot create
   overlapping segments.  */
5286 static int
5287 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
5289 const struct elf_backend_data *bed;
5290 struct elf_obj_tdata *tdata;
5291 Elf_Internal_Phdr *phdr, *last;
5292 struct spu_link_hash_table *htab;
5293 unsigned int count;
5294 unsigned int i;
5296 if (info == NULL)
5297 return TRUE;
5299 bed = get_elf_backend_data (abfd);
5300 tdata = elf_tdata (abfd);
5301 phdr = tdata->phdr;
5302 count = tdata->program_header_size / bed->s->sizeof_phdr;
5303 htab = spu_hash_table (info);
5304 if (htab->num_overlays != 0)
5306 struct elf_segment_map *m;
5307 unsigned int o;
/* Walk segment map and phdr array in lockstep; I indexes phdr.  */
5309 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
5310 if (m->count != 0
5311 && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
5313 /* Mark this as an overlay header. */
5314 phdr[i].p_flags |= PF_OVERLAY;
5316 if (htab->ovtab != NULL && htab->ovtab->size != 0
5317 && htab->params->ovly_flavour != ovly_soft_icache)
5319 bfd_byte *p = htab->ovtab->contents;
5320 unsigned int off = o * 16 + 8;
5322 /* Write file_off into _ovly_table. */
5323 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
5326 /* Soft-icache has its file offset put in .ovl.init. */
5327 if (htab->init != NULL && htab->init->size != 0)
5329 bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;
5331 bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
5335 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5336 of 16. This should always be possible when using the standard
5337 linker scripts, but don't create overlapping segments if
5338 someone is playing games with linker scripts. */
/* First pass (highest offset downwards) only checks feasibility:
   it breaks out early if rounding any segment would run into the
   following one.  */
5339 last = NULL;
5340 for (i = count; i-- != 0; )
5341 if (phdr[i].p_type == PT_LOAD)
5343 unsigned adjust;
5345 adjust = -phdr[i].p_filesz & 15;
5346 if (adjust != 0
5347 && last != NULL
5348 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
5349 break;
5351 adjust = -phdr[i].p_memsz & 15;
5352 if (adjust != 0
5353 && last != NULL
5354 && phdr[i].p_filesz != 0
5355 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
5356 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
5357 break;
5359 if (phdr[i].p_filesz != 0)
5360 last = &phdr[i];
/* I wrapped to (unsigned) -1 only if the loop above completed, i.e.
   rounding is safe everywhere; second pass applies it.  */
5363 if (i == (unsigned int) -1)
5364 for (i = count; i-- != 0; )
5365 if (phdr[i].p_type == PT_LOAD)
5367 unsigned adjust;
5369 adjust = -phdr[i].p_filesz & 15;
5370 phdr[i].p_filesz += adjust;
5372 adjust = -phdr[i].p_memsz & 15;
5373 phdr[i].p_memsz += adjust;
5376 return TRUE;
5379 bfd_boolean
5380 spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
5382 struct spu_link_hash_table *htab = spu_hash_table (info);
5383 if (htab->params->emit_fixups)
5385 asection *sfixup = htab->sfixup;
5386 int fixup_count = 0;
5387 bfd *ibfd;
5388 size_t size;
5390 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
5392 asection *isec;
5394 if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
5395 continue;
5397 /* Walk over each section attached to the input bfd. */
5398 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
5400 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5401 bfd_vma base_end;
5403 /* If there aren't any relocs, then there's nothing more
5404 to do. */
5405 if ((isec->flags & SEC_RELOC) == 0
5406 || isec->reloc_count == 0)
5407 continue;
5409 /* Get the relocs. */
5410 internal_relocs =
5411 _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
5412 info->keep_memory);
5413 if (internal_relocs == NULL)
5414 return FALSE;
5416 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5417 relocations. They are stored in a single word by
5418 saving the upper 28 bits of the address and setting the
5419 lower 4 bits to a bit mask of the words that have the
5420 relocation. BASE_END keeps track of the next quadword. */
5421 irela = internal_relocs;
5422 irelaend = irela + isec->reloc_count;
5423 base_end = 0;
5424 for (; irela < irelaend; irela++)
5425 if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
5426 && irela->r_offset >= base_end)
5428 base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
5429 fixup_count++;
5434 /* We always have a NULL fixup as a sentinel */
5435 size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
5436 if (!bfd_set_section_size (output_bfd, sfixup, size))
5437 return FALSE;
5438 sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
5439 if (sfixup->contents == NULL)
5440 return FALSE;
5442 return TRUE;
/* Target vector and ELF backend hooks for the SPU backend.  */
5445 #define TARGET_BIG_SYM bfd_elf32_spu_vec
5446 #define TARGET_BIG_NAME "elf32-spu"
5447 #define ELF_ARCH bfd_arch_spu
5448 #define ELF_MACHINE_CODE EM_SPU
5449 /* This matches the alignment need for DMA. */
5450 #define ELF_MAXPAGESIZE 0x80
5451 #define elf_backend_rela_normal 1
5452 #define elf_backend_can_gc_sections 1
5454 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5455 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5456 #define elf_info_to_howto spu_elf_info_to_howto
5457 #define elf_backend_count_relocs spu_elf_count_relocs
5458 #define elf_backend_relocate_section spu_elf_relocate_section
5459 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5460 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5461 #define elf_backend_object_p spu_elf_object_p
5462 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5463 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5465 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5466 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5467 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5468 #define elf_backend_post_process_headers spu_elf_post_process_headers
5469 #define elf_backend_fake_sections spu_elf_fake_sections
5470 #define elf_backend_special_sections spu_elf_special_sections
5471 #define bfd_elf32_bfd_final_link spu_elf_final_link
5473 #include "elf32-target.h"