Remove gas/.
[binutils.git] / bfd / elf32-spu.c
blob3015cd65bee5721b781218a1358c6789350aa7a8
1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
21 #include "sysdep.h"
22 #include "libiberty.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf/spu.h"
28 #include "elf32-spu.h"
/* We use RELA style relocs.  Don't define USE_REL.  */

/* Forward declaration: special reloc function used by the R_SPU_REL9
   and R_SPU_REL9I howtos below, which scatter their 9-bit field.  */
static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
					   void *, asection *,
					   bfd *, char **);
/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.

   HOWTO argument order: type, rightshift, size, bitsize, pc_relative,
   bitpos, complain_on_overflow, special_function, name,
   partial_inplace, src_mask, dst_mask, pcrel_offset.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The REL9/REL9I fields are split across two insn fields, so they
     need the spu_elf_rel9 special function to place the high bits.  */
  HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  /* 64-bit reloc: size 4 means 8 bytes, dst_mask -1 covers all bits.  */
  HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
};
/* Special sections recognised by name prefix.  The second field is the
   prefix length; ._ea holds effective-address space data, .toe the
   table of effective addresses.  */
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
/* Translate a BFD generic reloc code to the SPU ELF reloc number.
   Returns R_SPU_NONE for codes with no SPU relocation (note that
   BFD_RELOC_SPU_IMM8 deliberately maps to R_SPU_NONE too).  */
static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    }
}
141 static void
142 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
143 arelent *cache_ptr,
144 Elf_Internal_Rela *dst)
146 enum elf_spu_reloc_type r_type;
148 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
149 BFD_ASSERT (r_type < R_SPU_max);
150 cache_ptr->howto = &elf_howto_table[(int) r_type];
153 static reloc_howto_type *
154 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
155 bfd_reloc_code_real_type code)
157 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
159 if (r_type == R_SPU_NONE)
160 return NULL;
162 return elf_howto_table + r_type;
165 static reloc_howto_type *
166 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
167 const char *r_name)
169 unsigned int i;
171 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
172 if (elf_howto_table[i].name != NULL
173 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
174 return &elf_howto_table[i];
176 return NULL;
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  These have a 9-bit signed
   pc-relative word offset whose bits are split between two insn fields,
   so the generic inplace machinery cannot handle them.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  NOTE(review): only the output section vma
     and offset are subtracted here, not the reloc address itself —
     presumably the caller or insn semantics account for that; confirm
     against how BFD applies pcrel_offset relocs.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* Value is a word (4-byte) offset; range-check the signed 9 bits.  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
228 static bfd_boolean
229 spu_elf_new_section_hook (bfd *abfd, asection *sec)
231 if (!sec->used_by_bfd)
233 struct _spu_elf_section_data *sdata;
235 sdata = bfd_zalloc (abfd, sizeof (*sdata));
236 if (sdata == NULL)
237 return FALSE;
238 sec->used_by_bfd = sdata;
241 return _bfd_elf_new_section_hook (abfd, sec);
/* Set up overlay info for executables.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      /* Walk the program headers; each PT_LOAD flagged PF_OVERLAY is
	 an overlay.  Overlays loading at the same local-store address
	 (low 18 bits of p_vaddr) share an overlay buffer.  */
      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	  {
	    unsigned int j;

	    ++num_ovl;
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      ++num_buf;
	    last_phdr = phdr;
	    /* Record overlay and buffer numbers on every section that
	       lives in this segment.  */
	    for (j = 1; j < elf_numsections (abfd); j++)
	      {
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
		  {
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
		  }
	      }
	  }
    }
  return TRUE;
}
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
285 static void
286 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
288 if (sym->name != NULL
289 && sym->section != bfd_abs_section_ptr
290 && strncmp (sym->name, "_EAR_", 5) == 0)
291 sym->flags |= BSF_KEEP;
/* SPU ELF linker hash table.  NOTE: spu_elf_link_hash_table_create
   zeroes everything from OVTAB onward with a single memset, so keep
   OVTAB as the first field after PARAMS.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Overlay manager entry points.  */
  struct elf_link_hash_entry *ovly_load;
  struct elf_link_hash_entry *ovly_return;
  unsigned long ovly_load_r_symndx;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;

  /* How much memory we have.  */
  unsigned int local_store;
  /* Local store --auto-overlay should reserve for non-overlay
     functions and data.  */
  unsigned int overlay_fixed;
  /* Local store --auto-overlay should reserve for stack and heap.  */
  unsigned int reserved;
  /* If reserved is not specified, stack analysis will calculate a value
     for the stack.  This parameter adjusts that value to allow for
     negative sp access (the ABI says 2000 bytes below sp are valid,
     and the overlay manager uses some of this area).  */
  int extra_stack_space;
  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Set on error.  */
  unsigned int stub_err : 1;
};
/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  struct got_entry *next;
  /* Overlay index of the stub; 0 for a stub in the non-overlay area.  */
  unsigned int ovl;
  union {
    /* Reloc addend this stub corresponds to (normal overlay flavours).  */
    bfd_vma addend;
    /* Address of the branch insn (soft-icache flavour).  */
    bfd_vma br_addr;
  };
  /* Where the stub was (or will be) built; -1 until assigned.  */
  bfd_vma stub_addr;
};

/* Retrieve the SPU-flavoured hash table from a bfd_link_info.  */
#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
/* One edge in the call graph built by the stack/overlay analysis.  */
struct call_info
{
  /* Callee.  */
  struct function_info *fun;
  /* Next call made from the same caller.  */
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int priority : 13;
};
/* Per-function (or per-function-fragment) node of the call graph.  */
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};
/* Per-section container for the function_info records found in that
   section; grown with realloc as functions are discovered.  */
struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
/* Forward declaration: map a section/offset to the function_info
   covering it (defined later in this file).  */
static struct function_info *find_function (asection *, bfd_vma,
					    struct bfd_link_info *);
/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
				      _bfd_elf_link_hash_newfunc,
				      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  /* Zero everything after the generic ELF table and PARAMS pointer.
     NOTE(review): PARAMS itself is left uninitialized here; it is set
     by spu_elf_setup, which must run before anything reads it.  */
  memset (&htab->ovtab, 0,
	  sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}
463 void
464 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
466 struct spu_link_hash_table *htab = spu_hash_table (info);
467 htab->params = params;
468 htab->line_size_log2 = bfd_log2 (htab->params->line_size);
469 htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  /* sh_info is the number of local symbols; indices at or above it
     are globals found via the sym_hashes array.  */
  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  /* Use already-read symtab contents if available, otherwise
	     read the local symbols now and cache them in *LOCSYMSP.  */
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  bfd *ibfd;

  /* If any input already provides the SPUNAME note, do nothing.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* ELF note layout: 12-byte header (namesz, descsz, type) then
	 name and desc, each padded to a 4-byte boundary.  The desc is
	 the output file name.  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      /* bfd_zalloc so the padding bytes are zero.  */
      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}
593 /* qsort predicate to sort sections by vma. */
595 static int
596 sort_sections (const void *a, const void *b)
598 const asection *const *s1 = a;
599 const asection *const *s2 = b;
600 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
602 if (delta != 0)
603 return delta < 0 ? -1 : 1;
605 return (*s1)->index - (*s2)->index;
/* Identify overlays in the output bfd, and number them.
   Returns TRUE iff at least one overlay section was found; also fills
   in htab->num_overlays, num_buf, ovl_sec and the overlay manager
   entry symbols.  */

bfd_boolean
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  const char *ovly_mgr_entry;

  if (info->output_bfd->section_count < 2)
    return FALSE;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return FALSE;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
	&& s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return FALSE;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;
      bfd_vma lma_start = 0;

      for (i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      /* The section before the first overlap marks the start of
		 the cache area, which spans num_lines * line_size.  */
	      asection *s0 = alloc_sec[i - 1];
	      vma_start = s0->vma;
	      lma_start = s0->lma;
	      ovl_end = (s0->vma
			 + ((bfd_vma) 1
			    << (htab->num_lines_log2 + htab->line_size_log2)));
	      --i;
	      break;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma >= ovl_end)
	    break;

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	    {
	      /* Cache lines are numbered from vma_start; each overlay
		 must sit exactly on a line and fit inside it.  */
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      if (((s->vma - vma_start) & (htab->params->line_size - 1))
		  || ((s->lma - lma_start) & (htab->params->line_size - 1)))
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "does not start on a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return FALSE;
		}
	      else if (s->size > htab->params->line_size)
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "is larger than a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return FALSE;
		}

	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= ((s->lma - lma_start) >> htab->line_size_log2) + 1;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
	    }
	}

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      info->callbacks->einfo (_("%X%P: overlay section %A "
					"is not in cache area.\n"),
				      alloc_sec[i-1]);
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];

	      /* First overlap with S0 means S0 itself begins a new
		 overlay buffer.  */
	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		{
		  ++num_buf;
		  if (strncmp (s0->name, ".ovl.init", 9) != 0)
		    {
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    }
		  else
		    ovl_end = s->vma + s->size;
		}
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		{
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  /* All overlays in one buffer must share a vma.  */
		  if (s0->vma != s->vma)
		    {
		      info->callbacks->einfo (_("%X%P: overlay sections %A "
						"and %A do not start at the "
						"same address.\n"),
					      s0, s);
		      bfd_set_error (bfd_error_bad_value);
		      return FALSE;
		    }
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
		}
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;
  /* Look up (without creating) the overlay manager entry points the
     user's overlay manager provides.  */
  ovly_mgr_entry = "__ovly_load";
  if (htab->params->ovly_flavour == ovly_soft_icache)
    ovly_mgr_entry = "__icache_br_handler";
  htab->ovly_load = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
					  FALSE, FALSE, FALSE);
  if (htab->params->ovly_flavour != ovly_soft_icache)
    htab->ovly_return = elf_link_hash_lookup (&htab->elf, "__ovly_return",
					      FALSE, FALSE, FALSE);
  return ovl_index != 0;
}
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

/* SPU branch and no-op opcodes, used when assembling overlay stubs
   (see build_stub below).  */
#define BRA 0x30000000
#define BRASL 0x31000000
#define BR 0x32000000
#define BRSL 0x33000000
#define NOP 0x40200000
#define LNOP 0x00200000
#define ILA 0x42000000
795 /* Return true for all relative and absolute branch instructions.
796 bra 00110000 0..
797 brasl 00110001 0..
798 br 00110010 0..
799 brsl 00110011 0..
800 brz 00100000 0..
801 brnz 00100001 0..
802 brhz 00100010 0..
803 brhnz 00100011 0.. */
805 static bfd_boolean
806 is_branch (const unsigned char *insn)
808 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
811 /* Return true for all indirect branch instructions.
812 bi 00110101 000
813 bisl 00110101 001
814 iret 00110101 010
815 bisled 00110101 011
816 biz 00100101 000
817 binz 00100101 001
818 bihz 00100101 010
819 bihnz 00100101 011 */
821 static bfd_boolean
822 is_indirect_branch (const unsigned char *insn)
824 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
827 /* Return true for branch hint instructions.
828 hbra 0001000..
829 hbrr 0001001.. */
831 static bfd_boolean
832 is_hint (const unsigned char *insn)
834 return (insn[0] & 0xfc) == 0x10;
837 /* True if INPUT_SECTION might need overlay stubs. */
839 static bfd_boolean
840 maybe_needs_stubs (asection *input_section)
842 /* No stubs for debug sections and suchlike. */
843 if ((input_section->flags & SEC_ALLOC) == 0)
844 return FALSE;
846 /* No stubs for link-once sections that will be discarded. */
847 if (input_section->output_section == bfd_abs_section_ptr)
848 return FALSE;
850 /* Don't create stubs for .eh_frame references. */
851 if (strcmp (input_section->name, ".eh_frame") == 0)
852 return FALSE;
854 return TRUE;
/* Kinds of overlay stub.  The brNNN variants are branch stubs indexed
   by the three "lrlive" bits taken from the branch insn (see
   needs_ovl_stub, which adds lrlive to br000_ovl_stub), so they must
   stay contiguous and in this order.  */
enum _stub_type
{
  no_stub,
  call_ovl_stub,
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  /* Stub placed in the non-overlay area (address taken).  */
  nonovl_stub,
  stub_error
};
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  /* No stub for undefined symbols or those without overlay info.  */
  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_load || h == htab->ovly_return)
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  /* Only 16-bit branch-style relocs can be branches or hints; examine
     the insn bytes to classify the reference.  */
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  /* brsl/brasl (..sl forms) are calls.  */
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      /* Note: BFD's error handler takes %A/%B arguments first
		 in the arg list regardless of their position in the
		 format string, hence owner before sym_name here.  */
	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);
	    }
	}
    }

  /* Soft-icache only stubs branches; and plain data references to
     non-code, non-function symbols never need stubs.  */
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      if (call || sym_type == STT_FUNC)
	ret = call_ovl_stub;
      else
	{
	  ret = br000_ovl_stub;

	  if (branch)
	    {
	      /* Fold the branch insn's lrlive bits into the stub type
		 (br000..br111).  */
	      unsigned int lrlive = (contents[1] & 0x70) >> 4;
	      ret += lrlive;
	    }
	}
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
/* Account for one overlay stub needed for the reference described by
   STUB_TYPE/H/IRELA in section ISEC of IBFD.  Records the requirement
   on the symbol's got_entry list and bumps htab->stub_count.  */
static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      /* Lazily create the per-local-symbol got_entry array.  */
      if (elf_local_got_ents (ibfd) == NULL)
	{
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)
	    return FALSE;
	}
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  /* Soft-icache builds one stub per branch site, no sharing.  */
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return TRUE;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      /* A non-overlay stub serves all callers; see if one exists.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)
	  break;

      if (g == NULL)
	{
	  /* Need a new non-overlay area stub.  Zap other stubs.
	     NOTE(review): the freed entries do not appear to be
	     unlinked from *head here — confirm against upstream that
	     later list walks cannot touch them.  */
	  for (g = *head; g != NULL; g = gnext)
	    {
	      gnext = g->next;
	      if (g->addend == addend)
		{
		  htab->stub_count[g->ovl] -= 1;
		  free (g);
		}
	    }
	}
    }
  else
    {
      /* A per-overlay stub is satisfied by a matching overlay stub or
	 an existing non-overlay stub.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}
1102 /* Support two sizes of overlay stubs, a slower more compact stub of two
1103 intructions, and a faster stub of four instructions. */
1105 static unsigned int
1106 ovl_stub_size (enum _ovly_flavour ovly_flavour)
1108 return 8 << ovly_flavour;
1111 /* Two instruction overlay stubs look like:
1113 brsl $75,__ovly_load
1114 .word target_ovl_and_address
1116 ovl_and_address is a word with the overlay number in the top 14 bits
1117 and local store address in the bottom 18 bits.
1119 Four instruction overlay stubs look like:
1121 ila $78,ovl_number
1122 lnop
1123 ila $79,target_address
1124 br __ovly_load
1126 Software icache stubs are:
1128 .word target_index
1129 .word target_ia;
1130 .word lrlive_branchlocalstoreaddr;
1131 brasl $75,__icache_br_handler
1132 .quad xor_pattern
/* Emit one overlay call stub for the branch described by IRELA in
   section ISEC of IBFD (or for global symbol H when IRELA is NULL,
   as for _SPUEAR_ entry symbols), targeting DEST in DEST_SEC.  The
   stub is appended at the current size of the stub section for the
   caller's overlay, and sec->size is advanced by the stub size.
   Returns FALSE on malloc failure or on misaligned addresses (the
   latter also sets htab->stub_err).  */
1135 static bfd_boolean
1136 build_stub (struct bfd_link_info *info,
1137 bfd *ibfd,
1138 asection *isec,
1139 enum _stub_type stub_type,
1140 struct elf_link_hash_entry *h,
1141 const Elf_Internal_Rela *irela,
1142 bfd_vma dest,
1143 asection *dest_sec)
1145 struct spu_link_hash_table *htab = spu_hash_table (info);
1146 unsigned int ovl, dest_ovl, set_id;
1147 struct got_entry *g, **head;
1148 asection *sec;
1149 bfd_vma addend, from, to, br_dest, patt;
1150 unsigned int lrlive;
/* ovl is the overlay the *call site* lives in; 0 means the call is
   from the non-overlay area.  */
1152 ovl = 0;
1153 if (stub_type != nonovl_stub)
1154 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1156 if (h != NULL)
1157 head = &h->got.glist;
1158 else
1159 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1161 addend = 0;
1162 if (irela != NULL)
1163 addend = irela->r_addend;
1165 if (htab->params->ovly_flavour == ovly_soft_icache)
/* Soft-icache: every branch gets its own stub, so allocate a fresh
   got_entry recording the branch address instead of looking up the
   entry made by count_stub.  */
1167 g = bfd_malloc (sizeof *g);
1168 if (g == NULL)
1169 return FALSE;
1170 g->ovl = ovl;
1171 g->br_addr = 0;
1172 if (irela != NULL)
1173 g->br_addr = (irela->r_offset
1174 + isec->output_offset
1175 + isec->output_section->vma);
1176 g->next = *head;
1177 *head = g;
1179 else
/* Find the entry count_stub created.  An entry with ovl == 0 lives
   in the non-overlay stub area and serves calls from anywhere.  */
1181 for (g = *head; g != NULL; g = g->next)
1182 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1183 break;
1184 if (g == NULL)
1185 abort ();
/* A non-overlay-area stub (g->ovl == 0) is built when processing a
   non-overlay caller; skip it here.  Also skip already-built stubs.  */
1187 if (g->ovl == 0 && ovl != 0)
1188 return TRUE;
1190 if (g->stub_addr != (bfd_vma) -1)
1191 return TRUE;
1194 sec = htab->stub_sec[ovl];
1195 dest += dest_sec->output_offset + dest_sec->output_section->vma;
1196 from = sec->size + sec->output_offset + sec->output_section->vma;
1197 g->stub_addr = from;
1198 to = (htab->ovly_load->root.u.def.value
1199 + htab->ovly_load->root.u.def.section->output_offset
1200 + htab->ovly_load->root.u.def.section->output_section->vma);
/* The branch encodings below drop the low two address bits, so all
   three addresses must be word aligned.  */
1202 if (((dest | to | from) & 3) != 0)
1204 htab->stub_err = 1;
1205 return FALSE;
1207 dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
1209 switch (htab->params->ovly_flavour)
1211 case ovly_normal:
/* Four-insn stub: ila $78,ovl; lnop; ila $79,dest; br/bra __ovly_load.  */
1212 bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
1213 sec->contents + sec->size);
1214 bfd_put_32 (sec->owner, LNOP,
1215 sec->contents + sec->size + 4);
1216 bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
1217 sec->contents + sec->size + 8);
1218 if (!BRA_STUBS)
1219 bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
1220 sec->contents + sec->size + 12);
1221 else
1222 bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
1223 sec->contents + sec->size + 12);
1224 break;
1226 case ovly_compact:
/* Two-insn stub: brsl/brasl $75,__ovly_load; .word ovl<<18|dest.  */
1227 if (!BRA_STUBS)
1228 bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
1229 sec->contents + sec->size);
1230 else
1231 bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1232 sec->contents + sec->size);
1233 bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
1234 sec->contents + sec->size + 4);
1235 break;
1237 case ovly_soft_icache:
/* Work out the lr liveness code (0..7, stored in the top three bits
   of the third stub word) at the branch, either from analysis of the
   caller's prologue or, failing that, conservative defaults.  */
1238 lrlive = 0;
1239 if (stub_type == nonovl_stub)
1241 else if (stub_type == call_ovl_stub)
1242 /* A brsl makes lr live and *(*sp+16) is live.
1243 Tail calls have the same liveness. */
1244 lrlive = 5;
1245 else if (!htab->params->lrlive_analysis)
1246 /* Assume stack frame and lr save. */
1247 lrlive = 1;
1248 else if (irela != NULL)
1250 /* Analyse branch instructions. */
1251 struct function_info *caller;
1252 bfd_vma off;
1254 caller = find_function (isec, irela->r_offset, info);
1255 if (caller->start == NULL)
1256 off = irela->r_offset;
1257 else
1259 struct function_info *found = NULL;
1261 /* Find the earliest piece of this function that
1262 has frame adjusting instructions. We might
1263 see dynamic frame adjustment (eg. for alloca)
1264 in some later piece, but functions using
1265 alloca always set up a frame earlier. Frame
1266 setup instructions are always in one piece. */
1267 if (caller->lr_store != (bfd_vma) -1
1268 || caller->sp_adjust != (bfd_vma) -1)
1269 found = caller;
1270 while (caller->start != NULL)
1272 caller = caller->start;
1273 if (caller->lr_store != (bfd_vma) -1
1274 || caller->sp_adjust != (bfd_vma) -1)
1275 found = caller;
1277 if (found != NULL)
1278 caller = found;
/* off == -1 here: the branch is in a later function piece, so it is
   past any prologue frame setup.  */
1279 off = (bfd_vma) -1;
1282 if (off > caller->sp_adjust)
1284 if (off > caller->lr_store)
1285 /* Only *(*sp+16) is live. */
1286 lrlive = 1;
1287 else
1288 /* If no lr save, then we must be in a
1289 leaf function with a frame.
1290 lr is still live. */
1291 lrlive = 4;
1293 else if (off > caller->lr_store)
1295 /* Between lr save and stack adjust. */
1296 lrlive = 3;
1297 /* This should never happen since prologues won't
1298 be split here. */
1299 BFD_ASSERT (0);
1301 else
1302 /* On entry to function. */
1303 lrlive = 5;
/* Warn when .brinfo-supplied liveness disagrees with our analysis.  */
1305 if (stub_type != br000_ovl_stub
1306 && lrlive != stub_type - br000_ovl_stub)
1307 info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1308 "from analysis (%u)\n"),
1309 isec, irela->r_offset, lrlive,
1310 stub_type - br000_ovl_stub);
1313 /* If given lrlive info via .brinfo, use it. */
1314 if (stub_type > br000_ovl_stub)
1315 lrlive = stub_type - br000_ovl_stub;
1317 /* The branch that uses this stub goes to stub_addr + 12. We'll
1318 set up an xor pattern that can be used by the icache manager
1319 to modify this branch to go directly to its destination. */
1320 g->stub_addr += 12;
1321 br_dest = g->stub_addr;
1322 if (irela == NULL)
1324 /* Except in the case of _SPUEAR_ stubs, the branch in
1325 question is the one in the stub itself. */
1326 BFD_ASSERT (stub_type == nonovl_stub);
1327 g->br_addr = g->stub_addr;
1328 br_dest = to;
/* Stub layout: target index, set_id|target_ia, lrlive|branch addr,
   brasl to the icache branch handler, then the xor pattern placed in
   the 16-byte slot matching the branch address within its quadword.  */
1331 bfd_put_32 (sec->owner, dest_ovl - 1,
1332 sec->contents + sec->size + 0);
1333 set_id = (dest_ovl - 1) >> htab->num_lines_log2;
1334 bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
1335 sec->contents + sec->size + 4);
1336 bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
1337 sec->contents + sec->size + 8);
1338 bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1339 sec->contents + sec->size + 12);
1340 patt = dest ^ br_dest;
1341 if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
1342 patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
1343 bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
1344 sec->contents + sec->size + 16 + (g->br_addr & 0xf));
1345 if (ovl == 0)
1346 /* Extra space for linked list entries. */
1347 sec->size += 16;
1348 break;
1350 default:
1351 abort ();
1353 sec->size += ovl_stub_size (htab->params->ovly_flavour);
1355 if (htab->params->emit_stub_syms)
/* Define a symbol "NNNNNNNN.ovl_call.name[+addend]" on the stub so
   it shows up in maps and debuggers.  */
1357 size_t len;
1358 char *name;
1359 int add;
1361 len = 8 + sizeof (".ovl_call.") - 1;
1362 if (h != NULL)
1363 len += strlen (h->root.root.string);
1364 else
1365 len += 8 + 1 + 8;
1366 add = 0;
1367 if (irela != NULL)
1368 add = (int) irela->r_addend & 0xffffffff;
1369 if (add != 0)
1370 len += 1 + 8;
1371 name = bfd_malloc (len);
1372 if (name == NULL)
1373 return FALSE;
1375 sprintf (name, "%08x.ovl_call.", g->ovl);
1376 if (h != NULL)
1377 strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
1378 else
1379 sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1380 dest_sec->id & 0xffffffff,
1381 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
1382 if (add != 0)
1383 sprintf (name + len - 9, "+%x", add);
1385 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1386 free (name);
1387 if (h == NULL)
1388 return FALSE;
1389 if (h->root.type == bfd_link_hash_new)
1391 h->root.type = bfd_link_hash_defined;
1392 h->root.u.def.section = sec;
1393 h->size = ovl_stub_size (htab->params->ovly_flavour);
1394 h->root.u.def.value = sec->size - h->size;
1395 h->type = STT_FUNC;
1396 h->ref_regular = 1;
1397 h->def_regular = 1;
1398 h->ref_regular_nonweak = 1;
1399 h->forced_local = 1;
1400 h->non_elf = 0;
1404 return TRUE;
1407 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1408 symbols. */
1410 static bfd_boolean
1411 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1413 /* Symbols starting with _SPUEAR_ need a stub because they may be
1414 invoked by the PPU. */
1415 struct bfd_link_info *info = inf;
1416 struct spu_link_hash_table *htab = spu_hash_table (info);
1417 asection *sym_sec;
/* Count a non-overlay stub for each defined regular _SPUEAR_ symbol
   whose section is in an overlay (or always, with non_overlay_stubs).
   NULL ibfd/isec/irela tell count_stub there is no call site.  */
1419 if ((h->root.type == bfd_link_hash_defined
1420 || h->root.type == bfd_link_hash_defweak)
1421 && h->def_regular
1422 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1423 && (sym_sec = h->root.u.def.section) != NULL
1424 && sym_sec->output_section != bfd_abs_section_ptr
1425 && spu_elf_section_data (sym_sec->output_section) != NULL
1426 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1427 || htab->params->non_overlay_stubs))
1429 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1432 return TRUE;
1435 static bfd_boolean
1436 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1438 /* Symbols starting with _SPUEAR_ need a stub because they may be
1439 invoked by the PPU. */
1440 struct bfd_link_info *info = inf;
1441 struct spu_link_hash_table *htab = spu_hash_table (info);
1442 asection *sym_sec;
/* Build pass matching allocate_spuear_stubs: the selection condition
   must stay identical to that function's so counts and builds agree.  */
1444 if ((h->root.type == bfd_link_hash_defined
1445 || h->root.type == bfd_link_hash_defweak)
1446 && h->def_regular
1447 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1448 && (sym_sec = h->root.u.def.section) != NULL
1449 && sym_sec->output_section != bfd_abs_section_ptr
1450 && spu_elf_section_data (sym_sec->output_section) != NULL
1451 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1452 || htab->params->non_overlay_stubs))
1454 return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1455 h->root.u.def.value, sym_sec);
1458 return TRUE;
1461 /* Size or build stubs. */
/* Walk every reloc of every SPU input section, and for each branch or
   reference that needs an overlay stub either count it (BUILD false,
   sizing pass) or emit it (BUILD true, build pass).  Both passes must
   visit relocs in the same order.  Returns FALSE on error.  */
1463 static bfd_boolean
1464 process_stubs (struct bfd_link_info *info, bfd_boolean build)
1466 struct spu_link_hash_table *htab = spu_hash_table (info);
1467 bfd *ibfd;
1469 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
1471 extern const bfd_target bfd_elf32_spu_vec;
1472 Elf_Internal_Shdr *symtab_hdr;
1473 asection *isec;
1474 Elf_Internal_Sym *local_syms = NULL;
1476 if (ibfd->xvec != &bfd_elf32_spu_vec)
1477 continue;
1479 /* We'll need the symbol table in a second. */
1480 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1481 if (symtab_hdr->sh_info == 0)
1482 continue;
1484 /* Walk over each section attached to the input bfd. */
1485 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
1487 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1489 /* If there aren't any relocs, then there's nothing more to do. */
1490 if ((isec->flags & SEC_RELOC) == 0
1491 || isec->reloc_count == 0)
1492 continue;
1494 if (!maybe_needs_stubs (isec))
1495 continue;
1497 /* Get the relocs. */
1498 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1499 info->keep_memory)
1500 if (internal_relocs == NULL)
1501 goto error_ret_free_local;
1503 /* Now examine each relocation. */
1504 irela = internal_relocs;
1505 irelaend = irela + isec->reloc_count;
1506 for (; irela < irelaend; irela++)
1508 enum elf_spu_reloc_type r_type;
1509 unsigned int r_indx;
1510 asection *sym_sec;
1511 Elf_Internal_Sym *sym;
1512 struct elf_link_hash_entry *h;
1513 enum _stub_type stub_type;
1515 r_type = ELF32_R_TYPE (irela->r_info);
1516 r_indx = ELF32_R_SYM (irela->r_info);
1518 if (r_type >= R_SPU_max)
1520 bfd_set_error (bfd_error_bad_value);
/* goto-based cleanup: free the reloc buffer (only if we own it) and
   any local syms not cached on symtab_hdr->contents.  */
1521 error_ret_free_internal:
1522 if (elf_section_data (isec)->relocs != internal_relocs)
1523 free (internal_relocs);
1524 error_ret_free_local:
1525 if (local_syms != NULL
1526 && (symtab_hdr->contents
1527 != (unsigned char *) local_syms))
1528 free (local_syms);
1529 return FALSE;
1532 /* Determine the reloc target section. */
1533 if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
1534 goto error_ret_free_internal;
1536 stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1537 NULL, info);
1538 if (stub_type == no_stub)
1539 continue;
1540 else if (stub_type == stub_error)
1541 goto error_ret_free_internal;
/* Lazily allocate the per-overlay stub counters (index 0 is the
   non-overlay area).  */
1543 if (htab->stub_count == NULL)
1545 bfd_size_type amt;
1546 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1547 htab->stub_count = bfd_zmalloc (amt);
1548 if (htab->stub_count == NULL)
1549 goto error_ret_free_internal;
1552 if (!build)
1554 if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1555 goto error_ret_free_internal;
1557 else
1559 bfd_vma dest;
1561 if (h != NULL)
1562 dest = h->root.u.def.value;
1563 else
1564 dest = sym->st_value;
1565 dest += irela->r_addend;
1566 if (!build_stub (info, ibfd, isec, stub_type, h, irela,
1567 dest, sym_sec))
1568 goto error_ret_free_internal;
1572 /* We're done with the internal relocs, free them. */
1573 if (elf_section_data (isec)->relocs != internal_relocs)
1574 free (internal_relocs);
1577 if (local_syms != NULL
1578 && symtab_hdr->contents != (unsigned char *) local_syms)
1580 if (!info->keep_memory)
1581 free (local_syms);
1582 else
1583 symtab_hdr->contents = (unsigned char *) local_syms;
1587 return TRUE;
1590 /* Allocate space for overlay call and return stubs. */
/* Size the overlay stub sections and create the overlay support
   sections (.stub per overlay, .ovtab, .toe, and .ovini for
   soft-icache).  Returns 0 on error, 1 if no stubs are needed,
   2 after successfully sizing stubs.
   NOTE(review): the function's return-type line was lost in this
   extraction (original line 1592); the returns show it yields int.  */
1593 spu_elf_size_stubs (struct bfd_link_info *info)
1595 struct spu_link_hash_table *htab;
1596 bfd *ibfd;
1597 bfd_size_type amt;
1598 flagword flags;
1599 unsigned int i;
1600 asection *stub;
1601 const char *ovout;
1603 if (!process_stubs (info, FALSE))
1604 return 0;
1606 htab = spu_hash_table (info);
1607 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
1608 if (htab->stub_err)
1609 return 0;
1611 if (htab->stub_count == NULL)
1612 return 1;
/* stub_sec[0] is for calls from the non-overlay area; entries 1..n
   are per-overlay stub sections.  */
1614 ibfd = info->input_bfds;
1615 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1616 htab->stub_sec = bfd_zmalloc (amt);
1617 if (htab->stub_sec == NULL)
1618 return 0;
1620 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1621 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1622 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1623 htab->stub_sec[0] = stub;
/* Alignment log2 is ovly_flavour + 3, i.e. the stub size.  */
1624 if (stub == NULL
1625 || !bfd_set_section_alignment (ibfd, stub,
1626 htab->params->ovly_flavour + 3))
1627 return 0;
1628 stub->size = htab->stub_count[0] * ovl_stub_size (htab->params->ovly_flavour);
1629 if (htab->params->ovly_flavour == ovly_soft_icache)
1630 /* Extra space for linked list entries. */
1631 stub->size += htab->stub_count[0] * 16;
1632 (*htab->params->place_spu_section) (stub, NULL, ".text");
1634 for (i = 0; i < htab->num_overlays; ++i)
1636 asection *osec = htab->ovl_sec[i];
1637 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1638 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1639 htab->stub_sec[ovl] = stub;
1640 if (stub == NULL
1641 || !bfd_set_section_alignment (ibfd, stub,
1642 htab->params->ovly_flavour + 3))
1643 return 0;
1644 stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params->ovly_flavour);
1645 (*htab->params->place_spu_section) (stub, osec, NULL);
1648 flags = (SEC_ALLOC | SEC_LOAD
1649 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1650 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1651 if (htab->ovtab == NULL
1652 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1653 return 0;
1655 if (htab->params->ovly_flavour == ovly_soft_icache)
1657 /* Space for icache manager tables.
1658 a) Tag array, one quadword per cache line.
1659 b) Linked list elements, max_branch per line quadwords.
1660 c) Indirect branch descriptors, 8 quadwords. */
1661 htab->ovtab->size = 16 * (((1 + htab->params->max_branch)
1662 << htab->num_lines_log2)
1663 + 8);
1665 htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
1666 if (htab->init == NULL
1667 || !bfd_set_section_alignment (ibfd, htab->init, 4))
1668 return 0;
1670 htab->init->size = 16;
1671 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1673 else
1675 /* htab->ovtab consists of two arrays.
1676 . struct {
1677 . u32 vma;
1678 . u32 size;
1679 . u32 file_off;
1680 . u32 buf;
1681 . } _ovly_table[];
1683 . struct {
1684 . u32 mapped;
1685 . } _ovly_buf_table[];
1686 . */
1688 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1690 ovout = ".data";
1691 if (htab->params->ovly_flavour == ovly_soft_icache)
1692 ovout = ".data.icache";
1693 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1695 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1696 if (htab->toe == NULL
1697 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1698 return 0;
1699 htab->toe->size = htab->params->ovly_flavour == ovly_soft_icache ? 256 : 16;
1700 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1702 return 2;
1705 /* Functions to handle embedded spu_ovl.o object. */
/* bfd_openr_iovec "open" callback for the embedded overlay manager:
   the stream cookie is the _ovl_stream itself, returned unchanged.  */
1707 static void *
1708 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1710 return stream;
/* bfd_openr_iovec "pread" callback: copy up to NBYTES from OFFSET
   within the in-memory [os->start, os->end) image into BUF, clamping
   at the image end.  Returns the number of bytes copied (0 when
   OFFSET is at or past the end).  */
1714 static file_ptr
1715 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1716 void *stream,
1717 void *buf,
1718 file_ptr nbytes,
1719 file_ptr offset)
1720 struct _ovl_stream *os;
1721 size_t count;
1722 size_t max;
1724 os = (struct _ovl_stream *) stream;
1725 max = (const char *) os->end - (const char *) os->start;
1727 if ((ufile_ptr) offset >= max)
1728 return 0;
1730 count = nbytes;
1731 if (count > max - offset)
1732 count = max - offset;
1734 memcpy (buf, (const char *) os->start + offset, count);
1735 return count;
/* Open the built-in overlay manager object (linked into ld as a byte
   image described by STREAM) as a bfd via the iovec callbacks above.
   Returns TRUE and sets *OVL_BFD on success.  */
1738 bfd_boolean
1739 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1741 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1742 "elf32-spu",
1743 ovl_mgr_open,
1744 (void *) stream,
1745 ovl_mgr_pread,
1746 NULL,
1747 NULL);
1748 return *ovl_bfd != NULL;
/* Return the 1-based overlay index of SEC's output section, or 0 for
   NULL, discarded (absolute output) or non-overlay sections.  */
1751 static unsigned int
1752 overlay_index (asection *sec)
1754 if (sec == NULL
1755 || sec->output_section == bfd_abs_section_ptr)
1756 return 0;
1757 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1760 /* Define an STT_OBJECT symbol. */
1762 static struct elf_link_hash_entry *
1763 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1765 struct elf_link_hash_entry *h;
/* Create (or fetch) NAME in the linker hash table and define it as an
   STT_OBJECT in htab->ovtab.  The caller fills in value/size, and may
   redirect u.def.section afterwards.  Errors out if the symbol is
   already defined by an input object or a linker script.  */
1767 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1768 if (h == NULL)
1769 return NULL;
1771 if (h->root.type != bfd_link_hash_defined
1772 || !h->def_regular)
1774 h->root.type = bfd_link_hash_defined;
1775 h->root.u.def.section = htab->ovtab;
1776 h->type = STT_OBJECT;
1777 h->ref_regular = 1;
1778 h->def_regular = 1;
1779 h->ref_regular_nonweak = 1;
1780 h->non_elf = 0;
1782 else if (h->root.u.def.section->owner != NULL)
1784 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1785 h->root.u.def.section->owner,
1786 h->root.root.string);
1787 bfd_set_error (bfd_error_bad_value);
1788 return NULL;
1790 else
1792 (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
1793 h->root.root.string);
1794 bfd_set_error (bfd_error_bad_value);
1795 return NULL;
1798 return h;
1801 /* Fill in all stubs and the overlay tables. */
/* Second pass: allocate stub section contents, emit every stub via
   process_stubs/build_spuear_stubs, verify the emitted sizes match
   the sizing pass, then fill in the overlay (or soft-icache) tables
   and define their symbols.  Returns FALSE on any error.  */
1803 static bfd_boolean
1804 spu_elf_build_stubs (struct bfd_link_info *info)
1806 struct spu_link_hash_table *htab = spu_hash_table (info);
1807 struct elf_link_hash_entry *h;
1808 bfd_byte *p;
1809 asection *s;
1810 bfd *obfd;
1811 unsigned int i;
1813 if (htab->stub_count == NULL)
1814 return TRUE;
/* Save each section's sized length in rawsize and rewind size to 0;
   build_stub re-advances size as it emits, so the two can be compared
   below.  */
1816 for (i = 0; i <= htab->num_overlays; i++)
1817 if (htab->stub_sec[i]->size != 0)
1819 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1820 htab->stub_sec[i]->size);
1821 if (htab->stub_sec[i]->contents == NULL)
1822 return FALSE;
1823 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1824 htab->stub_sec[i]->size = 0;
1827 h = htab->ovly_load;
1828 if (h == NULL)
1830 const char *ovly_mgr_entry = "__ovly_load";
1832 if (htab->params->ovly_flavour == ovly_soft_icache)
1833 ovly_mgr_entry = "__icache_br_handler";
1834 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
1835 FALSE, FALSE, FALSE);
1836 htab->ovly_load = h;
1838 BFD_ASSERT (h != NULL
1839 && (h->root.type == bfd_link_hash_defined
1840 || h->root.type == bfd_link_hash_defweak)
1841 && h->def_regular);
/* The overlay manager entry itself must not live in an overlay.  */
1843 s = h->root.u.def.section->output_section;
1844 if (spu_elf_section_data (s)->u.o.ovl_index)
1846 (*_bfd_error_handler) (_("%s in overlay section"),
1847 h->root.root.string);
1848 bfd_set_error (bfd_error_bad_value);
1849 return FALSE;
1852 h = htab->ovly_return;
1853 if (h == NULL && htab->params->ovly_flavour != ovly_soft_icache)
1855 h = elf_link_hash_lookup (&htab->elf, "__ovly_return",
1856 FALSE, FALSE, FALSE);
1857 htab->ovly_return = h;
1860 /* Fill in all the stubs. */
1861 process_stubs (info, TRUE);
1862 if (!htab->stub_err)
1863 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1865 if (htab->stub_err)
1867 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1868 bfd_set_error (bfd_error_bad_value);
1869 return FALSE;
1872 for (i = 0; i <= htab->num_overlays; i++)
1874 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1876 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1877 bfd_set_error (bfd_error_bad_value);
1878 return FALSE;
1880 htab->stub_sec[i]->rawsize = 0;
1883 if (htab->ovtab == NULL || htab->ovtab->size == 0)
1884 return TRUE;
1886 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1887 if (htab->ovtab->contents == NULL)
1888 return FALSE;
1890 p = htab->ovtab->contents;
1891 if (htab->params->ovly_flavour == ovly_soft_icache)
/* Soft-icache layout of .ovtab: tag array (one quadword per cache
   line), the branch linked-list area, then eight 16-byte indirect
   branch descriptors __icache_ptr_handler0..7.  */
1893 #define BI_HANDLER "__icache_ptr_handler0"
1894 char name[sizeof (BI_HANDLER)];
1895 bfd_vma off, icache_base, linklist, bihand;
1897 h = define_ovtab_symbol (htab, "__icache_tagbase");
1898 if (h == NULL)
1899 return FALSE;
1900 h->root.u.def.value = 0;
1901 h->size = 16 << htab->num_lines_log2;
1902 off = h->size;
1903 icache_base = htab->ovl_sec[0]->vma;
1904 linklist = (htab->ovtab->output_section->vma
1905 + htab->ovtab->output_offset
1906 + off);
1907 for (i = 0; i < htab->params->num_lines; i++)
1909 bfd_vma line_end = icache_base + ((i + 1) << htab->line_size_log2);
1910 bfd_vma stub_base = line_end - htab->params->max_branch * 32;
1911 bfd_vma link_elem = linklist + i * htab->params->max_branch * 16;
1912 bfd_vma locator = link_elem - stub_base / 2;
1914 bfd_put_32 (htab->ovtab->owner, locator, p + 4);
1915 bfd_put_16 (htab->ovtab->owner, link_elem, p + 8);
1916 bfd_put_16 (htab->ovtab->owner, link_elem, p + 10);
1917 bfd_put_16 (htab->ovtab->owner, link_elem, p + 12);
1918 bfd_put_16 (htab->ovtab->owner, link_elem, p + 14);
1919 p += 16;
1922 h = define_ovtab_symbol (htab, "__icache_linked_list");
1923 if (h == NULL)
1924 return FALSE;
1925 h->root.u.def.value = off;
1926 h->size = htab->params->max_branch << (htab->num_lines_log2 + 4);
1927 off += h->size;
1928 p += h->size;
1930 h = elf_link_hash_lookup (&htab->elf, "__icache_bi_handler",
1931 FALSE, FALSE, FALSE);
1932 bihand = 0;
1933 if (h != NULL
1934 && (h->root.type == bfd_link_hash_defined
1935 || h->root.type == bfd_link_hash_defweak)
1936 && h->def_regular)
1937 bihand = (h->root.u.def.value
1938 + h->root.u.def.section->output_offset
1939 + h->root.u.def.section->output_section->vma);
1940 memcpy (name, BI_HANDLER, sizeof (BI_HANDLER));
1941 for (i = 0; i < 8; i++)
1943 name[sizeof (BI_HANDLER) - 2] = '0' + i;
1944 h = define_ovtab_symbol (htab, name);
1945 if (h == NULL)
1946 return FALSE;
1947 h->root.u.def.value = off;
1948 h->size = 16;
1949 bfd_put_32 (htab->ovtab->owner, bihand, p);
1950 bfd_put_32 (htab->ovtab->owner, i << 28, p + 8);
1951 p += 16;
1952 off += 16;
/* Absolute symbols describing the icache geometry for the runtime.  */
1955 h = define_ovtab_symbol (htab, "__icache_base");
1956 if (h == NULL)
1957 return FALSE;
1958 h->root.u.def.value = htab->ovl_sec[0]->vma;
1959 h->root.u.def.section = bfd_abs_section_ptr;
1960 h->size = htab->num_buf << htab->line_size_log2;
1962 h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
1963 if (h == NULL)
1964 return FALSE;
1965 h->root.u.def.value = -htab->line_size_log2;
1966 h->root.u.def.section = bfd_abs_section_ptr;
1968 if (htab->init != NULL && htab->init->size != 0)
1970 htab->init->contents = bfd_zalloc (htab->init->owner,
1971 htab->init->size);
1972 if (htab->init->contents == NULL)
1973 return FALSE;
1975 h = define_ovtab_symbol (htab, "__icache_fileoff");
1976 if (h == NULL)
1977 return FALSE;
1978 h->root.u.def.value = 0;
1979 h->root.u.def.section = htab->init;
1980 h->size = 8;
1983 else
1985 /* Write out _ovly_table. */
1986 /* set low bit of .size to mark non-overlay area as present. */
1987 p[7] = 1;
1988 obfd = htab->ovtab->output_section->owner;
1989 for (s = obfd->sections; s != NULL; s = s->next)
1991 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
1993 if (ovl_index != 0)
1995 unsigned long off = ovl_index * 16;
1996 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
1998 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1999 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2000 p + off + 4);
2001 /* file_off written later in spu_elf_modify_program_headers. */
2002 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2006 h = define_ovtab_symbol (htab, "_ovly_table");
2007 if (h == NULL)
2008 return FALSE;
2009 h->root.u.def.value = 16;
2010 h->size = htab->num_overlays * 16;
2012 h = define_ovtab_symbol (htab, "_ovly_table_end");
2013 if (h == NULL)
2014 return FALSE;
2015 h->root.u.def.value = htab->num_overlays * 16 + 16;
2016 h->size = 0;
2018 h = define_ovtab_symbol (htab, "_ovly_buf_table");
2019 if (h == NULL)
2020 return FALSE;
2021 h->root.u.def.value = htab->num_overlays * 16 + 16;
2022 h->size = htab->num_buf * 4;
2024 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2025 if (h == NULL)
2026 return FALSE;
2027 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2028 h->size = 0;
2031 h = define_ovtab_symbol (htab, "_EAR_");
2032 if (h == NULL)
2033 return FALSE;
2034 h->root.u.def.section = htab->toe;
2035 h->root.u.def.value = 0;
2036 h->size = htab->params->ovly_flavour == ovly_soft_icache ? 16 * 16 : 16;
2038 return TRUE;
2041 /* Check that all loadable section VMAs lie in the range
2042 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2044 asection *
2045 spu_elf_check_vma (struct bfd_link_info *info)
2047 struct elf_segment_map *m;
2048 unsigned int i;
2049 struct spu_link_hash_table *htab = spu_hash_table (info);
2050 bfd *abfd = info->output_bfd;
2051 bfd_vma hi = htab->params->local_store_hi;
2052 bfd_vma lo = htab->params->local_store_lo;
/* Record the usable local-store size for --auto-overlay.  */
2054 htab->local_store = hi + 1 - lo;
/* Return the first PT_LOAD section falling outside [lo, hi]; the
   size-1 term also catches sections that start in range but overrun
   the top of local store.  */
2056 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2057 if (m->p_type == PT_LOAD)
2058 for (i = 0; i < m->count; i++)
2059 if (m->sections[i]->size != 0
2060 && (m->sections[i]->vma < lo
2061 || m->sections[i]->vma > hi
2062 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2063 return m->sections[i];
2065 /* No need for overlays if it all fits. */
2066 if (htab->params->ovly_flavour != ovly_soft_icache)
2067 htab->params->auto_overlay = 0;
2068 return NULL;
2071 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2072 Search for stack adjusting insns, and return the sp delta.
2073 If a store of lr is found save the instruction offset to *LR_STORE.
2074 If a stack adjusting instruction is found, save that offset to
2075 *SP_ADJUST. */
2077 static int
2078 find_function_stack_adjust (asection *sec,
2079 bfd_vma offset,
2080 bfd_vma *lr_store,
2081 bfd_vma *sp_adjust)
/* Abstract-interpret the prologue starting at OFFSET: reg[] tracks
   the known constant value flowing into each of the 128 SPU registers
   (only the preferred word matters here), so that when $sp (reg 1) is
   updated we can report the frame size.  Returns the (negative) sp
   delta, or 0 if none found before a branch/end of section.  */
2083 int reg[128];
2085 memset (reg, 0, sizeof (reg));
2086 for ( ; offset + 4 <= sec->size; offset += 4)
2088 unsigned char buf[4];
2089 int rt, ra;
2090 int imm;
2092 /* Assume no relocs on stack adjusing insns. */
2093 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2094 break;
/* Common field decode: rt is insn bits 25-31, ra bits 18-24.  */
2096 rt = buf[3] & 0x7f;
2097 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2099 if (buf[0] == 0x24 /* stqd */)
2101 if (rt == 0 /* lr */ && ra == 1 /* sp */)
2102 *lr_store = offset;
2103 continue;
2106 /* Partly decoded immediate field. */
2107 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2109 if (buf[0] == 0x1c /* ai */)
/* ai: sign-extend the 10-bit immediate and add.  */
2111 imm >>= 7;
2112 imm = (imm ^ 0x200) - 0x200;
2113 reg[rt] = reg[ra] + imm;
2115 if (rt == 1 /* sp */)
/* A positive sp change is not a prologue allocation; stop.  */
2117 if (reg[rt] > 0)
2118 break;
2119 *sp_adjust = offset;
2120 return reg[rt];
2123 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2125 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2127 reg[rt] = reg[ra] + reg[rb];
2128 if (rt == 1)
2130 if (reg[rt] > 0)
2131 break;
2132 *sp_adjust = offset;
2133 return reg[rt];
2136 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2138 if (buf[0] >= 0x42 /* ila */)
2139 imm |= (buf[0] & 1) << 17;
2140 else
2142 imm &= 0xffff;
2144 if (buf[0] == 0x40 /* il */)
2146 if ((buf[1] & 0x80) == 0)
2147 continue;
2148 imm = (imm ^ 0x8000) - 0x8000;
2150 else if ((buf[1] & 0x80) == 0 /* ilhu */)
2151 imm <<= 16;
2153 reg[rt] = imm;
2154 continue;
2156 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2158 reg[rt] |= imm & 0xffff;
2159 continue;
2161 else if (buf[0] == 0x04 /* ori */)
2163 imm >>= 7;
2164 imm = (imm ^ 0x200) - 0x200;
2165 reg[rt] = reg[ra] | imm;
2166 continue;
2168 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
/* fsmbi expands each immediate bit to a byte; model the word.  */
2170 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
2171 | ((imm & 0x4000) ? 0x00ff0000 : 0)
2172 | ((imm & 0x2000) ? 0x0000ff00 : 0)
2173 | ((imm & 0x1000) ? 0x000000ff : 0));
2174 continue;
2176 else if (buf[0] == 0x16 /* andbi */)
2178 imm >>= 7;
2179 imm &= 0xff;
2180 imm |= imm << 8;
2181 imm |= imm << 16;
2182 reg[rt] = reg[ra] & imm;
2183 continue;
2185 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2187 /* Used in pic reg load. Say rt is trashed. Won't be used
2188 in stack adjust, but we need to continue past this branch. */
2189 reg[rt] = 0;
2190 continue;
2192 else if (is_branch (buf) || is_indirect_branch (buf))
2193 /* If we hit a branch then we must be out of the prologue. */
2194 break;
2197 return 0;
2200 /* qsort predicate to sort symbols by section and value. */
/* qsort has no context argument, so the symbol array base and the
   parallel per-symbol section array are passed via these file-scope
   statics (set up by the caller before sorting).  */
2202 static Elf_Internal_Sym *sort_syms_syms;
2203 static asection **sort_syms_psecs;
2205 static int
2206 sort_syms (const void *a, const void *b)
2208 Elf_Internal_Sym *const *s1 = a;
2209 Elf_Internal_Sym *const *s2 = b;
2210 asection *sec1,*sec2;
2211 bfd_signed_vma delta;
/* Order: section index, then value ascending, then size descending
   (so an enclosing symbol precedes contained ones), finally pointer
   order as a deterministic tie-break.  */
2213 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2214 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2216 if (sec1 != sec2)
2217 return sec1->index - sec2->index;
2219 delta = (*s1)->st_value - (*s2)->st_value;
2220 if (delta != 0)
2221 return delta < 0 ? -1 : 1;
2223 delta = (*s2)->st_size - (*s1)->st_size;
2224 if (delta != 0)
2225 return delta < 0 ? -1 : 1;
2227 return *s1 < *s2 ? -1 : 1;
2230 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2231 entries for section SEC. */
2233 static struct spu_elf_stack_info *
2234 alloc_stack_info (asection *sec, int max_fun)
2236 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2237 bfd_size_type amt;
/* spu_elf_stack_info ends in a one-element function_info array, hence
   the "max_fun - 1" sizing.  Zeroed allocation; returns NULL on OOM.  */
2239 amt = sizeof (struct spu_elf_stack_info);
2240 amt += (max_fun - 1) * sizeof (struct function_info);
2241 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2242 if (sec_data->u.i.stack_info != NULL)
2243 sec_data->u.i.stack_info->max_fun = max_fun;
2244 return sec_data->u.i.stack_info;
2247 /* Add a new struct function_info describing a (part of a) function
2248 starting at SYM_H. Keep the array sorted by address. */
2250 static struct function_info *
2251 maybe_insert_function (asection *sec,
2252 void *sym_h,
2253 bfd_boolean global,
2254 bfd_boolean is_func)
/* SYM_H is an Elf_Internal_Sym* when !GLOBAL, else an
   elf_link_hash_entry*.  Inserts (or updates) a function_info entry
   for the symbol in SEC's sorted stack_info array and computes its
   prologue info.  Returns the entry, or NULL on OOM.  */
2256 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2257 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2258 int i;
2259 bfd_vma off, size;
2261 if (sinfo == NULL)
2263 sinfo = alloc_stack_info (sec, 20);
2264 if (sinfo == NULL)
2265 return NULL;
2268 if (!global)
2270 Elf_Internal_Sym *sym = sym_h;
2271 off = sym->st_value;
2272 size = sym->st_size;
2274 else
2276 struct elf_link_hash_entry *h = sym_h;
2277 off = h->root.u.def.value;
2278 size = h->size;
/* Scan backwards for the last entry starting at or before OFF; the
   array is kept sorted by lo.  */
2281 for (i = sinfo->num_fun; --i >= 0; )
2282 if (sinfo->fun[i].lo <= off)
2283 break;
2285 if (i >= 0)
2287 /* Don't add another entry for an alias, but do update some
2288 info. */
2289 if (sinfo->fun[i].lo == off)
2291 /* Prefer globals over local syms. */
2292 if (global && !sinfo->fun[i].global)
2294 sinfo->fun[i].global = TRUE;
2295 sinfo->fun[i].u.h = sym_h;
2297 if (is_func)
2298 sinfo->fun[i].is_func = TRUE;
2299 return &sinfo->fun[i];
2301 /* Ignore a zero-size symbol inside an existing function. */
2302 else if (sinfo->fun[i].hi > off && size == 0)
2303 return &sinfo->fun[i];
/* Grow the flexible array by 50% plus 20 entries when full, zeroing
   the newly added tail.  */
2306 if (sinfo->num_fun >= sinfo->max_fun)
2308 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
2309 bfd_size_type old = amt;
2311 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
2312 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
2313 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
2314 sinfo = bfd_realloc (sinfo, amt);
2315 if (sinfo == NULL)
2316 return NULL;
2317 memset ((char *) sinfo + old, 0, amt - old);
2318 sec_data->u.i.stack_info = sinfo;
/* Shift later entries up to keep the array sorted, then fill in the
   new slot, including prologue analysis for the stack usage.  */
2321 if (++i < sinfo->num_fun)
2322 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
2323 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
2324 sinfo->fun[i].is_func = is_func;
2325 sinfo->fun[i].global = global;
2326 sinfo->fun[i].sec = sec;
2327 if (global)
2328 sinfo->fun[i].u.h = sym_h;
2329 else
2330 sinfo->fun[i].u.sym = sym_h;
2331 sinfo->fun[i].lo = off;
2332 sinfo->fun[i].hi = off + size;
2333 sinfo->fun[i].lr_store = -1;
2334 sinfo->fun[i].sp_adjust = -1;
2335 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
2336 &sinfo->fun[i].lr_store,
2337 &sinfo->fun[i].sp_adjust);
2338 sinfo->num_fun += 1;
2339 return &sinfo->fun[i];
/* Return the name of FUN.  For an unnamed local symbol a
   "section+offset" string is built on the heap; that buffer is never
   freed (the returned names are only used for diagnostics, so the
   small leak is accepted).  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  /* Walk back to the first (entry) part of a split function so all
     parts report the same name.  */
  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      /* len + 10 holds "+", up to 8 hex digits (value masked to
	 32 bits) and the NUL.  */
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
2373 /* Read the instruction at OFF in SEC. Return true iff the instruction
2374 is a nop, lnop, or stop 0 (all zero insn). */
2376 static bfd_boolean
2377 is_nop (asection *sec, bfd_vma off)
2379 unsigned char insn[4];
2381 if (off + 4 > sec->size
2382 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2383 return FALSE;
2384 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2385 return TRUE;
2386 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2387 return TRUE;
2388 return FALSE;
2391 /* Extend the range of FUN to cover nop padding up to LIMIT.
2392 Return TRUE iff some instruction other than a NOP was found. */
2394 static bfd_boolean
2395 insns_at_end (struct function_info *fun, bfd_vma limit)
2397 bfd_vma off = (fun->hi + 3) & -4;
2399 while (off < limit && is_nop (fun->sec, off))
2400 off += 4;
2401 if (off < limit)
2403 fun->hi = off;
2404 return TRUE;
2406 fun->hi = limit;
2407 return FALSE;
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  /* No stack_info at all: nothing to check here.  */
  if (sinfo == NULL)
    return FALSE;

  /* Walk adjacent pairs; fun[] is sorted by address.  */
  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    /* Not overlapping: extend over nop padding, and note a gap if
       real instructions lie between the two functions.  */
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      /* Code before the first known function is a gap too.  */
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  /* Clamp a function that claims to extend past the section.  */
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
2456 /* Search current function info for a function that contains address
2457 OFFSET in section SEC. */
2459 static struct function_info *
2460 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2462 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2463 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2464 int lo, hi, mid;
2466 lo = 0;
2467 hi = sinfo->num_fun;
2468 while (lo < hi)
2470 mid = (lo + hi) / 2;
2471 if (offset < sinfo->fun[mid].lo)
2472 hi = mid;
2473 else if (offset >= sinfo->fun[mid].hi)
2474 lo = mid + 1;
2475 else
2476 return &sinfo->fun[mid];
2478 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2479 sec, offset);
2480 bfd_set_error (bfd_error_bad_value);
2481 return NULL;
/* Add CALLEE to CALLER call list if not already present.  Return TRUE
   if CALLEE was new.  If this function return FALSE, CALLEE should
   be freed.  */

static bfd_boolean
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  /* Scan for an existing entry for the same destination function.
     PP tracks the link to P so P can be unspliced in place.  */
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
	/* Tail calls use less stack than normal calls.  Retain entry
	   for normal call over one for tail call.  */
	p->is_tail &= callee->is_tail;
	if (!p->is_tail)
	  {
	    /* A normal call makes the destination a real function,
	       not a hot/cold continuation of the caller.  */
	    p->fun->start = NULL;
	    p->fun->is_func = TRUE;
	  }
	p->count += 1;
	/* Reorder list so most recent call is first.  */
	*pp = p->next;
	p->next = caller->call_list;
	caller->call_list = p;
	return FALSE;
      }
  /* New destination: push CALLEE on the front of the list.  */
  callee->next = caller->call_list;
  callee->count += 1;
  caller->call_list = callee;
  return TRUE;
}
2517 /* Copy CALL and insert the copy into CALLER. */
2519 static bfd_boolean
2520 copy_callee (struct function_info *caller, const struct call_info *call)
2522 struct call_info *callee;
2523 callee = bfd_malloc (sizeof (*callee));
2524 if (callee == NULL)
2525 return FALSE;
2526 *callee = *call;
2527 if (!insert_callee (caller, callee))
2528 free (callee);
2529 return TRUE;
2532 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2533 overlay stub sections. */
2535 static bfd_boolean
2536 interesting_section (asection *s)
2538 return (s->output_section != bfd_abs_section_ptr
2539 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2540 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2541 && s->size != 0);
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */

static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  /* NOTE: static, so the "call to non-code section" warning is only
     issued once per link, across all sections.  */
  static bfd_boolean warned;

  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return TRUE;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_boolean reject, is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* REJECT means this reloc cannot be a branch/call; such relocs
	 are still processed under --auto-overlay with CALL_TREE to
	 count function-pointer stub candidates.  */
      reject = FALSE;
      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
	  && r_type != R_SPU_ADDR16)
	{
	  reject = TRUE;
	  if (!(call_tree && spu_hash_table (info)->params->auto_overlay))
	    continue;
	}

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      if (sym_sec == NULL
	  || sym_sec->output_section == bfd_abs_section_ptr)
	continue;

      is_call = FALSE;
      if (!reject)
	{
	  unsigned char insn[4];

	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return FALSE;
	  if (is_branch (insn))
	    {
	      /* Distinguish call (branch-and-set-link) from plain
		 branch, and extract the overlay priority encoded in
		 the instruction's immediate field.  */
	      is_call = (insn[0] & 0xfd) == 0x31;
	      priority = insn[1] & 0x0f;
	      priority <<= 8;
	      priority |= insn[2];
	      priority <<= 8;
	      priority |= insn[3];
	      priority >>= 7;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      (_("%B(%A+0x%v): call to non-code section"
			 " %B(%A), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = TRUE;
		  continue;
		}
	    }
	  else
	    {
	      /* Not a branch at all; maybe a function pointer load.
		 Branch hints are never pointer references.  */
	      reject = TRUE;
	      if (!(call_tree && spu_hash_table (info)->params->auto_overlay)
		  || is_hint (insn))
		continue;
	    }
	}

      if (reject)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    spu_hash_table (info)->non_ovly_stub += 1;
	  continue;
	}

      /* Destination address within SYM_SEC.  */
      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: just record that the destination is (part of)
	     a function.  A non-zero addend means the target is not
	     the symbol itself, so fabricate a local symbol at the
	     branched-to offset.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake symbol unless maybe_insert_function kept it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: build the call graph edge caller -> callee.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      callee->is_pasted = FALSE;
      callee->priority = priority;
      callee->count = 0;
      /* call_count counts calling sections, not individual calls;
	 last_caller de-duplicates within one section.  */
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else if (callee->fun->start == NULL)
	    callee->fun->start = caller;
	  else
	    {
	      /* Both ends already belong to chains of parts; they are
		 the same function only if they share an entry part.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = TRUE;
		}
	    }
	}
    }

  return TRUE;
}
/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.  */

static bfd_boolean
pasted_function (asection *sec)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Fabricate a local symbol covering the whole section so it gets a
     function_info entry.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Link this piece to its predecessor with a "pasted"
		 tail-call edge so stack analysis follows it.  */
	      struct call_info *callee = bfd_malloc (sizeof *callee);
	      if (callee == NULL)
		return FALSE;

	      fun->start = fun_start;
	      callee->fun = fun;
	      callee->is_tail = TRUE;
	      callee->is_pasted = TRUE;
	      callee->count = 0;
	      if (!insert_callee (fun_start, callee))
		free (callee);
	      return TRUE;
	    }
	  break;
	}
      /* Remember the last function of each preceding input section.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Don't return an error if we did not find a function preceding this
     section.  The section may have incorrect flags.  */
  return TRUE;
}
/* Map address ranges in code sections to functions.
   Phase 1: install properly typed function symbols.
   Phase 2 (only if gaps remain): discover more functions via relocs,
   install global symbols, and extend zero-size entries.  */

static bfd_boolean
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  /* Per-input-bfd arrays of selected symbol pointers and their
     sections, kept alive until the end so phase 2 can reuse them.  */
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  bfd_boolean gaps = FALSE;

  /* Count the input bfds.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    bfd_idx++;

  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols, but code sections present: that's a gap.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec))
		{
		  gaps = TRUE;
		  break;
		}
	  continue;
	}

      if (symtab_hdr->contents != NULL)
	{
	  /* Don't use cached symbols since the generic ELF linker
	     code only reads local symbols, and we need globals too.  */
	  free (symtab_hdr->contents);
	  symtab_hdr->contents = NULL;
	}
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return FALSE;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return FALSE;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      /* NULL-terminate so phase 2 can iterate without a count.  */
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  /* Find the run of symbols in the same section, and size
	     that section's stack_info array accordingly.  */
	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, FALSE))
	      return FALSE;
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check; relocs may have filled in the gaps.  */
	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
	{
	  extern const bfd_target bfd_elf32_spu_vec;
	  asection *sec;

	  if (ibfd->xvec != &bfd_elf32_spu_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL && sinfo->num_fun != 0)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk backwards so each function's end becomes
		       the next function's start.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }

		    sinfo->fun[0].lo = 0;
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec))
		  return FALSE;
	      }
	}
    }

  /* Release the per-bfd scratch arrays.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
3049 /* Iterate over all function_info we have collected, calling DOIT on
3050 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3051 if ROOT_ONLY. */
3053 static bfd_boolean
3054 for_each_node (bfd_boolean (*doit) (struct function_info *,
3055 struct bfd_link_info *,
3056 void *),
3057 struct bfd_link_info *info,
3058 void *param,
3059 int root_only)
3061 bfd *ibfd;
3063 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3065 extern const bfd_target bfd_elf32_spu_vec;
3066 asection *sec;
3068 if (ibfd->xvec != &bfd_elf32_spu_vec)
3069 continue;
3071 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3073 struct _spu_elf_section_data *sec_data;
3074 struct spu_elf_stack_info *sinfo;
3076 if ((sec_data = spu_elf_section_data (sec)) != NULL
3077 && (sinfo = sec_data->u.i.stack_info) != NULL)
3079 int i;
3080 for (i = 0; i < sinfo->num_fun; ++i)
3081 if (!root_only || !sinfo->fun[i].non_root)
3082 if (!doit (&sinfo->fun[i], info, param))
3083 return FALSE;
3087 return TRUE;
3090 /* Transfer call info attached to struct function_info entries for
3091 all of a given function's sections to the first entry. */
3093 static bfd_boolean
3094 transfer_calls (struct function_info *fun,
3095 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3096 void *param ATTRIBUTE_UNUSED)
3098 struct function_info *start = fun->start;
3100 if (start != NULL)
3102 struct call_info *call, *call_next;
3104 while (start->start != NULL)
3105 start = start->start;
3106 for (call = fun->call_list; call != NULL; call = call_next)
3108 call_next = call->next;
3109 if (!insert_callee (start, call))
3110 free (call);
3112 fun->call_list = NULL;
3114 return TRUE;
3117 /* Mark nodes in the call graph that are called by some other node. */
3119 static bfd_boolean
3120 mark_non_root (struct function_info *fun,
3121 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3122 void *param ATTRIBUTE_UNUSED)
3124 struct call_info *call;
3126 if (fun->visit1)
3127 return TRUE;
3128 fun->visit1 = TRUE;
3129 for (call = fun->call_list; call; call = call->next)
3131 call->fun->non_root = TRUE;
3132 mark_non_root (call->fun, 0, 0);
3134 return TRUE;
/* Remove cycles from the call graph.  Set depth of nodes.
   PARAM points to the current depth on entry and receives the
   maximum depth reached below FUN on return.  */

static bfd_boolean
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  fun->visit2 = TRUE;
  /* MARKING flags nodes on the current DFS path; seeing a marked node
     again means we've found a back edge, ie. a cycle.  */
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      /* Pasted parts stay at the same depth as their predecessor.  */
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
	{
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return FALSE;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  /* Back edge: drop this call to break the cycle.  */
	  struct spu_link_hash_table *htab = spu_hash_table (info);

	  if (!htab->params->auto_overlay
	      && htab->params->stack_analysis)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      info->callbacks->info (_("Stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }
	  /* Unsplice CALL from the list; CALLP stays put so the next
	     iteration examines the element that replaced it.  */
	  *callp = call->next;
	  free (call);
	  continue;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
  *(unsigned int *) param = max_depth;
  return TRUE;
}
/* Check that we actually visited all nodes in remove_cycles.  If we
   didn't, then there is some cycle in the call graph not attached to
   any root node.  Arbitrarily choose a node in the cycle as a new
   root and break the cycle.  */

static bfd_boolean
mark_detached_root (struct function_info *fun,
		    struct bfd_link_info *info,
		    void *param)
{
  /* Already reached from some root by remove_cycles: nothing to do.  */
  if (fun->visit2)
    return TRUE;
  /* Promote FUN to a root and restart depth numbering from zero.  */
  fun->non_root = FALSE;
  *(unsigned int *) param = 0;
  return remove_cycles (fun, info, param);
}
/* Populate call_list for each function.  Runs the call-graph pass of
   mark_functions_via_relocs over every section, then normalizes the
   graph: merge hot/cold parts, find roots, and break cycles.  */

static bfd_boolean
build_call_tree (struct bfd_link_info *info)
{
  bfd *ibfd;
  unsigned int depth;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (!mark_functions_via_relocs (sec, info, TRUE))
	  return FALSE;
    }

  /* Transfer call info from hot/cold section part of function
     to main entry.  */
  if (!spu_hash_table (info)->params->auto_overlay
      && !for_each_node (transfer_calls, info, 0, FALSE))
    return FALSE;

  /* Find the call graph root(s).  */
  if (!for_each_node (mark_non_root, info, 0, FALSE))
    return FALSE;

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  depth = 0;
  if (!for_each_node (remove_cycles, info, &depth, TRUE))
    return FALSE;

  /* Handle any cycles not reachable from a root.  */
  return for_each_node (mark_detached_root, info, &depth, FALSE);
}
/* qsort predicate to sort calls by priority, max_depth then count.
   All three keys sort descending (higher value first).  */

static int
sort_calls (const void *a, const void *b)
{
  struct call_info *const *c1 = a;
  struct call_info *const *c2 = b;
  int delta;

  delta = (*c2)->priority - (*c1)->priority;
  if (delta != 0)
    return delta;

  delta = (*c2)->max_depth - (*c1)->max_depth;
  if (delta != 0)
    return delta;

  delta = (*c2)->count - (*c1)->count;
  if (delta != 0)
    return delta;

  /* Final tiebreak on element address within the array being sorted,
     making the sort deterministic.  NOTE(review): the pointer
     difference is truncated to int; fine for the sizes of call arrays
     built here, but not fully general.  */
  return (char *) c1 - (char *) c2;
}
/* Parameter block passed to mark_overlay_section via for_each_node.  */
struct _mos_param {
  /* Largest candidate overlay size (section text plus any associated
     rodata) seen so far.  */
  unsigned int max_overlay_size;
};
/* Set linker_mark and gc_mark on any sections that we will put in
   overlays.  These flags are used by the generic ELF linker, but we
   won't be continuing on to bfd_elf_final_link so it is OK to use
   them.  linker_mark is clear before we get here.  Set segment_mark
   on sections that are part of a pasted function (excluding the last
   section).

   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections since

   Sort the call graph so that the deepest nodes will be visited
   first.  */

static bfd_boolean
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  /* For soft-icache, only .text.ia.* sections are candidates unless
     --non-ia-text was given.  */
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || strncmp (fun->sec->name, ".text.ia.", 9) == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (strncmp (fun->sec->name, ".text.", 6) == 0)
	    {
	      /* ".text.FOO" -> ".rodata.FOO".  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
	    {
	      /* ".gnu.linkonce.t.FOO" -> ".gnu.linkonce.r.FOO";
		 name[14] is the 't'.  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      /* Prefer a matching member of the same section group;
		 fall back to a flat name lookup.  */
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  /* Drop the rodata again if the combined size won't
		     fit in a soft-icache line.  */
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  /* Sort the call list so the deepest (then most used) callees come
     first.  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      /* Rebuild the singly linked list in sorted order.  */
      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!mark_overlay_section (call->fun, info, param))
	return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
/* If non-zero then unmark functions called from those within sections
   that we need to unmark.  Unfortunately this isn't reliable since the
   call graph cannot know the destination of function pointer calls.  */
#define RECURSE_UNMARK 0

/* Parameter block for unmark_overlay_section.  */
struct _uos_param {
  /* Input/output sections whose functions must stay out of overlays.  */
  asection *exclude_input_section;
  asection *exclude_output_section;
  /* Depth counter used only when RECURSE_UNMARK is non-zero: non-zero
     while traversing below an excluded function.  */
  unsigned long clearing;
};
/* Undo some of mark_overlay_section's work.  Clears linker_mark on
   FUN's sections when FUN lies in (or, with RECURSE_UNMARK, is called
   from) an excluded section.  */

static bfd_boolean
unmark_overlay_section (struct function_info *fun,
			struct bfd_link_info *info,
			void *param)
{
  struct call_info *call;
  struct _uos_param *uos_param = param;
  unsigned int excluded = 0;

  if (fun->visit5)
    return TRUE;

  fun->visit5 = TRUE;

  excluded = 0;
  if (fun->sec == uos_param->exclude_input_section
      || fun->sec->output_section == uos_param->exclude_output_section)
    excluded = 1;

  /* With RECURSE_UNMARK, CLEARING stays non-zero for the whole
     subtree below an excluded function.  */
  if (RECURSE_UNMARK)
    uos_param->clearing += excluded;

  if (RECURSE_UNMARK ? uos_param->clearing : excluded)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata)
	fun->rodata->linker_mark = 0;
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    if (!unmark_overlay_section (call->fun, info, param))
      return FALSE;

  if (RECURSE_UNMARK)
    uos_param->clearing -= excluded;
  return TRUE;
}
/* Parameter block for collect_lib_sections.  */
struct _cl_param {
  /* Size budget: only sections no larger than this are collected.  */
  unsigned int lib_size;
  /* Output cursor into an array of (text, rodata) section pairs.  */
  asection **lib_sections;
};
/* Add sections we have marked as belonging to overlays to an array
   for consideration as non-overlay sections.  The array consist of
   pairs of sections, (text,rodata), for functions in the call graph.  */

static bfd_boolean
collect_lib_sections (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct _cl_param *lib_param = param;
  struct call_info *call;
  unsigned int size;

  if (fun->visit6)
    return TRUE;

  fun->visit6 = TRUE;
  /* Skip sections not marked for overlays, already collected
     (gc_mark cleared below), or part of a pasted function.  */
  if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
    return TRUE;

  size = fun->sec->size;
  if (fun->rodata)
    size += fun->rodata->size;

  if (size <= lib_param->lib_size)
    {
      /* Emit the (text, rodata-or-NULL) pair and clear gc_mark so the
	 section isn't collected twice.  */
      *lib_param->lib_sections++ = fun->sec;
      fun->sec->gc_mark = 0;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  *lib_param->lib_sections++ = fun->rodata;
	  fun->rodata->gc_mark = 0;
	}
      else
	*lib_param->lib_sections++ = NULL;
    }

  /* Recurse over callees; errors are not possible here so the return
     value of the recursive call is ignored.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    collect_lib_sections (call->fun, info, param);

  return TRUE;
}
3536 /* qsort predicate to sort sections by call count. */
3538 static int
3539 sort_lib (const void *a, const void *b)
3541 asection *const *s1 = a;
3542 asection *const *s2 = b;
3543 struct _spu_elf_section_data *sec_data;
3544 struct spu_elf_stack_info *sinfo;
3545 int delta;
3547 delta = 0;
3548 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3549 && (sinfo = sec_data->u.i.stack_info) != NULL)
3551 int i;
3552 for (i = 0; i < sinfo->num_fun; ++i)
3553 delta -= sinfo->fun[i].call_count;
3556 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3557 && (sinfo = sec_data->u.i.stack_info) != NULL)
3559 int i;
3560 for (i = 0; i < sinfo->num_fun; ++i)
3561 delta += sinfo->fun[i].call_count;
3564 if (delta != 0)
3565 return delta;
3567 return s1 - s2;
/* Remove some sections from those marked to be in overlays.  Choose
   those that are called from many places, likely library functions.
   LIB_SIZE is the number of bytes available in the non-overlay area.
   Returns the number of bytes left over after moving sections out of
   overlays, or (unsigned int) -1 on error.  */

static unsigned int
auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
{
  bfd *ibfd;
  asection **lib_sections;
  unsigned int i, lib_count;
  struct _cl_param collect_lib_param;
  struct function_info dummy_caller;
  struct spu_link_hash_table *htab;

  memset (&dummy_caller, 0, sizeof (dummy_caller));
  /* First pass: count candidate code sections so we know how large an
     array of (text, rodata) pairs to allocate.  */
  lib_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (sec->linker_mark
	    && sec->size < lib_size
	    && (sec->flags & SEC_CODE) != 0)
	  lib_count += 1;
    }
  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
  if (lib_sections == NULL)
    return (unsigned int) -1;
  collect_lib_param.lib_size = lib_size;
  collect_lib_param.lib_sections = lib_sections;
  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
		      TRUE))
    return (unsigned int) -1;
  /* collect_lib_sections advanced the array pointer; recompute the
     actual number of pairs recorded.  */
  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;

  /* Sort sections so that those with the most calls are first.  */
  if (lib_count > 1)
    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);

  htab = spu_hash_table (info);
  for (i = 0; i < lib_count; i++)
    {
      unsigned int tmp, stub_size;
      asection *sec;
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      sec = lib_sections[2 * i];
      /* If this section is OK, its size must be less than lib_size.  */
      tmp = sec->size;
      /* If it has a rodata section, then add that too.  */
      if (lib_sections[2 * i + 1])
	tmp += lib_sections[2 * i + 1]->size;
      /* Add any new overlay call stubs needed by the section.  */
      stub_size = 0;
      if (tmp < lib_size
	  && (sec_data = spu_elf_section_data (sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int k;
	  struct call_info *call;

	  for (k = 0; k < sinfo->num_fun; ++k)
	    for (call = sinfo->fun[k].call_list; call; call = call->next)
	      if (call->fun->sec->linker_mark)
		{
		  /* dummy_caller tracks the set of overlay callees
		     reachable from sections already moved here; only
		     callees not yet in the set cost a new stub.  */
		  struct call_info *p;
		  for (p = dummy_caller.call_list; p; p = p->next)
		    if (p->fun == call->fun)
		      break;
		  if (!p)
		    stub_size += ovl_stub_size (htab->params->ovly_flavour);
		}
	}
      if (tmp + stub_size < lib_size)
	{
	  struct call_info **pp, *p;

	  /* This section fits.  Mark it as non-overlay.  */
	  lib_sections[2 * i]->linker_mark = 0;
	  if (lib_sections[2 * i + 1])
	    lib_sections[2 * i + 1]->linker_mark = 0;
	  lib_size -= tmp + stub_size;
	  /* Call stubs to the section we just added are no longer
	     needed.  */
	  pp = &dummy_caller.call_list;
	  while ((p = *pp) != NULL)
	    if (!p->fun->sec->linker_mark)
	      {
		/* Reclaim the stub space we charged earlier.  */
		lib_size += ovl_stub_size (htab->params->ovly_flavour);
		*pp = p->next;
		free (p);
	      }
	    else
	      pp = &p->next;
	  /* Add new call stubs to dummy_caller.  */
	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->u.i.stack_info) != NULL)
	    {
	      int k;
	      struct call_info *call;

	      for (k = 0; k < sinfo->num_fun; ++k)
		for (call = sinfo->fun[k].call_list;
		     call;
		     call = call->next)
		  if (call->fun->sec->linker_mark)
		    {
		      struct call_info *callee;
		      callee = bfd_malloc (sizeof (*callee));
		      if (callee == NULL)
			return (unsigned int) -1;
		      *callee = *call;
		      /* insert_callee takes ownership on success;
			 on duplicate we must free our copy.  */
		      if (!insert_callee (&dummy_caller, callee))
			free (callee);
		    }
	    }
	}
    }
  /* Release the remaining tracked-callee list.  */
  while (dummy_caller.call_list != NULL)
    {
      struct call_info *call = dummy_caller.call_list;
      dummy_caller.call_list = call->next;
      free (call);
    }
  /* Restore gc_mark on every candidate we cleared while collecting,
     whether or not it was moved out of overlays.  */
  for (i = 0; i < 2 * lib_count; i++)
    if (lib_sections[i])
      lib_sections[i]->gc_mark = 1;
  free (lib_sections);
  return lib_size;
}
/* Build an array of overlay sections.  The deepest node's section is
   added first, then its parent node's section, then everything called
   from the parent section.  The idea being to group sections to
   minimise calls between different overlays.  PARAM points at the
   next free slot of the (text, rodata) pair array being filled.  */

static bfd_boolean
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  asection ***ovly_sections = param;

  /* Visit each node once.  */
  if (fun->visit7)
    return TRUE;

  fun->visit7 = TRUE;
  /* Descend to the first non-pasted callee before adding FUN, so the
     deepest section lands first in the array.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return FALSE;
	break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	*(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      /* Follow the is_pasted chain, clearing gc_mark on each
		 continuation section (and its rodata).  */
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* A segment_mark section must have a pasted successor.  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now visit every remaining callee.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!collect_overlays (call->fun, info, ovly_sections))
      return FALSE;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also visit any other functions living in the same section.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return FALSE;
	}
    }

  return TRUE;
}
/* Parameter block for sum_stack.  */

struct _sum_stack_param {
  /* Cumulative stack of the node just processed (output of each
     recursive sum_stack call).  */
  size_t cum_stack;
  /* Maximum cumulative stack seen over all call-graph root nodes.  */
  size_t overall_stack;
  /* When true, define a __stack_* absolute symbol per function.  */
  bfd_boolean emit_stack_syms;
};
/* Descend the call graph for FUN, accumulating total stack required.
   On return FUN->stack holds the cumulative (worst path) stack and
   PARAM->cum_stack is set to the same value.  Optionally prints per
   function figures and defines __stack_* symbols.  */

static bfd_boolean
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  /* Already processed: fun->stack is already cumulative.  */
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  max = NULL;
  for (call = fun->call_list; call; call = call->next)
    {
      if (!call->is_pasted)
	has_call = TRUE;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  /* Remember which callee is on the worst path, for the
	     '*' annotation below.  */
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* When auto-overlaying, this pass is only used to size the stack
     reservation; skip all reporting and symbol emission.  */
  if (htab->params->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
	info->callbacks->info (_(" %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
      info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  info->callbacks->minfo (_(" calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted)
	      {
		const char *f2 = func_name (call->fun);
		/* '*' marks the worst-path callee, 't' a tail call.  */
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* 18 covers "__stack_" + up to 8 hex digits + '_' + NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return FALSE;

      /* Local functions get the section id in the name to avoid
	 clashes between same-named statics.  */
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      /* Only define the symbol if the user has not already done so.  */
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
3912 /* SEC is part of a pasted function. Return the call_info for the
3913 next section of this function. */
3915 static struct call_info *
3916 find_pasted_call (asection *sec)
3918 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
3919 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
3920 struct call_info *call;
3921 int k;
3923 for (k = 0; k < sinfo->num_fun; ++k)
3924 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
3925 if (call->is_pasted)
3926 return call;
3927 abort ();
3928 return 0;
3931 /* qsort predicate to sort bfds by file name. */
3933 static int
3934 sort_bfds (const void *a, const void *b)
3936 bfd *const *abfd1 = a;
3937 bfd *const *abfd2 = b;
3939 return strcmp ((*abfd1)->filename, (*abfd2)->filename);
/* Write to SCRIPT the input-section statements for every section of
   overlay OVLYNUM, starting at index BASE of the OVLY_MAP/
   OVLY_SECTIONS arrays.  Text sections are listed first, then their
   rodata companions, with pasted continuation sections kept next to
   their first section.  Returns the index of the first entry not
   belonging to OVLYNUM, or (unsigned int) -1 on write failure.  */

static unsigned int
print_one_overlay_section (FILE *script,
			   unsigned int base,
			   unsigned int count,
			   unsigned int ovlynum,
			   unsigned int *ovly_map,
			   asection **ovly_sections,
			   struct bfd_link_info *info)
{
  unsigned int j;

  /* First the code sections.  */
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
    {
      asection *sec = ovly_sections[2 * j];

      /* Emit "archive:object (section)" so the script matches the
	 exact input section; archive part is empty for loose files.  */
      if (fprintf (script, " %s%c%s (%s)\n",
		   (sec->owner->my_archive != NULL
		    ? sec->owner->my_archive->filename : ""),
		   info->path_separator,
		   sec->owner->filename,
		   sec->name) <= 0)
	return -1;
      if (sec->segment_mark)
	{
	  /* Follow the pasted chain, emitting each continuation
	     section immediately after its predecessor.  */
	  struct call_info *call = find_pasted_call (sec);
	  while (call != NULL)
	    {
	      struct function_info *call_fun = call->fun;
	      sec = call_fun->sec;
	      if (fprintf (script, " %s%c%s (%s)\n",
			   (sec->owner->my_archive != NULL
			    ? sec->owner->my_archive->filename : ""),
			   info->path_separator,
			   sec->owner->filename,
			   sec->name) <= 0)
		return -1;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  break;
	    }
	}
    }

  /* Then the rodata companions, in the same order.  */
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
    {
      asection *sec = ovly_sections[2 * j + 1];
      if (sec != NULL
	  && fprintf (script, " %s%c%s (%s)\n",
		      (sec->owner->my_archive != NULL
		       ? sec->owner->my_archive->filename : ""),
		      info->path_separator,
		      sec->owner->filename,
		      sec->name) <= 0)
	return -1;

      sec = ovly_sections[2 * j];
      if (sec->segment_mark)
	{
	  /* Rodata of pasted continuations follows too.  */
	  struct call_info *call = find_pasted_call (sec);
	  while (call != NULL)
	    {
	      struct function_info *call_fun = call->fun;
	      sec = call_fun->rodata;
	      if (sec != NULL
		  && fprintf (script, " %s%c%s (%s)\n",
			      (sec->owner->my_archive != NULL
			       ? sec->owner->my_archive->filename : ""),
			      info->path_separator,
			      sec->owner->filename,
			      sec->name) <= 0)
		return -1;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  break;
	    }
	}
    }

  return j;
}
/* Handle --auto-overlay.  Partition the marked code sections into
   overlay regions that fit local store, write a linker script
   describing the layout, then exit (optionally after triggering a
   relink).  This function never returns to the caller.  */

static void spu_elf_auto_overlay (struct bfd_link_info *)
     ATTRIBUTE_NORETURN;

static void
spu_elf_auto_overlay (struct bfd_link_info *info)
{
  bfd *ibfd;
  bfd **bfd_arr;
  struct elf_segment_map *m;
  unsigned int fixed_size, lo, hi;
  struct spu_link_hash_table *htab;
  unsigned int base, i, count, bfd_count;
  unsigned int region, ovlynum;
  asection **ovly_sections, **ovly_p;
  unsigned int *ovly_map;
  FILE *script;
  unsigned int total_overlay_size, overlay_size;
  const char *ovly_mgr_entry;
  struct elf_link_hash_entry *h;
  struct _mos_param mos_param;
  struct _uos_param uos_param;
  struct function_info dummy_caller;

  /* Find the extents of our loadable image.  */
  lo = (unsigned int) -1;
  hi = 0;
  for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0)
	  {
	    if (m->sections[i]->vma < lo)
	      lo = m->sections[i]->vma;
	    if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
	      hi = m->sections[i]->vma + m->sections[i]->size - 1;
	  }
  fixed_size = hi + 1 - lo;

  if (!discover_functions (info))
    goto err_exit;

  if (!build_call_tree (info))
    goto err_exit;

  uos_param.exclude_input_section = 0;
  uos_param.exclude_output_section
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");

  htab = spu_hash_table (info);
  ovly_mgr_entry = "__ovly_load";
  if (htab->params->ovly_flavour == ovly_soft_icache)
    ovly_mgr_entry = "__icache_br_handler";
  h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
			    FALSE, FALSE, FALSE);
  if (h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular)
    {
      /* We have a user supplied overlay manager.  */
      uos_param.exclude_input_section = h->root.u.def.section;
    }
  else
    {
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
	 builtin version to .text, and will adjust .text size.  */
      fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
    }

  /* Mark overlay sections, and find max overlay section size.  */
  mos_param.max_overlay_size = 0;
  if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
    goto err_exit;

  /* We can't put the overlay manager or interrupt routines in
     overlays.  */
  uos_param.clearing = 0;
  if ((uos_param.exclude_input_section
       || uos_param.exclude_output_section)
      && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
    goto err_exit;

  bfd_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    ++bfd_count;
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
  if (bfd_arr == NULL)
    goto err_exit;

  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
  count = 0;
  bfd_count = 0;
  total_overlay_size = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;
      unsigned int old_count;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      old_count = count;
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (sec->linker_mark)
	  {
	    if ((sec->flags & SEC_CODE) != 0)
	      count += 1;
	    fixed_size -= sec->size;
	    total_overlay_size += sec->size;
	  }
	else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
		 && sec->output_section->owner == info->output_bfd
		 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
	  /* .ovl.init sections are overlaid by the overlays, so they
	     don't occupy fixed space either.  */
	  fixed_size -= sec->size;
      /* Remember each bfd that contributed an overlay code section.  */
      if (count != old_count)
	bfd_arr[bfd_count++] = ibfd;
    }

  /* Since the overlay link script selects sections by file name and
     section name, ensure that file names are unique.  */
  if (bfd_count > 1)
    {
      bfd_boolean ok = TRUE;

      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
      for (i = 1; i < bfd_count; ++i)
	if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
	  {
	    if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
	      {
		if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
		  info->callbacks->einfo (_("%s duplicated in %s\n"),
					  bfd_arr[i]->filename,
					  bfd_arr[i]->my_archive->filename);
		else
		  info->callbacks->einfo (_("%s duplicated\n"),
					  bfd_arr[i]->filename);
		ok = FALSE;
	      }
	  }
      if (!ok)
	{
	  info->callbacks->einfo (_("sorry, no support for duplicate "
				    "object files in auto-overlay script\n"));
	  bfd_set_error (bfd_error_bad_value);
	  goto err_exit;
	}
    }
  free (bfd_arr);

  /* If the user gave no stack reservation, estimate it from the
     worst-case call-graph stack usage.  */
  if (htab->reserved == 0)
    {
      struct _sum_stack_param sum_stack_param;

      sum_stack_param.emit_stack_syms = 0;
      sum_stack_param.overall_stack = 0;
      if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
	goto err_exit;
      htab->reserved = sum_stack_param.overall_stack + htab->extra_stack_space;
    }
  fixed_size += htab->reserved;
  fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params->ovly_flavour);
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
    {
      if (htab->params->ovly_flavour == ovly_soft_icache)
	{
	  /* Stubs in the non-icache area are bigger.  */
	  fixed_size += htab->non_ovly_stub * 16;
	  /* Space for icache manager tables.
	     a) Tag array, one quadword per cache line.
	     - word 0: ia address of present line, init to zero.
	     - word 1: link locator.  link_elem=stub_addr/2+locator
	     - halfwords 4-7: head/tail pointers for linked lists.  */
	  fixed_size += 16 << htab->num_lines_log2;
	  /* b) Linked list elements, max_branch per line.  */
	  fixed_size += htab->params->max_branch << (htab->num_lines_log2 + 4);
	  /* c) Indirect branch descriptors, 8 quadwords.  */
	  fixed_size += 8 * 16;
	  /* d) Pointers to __ea backing store, 16 quadwords.  */
	  fixed_size += 16 * 16;
	}
      else
	{
	  /* Guess number of overlays.  Assuming overlay buffer is on
	     average only half full should be conservative.  */
	  ovlynum = (total_overlay_size * 2 * htab->params->num_lines
		     / (htab->local_store - fixed_size));
	  /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
	  fixed_size += ovlynum * 16 + 16 + 4 + 16;
	}
    }

  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
			      "size of 0x%v exceeds local store\n"),
			    (bfd_vma) fixed_size,
			    (bfd_vma) mos_param.max_overlay_size);

  /* Now see if we should put some functions in the non-overlay area.  */
  else if (fixed_size < htab->overlay_fixed)
    {
      unsigned int max_fixed, lib_size;

      max_fixed = htab->local_store - mos_param.max_overlay_size;
      if (max_fixed > htab->overlay_fixed)
	max_fixed = htab->overlay_fixed;
      lib_size = max_fixed - fixed_size;
      lib_size = auto_ovl_lib_functions (info, lib_size);
      if (lib_size == (unsigned int) -1)
	goto err_exit;
      fixed_size = max_fixed - lib_size;
    }

  /* Build an array of sections, suitably sorted to place into
     overlays.  */
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
  if (ovly_sections == NULL)
    goto err_exit;
  ovly_p = ovly_sections;
  if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
    goto err_exit;
  count = (size_t) (ovly_p - ovly_sections) / 2;
  ovly_map = bfd_malloc (count * sizeof (*ovly_map));
  if (ovly_map == NULL)
    goto err_exit;

  memset (&dummy_caller, 0, sizeof (dummy_caller));
  overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
  if (htab->params->line_size != 0)
    overlay_size = htab->params->line_size;
  base = 0;
  ovlynum = 0;
  /* Greedily pack consecutive sections into overlay buffers.  */
  while (base < count)
    {
      unsigned int size = 0;

      for (i = base; i < count; i++)
	{
	  asection *sec;
	  unsigned int tmp;
	  unsigned int num_stubs;
	  struct call_info *call, *pasty;
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;
	  int k;

	  /* See whether we can add this section to the current
	     overlay without overflowing our overlay buffer.  */
	  sec = ovly_sections[2 * i];
	  tmp = size + sec->size;
	  if (ovly_sections[2 * i + 1])
	    tmp += ovly_sections[2 * i + 1]->size;
	  if (tmp > overlay_size)
	    break;
	  if (sec->segment_mark)
	    {
	      /* Pasted sections must stay together, so add their
		 sizes too.  */
	      struct call_info *pasty = find_pasted_call (sec);
	      while (pasty != NULL)
		{
		  struct function_info *call_fun = pasty->fun;
		  tmp += call_fun->sec->size;
		  if (call_fun->rodata)
		    tmp += call_fun->rodata->size;
		  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
		    if (pasty->is_pasted)
		      break;
		}
	    }
	  if (tmp > overlay_size)
	    break;

	  /* If we add this section, we might need new overlay call
	     stubs.  Add any overlay section calls to dummy_call.  */
	  pasty = NULL;
	  sec_data = spu_elf_section_data (sec);
	  sinfo = sec_data->u.i.stack_info;
	  for (k = 0; k < sinfo->num_fun; ++k)
	    for (call = sinfo->fun[k].call_list; call; call = call->next)
	      if (call->is_pasted)
		{
		  BFD_ASSERT (pasty == NULL);
		  pasty = call;
		}
	      else if (call->fun->sec->linker_mark)
		{
		  if (!copy_callee (&dummy_caller, call))
		    goto err_exit;
		}
	  /* Include the callees of pasted continuations as well.  */
	  while (pasty != NULL)
	    {
	      struct function_info *call_fun = pasty->fun;
	      pasty = NULL;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  {
		    BFD_ASSERT (pasty == NULL);
		    pasty = call;
		  }
		else if (!copy_callee (&dummy_caller, call))
		  goto err_exit;
	    }

	  /* Calculate call stub size.  */
	  num_stubs = 0;
	  for (call = dummy_caller.call_list; call; call = call->next)
	    {
	      unsigned int k;

	      ++num_stubs;
	      /* If the call is within this overlay, we won't need a
		 stub.  */
	      for (k = base; k < i + 1; k++)
		if (call->fun->sec == ovly_sections[2 * k])
		  {
		    --num_stubs;
		    break;
		  }
	    }
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && num_stubs > htab->params->max_branch)
	    break;
	  if (tmp + num_stubs * ovl_stub_size (htab->params->ovly_flavour)
	      > overlay_size)
	    break;
	  size = tmp;
	}

      /* Not even the first section fitted: it can never be placed.  */
      if (i == base)
	{
	  info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
				  ovly_sections[2 * i]->owner,
				  ovly_sections[2 * i],
				  ovly_sections[2 * i + 1] ? " + rodata" : "");
	  bfd_set_error (bfd_error_bad_value);
	  goto err_exit;
	}

      /* Discard the callee list accumulated for this overlay.  */
      while (dummy_caller.call_list != NULL)
	{
	  struct call_info *call = dummy_caller.call_list;
	  dummy_caller.call_list = call->next;
	  free (call);
	}

      /* Record the overlay number of each placed section.  */
      ++ovlynum;
      while (base < i)
	ovly_map[base++] = ovlynum;
    }

  script = htab->params->spu_elf_open_overlay_script ();

  if (fprintf (script, "SECTIONS\n{\n") <= 0)
    goto file_err;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      if (fprintf (script,
		   " .data.icache ALIGN (16) : { *(.ovtab) *(.data.icache) }\n"
		   " . = ALIGN (%u);\n"
		   " .ovl.init : { *(.ovl.init) }\n"
		   " . = ABSOLUTE (ADDR (.ovl.init));\n",
		   htab->params->line_size) <= 0)
	goto file_err;

      base = 0;
      ovlynum = 1;
      while (base < count)
	{
	  /* Each overlay occupies one cache line; vma wraps around
	     the line buffer while lma advances monotonically.  */
	  unsigned int indx = ovlynum - 1;
	  unsigned int vma, lma;

	  vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
	  lma = indx << htab->line_size_log2;

	  if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
		       ": AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16) + %u) {\n",
		       ovlynum, vma, lma) <= 0)
	    goto file_err;

	  base = print_one_overlay_section (script, base, count, ovlynum,
					    ovly_map, ovly_sections, info);
	  if (base == (unsigned) -1)
	    goto file_err;

	  if (fprintf (script, " }\n") <= 0)
	    goto file_err;

	  ovlynum++;
	}

      if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
		   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
	goto file_err;
    }
  else
    {
      if (fprintf (script,
		   " . = ALIGN (16);\n"
		   " .ovl.init : { *(.ovl.init) }\n"
		   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
	goto file_err;

      /* Emit one OVERLAY statement per buffer region; overlay numbers
	 within a region step by num_lines.  */
      for (region = 1; region <= htab->params->num_lines; region++)
	{
	  ovlynum = region;
	  base = 0;
	  while (base < count && ovly_map[base] < ovlynum)
	    base++;

	  if (base == count)
	    break;

	  if (region == 1)
	    {
	      /* We need to set lma since we are overlaying .ovl.init.  */
	      if (fprintf (script,
			   " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
		goto file_err;
	    }
	  else
	    {
	      if (fprintf (script, " OVERLAY :\n {\n") <= 0)
		goto file_err;
	    }

	  while (base < count)
	    {
	      if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
		goto file_err;

	      base = print_one_overlay_section (script, base, count, ovlynum,
						ovly_map, ovly_sections, info);
	      if (base == (unsigned) -1)
		goto file_err;

	      if (fprintf (script, " }\n") <= 0)
		goto file_err;

	      ovlynum += htab->params->num_lines;
	      while (base < count && ovly_map[base] < ovlynum)
		base++;
	    }

	  if (fprintf (script, " }\n") <= 0)
	    goto file_err;
	}
    }

  free (ovly_map);
  free (ovly_sections);

  if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
    goto file_err;
  if (fclose (script) != 0)
    goto file_err;

  if (htab->params->auto_overlay & AUTO_RELINK)
    (*htab->params->spu_elf_relink) ();

  xexit (0);

 file_err:
  bfd_set_error (bfd_error_system_call);
 err_exit:
  info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
  xexit (1);
}
4497 /* Provide an estimate of total stack required. */
4499 static bfd_boolean
4500 spu_elf_stack_analysis (struct bfd_link_info *info)
4502 struct spu_link_hash_table *htab;
4503 struct _sum_stack_param sum_stack_param;
4505 if (!discover_functions (info))
4506 return FALSE;
4508 if (!build_call_tree (info))
4509 return FALSE;
4511 htab = spu_hash_table (info);
4512 if (htab->params->stack_analysis)
4514 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4515 info->callbacks->minfo (_("\nStack size for functions. "
4516 "Annotations: '*' max stack, 't' tail call\n"));
4519 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4520 sum_stack_param.overall_stack = 0;
4521 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4522 return FALSE;
4524 if (htab->params->stack_analysis)
4525 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4526 (bfd_vma) sum_stack_param.overall_stack);
4527 return TRUE;
4530 /* Perform a final link. */
4532 static bfd_boolean
4533 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4535 struct spu_link_hash_table *htab = spu_hash_table (info);
4537 if (htab->params->auto_overlay)
4538 spu_elf_auto_overlay (info);
4540 if ((htab->params->stack_analysis
4541 || (htab->params->ovly_flavour == ovly_soft_icache
4542 && htab->params->lrlive_analysis))
4543 && !spu_elf_stack_analysis (info))
4544 info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4546 if (!spu_elf_build_stubs (info))
4547 info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4549 return bfd_elf_final_link (output_bfd, info);
4552 /* Called when not normally emitting relocs, ie. !info->relocatable
4553 and !info->emitrelocations. Returns a count of special relocs
4554 that need to be emitted. */
4556 static unsigned int
4557 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4559 Elf_Internal_Rela *relocs;
4560 unsigned int count = 0;
4562 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4563 info->keep_memory);
4564 if (relocs != NULL)
4566 Elf_Internal_Rela *rel;
4567 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4569 for (rel = relocs; rel < relend; rel++)
4571 int r_type = ELF32_R_TYPE (rel->r_info);
4572 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4573 ++count;
4576 if (elf_section_data (sec)->relocs != relocs)
4577 free (relocs);
4580 return count;
4583 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4585 static int
4586 spu_elf_relocate_section (bfd *output_bfd,
4587 struct bfd_link_info *info,
4588 bfd *input_bfd,
4589 asection *input_section,
4590 bfd_byte *contents,
4591 Elf_Internal_Rela *relocs,
4592 Elf_Internal_Sym *local_syms,
4593 asection **local_sections)
4595 Elf_Internal_Shdr *symtab_hdr;
4596 struct elf_link_hash_entry **sym_hashes;
4597 Elf_Internal_Rela *rel, *relend;
4598 struct spu_link_hash_table *htab;
4599 asection *ea;
4600 int ret = TRUE;
4601 bfd_boolean emit_these_relocs = FALSE;
4602 bfd_boolean is_ea_sym;
4603 bfd_boolean stubs;
4604 unsigned int iovl = 0;
4606 htab = spu_hash_table (info);
4607 stubs = (htab->stub_sec != NULL
4608 && maybe_needs_stubs (input_section));
4609 iovl = overlay_index (input_section);
4610 ea = bfd_get_section_by_name (output_bfd, "._ea");
4611 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4612 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4614 rel = relocs;
4615 relend = relocs + input_section->reloc_count;
4616 for (; rel < relend; rel++)
4618 int r_type;
4619 reloc_howto_type *howto;
4620 unsigned int r_symndx;
4621 Elf_Internal_Sym *sym;
4622 asection *sec;
4623 struct elf_link_hash_entry *h;
4624 const char *sym_name;
4625 bfd_vma relocation;
4626 bfd_vma addend;
4627 bfd_reloc_status_type r;
4628 bfd_boolean unresolved_reloc;
4629 bfd_boolean warned;
4630 bfd_boolean overlay_encoded;
4631 enum _stub_type stub_type;
4633 r_symndx = ELF32_R_SYM (rel->r_info);
4634 r_type = ELF32_R_TYPE (rel->r_info);
4635 howto = elf_howto_table + r_type;
4636 unresolved_reloc = FALSE;
4637 warned = FALSE;
4638 h = NULL;
4639 sym = NULL;
4640 sec = NULL;
4641 if (r_symndx < symtab_hdr->sh_info)
4643 sym = local_syms + r_symndx;
4644 sec = local_sections[r_symndx];
4645 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4646 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4648 else
4650 if (sym_hashes == NULL)
4651 return FALSE;
4653 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4655 while (h->root.type == bfd_link_hash_indirect
4656 || h->root.type == bfd_link_hash_warning)
4657 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4659 relocation = 0;
4660 if (h->root.type == bfd_link_hash_defined
4661 || h->root.type == bfd_link_hash_defweak)
4663 sec = h->root.u.def.section;
4664 if (sec == NULL
4665 || sec->output_section == NULL)
4666 /* Set a flag that will be cleared later if we find a
4667 relocation value for this symbol. output_section
4668 is typically NULL for symbols satisfied by a shared
4669 library. */
4670 unresolved_reloc = TRUE;
4671 else
4672 relocation = (h->root.u.def.value
4673 + sec->output_section->vma
4674 + sec->output_offset);
4676 else if (h->root.type == bfd_link_hash_undefweak)
4678 else if (info->unresolved_syms_in_objects == RM_IGNORE
4679 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4681 else if (!info->relocatable
4682 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4684 bfd_boolean err;
4685 err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4686 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4687 if (!info->callbacks->undefined_symbol (info,
4688 h->root.root.string,
4689 input_bfd,
4690 input_section,
4691 rel->r_offset, err))
4692 return FALSE;
4693 warned = TRUE;
4695 sym_name = h->root.root.string;
4698 if (sec != NULL && elf_discarded_section (sec))
4700 /* For relocs against symbols from removed linkonce sections,
4701 or sections discarded by a linker script, we just want the
4702 section contents zeroed. Avoid any special processing. */
4703 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
4704 rel->r_info = 0;
4705 rel->r_addend = 0;
4706 continue;
4709 if (info->relocatable)
4710 continue;
4712 is_ea_sym = (ea != NULL
4713 && sec != NULL
4714 && sec->output_section == ea);
4715 overlay_encoded = FALSE;
4717 /* If this symbol is in an overlay area, we may need to relocate
4718 to the overlay stub. */
4719 addend = rel->r_addend;
4720 if (stubs
4721 && !is_ea_sym
4722 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4723 contents, info)) != no_stub)
4725 unsigned int ovl = 0;
4726 struct got_entry *g, **head;
4728 if (stub_type != nonovl_stub)
4729 ovl = iovl;
4731 if (h != NULL)
4732 head = &h->got.glist;
4733 else
4734 head = elf_local_got_ents (input_bfd) + r_symndx;
4736 for (g = *head; g != NULL; g = g->next)
4737 if (htab->params->ovly_flavour == ovly_soft_icache
4738 ? g->br_addr == (rel->r_offset
4739 + input_section->output_offset
4740 + input_section->output_section->vma)
4741 : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4742 break;
4743 if (g == NULL)
4744 abort ();
4746 relocation = g->stub_addr;
4747 addend = 0;
4749 else
4751 /* For soft icache, encode the overlay index into addresses. */
4752 if (htab->params->ovly_flavour == ovly_soft_icache
4753 && !is_ea_sym)
4755 unsigned int ovl = overlay_index (sec);
4756 if (ovl != 0)
4758 unsigned int set_id = (ovl - 1) >> htab->num_lines_log2;
4759 relocation += set_id << 18;
4760 overlay_encoded = set_id != 0;
4765 if (unresolved_reloc)
4767 else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4769 if (is_ea_sym)
4771 /* ._ea is a special section that isn't allocated in SPU
4772 memory, but rather occupies space in PPU memory as
4773 part of an embedded ELF image. If this reloc is
4774 against a symbol defined in ._ea, then transform the
4775 reloc into an equivalent one without a symbol
4776 relative to the start of the ELF image. */
4777 rel->r_addend += (relocation
4778 - ea->vma
4779 + elf_section_data (ea)->this_hdr.sh_offset);
4780 rel->r_info = ELF32_R_INFO (0, r_type);
4782 emit_these_relocs = TRUE;
4783 continue;
4785 else if (is_ea_sym)
4786 unresolved_reloc = TRUE;
4788 if (unresolved_reloc)
4790 (*_bfd_error_handler)
4791 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4792 input_bfd,
4793 bfd_get_section_name (input_bfd, input_section),
4794 (long) rel->r_offset,
4795 howto->name,
4796 sym_name);
4797 ret = FALSE;
4800 r = _bfd_final_link_relocate (howto,
4801 input_bfd,
4802 input_section,
4803 contents,
4804 rel->r_offset, relocation, addend);
4806 if (r != bfd_reloc_ok)
4808 const char *msg = (const char *) 0;
4810 switch (r)
4812 case bfd_reloc_overflow:
4813 /* FIXME: We don't want to warn on most references
4814 within an overlay to itself, but this may silence a
4815 warning that should be reported. */
4816 if (overlay_encoded && sec == input_section)
4817 break;
4818 if (!((*info->callbacks->reloc_overflow)
4819 (info, (h ? &h->root : NULL), sym_name, howto->name,
4820 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
4821 return FALSE;
4822 break;
4824 case bfd_reloc_undefined:
4825 if (!((*info->callbacks->undefined_symbol)
4826 (info, sym_name, input_bfd, input_section,
4827 rel->r_offset, TRUE)))
4828 return FALSE;
4829 break;
4831 case bfd_reloc_outofrange:
4832 msg = _("internal error: out of range error");
4833 goto common_error;
4835 case bfd_reloc_notsupported:
4836 msg = _("internal error: unsupported relocation error");
4837 goto common_error;
4839 case bfd_reloc_dangerous:
4840 msg = _("internal error: dangerous error");
4841 goto common_error;
4843 default:
4844 msg = _("internal error: unknown error");
4845 /* fall through */
4847 common_error:
4848 ret = FALSE;
4849 if (!((*info->callbacks->warning)
4850 (info, msg, sym_name, input_bfd, input_section,
4851 rel->r_offset)))
4852 return FALSE;
4853 break;
4858 if (ret
4859 && emit_these_relocs
4860 && !info->emitrelocations)
4862 Elf_Internal_Rela *wrel;
4863 Elf_Internal_Shdr *rel_hdr;
4865 wrel = rel = relocs;
4866 relend = relocs + input_section->reloc_count;
4867 for (; rel < relend; rel++)
4869 int r_type;
4871 r_type = ELF32_R_TYPE (rel->r_info);
4872 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4873 *wrel++ = *rel;
4875 input_section->reloc_count = wrel - relocs;
4876 /* Backflips for _bfd_elf_link_output_relocs. */
4877 rel_hdr = &elf_section_data (input_section)->rel_hdr;
4878 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
4879 ret = 2;
4882 return ret;
4885 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4887 static bfd_boolean
4888 spu_elf_output_symbol_hook (struct bfd_link_info *info,
4889 const char *sym_name ATTRIBUTE_UNUSED,
4890 Elf_Internal_Sym *sym,
4891 asection *sym_sec ATTRIBUTE_UNUSED,
4892 struct elf_link_hash_entry *h)
4894 struct spu_link_hash_table *htab = spu_hash_table (info);
4896 if (!info->relocatable
4897 && htab->stub_sec != NULL
4898 && h != NULL
4899 && (h->root.type == bfd_link_hash_defined
4900 || h->root.type == bfd_link_hash_defweak)
4901 && h->def_regular
4902 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
4904 struct got_entry *g;
4906 for (g = h->got.glist; g != NULL; g = g->next)
4907 if (htab->params->ovly_flavour == ovly_soft_icache
4908 ? g->br_addr == g->stub_addr
4909 : g->addend == 0 && g->ovl == 0)
4911 sym->st_shndx = (_bfd_elf_section_from_bfd_section
4912 (htab->stub_sec[0]->output_section->owner,
4913 htab->stub_sec[0]->output_section));
4914 sym->st_value = g->stub_addr;
4915 break;
4919 return TRUE;
/* Non-zero when building a CellBE PPU plugin image.  */
static int spu_plugin = 0;

/* Record whether we are producing a plugin; consulted when the ELF
   header is finalized.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
4930 /* Set ELF header e_type for plugins. */
4932 static void
4933 spu_elf_post_process_headers (bfd *abfd,
4934 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4936 if (spu_plugin)
4938 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
4940 i_ehdrp->e_type = ET_DYN;
4944 /* We may add an extra PT_LOAD segment for .toe. We also need extra
4945 segments for overlays. */
4947 static int
4948 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
4950 int extra = 0;
4951 asection *sec;
4953 if (info != NULL)
4955 struct spu_link_hash_table *htab = spu_hash_table (info);
4956 extra = htab->num_overlays;
4959 if (extra)
4960 ++extra;
4962 sec = bfd_get_section_by_name (abfd, ".toe");
4963 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
4964 ++extra;
4966 return extra;
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  /* Scan multi-section PT_LOAD maps for .toe or an overlay section.  */
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Split off the sections following S into a new PT_LOAD
	       map inserted after M.  */
	    if (i + 1 < m->count)
	      {
		/* elf_segment_map has one trailing section slot built
		   in, hence the (i + 2) in the size calculation.  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* If S was first, M now holds just S.  */
	    m->count = 1;
	    if (i != 0)
	      {
		/* Otherwise M keeps the sections before S, and S gets
		   a fresh single-section PT_LOAD map of its own.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* The outer loop will revisit the newly created maps via
	       m->next, so stop scanning this one.  */
	    break;
	  }

  return TRUE;
}
5026 /* Tweak the section type of .note.spu_name. */
5028 static bfd_boolean
5029 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5030 Elf_Internal_Shdr *hdr,
5031 asection *sec)
5033 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5034 hdr->sh_type = SHT_NOTE;
5035 return TRUE;
/* Tweak phdrs before writing them out.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Walk segment maps in parallel with the phdr array; I indexes
	 the corresponding program header.  */
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0
		&& htab->params->ovly_flavour != ovly_soft_icache)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Each _ovly_table entry is 16 bytes; file_off lives at
		   offset 8 within entry O (1-based).  */
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
      /* Soft-icache has its file offset put in .ovl.init.  */
      if (htab->init != NULL && htab->init->size != 0)
	{
	  bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	  bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	}
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  /* First pass (highest address downwards): verify that rounding each
     PT_LOAD up would not overlap the following one; break early (and
     thus skip the second pass) if it would.  */
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* Bytes needed to round p_filesz up to a multiple of 16.  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* I wrapped to (unsigned int) -1 only if the first pass completed
     without finding an overlap; in that case apply the rounding.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
/* Definitions for the generic ELF target vector in elf32-target.h.  */

#define TARGET_BIG_SYM bfd_elf32_spu_vec
#define TARGET_BIG_NAME "elf32-spu"
#define ELF_ARCH bfd_arch_spu
#define ELF_MACHINE_CODE EM_SPU
/* This matches the alignment need for DMA. */
#define ELF_MAXPAGESIZE 0x80
#define elf_backend_rela_normal 1
#define elf_backend_can_gc_sections 1

/* SPU-specific relocation handling.  */
#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto spu_elf_info_to_howto
#define elf_backend_count_relocs spu_elf_count_relocs
#define elf_backend_relocate_section spu_elf_relocate_section
#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define elf_backend_object_p spu_elf_object_p
#define bfd_elf32_new_section_hook spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create

/* Overlay/segment layout hooks.  */
#define elf_backend_additional_program_headers spu_elf_additional_program_headers
#define elf_backend_modify_segment_map spu_elf_modify_segment_map
#define elf_backend_modify_program_headers spu_elf_modify_program_headers
#define elf_backend_post_process_headers spu_elf_post_process_headers
#define elf_backend_fake_sections spu_elf_fake_sections
#define elf_backend_special_sections spu_elf_special_sections
#define bfd_elf32_bfd_final_link spu_elf_final_link

#include "elf32-target.h"