/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

#include "sysdep.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
                                           void *, asection *,
                                           bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_NONE",
         FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR10",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16_HI",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR16_LO",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR18",
         FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_REL16",
         FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR7",
         FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9",
         FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9I",
         FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR10I",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR16I",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32,  TRUE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_REL32",
         FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16X",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",
         FALSE, 0, -1, FALSE),
};

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};

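/* Convert a BFD reloc code to the corresponding SPU ELF reloc type.  */
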
static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    }
}

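/* Set the howto pointer for an SPU ELF reloc.  */
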
static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
                       arelent *cache_ptr,
                       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}

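/* Return the howto for BFD reloc code CODE, or NULL if there is no
   corresponding SPU reloc.  */
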
static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}

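/* Return the howto whose name matches R_NAME, or NULL.  */
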
static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}

/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
              void *data, asection *input_section,
              bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
                                  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
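  /* From the dst_mask values in elf_howto_table: both forms keep the low
     seven bits of the value at bits 0-6; R_SPU_REL9 puts the top two bits
     at bits 23-24 (0x01800000) while R_SPU_REL9I puts them at bits 14-15
     (0x0000c000), so the value is replicated into both positions and the
     mask picks out the right one.  */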
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}

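/* Attach SPU-specific section data (the overlay index and stack-analysis
   info) to SEC.  */
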
static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
        return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}

/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}

/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Sorted array of stubs.  */
  struct {
    struct spu_stub_hash_entry **sh;
    unsigned int count;
    int err;
  } stubs;

  /* Shortcuts to overlay sections.  */
  asection *stub;
  asection *ovtab;

  struct elf_link_hash_entry *ovly_load;
  unsigned long ovly_load_r_symndx;

  /* An array of two output sections per overlay region, chosen such that
     the first section vma is the overlay buffer vma (ie. the section has
     the lowest vma in the group that occupies the region), and the second
     section vma+size specifies the end of the region.  We keep pointers
     to sections like this because section vmas may change when laying
     them out.  */
  asection **ovl_region;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms : 1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  /* Set on error.  */
  unsigned int stub_overflow : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};

#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))

struct spu_stub_hash_entry
{
  struct bfd_hash_entry root;

  /* Destination of this stub.  */
  asection *target_section;
  bfd_vma target_off;

  /* Offset of entry in stub section.  */
  bfd_vma off;

  /* Offset from this stub to stub that loads the overlay index.  */
  bfd_vma delta;
};

/* Create an entry in a spu stub hash table.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
                   struct bfd_hash_table *table,
                   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = bfd_hash_allocate (table, sizeof (struct spu_stub_hash_entry));
      if (entry == NULL)
        return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct spu_stub_hash_entry *sh = (struct spu_stub_hash_entry *) entry;

      sh->target_section = NULL;
      sh->target_off = 0;
      sh->off = 0;
      sh->delta = 0;
    }

  return entry;
}

/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
                                      _bfd_elf_link_hash_newfunc,
                                      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  /* Init the stub hash table too.  */
  if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc,
                            sizeof (struct spu_stub_hash_entry)))
    return NULL;

  memset (&htab->stubs, 0,
          sizeof (*htab) - offsetof (struct spu_link_hash_table, stubs));

  return &htab->elf.root;
}

/* Free the derived linker hash table.  */

static void
spu_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
{
  struct spu_link_hash_table *ret = (struct spu_link_hash_table *) hash;

  bfd_hash_table_free (&ret->stub_hash_table);
  _bfd_generic_link_hash_table_free (hash);
}

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
           Elf_Internal_Sym **symp,
           asection **symsecp,
           Elf_Internal_Sym **locsymsp,
           unsigned long r_symndx,
           bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
        *hp = h;

      if (symp != NULL)
        *symp = NULL;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            symsec = h->root.u.def.section;
          *symsecp = symsec;
        }
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
        {
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          if (locsyms == NULL)
            {
              size_t symcount = symtab_hdr->sh_info;

              /* If we are reading symbols into the contents, then
                 read the global syms too.  This is done to cache
                 syms for later stack analysis.  */
              if ((unsigned char **) locsymsp == &symtab_hdr->contents)
                symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
              locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
                                              NULL, NULL, NULL);
            }
          if (locsyms == NULL)
            return FALSE;
          *locsymsp = locsyms;
        }
      sym = locsyms + r_symndx;

      if (hp != NULL)
        *hp = NULL;

      if (symp != NULL)
        *symp = sym;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if ((sym->st_shndx != SHN_UNDEF
               && sym->st_shndx < SHN_LORESERVE)
              || sym->st_shndx > SHN_HIRESERVE)
            symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
          *symsecp = symsec;
        }
    }

  return TRUE;
}

/* Build a name for an entry in the stub hash table.  We can't use a
   local symbol name because ld -r might generate duplicate local symbols.  */

static char *
spu_stub_name (const asection *sym_sec,
               const struct elf_link_hash_entry *h,
               const Elf_Internal_Rela *rel)
{
  char *stub_name;
  bfd_size_type len;

  if (h)
    {
      len = strlen (h->root.root.string) + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
        return stub_name;

      sprintf (stub_name, "%s+%x",
               h->root.root.string,
               (int) rel->r_addend & 0xffffffff);
      len -= 8;
    }
  else
    {
      len = 8 + 1 + 8 + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
        return stub_name;

      sprintf (stub_name, "%x:%x+%x",
               sym_sec->id & 0xffffffff,
               (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
               (int) rel->r_addend & 0xffffffff);
      len = strlen (stub_name);
    }

  if (stub_name[len - 2] == '+'
      && stub_name[len - 1] == '0'
      && stub_name[len] == 0)
    stub_name[len - 2] = 0;

  return stub_name;
}

/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (bfd *output_bfd,
                         struct bfd_link_info *info,
                         int stack_analysis,
                         int emit_stack_syms)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
          || !bfd_set_section_alignment (ibfd, s, 4))
        return FALSE;

      name_len = strlen (bfd_get_filename (output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
        return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
        return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}

/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}

/* Identify overlays in the output bfd, and number them.  */

bfd_boolean
spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;

  if (output_bfd->section_count < 2)
    return FALSE;

  alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return FALSE;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return FALSE;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  /* Look for overlapping vmas.  Any with overlap must be overlays.
     Count them.  Also count the number of overlay regions and for
     each region save a section from that region with the lowest vma
     and another section with the highest end vma.  */
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
    {
      s = alloc_sec[i];
      if (s->vma < ovl_end)
        {
          asection *s0 = alloc_sec[i - 1];

          if (spu_elf_section_data (s0)->ovl_index == 0)
            {
              spu_elf_section_data (s0)->ovl_index = ++ovl_index;
              alloc_sec[num_buf * 2] = s0;
              alloc_sec[num_buf * 2 + 1] = s0;
              num_buf++;
            }
          spu_elf_section_data (s)->ovl_index = ++ovl_index;
          if (ovl_end < s->vma + s->size)
            {
              ovl_end = s->vma + s->size;
              alloc_sec[num_buf * 2 - 1] = s;
            }
        }
      else
        ovl_end = s->vma + s->size;
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  if (ovl_index == 0)
    {
      free (alloc_sec);
      return FALSE;
    }

  alloc_sec = bfd_realloc (alloc_sec, num_buf * 2 * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return FALSE;

  htab->ovl_region = alloc_sec;
  return TRUE;
}

/* One of these per stub.  */
#define SIZEOF_STUB1 8
#define ILA_79	0x4200004f	/* ila $79,function_address */
#define BR	0x32000000	/* br stub2 */

/* One of these per overlay.  */
#define SIZEOF_STUB2 8
#define ILA_78	0x4200004e	/* ila $78,overlay_number */
				/* br __ovly_load */
#define NOP	0x40200000

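/* In the stub instruction words above, the opcode sits in the high bits
   and the target register in the low seven bits ($79 is 0x4f, $78 is
   0x4e).  "ila" takes an 18-bit immediate at bits 7-24 and "br" a 16-bit
   word offset at bits 7-22, which is why write_one_stub below inserts
   values with (val << 7) & 0x01ffff80 and (val << 5) & 0x007fff80.  */
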
/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra  0001000..
   hbrr  0001001..  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}

/* Return TRUE if this reloc symbol should possibly go via an overlay stub.  */

static bfd_boolean
needs_ovl_stub (const char *sym_name,
                asection *sym_sec,
                asection *input_section,
                struct spu_link_hash_table *htab,
                bfd_boolean is_branch)
{
  if (htab->num_overlays == 0)
    return FALSE;

  if (sym_sec == NULL
      || sym_sec->output_section == NULL
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return FALSE;

  /* setjmp always goes via an overlay stub, because then the return
     and hence the longjmp goes via __ovly_return.  That magically
     makes setjmp/longjmp between overlays work.  */
  if (strncmp (sym_name, "setjmp", 6) == 0
      && (sym_name[6] == '\0' || sym_name[6] == '@'))
    return TRUE;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->ovl_index == 0
      && !htab->non_overlay_stubs)
    return FALSE;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->ovl_index
      != spu_elf_section_data (input_section->output_section)->ovl_index)
    return TRUE;

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  */
  return !is_branch;
}

/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct spu_link_hash_table *htab = inf;
      static Elf_Internal_Rela zero_rel;
      char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
      struct spu_stub_hash_entry *sh;

      if (stub_name == NULL)
        {
          htab->stubs.err = 1;
          return FALSE;
        }

      sh = (struct spu_stub_hash_entry *)
        bfd_hash_lookup (&htab->stub_hash_table, stub_name, TRUE, FALSE);
      if (sh == NULL)
        {
          free (stub_name);
          return FALSE;
        }

      /* If this entry isn't new, we already have a stub.  */
      if (sh->target_section != NULL)
        {
          free (stub_name);
          return TRUE;
        }

      sh->target_section = h->root.u.def.section;
      sh->target_off = h->root.u.def.value;
      htab->stubs.count += 1;
    }

  return TRUE;
}

/* Called via bfd_hash_traverse to set up pointers to all symbols
   in the stub hash table.  */

static bfd_boolean
populate_stubs (struct bfd_hash_entry *bh, void *inf)
{
  struct spu_link_hash_table *htab = inf;

  htab->stubs.sh[--htab->stubs.count] = (struct spu_stub_hash_entry *) bh;
  return TRUE;
}

/* qsort predicate to sort stubs by overlay number.  */

static int
sort_stubs (const void *a, const void *b)
{
  const struct spu_stub_hash_entry *const *sa = a;
  const struct spu_stub_hash_entry *const *sb = b;
  int i;
  bfd_signed_vma d;

  i = spu_elf_section_data ((*sa)->target_section->output_section)->ovl_index;
  i -= spu_elf_section_data ((*sb)->target_section->output_section)->ovl_index;
  if (i != 0)
    return i;

  d = ((*sa)->target_section->output_section->vma
       + (*sa)->target_section->output_offset
       + (*sa)->target_off
       - (*sb)->target_section->output_section->vma
       - (*sb)->target_section->output_offset
       - (*sb)->target_off);
  if (d != 0)
    return d < 0 ? -1 : 1;

  /* Two functions at the same address.  Aliases perhaps.  */
  i = strcmp ((*sb)->root.string, (*sa)->root.string);
  BFD_ASSERT (i != 0);
  return i;
}

/* Allocate space for overlay call and return stubs.  */

bfd_boolean
spu_elf_size_stubs (bfd *output_bfd,
                    struct bfd_link_info *info,
                    int non_overlay_stubs,
                    int stack_analysis,
                    asection **stub,
                    asection **ovtab,
                    asection **toe)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;
  unsigned i, group;
  flagword flags;

  htab->non_overlay_stubs = non_overlay_stubs;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *section;
      Elf_Internal_Sym *local_syms = NULL;
      void *psyms;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
        continue;

      /* Arrange to read and keep global syms for later stack analysis.  */
      psyms = &local_syms;
      if (stack_analysis)
        psyms = &symtab_hdr->contents;

      /* Walk over each section attached to the input bfd.  */
      for (section = ibfd->sections; section != NULL; section = section->next)
        {
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

          /* If there aren't any relocs, then there's nothing more to do.  */
          if ((section->flags & SEC_RELOC) == 0
              || (section->flags & SEC_ALLOC) == 0
              || (section->flags & SEC_LOAD) == 0
              || section->reloc_count == 0)
            continue;

          /* If this section is a link-once section that will be
             discarded, then don't create any stubs.  */
          if (section->output_section == NULL
              || section->output_section->owner != output_bfd)
            continue;

          /* Get the relocs.  */
          internal_relocs
            = _bfd_elf_link_read_relocs (ibfd, section, NULL, NULL,
                                         info->keep_memory);
          if (internal_relocs == NULL)
            goto error_ret_free_local;

          /* Now examine each relocation.  */
          irela = internal_relocs;
          irelaend = irela + section->reloc_count;
          for (; irela < irelaend; irela++)
            {
              enum elf_spu_reloc_type r_type;
              unsigned int r_indx;
              asection *sym_sec;
              Elf_Internal_Sym *sym;
              struct elf_link_hash_entry *h;
              const char *sym_name;
              char *stub_name;
              struct spu_stub_hash_entry *sh;
              unsigned int sym_type;
              enum _insn_type { non_branch, branch, call } insn_type;

              r_type = ELF32_R_TYPE (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);
              if (r_type >= R_SPU_max)
                {
                  bfd_set_error (bfd_error_bad_value);
                  goto error_ret_free_internal;
                }

              /* Determine the reloc target section.  */
              if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
                goto error_ret_free_internal;

              if (sym_sec == NULL
                  || sym_sec->output_section == NULL
                  || sym_sec->output_section->owner != output_bfd)
                continue;

              /* Ensure no stubs for user supplied overlay manager syms.  */
              if (h != NULL
                  && (strcmp (h->root.root.string, "__ovly_load") == 0
                      || strcmp (h->root.root.string, "__ovly_return") == 0))
                continue;

              insn_type = non_branch;
              if (r_type == R_SPU_REL16
                  || r_type == R_SPU_ADDR16)
                {
                  unsigned char insn[4];

                  if (!bfd_get_section_contents (ibfd, section, insn,
                                                 irela->r_offset, 4))
                    goto error_ret_free_internal;

                  if (is_branch (insn) || is_hint (insn))
                    {
                      insn_type = branch;
                      if ((insn[0] & 0xfd) == 0x31)
                        insn_type = call;
                    }
                }

              /* We are only interested in function symbols.  */
              if (h != NULL)
                {
                  sym_type = h->type;
                  sym_name = h->root.root.string;
                }
              else
                {
                  sym_type = ELF_ST_TYPE (sym->st_info);
                  sym_name = bfd_elf_sym_name (sym_sec->owner,
                                               symtab_hdr,
                                               sym,
                                               sym_sec);
                }
              if (sym_type != STT_FUNC)
                {
                  /* It's common for people to write assembly and forget
                     to give function symbols the right type.  Handle
                     calls to such symbols, but warn so that (hopefully)
                     people will fix their code.  We need the symbol
                     type to be correct to distinguish function pointer
                     initialisation from other pointer initialisation.  */
                  if (insn_type == call)
                    (*_bfd_error_handler) (_("warning: call to non-function"
                                             " symbol %s defined in %B"),
                                           sym_sec->owner, sym_name);
                  else
                    continue;
                }

              if (!needs_ovl_stub (sym_name, sym_sec, section, htab,
                                   insn_type != non_branch))
                continue;

              stub_name = spu_stub_name (sym_sec, h, irela);
              if (stub_name == NULL)
                goto error_ret_free_internal;

              sh = (struct spu_stub_hash_entry *)
                bfd_hash_lookup (&htab->stub_hash_table, stub_name,
                                 TRUE, FALSE);
              if (sh == NULL)
                {
                  free (stub_name);
                error_ret_free_internal:
                  if (elf_section_data (section)->relocs != internal_relocs)
                    free (internal_relocs);
                error_ret_free_local:
                  if (local_syms != NULL
                      && (symtab_hdr->contents
                          != (unsigned char *) local_syms))
                    free (local_syms);
                  return FALSE;
                }

              /* If this entry isn't new, we already have a stub.  */
              if (sh->target_section != NULL)
                {
                  free (stub_name);
                  continue;
                }

              sh->target_section = sym_sec;
              if (h != NULL)
                sh->target_off = h->root.u.def.value;
              else
                sh->target_off = sym->st_value;
              sh->target_off += irela->r_addend;

              htab->stubs.count += 1;
            }

          /* We're done with the internal relocs, free them.  */
          if (elf_section_data (section)->relocs != internal_relocs)
            free (internal_relocs);
        }

      if (local_syms != NULL
          && symtab_hdr->contents != (unsigned char *) local_syms)
        {
          if (!info->keep_memory)
            free (local_syms);
          else
            symtab_hdr->contents = (unsigned char *) local_syms;
        }
    }

  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, htab);
  if (htab->stubs.err)
    return FALSE;

  *stub = NULL;
  if (htab->stubs.count == 0)
    return TRUE;

  ibfd = info->input_bfds;
  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	   | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  *stub = htab->stub;
  if (htab->stub == NULL
      || !bfd_set_section_alignment (ibfd, htab->stub, 2))
    return FALSE;

  flags = (SEC_ALLOC | SEC_LOAD
	   | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
  *ovtab = htab->ovtab;
  if (htab->ovtab == NULL
      || !bfd_set_section_alignment (ibfd, htab->stub, 4))
    return FALSE;

  *toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (*toe == NULL
      || !bfd_set_section_alignment (ibfd, *toe, 4))
    return FALSE;
  (*toe)->size = 16;

  /* Retrieve all the stubs and sort.  */
  htab->stubs.sh = bfd_malloc (htab->stubs.count * sizeof (*htab->stubs.sh));
  if (htab->stubs.sh == NULL)
    return FALSE;
  i = htab->stubs.count;
  bfd_hash_traverse (&htab->stub_hash_table, populate_stubs, htab);
  BFD_ASSERT (htab->stubs.count == 0);

  htab->stubs.count = i;
  qsort (htab->stubs.sh, htab->stubs.count, sizeof (*htab->stubs.sh),
	 sort_stubs);

  /* Now that the stubs are sorted, place them in the stub section.
     Stubs are grouped per overlay
     .	ila $79,func1
     .	br 1f
     .	ila $79,func2
     .	br 1f
     .
     .
     .	ila $79,funcn
     .	nop
     . 1:
     .	ila $78,ovl_index
     .	br __ovly_load  */
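
  /* Lay out the sorted stubs.  Stubs targeting the same overlay are
     grouped together and share the trailing ila/br pair (stub2); each
     stub's "delta" is its distance from the last stub of its group, so
     delta == 0 marks the stub that carries stub2.  Consecutive stubs
     resolving to the same target address share a single entry.  */
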
  group = 0;
  for (i = 0; i < htab->stubs.count; i++)
    {
      if (spu_elf_section_data (htab->stubs.sh[group]->target_section
				->output_section)->ovl_index
	  != spu_elf_section_data (htab->stubs.sh[i]->target_section
				   ->output_section)->ovl_index)
	{
	  htab->stub->size += SIZEOF_STUB2;
	  for (; group != i; group++)
	    htab->stubs.sh[group]->delta
	      = htab->stubs.sh[i - 1]->off - htab->stubs.sh[group]->off;
	}
      if (group == i
	  || ((htab->stubs.sh[i - 1]->target_section->output_section->vma
	       + htab->stubs.sh[i - 1]->target_section->output_offset
	       + htab->stubs.sh[i - 1]->target_off)
	      != (htab->stubs.sh[i]->target_section->output_section->vma
		  + htab->stubs.sh[i]->target_section->output_offset
		  + htab->stubs.sh[i]->target_off)))
	{
	  htab->stubs.sh[i]->off = htab->stub->size;
	  htab->stub->size += SIZEOF_STUB1;
	  if (info->emitrelocations)
	    htab->stub->reloc_count += 1;
	}
      else
	htab->stubs.sh[i]->off = htab->stubs.sh[i - 1]->off;
    }
  if (group != i)
    htab->stub->size += SIZEOF_STUB2;
  if (info->emitrelocations)
    htab->stub->flags |= SEC_RELOC;
  for (; group != i; group++)
    htab->stubs.sh[group]->delta
      = htab->stubs.sh[i - 1]->off - htab->stubs.sh[group]->off;

  /* htab->ovtab consists of two arrays.
     .	struct {
     .	  u32 vma;
     .	  u32 size;
     .	  u32 file_off;
     .	  u32 buf;
     .	} _ovly_table[];
     .
     .	struct {
     .	  u32 mapped;
     .	} _ovly_buf_table[];  */

  htab->ovtab->alignment_power = 4;
  htab->ovtab->size = htab->num_overlays * 16 + htab->num_buf * 4;

  return TRUE;
}

/* Functions to handle embedded spu_ovl.o object.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}

static file_ptr
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
               void *stream,
               void *buf,
               file_ptr nbytes,
               file_ptr offset)
{
  struct _ovl_stream *os;
  size_t count;
  size_t max;

  os = (struct _ovl_stream *) stream;
  max = (const char *) os->end - (const char *) os->start;

  if ((ufile_ptr) offset >= max)
    return 0;

  count = nbytes;
  if (count > max - offset)
    count = max - offset;

  memcpy (buf, (const char *) os->start + offset, count);
  return count;
}

bfd_boolean
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
                              "elf32-spu",
                              ovl_mgr_open,
                              (void *) stream,
                              ovl_mgr_pread,
                              NULL,
                              NULL);
  return *ovl_bfd != NULL;
}

/* Fill in the ila and br for a stub.  On the last stub for a group,
   write the stub that sets the overlay number too.  */

static bfd_boolean
write_one_stub (struct spu_stub_hash_entry *ent, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sec = htab->stub;
  asection *s = ent->target_section;
  unsigned int ovl;
  bfd_vma val;

  val = ent->target_off + s->output_offset + s->output_section->vma;
  bfd_put_32 (sec->owner, ILA_79 + ((val << 7) & 0x01ffff80),
              sec->contents + ent->off);
  val = ent->delta + 4;
  bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
              sec->contents + ent->off + 4);

  if (info->emitrelocations)
    {
      Elf_Internal_Rela *relocs, *r;
      struct bfd_elf_section_data *elfsec_data;

      elfsec_data = elf_section_data (sec);
      relocs = elfsec_data->relocs;
      if (relocs == NULL)
        {
          bfd_size_type relsize;
          Elf_Internal_Shdr *symtab_hdr;
          struct elf_link_hash_entry **sym_hash;
          unsigned long symcount;
          bfd_vma amt;

          relsize = sec->reloc_count * sizeof (*relocs);
          relocs = bfd_alloc (sec->owner, relsize);
          if (relocs == NULL)
            return FALSE;
          elfsec_data->relocs = relocs;
          elfsec_data->rel_hdr.sh_size
            = sec->reloc_count * sizeof (Elf32_External_Rela);
          elfsec_data->rel_hdr.sh_entsize = sizeof (Elf32_External_Rela);
          sec->reloc_count = 0;

          /* Increase the size of symbol hash array on the bfd to
             which we attached our .stub section.  This hack allows
             us to create relocs against global symbols.  */
          symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
          symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
          symcount -= symtab_hdr->sh_info;
          amt = symcount * sizeof (*sym_hash);
          sym_hash = bfd_alloc (sec->owner, amt + sizeof (*sym_hash));
          if (sym_hash == NULL)
            return FALSE;
          memcpy (sym_hash, elf_sym_hashes (sec->owner), amt);
          sym_hash[symcount] = htab->ovly_load;
          htab->ovly_load_r_symndx = symcount + symtab_hdr->sh_info;
          elf_sym_hashes (sec->owner) = sym_hash;
        }

      r = relocs + sec->reloc_count;
      sec->reloc_count += 1;
      r->r_offset = ent->off + 4;
      r->r_info = ELF32_R_INFO (0, R_SPU_REL16);
      r->r_addend = (sec->output_section->vma
                     + sec->output_offset
                     + ent->off + 4
                     + val);
    }

  /* If this is the last stub of this group, write stub2.  */
  if (ent->delta == 0)
    {
      bfd_put_32 (sec->owner, NOP,
                  sec->contents + ent->off + 4);

      ovl = spu_elf_section_data (s->output_section)->ovl_index;
      bfd_put_32 (sec->owner, ILA_78 + ((ovl << 7) & 0x01ffff80),
                  sec->contents + ent->off + 8);

      val = (htab->ovly_load->root.u.def.section->output_section->vma
             + htab->ovly_load->root.u.def.section->output_offset
             + htab->ovly_load->root.u.def.value
             - (sec->output_section->vma
                + sec->output_offset
                + ent->off + 12));

      if (val + 0x20000 >= 0x40000)
        htab->stub_overflow = TRUE;

      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
                  sec->contents + ent->off + 12);

      if (info->emitrelocations)
        {
          Elf_Internal_Rela *relocs, *r;
          struct bfd_elf_section_data *elfsec_data;

          elfsec_data = elf_section_data (sec);
          relocs = elfsec_data->relocs;
          /* The last branch is overwritten, so overwrite its reloc too.  */
          r = relocs + sec->reloc_count - 1;
          r->r_offset = ent->off + 12;
          r->r_info = ELF32_R_INFO (htab->ovly_load_r_symndx, R_SPU_REL16);
          r->r_addend = 0;
        }
    }

  if (htab->emit_stub_syms)
    {
      struct elf_link_hash_entry *h;
      size_t len1, len2;
      char *name;

      len1 = sizeof ("00000000.ovl_call.") - 1;
      len2 = strlen (ent->root.string);
      name = bfd_malloc (len1 + len2 + 1);
      if (name == NULL)
        return FALSE;
      memcpy (name, "00000000.ovl_call.", len1);
      memcpy (name + len1, ent->root.string, len2 + 1);
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
        return FALSE;
      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = sec;
          h->root.u.def.value = ent->off;
          h->size = (ent->delta == 0
                     ? SIZEOF_STUB1 + SIZEOF_STUB2 : SIZEOF_STUB1);
          h->type = STT_FUNC;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
          h->non_elf = 0;
        }
    }

  return TRUE;
}

/* Define an STT_OBJECT symbol.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else
    {
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
                             h->root.u.def.section->owner,
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}

/* Fill in all stubs and the overlay tables.  */

bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms, asection *toe)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  htab->emit_stub_syms = emit_syms;
  htab->stub->contents = bfd_zalloc (htab->stub->owner, htab->stub->size);
  if (htab->stub->contents == NULL)
    return FALSE;

  h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
  htab->ovly_load = h;
  BFD_ASSERT (h != NULL
              && (h->root.type == bfd_link_hash_defined
                  || h->root.type == bfd_link_hash_defweak)
              && h->def_regular);

  s = h->root.u.def.section->output_section;
  if (spu_elf_section_data (s)->ovl_index)
    {
      (*_bfd_error_handler) (_("%s in overlay section"),
                             h->root.u.def.section->owner);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  /* Write out all the stubs.  */
  for (i = 0; i < htab->stubs.count; i++)
    write_one_stub (htab->stubs.sh[i], info);

  if (htab->stub_overflow)
    {
      (*_bfd_error_handler) (_("overlay stub relocation overflow"));
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  /* Write out _ovly_table.  */
  p = htab->ovtab->contents;
  obfd = htab->ovtab->output_section->owner;
  for (s = obfd->sections; s != NULL; s = s->next)
    {
      unsigned int ovl_index = spu_elf_section_data (s)->ovl_index;

      if (ovl_index != 0)
        {
          unsigned int lo, hi, mid;
          unsigned long off = (ovl_index - 1) * 16;
          bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
          bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
          /* file_off written later in spu_elf_modify_program_headers.  */

          lo = 0;
          hi = htab->num_buf;
          while (lo < hi)
            {
              mid = (lo + hi) >> 1;
              if (htab->ovl_region[2 * mid + 1]->vma
                  + htab->ovl_region[2 * mid + 1]->size <= s->vma)
                lo = mid + 1;
              else if (htab->ovl_region[2 * mid]->vma > s->vma)
                hi = mid;
              else
                {
                  bfd_put_32 (htab->ovtab->owner, mid + 1, p + off + 12);
                  break;
                }
            }
          BFD_ASSERT (lo < hi);
        }
    }

  /* Write out _ovly_buf_table.  */
  p = htab->ovtab->contents + htab->num_overlays * 16;
  for (i = 0; i < htab->num_buf; i++)
    {
      bfd_put_32 (htab->ovtab->owner, 0, p);
      p += 4;
    }

  h = define_ovtab_symbol (htab, "_ovly_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = 0;
  h->size = htab->num_overlays * 16;

  h = define_ovtab_symbol (htab, "_ovly_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16;
  h->size = 0;

  h = define_ovtab_symbol (htab, "_ovly_buf_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16;
  h->size = htab->num_buf * 4;

  h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + htab->num_buf * 4;
  h->size = 0;

  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}

/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.  */

static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
{
  int unrecog;
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
        break;

      if (buf[0] == 0x24 /* stqd */)
        continue;

      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
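      /* Folding buf[1..3] this way means "imm >> 7" is the 10-bit RI10
         field, "imm & 0xffff" the 16-bit RI16 field, and, with the extra
         bit taken from buf[0] below, the 18-bit RI18 field of ila.  */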

      if (buf[0] == 0x1c /* ai */)
        {
          imm >>= 7;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] + imm;

          if (rt == 1 /* sp */)
            {
              if (imm > 0)
                break;
              return reg[rt];
            }
        }
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
        {
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

          reg[rt] = reg[ra] + reg[rb];
          if (rt == 1)
            return reg[rt];
        }
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
        {
          if (buf[0] >= 0x42 /* ila */)
            imm |= (buf[0] & 1) << 17;
          else
            {
              imm &= 0xffff;

              if (buf[0] == 0x40 /* il */)
                {
                  if ((buf[1] & 0x80) == 0)
                    goto unknown_insn;
                  imm = (imm ^ 0x8000) - 0x8000;
                }
              else if ((buf[1] & 0x80) == 0 /* ilhu */)
                imm <<= 16;
            }
          reg[rt] = imm;
          continue;
        }
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
        {
          reg[rt] |= imm & 0xffff;
          continue;
        }
      else if (buf[0] == 0x04 /* ori */)
        {
          imm >>= 7;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] | imm;
          continue;
        }
      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
               || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
        {
          /* Used in pic reg load.  Say rt is trashed.  */
          reg[rt] = 0;
          continue;
        }
      else if (is_branch (buf) || is_indirect_branch (buf))
        /* If we hit a branch then we must be out of the prologue.  */
        break;
    unknown_insn:
      ++unrecog;
    }

  return 0;
}

/* qsort predicate to sort symbols by section and value.  */

static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;

static int
sort_syms (const void *a, const void *b)
{
  Elf_Internal_Sym *const *s1 = a;
  Elf_Internal_Sym *const *s2 = b;
  asection *sec1,*sec2;
  bfd_signed_vma delta;

  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];

  if (sec1 != sec2)
    return sec1->index - sec2->index;

  delta = (*s1)->st_value - (*s2)->st_value;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  delta = (*s2)->st_size - (*s1)->st_size;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return *s1 < *s2 ? -1 : 1;
}

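/* One entry in a function's call list: the callee, plus a flag saying
   whether the call site is a tail call (a branch rather than brsl).  */
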
struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  int is_tail;
};

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Stack usage.  */
  int stack;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Flags used during call tree traversal.  */
  unsigned int visit1 : 1;
  unsigned int non_root : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
};

struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};

/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
   entries for section SEC.  */

static struct spu_elf_stack_info *
alloc_stack_info (asection *sec, int max_fun)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  bfd_size_type amt;

  amt = sizeof (struct spu_elf_stack_info);
  amt += (max_fun - 1) * sizeof (struct function_info);
  sec_data->stack_info = bfd_zmalloc (amt);
  if (sec_data->stack_info != NULL)
    sec_data->stack_info->max_fun = max_fun;
  return sec_data->stack_info;
}

/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.  */

static struct function_info *
maybe_insert_function (asection *sec,
                       void *sym_h,
                       bfd_boolean global,
                       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
  int i;
  bfd_vma off, size;

  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
        return NULL;
    }

  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
         info.  */
      if (sinfo->fun[i].lo == off)
        {
          /* Prefer globals over local syms.  */
          if (global && !sinfo->fun[i].global)
            {
              sinfo->fun[i].global = TRUE;
              sinfo->fun[i].u.h = sym_h;
            }
          if (is_func)
            sinfo->fun[i].is_func = TRUE;
          return &sinfo->fun[i];
        }
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
        return &sinfo->fun[i];
    }

  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
             (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  else if (i >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
        return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->stack_info = sinfo;
    }
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}

/* Return the name of FUN.  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
        return "(null)";
      sprintf (name, "%s+%lx", sec->name,
               (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}

/* Read the instruction at OFF in SEC.  Return true iff the instruction
   is a nop, lnop, or stop 0 (all zero insn).  */

static bfd_boolean
is_nop (asection *sec, bfd_vma off)
{
  unsigned char insn[4];

  if (off + 4 > sec->size
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
    return FALSE;
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
    return TRUE;
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
    return TRUE;
  return FALSE;
}

/* Extend the range of FUN to cover nop padding up to LIMIT.
   Return TRUE iff some instruction other than a NOP was found.  */

static bfd_boolean
insns_at_end (struct function_info *fun, bfd_vma limit)
{
  bfd_vma off = (fun->hi + 3) & -4;

  while (off < limit && is_nop (fun->sec, off))
    off += 4;
  if (off < limit)
    {
      fun->hi = off;
      return TRUE;
    }
  fun->hi = limit;
  return FALSE;
}

/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  if (sinfo == NULL)
    return FALSE;

  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
        /* Fix overlapping symbols.  */
        const char *f1 = func_name (&sinfo->fun[i - 1]);
        const char *f2 = func_name (&sinfo->fun[i]);

        info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
        sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      if (sinfo->fun[0].lo != 0)
        gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
        {
          const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

          info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
          sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
        }
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
        gaps = TRUE;
    }
  return gaps;
}

/* Search current function info for a function that contains address
   OFFSET in section SEC.  */

static struct function_info *
find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
  int lo, hi, mid;

  lo = 0;
  hi = sinfo->num_fun;
  while (lo < hi)
    {
      mid = (lo + hi) / 2;
      if (offset < sinfo->fun[mid].lo)
        hi = mid;
      else if (offset >= sinfo->fun[mid].hi)
        lo = mid + 1;
      else
        return &sinfo->fun[mid];
    }
  info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
                          sec, offset);
  return NULL;
}

/* Add CALLEE to CALLER call list if not already present.  */

static bfd_boolean
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info *p;
  for (p = caller->call_list; p != NULL; p = p->next)
    if (p->fun == callee->fun)
      {
        /* Tail calls use less stack than normal calls.  Retain entry
           for normal call over one for tail call.  */
        if (p->is_tail > callee->is_tail)
          p->is_tail = callee->is_tail;
        return FALSE;
      }
  callee->next = caller->call_list;
  caller->call_list = callee;
  return TRUE;
}

/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */

static bfd_boolean
mark_functions_via_relocs (asection *sec,
                           struct bfd_link_info *info,
                           int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  Elf_Internal_Sym *syms;
  void *psyms;
  static bfd_boolean warned;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
                                               info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  syms = *(Elf_Internal_Sym **) psyms;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      unsigned char insn[4];
      bfd_boolean is_call;
      struct function_info *caller;
      struct call_info *callee;

      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
          && r_type != R_SPU_ADDR16)
        continue;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
        return FALSE;

      if (sym_sec == NULL
          || sym_sec->output_section == NULL
          || sym_sec->output_section->owner != sec->output_section->owner)
        continue;

      if (!bfd_get_section_contents (sec->owner, sec, insn,
                                     irela->r_offset, 4))
        return FALSE;
      if (!is_branch (insn))
        continue;

      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
          != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
        {
          if (!call_tree)
            warned = TRUE;
          if (!call_tree || !warned)
            info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
                                      " %B(%A), stack analysis incomplete\n"),
                                    sec->owner, sec, irela->r_offset,
                                    sym_sec->owner, sym_sec);
          continue;
        }
2048 is_call = (insn[0] & 0xfd) == 0x31;
2050 if (h)
2051 val = h->root.u.def.value;
2052 else
2053 val = sym->st_value;
2054 val += irela->r_addend;
2056 if (!call_tree)
2058 struct function_info *fun;
2060 if (irela->r_addend != 0)
2062 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2063 if (fake == NULL)
2064 return FALSE;
2065 fake->st_value = val;
2066 fake->st_shndx
2067 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2068 sym = fake;
2070 if (sym)
2071 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2072 else
2073 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2074 if (fun == NULL)
2075 return FALSE;
2076 if (irela->r_addend != 0
2077 && fun->u.sym != sym)
2078 free (sym);
2079 continue;
2082 caller = find_function (sec, irela->r_offset, info);
2083 if (caller == NULL)
2084 return FALSE;
2085 callee = bfd_malloc (sizeof *callee);
2086 if (callee == NULL)
2087 return FALSE;
2089 callee->fun = find_function (sym_sec, val, info);
2090 if (callee->fun == NULL)
2091 return FALSE;
2092 callee->is_tail = !is_call;
2093 if (!insert_callee (caller, callee))
2094 free (callee);
2095 else if (!is_call
2096 && !callee->fun->is_func
2097 && callee->fun->stack == 0)
2099 /* This is either a tail call or a branch from one part of
2100 the function to another, i.e. to its hot/cold counterpart.  If the
2101 destination has been called by some other function then
2102 it is a separate function.  We also assume that functions
2103 are not split across input files.  */
2104 if (callee->fun->start != NULL
2105 || sec->owner != sym_sec->owner)
2107 callee->fun->start = NULL;
2108 callee->fun->is_func = TRUE;
2110 else
2111 callee->fun->start = caller;
2115 return TRUE;
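/* Summary of the branch handling above: a non-call branch into code not
   yet known to be a real function is taken as evidence of a split
   (hot/cold) function.  If the target already has a different "start",
   or lives in another input file, it is promoted to a separate
   function; otherwise the current function is recorded as its start so
   that build_call_tree can later transfer call info to the main
   entry.  */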
2118 /* Handle something like .init or .fini, which has a piece of a function.
2119 These sections are pasted together to form a single function. */
2121 static bfd_boolean
2122 pasted_function (asection *sec, struct bfd_link_info *info)
2124 struct bfd_link_order *l;
2125 struct _spu_elf_section_data *sec_data;
2126 struct spu_elf_stack_info *sinfo;
2127 Elf_Internal_Sym *fake;
2128 struct function_info *fun, *fun_start;
2130 fake = bfd_zmalloc (sizeof (*fake));
2131 if (fake == NULL)
2132 return FALSE;
2133 fake->st_value = 0;
2134 fake->st_size = sec->size;
2135 fake->st_shndx
2136 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2137 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2138 if (!fun)
2139 return FALSE;
2141 /* Find a function immediately preceding this section. */
2142 fun_start = NULL;
2143 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2145 if (l->u.indirect.section == sec)
2147 if (fun_start != NULL)
2149 if (fun_start->start)
2150 fun_start = fun_start->start;
2151 fun->start = fun_start;
2153 return TRUE;
2155 if (l->type == bfd_indirect_link_order
2156 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2157 && (sinfo = sec_data->stack_info) != NULL
2158 && sinfo->num_fun != 0)
2159 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2162 info->callbacks->einfo (_("%A link_order not found\n"), sec);
2163 return FALSE;
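/* In other words, each piece of .init or .fini gets a zero-valued fake
   symbol covering the whole input section, and its "start" is set to
   the last function of the nearest preceding input section in the same
   output section that already has stack info.  Successive pieces thus
   chain back to the first one, which serves as the function's entry
   point for the stack analysis.  */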
2166 /* We're only interested in code sections. */
2168 static bfd_boolean
2169 interesting_section (asection *s, bfd *obfd, struct spu_link_hash_table *htab)
2171 return (s != htab->stub
2172 && s->output_section != NULL
2173 && s->output_section->owner == obfd
2174 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2175 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2176 && s->size != 0);
2179 /* Map address ranges in code sections to functions. */
2181 static bfd_boolean
2182 discover_functions (bfd *output_bfd, struct bfd_link_info *info)
2184 struct spu_link_hash_table *htab = spu_hash_table (info);
2185 bfd *ibfd;
2186 int bfd_idx;
2187 Elf_Internal_Sym ***psym_arr;
2188 asection ***sec_arr;
2189 bfd_boolean gaps = FALSE;
2191 bfd_idx = 0;
2192 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2193 bfd_idx++;
2195 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2196 if (psym_arr == NULL)
2197 return FALSE;
2198 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2199 if (sec_arr == NULL)
2200 return FALSE;
2203 for (ibfd = info->input_bfds, bfd_idx = 0;
2204 ibfd != NULL;
2205 ibfd = ibfd->link_next, bfd_idx++)
2207 extern const bfd_target bfd_elf32_spu_vec;
2208 Elf_Internal_Shdr *symtab_hdr;
2209 asection *sec;
2210 size_t symcount;
2211 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2212 asection **psecs, **p;
2214 if (ibfd->xvec != &bfd_elf32_spu_vec)
2215 continue;
2217 /* Read all the symbols. */
2218 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2219 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2220 if (symcount == 0)
2221 continue;
2223 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2224 if (syms == NULL)
2226 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2227 NULL, NULL, NULL);
2228 symtab_hdr->contents = (void *) syms;
2229 if (syms == NULL)
2230 return FALSE;
2233 /* Select defined function symbols that are going to be output. */
2234 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2235 if (psyms == NULL)
2236 return FALSE;
2237 psym_arr[bfd_idx] = psyms;
2238 psecs = bfd_malloc (symcount * sizeof (*psecs));
2239 if (psecs == NULL)
2240 return FALSE;
2241 sec_arr[bfd_idx] = psecs;
2242 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2243 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2244 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2246 asection *s;
2248 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2249 if (s != NULL && interesting_section (s, output_bfd, htab))
2250 *psy++ = sy;
2252 symcount = psy - psyms;
2253 *psy = NULL;
2255 /* Sort them by section and offset within section. */
2256 sort_syms_syms = syms;
2257 sort_syms_psecs = psecs;
2258 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2260 /* Now inspect the function symbols. */
2261 for (psy = psyms; psy < psyms + symcount; )
2263 asection *s = psecs[*psy - syms];
2264 Elf_Internal_Sym **psy2;
2266 for (psy2 = psy; ++psy2 < psyms + symcount; )
2267 if (psecs[*psy2 - syms] != s)
2268 break;
2270 if (!alloc_stack_info (s, psy2 - psy))
2271 return FALSE;
2272 psy = psy2;
2275 /* First install info about properly typed and sized functions.
2276 In an ideal world this will cover all code sections, except
2277 when partitioning functions into hot and cold sections,
2278 and the horrible pasted together .init and .fini functions. */
2279 for (psy = psyms; psy < psyms + symcount; ++psy)
2281 sy = *psy;
2282 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2284 asection *s = psecs[sy - syms];
2285 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2286 return FALSE;
2290 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2291 if (interesting_section (sec, output_bfd, htab))
2292 gaps |= check_function_ranges (sec, info);
2295 if (gaps)
2297 /* See if we can discover more function symbols by looking at
2298 relocations. */
2299 for (ibfd = info->input_bfds, bfd_idx = 0;
2300 ibfd != NULL;
2301 ibfd = ibfd->link_next, bfd_idx++)
2303 asection *sec;
2305 if (psym_arr[bfd_idx] == NULL)
2306 continue;
2308 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2309 if (interesting_section (sec, output_bfd, htab)
2310 && sec->reloc_count != 0)
2312 if (!mark_functions_via_relocs (sec, info, FALSE))
2313 return FALSE;
2317 for (ibfd = info->input_bfds, bfd_idx = 0;
2318 ibfd != NULL;
2319 ibfd = ibfd->link_next, bfd_idx++)
2321 Elf_Internal_Shdr *symtab_hdr;
2322 asection *sec;
2323 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2324 asection **psecs;
2326 if ((psyms = psym_arr[bfd_idx]) == NULL)
2327 continue;
2329 psecs = sec_arr[bfd_idx];
2331 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2332 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2334 gaps = FALSE;
2335 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2336 if (interesting_section (sec, output_bfd, htab))
2337 gaps |= check_function_ranges (sec, info);
2338 if (!gaps)
2339 continue;
2341 /* Finally, install all globals. */
2342 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2344 asection *s;
2346 s = psecs[sy - syms];
2348 /* Global syms might be improperly typed functions. */
2349 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2350 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2352 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2353 return FALSE;
2357 /* Some of the symbols we've installed as marking the
2358 beginning of functions may have a size of zero. Extend
2359 the range of such functions to the beginning of the
2360 next symbol of interest. */
2361 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2362 if (interesting_section (sec, output_bfd, htab))
2364 struct _spu_elf_section_data *sec_data;
2365 struct spu_elf_stack_info *sinfo;
2367 sec_data = spu_elf_section_data (sec);
2368 sinfo = sec_data->stack_info;
2369 if (sinfo != NULL)
2371 int fun_idx;
2372 bfd_vma hi = sec->size;
2374 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2376 sinfo->fun[fun_idx].hi = hi;
2377 hi = sinfo->fun[fun_idx].lo;
2380 /* No symbols in this section. Must be .init or .fini
2381 or something similar. */
2382 else if (!pasted_function (sec, info))
2383 return FALSE;
2388 for (ibfd = info->input_bfds, bfd_idx = 0;
2389 ibfd != NULL;
2390 ibfd = ibfd->link_next, bfd_idx++)
2392 if (psym_arr[bfd_idx] == NULL)
2393 continue;
2395 free (psym_arr[bfd_idx]);
2396 free (sec_arr[bfd_idx]);
2399 free (psym_arr);
2400 free (sec_arr);
2402 return TRUE;
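/* Recap of the passes above: (1) properly typed STT_FUNC symbols are
   installed first; (2) if check_function_ranges still reports gaps,
   branch targets found via relocs and then untyped global symbols are
   added as functions; (3) zero-size entries are extended so each
   function runs to the start of the next one, or to the end of its
   section; and (4) code sections with no symbols at all are treated as
   pasted function pieces such as .init/.fini.  */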
2405 /* Mark nodes in the call graph that are called by some other node. */
2407 static void
2408 mark_non_root (struct function_info *fun)
2410 struct call_info *call;
2412 fun->visit1 = TRUE;
2413 for (call = fun->call_list; call; call = call->next)
2415 call->fun->non_root = TRUE;
2416 if (!call->fun->visit1)
2417 mark_non_root (call->fun);
2421 /* Remove cycles from the call graph. */
2423 static void
2424 call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
2426 struct call_info **callp, *call;
2428 fun->visit2 = TRUE;
2429 fun->marking = TRUE;
2431 callp = &fun->call_list;
2432 while ((call = *callp) != NULL)
2434 if (!call->fun->visit2)
2435 call_graph_traverse (call->fun, info);
2436 else if (call->fun->marking)
2438 const char *f1 = func_name (fun);
2439 const char *f2 = func_name (call->fun);
2441 info->callbacks->info (_("Stack analysis will ignore the call "
2442 "from %s to %s\n"),
2443 f1, f2);
2444 *callp = call->next;
2445 continue;
2447 callp = &call->next;
2449 fun->marking = FALSE;
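/* The cycle removal is a depth-first walk: "visit2" marks nodes already
   processed, while "marking" flags nodes currently on the DFS stack.
   A call edge leading to a node that is still marked is a back edge,
   i.e. a cycle, and that edge is dropped with a diagnostic.  For a
   hypothetical cycle A -> B -> C -> A entered from the root A, it is
   the C -> A edge that gets removed, which is what the "break cycles
   in a reasonable place" comment in build_call_tree refers to.  */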
2452 /* Populate call_list for each function. */
2454 static bfd_boolean
2455 build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
2457 struct spu_link_hash_table *htab = spu_hash_table (info);
2458 bfd *ibfd;
2460 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2462 extern const bfd_target bfd_elf32_spu_vec;
2463 asection *sec;
2465 if (ibfd->xvec != &bfd_elf32_spu_vec)
2466 continue;
2468 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2470 if (!interesting_section (sec, output_bfd, htab)
2471 || sec->reloc_count == 0)
2472 continue;
2474 if (!mark_functions_via_relocs (sec, info, TRUE))
2475 return FALSE;
2478 /* Transfer call info from hot/cold section part of function
2479 to main entry. */
2480 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2482 struct _spu_elf_section_data *sec_data;
2483 struct spu_elf_stack_info *sinfo;
2485 if ((sec_data = spu_elf_section_data (sec)) != NULL
2486 && (sinfo = sec_data->stack_info) != NULL)
2488 int i;
2489 for (i = 0; i < sinfo->num_fun; ++i)
2491 if (sinfo->fun[i].start != NULL)
2493 struct call_info *call = sinfo->fun[i].call_list;
2495 while (call != NULL)
2497 struct call_info *call_next = call->next;
2498 if (!insert_callee (sinfo->fun[i].start, call))
2499 free (call);
2500 call = call_next;
2502 sinfo->fun[i].call_list = NULL;
2503 sinfo->fun[i].non_root = TRUE;
2510 /* Find the call graph root(s). */
2511 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2513 extern const bfd_target bfd_elf32_spu_vec;
2514 asection *sec;
2516 if (ibfd->xvec != &bfd_elf32_spu_vec)
2517 continue;
2519 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2521 struct _spu_elf_section_data *sec_data;
2522 struct spu_elf_stack_info *sinfo;
2524 if ((sec_data = spu_elf_section_data (sec)) != NULL
2525 && (sinfo = sec_data->stack_info) != NULL)
2527 int i;
2528 for (i = 0; i < sinfo->num_fun; ++i)
2529 if (!sinfo->fun[i].visit1)
2530 mark_non_root (&sinfo->fun[i]);
2535 /* Remove cycles from the call graph. We start from the root node(s)
2536 so that we break cycles in a reasonable place. */
2537 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2539 extern const bfd_target bfd_elf32_spu_vec;
2540 asection *sec;
2542 if (ibfd->xvec != &bfd_elf32_spu_vec)
2543 continue;
2545 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2547 struct _spu_elf_section_data *sec_data;
2548 struct spu_elf_stack_info *sinfo;
2550 if ((sec_data = spu_elf_section_data (sec)) != NULL
2551 && (sinfo = sec_data->stack_info) != NULL)
2553 int i;
2554 for (i = 0; i < sinfo->num_fun; ++i)
2555 if (!sinfo->fun[i].non_root)
2556 call_graph_traverse (&sinfo->fun[i], info);
2561 return TRUE;
2564 /* Descend the call graph for FUN, accumulating total stack required. */
2566 static bfd_vma
2567 sum_stack (struct function_info *fun,
2568 struct bfd_link_info *info,
2569 int emit_stack_syms)
2571 struct call_info *call;
2572 struct function_info *max = NULL;
2573 bfd_vma max_stack = fun->stack;
2574 bfd_vma stack;
2575 const char *f1;
2577 if (fun->visit3)
2578 return max_stack;
2580 for (call = fun->call_list; call; call = call->next)
2582 stack = sum_stack (call->fun, info, emit_stack_syms);
2583 /* Include caller stack for normal calls, don't do so for
2584 tail calls. fun->stack here is local stack usage for
2585 this function. */
2586 if (!call->is_tail)
2587 stack += fun->stack;
2588 if (max_stack < stack)
2590 max_stack = stack;
2591 max = call->fun;
2595 f1 = func_name (fun);
2596 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
2597 f1, (bfd_vma) fun->stack, max_stack);
2599 if (fun->call_list)
2601 info->callbacks->minfo (_(" calls:\n"));
2602 for (call = fun->call_list; call; call = call->next)
2604 const char *f2 = func_name (call->fun);
2605 const char *ann1 = call->fun == max ? "*" : " ";
2606 const char *ann2 = call->is_tail ? "t" : " ";
2608 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
2612 /* Now fun->stack holds cumulative stack. */
2613 fun->stack = max_stack;
2614 fun->visit3 = TRUE;
2616 if (emit_stack_syms)
2618 struct spu_link_hash_table *htab = spu_hash_table (info);
2619 char *name = bfd_malloc (18 + strlen (f1));
2620 struct elf_link_hash_entry *h;
2622 if (name != NULL)
2624 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
2625 sprintf (name, "__stack_%s", f1);
2626 else
2627 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
2629 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
2630 free (name);
2631 if (h != NULL
2632 && (h->root.type == bfd_link_hash_new
2633 || h->root.type == bfd_link_hash_undefined
2634 || h->root.type == bfd_link_hash_undefweak))
2636 h->root.type = bfd_link_hash_defined;
2637 h->root.u.def.section = bfd_abs_section_ptr;
2638 h->root.u.def.value = max_stack;
2639 h->size = 0;
2640 h->type = 0;
2641 h->ref_regular = 1;
2642 h->def_regular = 1;
2643 h->ref_regular_nonweak = 1;
2644 h->forced_local = 1;
2645 h->non_elf = 0;
2650 return max_stack;
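/* The recurrence implemented above: a function's cumulative stack is
   the maximum, over its outgoing calls, of the callee's cumulative
   stack plus this function's own frame for normal calls (tail calls
   reuse the caller's frame), and never less than the local frame
   itself.  A stand-alone model of just that recurrence, using a
   hypothetical simplified node type, is sketched below for clarity;
   it is not part of the build.  */
#if 0
struct model_node
{
  unsigned int local;                /* local frame size  */
  int ncalls;
  struct model_node *callee[8];
  int is_tail[8];
};

static unsigned int
model_sum_stack (struct model_node *f)
{
  unsigned int max = f->local;
  int i;

  for (i = 0; i < f->ncalls; i++)
    {
      unsigned int s = model_sum_stack (f->callee[i]);
      if (!f->is_tail[i])
        s += f->local;               /* caller frame stays live  */
      if (s > max)
        max = s;
    }
  return max;
}
#endif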
2653 /* Provide an estimate of total stack required. */
2655 static bfd_boolean
2656 spu_elf_stack_analysis (bfd *output_bfd,
2657 struct bfd_link_info *info,
2658 int emit_stack_syms)
2660 bfd *ibfd;
2661 bfd_vma max_stack = 0;
2663 if (!discover_functions (output_bfd, info))
2664 return FALSE;
2666 if (!build_call_tree (output_bfd, info))
2667 return FALSE;
2669 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
2670 info->callbacks->minfo (_("\nStack size for functions. "
2671 "Annotations: '*' max stack, 't' tail call\n"));
2672 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2674 extern const bfd_target bfd_elf32_spu_vec;
2675 asection *sec;
2677 if (ibfd->xvec != &bfd_elf32_spu_vec)
2678 continue;
2680 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2682 struct _spu_elf_section_data *sec_data;
2683 struct spu_elf_stack_info *sinfo;
2685 if ((sec_data = spu_elf_section_data (sec)) != NULL
2686 && (sinfo = sec_data->stack_info) != NULL)
2688 int i;
2689 for (i = 0; i < sinfo->num_fun; ++i)
2691 if (!sinfo->fun[i].non_root)
2693 bfd_vma stack;
2694 const char *f1;
2696 stack = sum_stack (&sinfo->fun[i], info,
2697 emit_stack_syms);
2698 f1 = func_name (&sinfo->fun[i]);
2699 info->callbacks->info (_(" %s: 0x%v\n"),
2700 f1, stack);
2701 if (max_stack < stack)
2702 max_stack = stack;
2709 info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
2710 return TRUE;
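/* The report is driven by the format strings above: one
   "  <function>: 0x<stack>" line per call-graph root via the info
   callback (hypothetically, "  main: 0x210"), per-function detail and
   callee annotations via minfo, which typically only appear in the
   linker map file, and a final "Maximum stack required is 0x..."
   summary line.  */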
2713 /* Perform a final link. */
2715 static bfd_boolean
2716 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2718 struct spu_link_hash_table *htab = spu_hash_table (info);
2720 if (htab->stack_analysis
2721 && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
2722 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2724 return bfd_elf_final_link (output_bfd, info);
2727 /* Called when not normally emitting relocs, i.e. when neither
2728 info->relocatable nor info->emitrelocations is set.  Returns a count
2729 of special relocs that need to be emitted. */
2731 static unsigned int
2732 spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
2734 unsigned int count = 0;
2735 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
2737 for (; relocs < relend; relocs++)
2739 int r_type = ELF32_R_TYPE (relocs->r_info);
2740 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2741 ++count;
2744 return count;
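/* Only R_SPU_PPU32/R_SPU_PPU64 relocs are counted here.  They are never
   applied to the SPU code itself; the count just reserves room so that
   spu_elf_relocate_section below can keep them in the output, where
   they are presumably processed later on the PowerPC side when the SPU
   image is embedded.  */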
2747 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
2749 static bfd_boolean
2750 spu_elf_relocate_section (bfd *output_bfd,
2751 struct bfd_link_info *info,
2752 bfd *input_bfd,
2753 asection *input_section,
2754 bfd_byte *contents,
2755 Elf_Internal_Rela *relocs,
2756 Elf_Internal_Sym *local_syms,
2757 asection **local_sections)
2759 Elf_Internal_Shdr *symtab_hdr;
2760 struct elf_link_hash_entry **sym_hashes;
2761 Elf_Internal_Rela *rel, *relend;
2762 struct spu_link_hash_table *htab;
2763 bfd_boolean ret = TRUE;
2764 bfd_boolean emit_these_relocs = FALSE;
2766 htab = spu_hash_table (info);
2767 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2768 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
2770 rel = relocs;
2771 relend = relocs + input_section->reloc_count;
2772 for (; rel < relend; rel++)
2774 int r_type;
2775 reloc_howto_type *howto;
2776 unsigned long r_symndx;
2777 Elf_Internal_Sym *sym;
2778 asection *sec;
2779 struct elf_link_hash_entry *h;
2780 const char *sym_name;
2781 bfd_vma relocation;
2782 bfd_vma addend;
2783 bfd_reloc_status_type r;
2784 bfd_boolean unresolved_reloc;
2785 bfd_boolean warned;
2786 bfd_boolean branch;
2788 r_symndx = ELF32_R_SYM (rel->r_info);
2789 r_type = ELF32_R_TYPE (rel->r_info);
2790 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2792 emit_these_relocs = TRUE;
2793 continue;
2796 howto = elf_howto_table + r_type;
2797 unresolved_reloc = FALSE;
2798 warned = FALSE;
2799 h = NULL;
2800 sym = NULL;
2801 sec = NULL;
2802 if (r_symndx < symtab_hdr->sh_info)
2804 sym = local_syms + r_symndx;
2805 sec = local_sections[r_symndx];
2806 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
2807 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2809 else
2811 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2812 r_symndx, symtab_hdr, sym_hashes,
2813 h, sec, relocation,
2814 unresolved_reloc, warned);
2815 sym_name = h->root.root.string;
2818 if (sec != NULL && elf_discarded_section (sec))
2820 /* For relocs against symbols from removed linkonce sections,
2821 or sections discarded by a linker script, we just want the
2822 section contents zeroed. Avoid any special processing. */
2823 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
2824 rel->r_info = 0;
2825 rel->r_addend = 0;
2826 continue;
2829 if (info->relocatable)
2830 continue;
2832 if (unresolved_reloc)
2834 (*_bfd_error_handler)
2835 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
2836 input_bfd,
2837 bfd_get_section_name (input_bfd, input_section),
2838 (long) rel->r_offset,
2839 howto->name,
2840 sym_name);
2841 ret = FALSE;
2844 /* If this symbol is in an overlay area, we may need to relocate
2845 to the overlay stub. */
2846 addend = rel->r_addend;
2847 branch = (is_branch (contents + rel->r_offset)
2848 || is_hint (contents + rel->r_offset));
2849 if (needs_ovl_stub (sym_name, sec, input_section, htab, branch))
2851 char *stub_name;
2852 struct spu_stub_hash_entry *sh;
2854 stub_name = spu_stub_name (sec, h, rel);
2855 if (stub_name == NULL)
2856 return FALSE;
2858 sh = (struct spu_stub_hash_entry *)
2859 bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
2860 if (sh != NULL)
2862 relocation = (htab->stub->output_section->vma
2863 + htab->stub->output_offset
2864 + sh->off);
2865 addend = 0;
2867 free (stub_name);
2870 r = _bfd_final_link_relocate (howto,
2871 input_bfd,
2872 input_section,
2873 contents,
2874 rel->r_offset, relocation, addend);
2876 if (r != bfd_reloc_ok)
2878 const char *msg = (const char *) 0;
2880 switch (r)
2882 case bfd_reloc_overflow:
2883 if (!((*info->callbacks->reloc_overflow)
2884 (info, (h ? &h->root : NULL), sym_name, howto->name,
2885 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
2886 return FALSE;
2887 break;
2889 case bfd_reloc_undefined:
2890 if (!((*info->callbacks->undefined_symbol)
2891 (info, sym_name, input_bfd, input_section,
2892 rel->r_offset, TRUE)))
2893 return FALSE;
2894 break;
2896 case bfd_reloc_outofrange:
2897 msg = _("internal error: out of range error");
2898 goto common_error;
2900 case bfd_reloc_notsupported:
2901 msg = _("internal error: unsupported relocation error");
2902 goto common_error;
2904 case bfd_reloc_dangerous:
2905 msg = _("internal error: dangerous error");
2906 goto common_error;
2908 default:
2909 msg = _("internal error: unknown error");
2910 /* fall through */
2912 common_error:
2913 if (!((*info->callbacks->warning)
2914 (info, msg, sym_name, input_bfd, input_section,
2915 rel->r_offset)))
2916 return FALSE;
2917 break;
2922 if (ret
2923 && emit_these_relocs
2924 && !info->relocatable
2925 && !info->emitrelocations)
2927 Elf_Internal_Rela *wrel;
2928 Elf_Internal_Shdr *rel_hdr;
2930 wrel = rel = relocs;
2931 relend = relocs + input_section->reloc_count;
2932 for (; rel < relend; rel++)
2934 int r_type;
2936 r_type = ELF32_R_TYPE (rel->r_info);
2937 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2938 *wrel++ = *rel;
2940 input_section->reloc_count = wrel - relocs;
2941 /* Adjust the reloc section header so that _bfd_elf_link_output_relocs emits only the relocs we kept above. */
2942 rel_hdr = &elf_section_data (input_section)->rel_hdr;
2943 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
2944 ret = 2;
2947 return ret;
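/* Note on the tail end of the function above: R_SPU_PPU32/PPU64 relocs
   are not applied here.  Instead they are compacted to the front of the
   reloc array, reloc_count and the reloc header size are trimmed to
   match, and the function returns 2 rather than TRUE, which the generic
   ELF linker appears to treat as a request to run
   _bfd_elf_link_output_relocs for this section even though neither -r
   nor --emit-relocs is in effect.  */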
2950 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
2952 static bfd_boolean
2953 spu_elf_output_symbol_hook (struct bfd_link_info *info,
2954 const char *sym_name ATTRIBUTE_UNUSED,
2955 Elf_Internal_Sym *sym,
2956 asection *sym_sec ATTRIBUTE_UNUSED,
2957 struct elf_link_hash_entry *h)
2959 struct spu_link_hash_table *htab = spu_hash_table (info);
2961 if (!info->relocatable
2962 && htab->num_overlays != 0
2963 && h != NULL
2964 && (h->root.type == bfd_link_hash_defined
2965 || h->root.type == bfd_link_hash_defweak)
2966 && h->def_regular
2967 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
2969 static Elf_Internal_Rela zero_rel;
2970 char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
2971 struct spu_stub_hash_entry *sh;
2973 if (stub_name == NULL)
2974 return FALSE;
2975 sh = (struct spu_stub_hash_entry *)
2976 bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
2977 free (stub_name);
2978 if (sh == NULL)
2979 return TRUE;
2980 sym->st_shndx
2981 = _bfd_elf_section_from_bfd_section (htab->stub->output_section->owner,
2982 htab->stub->output_section);
2983 sym->st_value = (htab->stub->output_section->vma
2984 + htab->stub->output_offset
2985 + sh->off);
2988 return TRUE;
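/* The effect is that any defined _SPUEAR_* symbol (presumably an entry
   point referenced from outside the SPU image) is rewritten in the
   output symbol table to the address of its overlay stub, so that
   external callers enter overlay code via the stub rather than at the
   raw, possibly not-yet-loaded, overlay address.  */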
2991 static int spu_plugin = 0;
2993 void
2994 spu_elf_plugin (int val)
2996 spu_plugin = val;
2999 /* Set ELF header e_type for plugins. */
3001 static void
3002 spu_elf_post_process_headers (bfd *abfd,
3003 struct bfd_link_info *info ATTRIBUTE_UNUSED)
3005 if (spu_plugin)
3007 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
3009 i_ehdrp->e_type = ET_DYN;
3013 /* We may add an extra PT_LOAD segment for .toe. We also need extra
3014 segments for overlays. */
3016 static int
3017 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
3019 struct spu_link_hash_table *htab = spu_hash_table (info);
3020 int extra = htab->num_overlays;
3021 asection *sec;
3023 if (extra)
3024 ++extra;
3026 sec = bfd_get_section_by_name (abfd, ".toe");
3027 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
3028 ++extra;
3030 return extra;
3033 /* Remove .toe section from other PT_LOAD segments and put it in
3034 a segment of its own. Put overlays in separate segments too. */
3036 static bfd_boolean
3037 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
3039 asection *toe, *s;
3040 struct elf_segment_map *m;
3041 unsigned int i;
3043 if (info == NULL)
3044 return TRUE;
3046 toe = bfd_get_section_by_name (abfd, ".toe");
3047 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
3048 if (m->p_type == PT_LOAD && m->count > 1)
3049 for (i = 0; i < m->count; i++)
3050 if ((s = m->sections[i]) == toe
3051 || spu_elf_section_data (s)->ovl_index != 0)
3053 struct elf_segment_map *m2;
3054 bfd_vma amt;
3056 if (i + 1 < m->count)
3058 amt = sizeof (struct elf_segment_map);
3059 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
3060 m2 = bfd_zalloc (abfd, amt);
3061 if (m2 == NULL)
3062 return FALSE;
3063 m2->count = m->count - (i + 1);
3064 memcpy (m2->sections, m->sections + i + 1,
3065 m2->count * sizeof (m->sections[0]));
3066 m2->p_type = PT_LOAD;
3067 m2->next = m->next;
3068 m->next = m2;
3070 m->count = 1;
3071 if (i != 0)
3073 m->count = i;
3074 amt = sizeof (struct elf_segment_map);
3075 m2 = bfd_zalloc (abfd, amt);
3076 if (m2 == NULL)
3077 return FALSE;
3078 m2->p_type = PT_LOAD;
3079 m2->count = 1;
3080 m2->sections[0] = s;
3081 m2->next = m->next;
3082 m->next = m2;
3084 break;
3087 return TRUE;
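/* For example (hypothetical layout): a PT_LOAD mapping the sections
   { .text, .ov1, .data }, where .ov1 is an overlay, is split into three
   segments { .text }, { .ov1 }, { .data }.  Section order is preserved;
   only the segment boundaries change, and the overlay segment can then
   be flagged in spu_elf_modify_program_headers below.  */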
3090 /* Check that all loadable section VMAs lie in the range
3091 LO .. HI inclusive. */
3093 asection *
3094 spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
3096 struct elf_segment_map *m;
3097 unsigned int i;
3099 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
3100 if (m->p_type == PT_LOAD)
3101 for (i = 0; i < m->count; i++)
3102 if (m->sections[i]->size != 0
3103 && (m->sections[i]->vma < lo
3104 || m->sections[i]->vma > hi
3105 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
3106 return m->sections[i];
3108 return NULL;
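/* This helper is deliberately not static; it is meant to be called from
   the linker side via elf32-spu.h.  It returns the first loaded section
   that does not fit entirely within [LO, HI].  For instance, with a
   hypothetical LO of 0 and HI of 0x3ffff (a 256k local store), a
   section placed at vma 0x40000, or one extending past 0x3ffff, would
   be returned as the offender.  */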
3111 /* Tweak the section type of .note.spu_name. */
3113 static bfd_boolean
3114 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
3115 Elf_Internal_Shdr *hdr,
3116 asection *sec)
3118 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
3119 hdr->sh_type = SHT_NOTE;
3120 return TRUE;
3123 /* Tweak phdrs before writing them out. */
3125 static int
3126 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
3128 const struct elf_backend_data *bed;
3129 struct elf_obj_tdata *tdata;
3130 Elf_Internal_Phdr *phdr, *last;
3131 struct spu_link_hash_table *htab;
3132 unsigned int count;
3133 unsigned int i;
3135 if (info == NULL)
3136 return TRUE;
3138 bed = get_elf_backend_data (abfd);
3139 tdata = elf_tdata (abfd);
3140 phdr = tdata->phdr;
3141 count = tdata->program_header_size / bed->s->sizeof_phdr;
3142 htab = spu_hash_table (info);
3143 if (htab->num_overlays != 0)
3145 struct elf_segment_map *m;
3146 unsigned int o;
3148 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
3149 if (m->count != 0
3150 && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
3152 /* Mark this as an overlay header. */
3153 phdr[i].p_flags |= PF_OVERLAY;
3155 if (htab->ovtab != NULL && htab->ovtab->size != 0)
3157 bfd_byte *p = htab->ovtab->contents;
3158 unsigned int off = (o - 1) * 16 + 8;
3160 /* Write file_off into _ovly_table. */
3161 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
3166 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
3167 of 16. This should always be possible when using the standard
3168 linker scripts, but don't create overlapping segments if
3169 someone is playing games with linker scripts. */
3170 last = NULL;
3171 for (i = count; i-- != 0; )
3172 if (phdr[i].p_type == PT_LOAD)
3174 unsigned adjust;
3176 adjust = -phdr[i].p_filesz & 15;
3177 if (adjust != 0
3178 && last != NULL
3179 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
3180 break;
3182 adjust = -phdr[i].p_memsz & 15;
3183 if (adjust != 0
3184 && last != NULL
3185 && phdr[i].p_filesz != 0
3186 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
3187 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
3188 break;
3190 if (phdr[i].p_filesz != 0)
3191 last = &phdr[i];
3194 if (i == (unsigned int) -1)
3195 for (i = count; i-- != 0; )
3196 if (phdr[i].p_type == PT_LOAD)
3198 unsigned adjust;
3200 adjust = -phdr[i].p_filesz & 15;
3201 phdr[i].p_filesz += adjust;
3203 adjust = -phdr[i].p_memsz & 15;
3204 phdr[i].p_memsz += adjust;
3207 return TRUE;
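/* Two things happen above: PT_LOAD headers whose first section belongs
   to an overlay are flagged PF_OVERLAY and have their p_offset written
   into the matching _ovly_table entry (each entry evidently being 16
   bytes, with the file offset as its third word); and PT_LOAD
   p_filesz/p_memsz are rounded up to a multiple of 16 where that can be
   done without overlapping the next segment, presumably because SPU DMA
   works in 16-byte quadwords.  The rounding rule itself is just the
   following (hypothetical helper, shown only for clarity).  */
#if 0
static bfd_vma
round_up_to_16 (bfd_vma size)
{
  /* -size & 15 is the distance to the next multiple of 16, or zero if
     SIZE is already a multiple of 16.  */
  return size + (-size & 15);
}
#endif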
3210 #define TARGET_BIG_SYM bfd_elf32_spu_vec
3211 #define TARGET_BIG_NAME "elf32-spu"
3212 #define ELF_ARCH bfd_arch_spu
3213 #define ELF_MACHINE_CODE EM_SPU
3214 /* This matches the alignment need for DMA. */
3215 #define ELF_MAXPAGESIZE 0x80
3216 #define elf_backend_rela_normal 1
3217 #define elf_backend_can_gc_sections 1
3219 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
3220 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
3221 #define elf_info_to_howto spu_elf_info_to_howto
3222 #define elf_backend_count_relocs spu_elf_count_relocs
3223 #define elf_backend_relocate_section spu_elf_relocate_section
3224 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
3225 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
3226 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
3227 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
3228 #define bfd_elf32_bfd_link_hash_table_free spu_elf_link_hash_table_free
3230 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
3231 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
3232 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
3233 #define elf_backend_post_process_headers spu_elf_post_process_headers
3234 #define elf_backend_fake_sections spu_elf_fake_sections
3235 #define elf_backend_special_sections spu_elf_special_sections
3236 #define bfd_elf32_bfd_final_link spu_elf_final_link
3238 #include "elf32-target.h"