bfd/elf32-spu.c
1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
21 #include "sysdep.h"
22 #include "bfd.h"
23 #include "bfdlink.h"
24 #include "libbfd.h"
25 #include "elf-bfd.h"
26 #include "elf/spu.h"
27 #include "elf32-spu.h"
29 /* We use RELA style relocs. Don't define USE_REL. */
31 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
32 void *, asection *,
33 bfd *, char **);
35 /* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
38 static reloc_howto_type elf_howto_table[] = {
39 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
40 bfd_elf_generic_reloc, "SPU_NONE",
41 FALSE, 0, 0x00000000, FALSE),
42 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
43 bfd_elf_generic_reloc, "SPU_ADDR10",
44 FALSE, 0, 0x00ffc000, FALSE),
45 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
46 bfd_elf_generic_reloc, "SPU_ADDR16",
47 FALSE, 0, 0x007fff80, FALSE),
48 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
49 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
50 FALSE, 0, 0x007fff80, FALSE),
51 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
52 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
53 FALSE, 0, 0x007fff80, FALSE),
54 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
55 bfd_elf_generic_reloc, "SPU_ADDR18",
56 FALSE, 0, 0x01ffff80, FALSE),
57 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "SPU_ADDR32",
59 FALSE, 0, 0xffffffff, FALSE),
60 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "SPU_REL16",
62 FALSE, 0, 0x007fff80, TRUE),
63 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
64 bfd_elf_generic_reloc, "SPU_ADDR7",
65 FALSE, 0, 0x001fc000, FALSE),
66 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
67 spu_elf_rel9, "SPU_REL9",
68 FALSE, 0, 0x0180007f, TRUE),
69 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
70 spu_elf_rel9, "SPU_REL9I",
71 FALSE, 0, 0x0000c07f, TRUE),
72 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
73 bfd_elf_generic_reloc, "SPU_ADDR10I",
74 FALSE, 0, 0x00ffc000, FALSE),
75 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
76 bfd_elf_generic_reloc, "SPU_ADDR16I",
77 FALSE, 0, 0x007fff80, FALSE),
78 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
79 bfd_elf_generic_reloc, "SPU_REL32",
80 FALSE, 0, 0xffffffff, TRUE),
81 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "SPU_ADDR16X",
83 FALSE, 0, 0x007fff80, FALSE),
84 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
85 bfd_elf_generic_reloc, "SPU_PPU32",
86 FALSE, 0, 0xffffffff, FALSE),
87 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
88 bfd_elf_generic_reloc, "SPU_PPU64",
89 FALSE, 0, -1, FALSE),
92 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
93 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
94 { NULL, 0, 0, 0, 0 }
97 static enum elf_spu_reloc_type
98 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
100 switch (code)
102 default:
103 return R_SPU_NONE;
104 case BFD_RELOC_SPU_IMM10W:
105 return R_SPU_ADDR10;
106 case BFD_RELOC_SPU_IMM16W:
107 return R_SPU_ADDR16;
108 case BFD_RELOC_SPU_LO16:
109 return R_SPU_ADDR16_LO;
110 case BFD_RELOC_SPU_HI16:
111 return R_SPU_ADDR16_HI;
112 case BFD_RELOC_SPU_IMM18:
113 return R_SPU_ADDR18;
114 case BFD_RELOC_SPU_PCREL16:
115 return R_SPU_REL16;
116 case BFD_RELOC_SPU_IMM7:
117 return R_SPU_ADDR7;
118 case BFD_RELOC_SPU_IMM8:
119 return R_SPU_NONE;
120 case BFD_RELOC_SPU_PCREL9a:
121 return R_SPU_REL9;
122 case BFD_RELOC_SPU_PCREL9b:
123 return R_SPU_REL9I;
124 case BFD_RELOC_SPU_IMM10:
125 return R_SPU_ADDR10I;
126 case BFD_RELOC_SPU_IMM16:
127 return R_SPU_ADDR16I;
128 case BFD_RELOC_32:
129 return R_SPU_ADDR32;
130 case BFD_RELOC_32_PCREL:
131 return R_SPU_REL32;
132 case BFD_RELOC_SPU_PPU32:
133 return R_SPU_PPU32;
134 case BFD_RELOC_SPU_PPU64:
135 return R_SPU_PPU64;
139 static void
140 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
141 arelent *cache_ptr,
142 Elf_Internal_Rela *dst)
144 enum elf_spu_reloc_type r_type;
146 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
147 BFD_ASSERT (r_type < R_SPU_max);
148 cache_ptr->howto = &elf_howto_table[(int) r_type];
151 static reloc_howto_type *
152 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
153 bfd_reloc_code_real_type code)
155 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
157 if (r_type == R_SPU_NONE)
158 return NULL;
160 return elf_howto_table + r_type;
163 static reloc_howto_type *
164 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
165 const char *r_name)
167 unsigned int i;
169 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
170 if (elf_howto_table[i].name != NULL
171 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
172 return &elf_howto_table[i];
174 return NULL;
177 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
179 static bfd_reloc_status_type
180 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
181 void *data, asection *input_section,
182 bfd *output_bfd, char **error_message)
184 bfd_size_type octets;
185 bfd_vma val;
186 long insn;
188 /* If this is a relocatable link (output_bfd test tells us), just
189 call the generic function. Any adjustment will be done at final
190 link time. */
191 if (output_bfd != NULL)
192 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
193 input_section, output_bfd, error_message);
195 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
196 return bfd_reloc_outofrange;
197 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
199 /* Get symbol value. */
200 val = 0;
201 if (!bfd_is_com_section (symbol->section))
202 val = symbol->value;
203 if (symbol->section->output_section)
204 val += symbol->section->output_section->vma;
206 val += reloc_entry->addend;
208 /* Make it pc-relative. */
209 val -= input_section->output_section->vma + input_section->output_offset;
211 val >>= 2;
212 if (val + 256 >= 512)
213 return bfd_reloc_overflow;
215 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
217 /* Move two high bits of value to REL9I and REL9 position.
218 The mask will take care of selecting the right field. */
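   /* Illustrative note: at this point VAL is effectively a signed 9-bit
      word offset.  Bits 0-6 stay at bits 0-6 of the insn, while bits 7-8
      are copied both to bits 14-15 (the REL9I field) and to bits 23-24
      (the REL9 field); the howto dst_mask (0x0180007f for REL9,
      0x0000c07f for REL9I) then keeps only the relevant copy.  */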
219 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
220 insn &= ~reloc_entry->howto->dst_mask;
221 insn |= val & reloc_entry->howto->dst_mask;
222 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
223 return bfd_reloc_ok;
226 static bfd_boolean
227 spu_elf_new_section_hook (bfd *abfd, asection *sec)
229 if (!sec->used_by_bfd)
231 struct _spu_elf_section_data *sdata;
233 sdata = bfd_zalloc (abfd, sizeof (*sdata));
234 if (sdata == NULL)
235 return FALSE;
236 sec->used_by_bfd = sdata;
239 return _bfd_elf_new_section_hook (abfd, sec);
242 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
243 strip --strip-unneeded will not remove them. */
245 static void
246 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
248 if (sym->name != NULL
249 && sym->section != bfd_abs_section_ptr
250 && strncmp (sym->name, "_EAR_", 5) == 0)
251 sym->flags |= BSF_KEEP;
254 /* SPU ELF linker hash table. */
256 struct spu_link_hash_table
258 struct elf_link_hash_table elf;
260 /* The stub hash table. */
261 struct bfd_hash_table stub_hash_table;
263 /* Shortcuts to overlay sections. */
264 asection *stub;
265 asection *ovtab;
267 struct elf_link_hash_entry *ovly_load;
269 /* An array of two output sections per overlay region, chosen such that
270 the first section vma is the overlay buffer vma (ie. the section has
271      the lowest vma in the group that occupies the region), and the second
272 section vma+size specifies the end of the region. We keep pointers
273 to sections like this because section vmas may change when laying
274 them out. */
275 asection **ovl_region;
277 /* Number of overlay buffers. */
278 unsigned int num_buf;
280 /* Total number of overlays. */
281 unsigned int num_overlays;
283 /* Set if we should emit symbols for stubs. */
284 unsigned int emit_stub_syms:1;
286 /* Set if we want stubs on calls out of overlay regions to
287 non-overlay regions. */
288 unsigned int non_overlay_stubs : 1;
290 /* Set on error. */
291 unsigned int stub_overflow : 1;
293 /* Set if stack size analysis should be done. */
294 unsigned int stack_analysis : 1;
296 /* Set if __stack_* syms will be emitted. */
297 unsigned int emit_stack_syms : 1;
300 #define spu_hash_table(p) \
301 ((struct spu_link_hash_table *) ((p)->hash))
303 struct spu_stub_hash_entry
305 struct bfd_hash_entry root;
307 /* Destination of this stub. */
308 asection *target_section;
309 bfd_vma target_off;
311 /* Offset of entry in stub section. */
312 bfd_vma off;
314 /* Offset from this stub to stub that loads the overlay index. */
315 bfd_vma delta;
318 /* Create an entry in a spu stub hash table. */
320 static struct bfd_hash_entry *
321 stub_hash_newfunc (struct bfd_hash_entry *entry,
322 struct bfd_hash_table *table,
323 const char *string)
325 /* Allocate the structure if it has not already been allocated by a
326 subclass. */
327 if (entry == NULL)
329 entry = bfd_hash_allocate (table, sizeof (struct spu_stub_hash_entry));
330 if (entry == NULL)
331 return entry;
334 /* Call the allocation method of the superclass. */
335 entry = bfd_hash_newfunc (entry, table, string);
336 if (entry != NULL)
338 struct spu_stub_hash_entry *sh = (struct spu_stub_hash_entry *) entry;
340 sh->target_section = NULL;
341 sh->target_off = 0;
342 sh->off = 0;
343 sh->delta = 0;
346 return entry;
349 /* Create a spu ELF linker hash table. */
351 static struct bfd_link_hash_table *
352 spu_elf_link_hash_table_create (bfd *abfd)
354 struct spu_link_hash_table *htab;
356 htab = bfd_malloc (sizeof (*htab));
357 if (htab == NULL)
358 return NULL;
360 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
361 _bfd_elf_link_hash_newfunc,
362 sizeof (struct elf_link_hash_entry)))
364 free (htab);
365 return NULL;
368 /* Init the stub hash table too. */
369 if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc,
370 sizeof (struct spu_stub_hash_entry)))
371 return NULL;
373 memset (&htab->stub, 0,
374 sizeof (*htab) - offsetof (struct spu_link_hash_table, stub));
376 return &htab->elf.root;
379 /* Free the derived linker hash table. */
381 static void
382 spu_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
384 struct spu_link_hash_table *ret = (struct spu_link_hash_table *) hash;
386 bfd_hash_table_free (&ret->stub_hash_table);
387 _bfd_generic_link_hash_table_free (hash);
390 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
391 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
392 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
394 static bfd_boolean
395 get_sym_h (struct elf_link_hash_entry **hp,
396 Elf_Internal_Sym **symp,
397 asection **symsecp,
398 Elf_Internal_Sym **locsymsp,
399 unsigned long r_symndx,
400 bfd *ibfd)
402 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
404 if (r_symndx >= symtab_hdr->sh_info)
406 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
407 struct elf_link_hash_entry *h;
409 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
410 while (h->root.type == bfd_link_hash_indirect
411 || h->root.type == bfd_link_hash_warning)
412 h = (struct elf_link_hash_entry *) h->root.u.i.link;
414 if (hp != NULL)
415 *hp = h;
417 if (symp != NULL)
418 *symp = NULL;
420 if (symsecp != NULL)
422 asection *symsec = NULL;
423 if (h->root.type == bfd_link_hash_defined
424 || h->root.type == bfd_link_hash_defweak)
425 symsec = h->root.u.def.section;
426 *symsecp = symsec;
429 else
431 Elf_Internal_Sym *sym;
432 Elf_Internal_Sym *locsyms = *locsymsp;
434 if (locsyms == NULL)
436 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
437 if (locsyms == NULL)
439 size_t symcount = symtab_hdr->sh_info;
441 /* If we are reading symbols into the contents, then
442 read the global syms too. This is done to cache
443 syms for later stack analysis. */
444 if ((unsigned char **) locsymsp == &symtab_hdr->contents)
445 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
446 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
447 NULL, NULL, NULL);
449 if (locsyms == NULL)
450 return FALSE;
451 *locsymsp = locsyms;
453 sym = locsyms + r_symndx;
455 if (hp != NULL)
456 *hp = NULL;
458 if (symp != NULL)
459 *symp = sym;
461 if (symsecp != NULL)
463 asection *symsec = NULL;
464 if ((sym->st_shndx != SHN_UNDEF
465 && sym->st_shndx < SHN_LORESERVE)
466 || sym->st_shndx > SHN_HIRESERVE)
467 symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
468 *symsecp = symsec;
472 return TRUE;
475 /* Build a name for an entry in the stub hash table. We can't use a
476 local symbol name because ld -r might generate duplicate local symbols. */
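/* For example (illustrative): a stub for global function "foo" with a zero
   addend is named simply "foo" (the trailing "+0" is trimmed below), while a
   stub for local symbol 5 in the section with id 3 and addend 8 is named
   "3:5+8".  */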
478 static char *
479 spu_stub_name (const asection *sym_sec,
480 const struct elf_link_hash_entry *h,
481 const Elf_Internal_Rela *rel)
483 char *stub_name;
484 bfd_size_type len;
486 if (h)
488 len = strlen (h->root.root.string) + 1 + 8 + 1;
489 stub_name = bfd_malloc (len);
490 if (stub_name == NULL)
491 return stub_name;
493 sprintf (stub_name, "%s+%x",
494 h->root.root.string,
495 (int) rel->r_addend & 0xffffffff);
496 len -= 8;
498 else
500 len = 8 + 1 + 8 + 1 + 8 + 1;
501 stub_name = bfd_malloc (len);
502 if (stub_name == NULL)
503 return stub_name;
505 sprintf (stub_name, "%x:%x+%x",
506 sym_sec->id & 0xffffffff,
507 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
508 (int) rel->r_addend & 0xffffffff);
509 len = strlen (stub_name);
512 if (stub_name[len - 2] == '+'
513 && stub_name[len - 1] == '0'
514 && stub_name[len] == 0)
515 stub_name[len - 2] = 0;
517 return stub_name;
520 /* Create the note section if not already present. This is done early so
521 that the linker maps the sections to the right place in the output. */
523 bfd_boolean
524 spu_elf_create_sections (bfd *output_bfd,
525 struct bfd_link_info *info,
526 int stack_analysis,
527 int emit_stack_syms)
529 bfd *ibfd;
530 struct spu_link_hash_table *htab = spu_hash_table (info);
532 /* Stash some options away where we can get at them later. */
533 htab->stack_analysis = stack_analysis;
534 htab->emit_stack_syms = emit_stack_syms;
536 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
537 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
538 break;
540 if (ibfd == NULL)
542 /* Make SPU_PTNOTE_SPUNAME section. */
543 asection *s;
544 size_t name_len;
545 size_t size;
546 bfd_byte *data;
547 flagword flags;
549 ibfd = info->input_bfds;
550 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
551 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
552 if (s == NULL
553 || !bfd_set_section_alignment (ibfd, s, 4))
554 return FALSE;
556 name_len = strlen (bfd_get_filename (output_bfd)) + 1;
557 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
558 size += (name_len + 3) & -4;
560 if (!bfd_set_section_size (ibfd, s, size))
561 return FALSE;
563 data = bfd_zalloc (ibfd, size);
564 if (data == NULL)
565 return FALSE;
567 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
568 bfd_put_32 (ibfd, name_len, data + 4);
569 bfd_put_32 (ibfd, 1, data + 8);
570 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
571 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
572 bfd_get_filename (output_bfd), name_len);
573 s->contents = data;
576 return TRUE;
579 /* qsort predicate to sort sections by vma. */
581 static int
582 sort_sections (const void *a, const void *b)
584 const asection *const *s1 = a;
585 const asection *const *s2 = b;
586 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
588 if (delta != 0)
589 return delta < 0 ? -1 : 1;
591 return (*s1)->index - (*s2)->index;
594 /* Identify overlays in the output bfd, and number them. */
596 bfd_boolean
597 spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
599 struct spu_link_hash_table *htab = spu_hash_table (info);
600 asection **alloc_sec;
601 unsigned int i, n, ovl_index, num_buf;
602 asection *s;
603 bfd_vma ovl_end;
605 if (output_bfd->section_count < 2)
606 return FALSE;
608 alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
609 if (alloc_sec == NULL)
610 return FALSE;
612 /* Pick out all the alloced sections. */
613 for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
614 if ((s->flags & SEC_ALLOC) != 0
615 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
616 && s->size != 0)
617 alloc_sec[n++] = s;
619 if (n == 0)
621 free (alloc_sec);
622 return FALSE;
625 /* Sort them by vma. */
626 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
628 /* Look for overlapping vmas. Any with overlap must be overlays.
629 Count them. Also count the number of overlay regions and for
630 each region save a section from that region with the lowest vma
631 and another section with the highest end vma. */
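  /* Illustrative example: if output sections .ovl1 and .ovl2 are both laid
     out at vma 0x1000, they overlap, so each gets a non-zero ovl_index
     (1 and 2) and together they define a single overlay buffer/region
     (num_buf == 1).  */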
632 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
633 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
635 s = alloc_sec[i];
636 if (s->vma < ovl_end)
638 asection *s0 = alloc_sec[i - 1];
640 if (spu_elf_section_data (s0)->ovl_index == 0)
642 spu_elf_section_data (s0)->ovl_index = ++ovl_index;
643 alloc_sec[num_buf * 2] = s0;
644 alloc_sec[num_buf * 2 + 1] = s0;
645 num_buf++;
647 spu_elf_section_data (s)->ovl_index = ++ovl_index;
648 if (ovl_end < s->vma + s->size)
650 ovl_end = s->vma + s->size;
651 alloc_sec[num_buf * 2 - 1] = s;
654 else
655 ovl_end = s->vma + s->size;
658 htab->num_overlays = ovl_index;
659 htab->num_buf = num_buf;
660 if (ovl_index == 0)
662 free (alloc_sec);
663 return FALSE;
666 alloc_sec = bfd_realloc (alloc_sec, num_buf * 2 * sizeof (*alloc_sec));
667 if (alloc_sec == NULL)
668 return FALSE;
670 htab->ovl_region = alloc_sec;
671 return TRUE;
674 /* One of these per stub. */
675 #define SIZEOF_STUB1 8
676 #define ILA_79 0x4200004f /* ila $79,function_address */
677 #define BR 0x32000000 /* br stub2 */
679 /* One of these per overlay. */
680 #define SIZEOF_STUB2 8
681 #define ILA_78 0x4200004e /* ila $78,overlay_number */
682 /* br __ovly_load */
683 #define NOP 0x40200000
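/* Illustrative note: together these form an 8-byte per-function stub
   (ila $79 / br) plus an 8-byte per-overlay tail (ila $78 / br __ovly_load);
   the br of the last per-function stub in a group is overwritten with a nop.
   See the layout comment in spu_elf_size_stubs.  */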
685 /* Return true for all relative and absolute branch instructions.
686 bra 00110000 0..
687 brasl 00110001 0..
688 br 00110010 0..
689 brsl 00110011 0..
690 brz 00100000 0..
691 brnz 00100001 0..
692 brhz 00100010 0..
693 brhnz 00100011 0.. */
695 static bfd_boolean
696 is_branch (const unsigned char *insn)
698 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
701 /* Return true for branch hint instructions.
702 hbra 0001000..
703 hbrr 0001001.. */
705 static bfd_boolean
706 is_hint (const unsigned char *insn)
708 return (insn[0] & 0xfc) == 0x10;
711 /* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
713 static bfd_boolean
714 needs_ovl_stub (const char *sym_name,
715 asection *sym_sec,
716 asection *input_section,
717 struct spu_link_hash_table *htab,
718 bfd_boolean is_branch)
720 if (htab->num_overlays == 0)
721 return FALSE;
723 if (sym_sec == NULL
724 || sym_sec->output_section == NULL
725 || spu_elf_section_data (sym_sec->output_section) == NULL)
726 return FALSE;
728 /* setjmp always goes via an overlay stub, because then the return
729 and hence the longjmp goes via __ovly_return. That magically
730 makes setjmp/longjmp between overlays work. */
731 if (strncmp (sym_name, "setjmp", 6) == 0
732 && (sym_name[6] == '\0' || sym_name[6] == '@'))
733 return TRUE;
735 /* Usually, symbols in non-overlay sections don't need stubs. */
736 if (spu_elf_section_data (sym_sec->output_section)->ovl_index == 0
737 && !htab->non_overlay_stubs)
738 return FALSE;
740 /* A reference from some other section to a symbol in an overlay
741 section needs a stub. */
742 if (spu_elf_section_data (sym_sec->output_section)->ovl_index
743 != spu_elf_section_data (input_section->output_section)->ovl_index)
744 return TRUE;
746 /* If this insn isn't a branch then we are possibly taking the
747 address of a function and passing it out somehow. */
748 return !is_branch;
751 struct stubarr {
752 struct bfd_hash_table *stub_hash_table;
753 struct spu_stub_hash_entry **sh;
754 unsigned int count;
755 int err;
758 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
759 symbols. */
761 static bfd_boolean
762 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
764 /* Symbols starting with _SPUEAR_ need a stub because they may be
765 invoked by the PPU. */
766 if ((h->root.type == bfd_link_hash_defined
767 || h->root.type == bfd_link_hash_defweak)
768 && h->def_regular
769 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
771 struct stubarr *stubs = inf;
772 static Elf_Internal_Rela zero_rel;
773 char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
774 struct spu_stub_hash_entry *sh;
776 if (stub_name == NULL)
778 stubs->err = 1;
779 return FALSE;
782 sh = (struct spu_stub_hash_entry *)
783 bfd_hash_lookup (stubs->stub_hash_table, stub_name, TRUE, FALSE);
784 if (sh == NULL)
786 free (stub_name);
787 return FALSE;
790 /* If this entry isn't new, we already have a stub. */
791 if (sh->target_section != NULL)
793 free (stub_name);
794 return TRUE;
797 sh->target_section = h->root.u.def.section;
798 sh->target_off = h->root.u.def.value;
799 stubs->count += 1;
802 return TRUE;
805 /* Called via bfd_hash_traverse to set up pointers to all symbols
806 in the stub hash table. */
808 static bfd_boolean
809 populate_stubs (struct bfd_hash_entry *bh, void *inf)
811 struct stubarr *stubs = inf;
813 stubs->sh[--stubs->count] = (struct spu_stub_hash_entry *) bh;
814 return TRUE;
817 /* qsort predicate to sort stubs by overlay number. */
819 static int
820 sort_stubs (const void *a, const void *b)
822 const struct spu_stub_hash_entry *const *sa = a;
823 const struct spu_stub_hash_entry *const *sb = b;
824 int i;
825 bfd_signed_vma d;
827 i = spu_elf_section_data ((*sa)->target_section->output_section)->ovl_index;
828 i -= spu_elf_section_data ((*sb)->target_section->output_section)->ovl_index;
829 if (i != 0)
830 return i;
832 d = ((*sa)->target_section->output_section->vma
833 + (*sa)->target_section->output_offset
834 + (*sa)->target_off
835 - (*sb)->target_section->output_section->vma
836 - (*sb)->target_section->output_offset
837 - (*sb)->target_off);
838 if (d != 0)
839 return d < 0 ? -1 : 1;
841 /* Two functions at the same address. Aliases perhaps. */
842 i = strcmp ((*sb)->root.string, (*sa)->root.string);
843 BFD_ASSERT (i != 0);
844 return i;
847 /* Allocate space for overlay call and return stubs. */
849 bfd_boolean
850 spu_elf_size_stubs (bfd *output_bfd,
851 struct bfd_link_info *info,
852 int non_overlay_stubs,
853 int stack_analysis,
854 asection **stub,
855 asection **ovtab,
856 asection **toe)
858 struct spu_link_hash_table *htab = spu_hash_table (info);
859 bfd *ibfd;
860 struct stubarr stubs;
861 unsigned i, group;
862 flagword flags;
864 htab->non_overlay_stubs = non_overlay_stubs;
865 stubs.stub_hash_table = &htab->stub_hash_table;
866 stubs.count = 0;
867 stubs.err = 0;
868 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
870 extern const bfd_target bfd_elf32_spu_vec;
871 Elf_Internal_Shdr *symtab_hdr;
872 asection *section;
873 Elf_Internal_Sym *local_syms = NULL;
874 void *psyms;
876 if (ibfd->xvec != &bfd_elf32_spu_vec)
877 continue;
879 /* We'll need the symbol table in a second. */
880 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
881 if (symtab_hdr->sh_info == 0)
882 continue;
884 /* Arrange to read and keep global syms for later stack analysis. */
885 psyms = &local_syms;
886 if (stack_analysis)
887 psyms = &symtab_hdr->contents;
889 /* Walk over each section attached to the input bfd. */
890 for (section = ibfd->sections; section != NULL; section = section->next)
892 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
894 /* If there aren't any relocs, then there's nothing more to do. */
895 if ((section->flags & SEC_RELOC) == 0
896 || (section->flags & SEC_ALLOC) == 0
897 || (section->flags & SEC_LOAD) == 0
898 || section->reloc_count == 0)
899 continue;
901 /* If this section is a link-once section that will be
902 discarded, then don't create any stubs. */
903 if (section->output_section == NULL
904 || section->output_section->owner != output_bfd)
905 continue;
907 /* Get the relocs. */
908 internal_relocs
909 = _bfd_elf_link_read_relocs (ibfd, section, NULL, NULL,
910 info->keep_memory);
911 if (internal_relocs == NULL)
912 goto error_ret_free_local;
914 /* Now examine each relocation. */
915 irela = internal_relocs;
916 irelaend = irela + section->reloc_count;
917 for (; irela < irelaend; irela++)
919 enum elf_spu_reloc_type r_type;
920 unsigned int r_indx;
921 asection *sym_sec;
922 Elf_Internal_Sym *sym;
923 struct elf_link_hash_entry *h;
924 const char *sym_name;
925 char *stub_name;
926 struct spu_stub_hash_entry *sh;
927 unsigned int sym_type;
928 enum _insn_type { non_branch, branch, call } insn_type;
930 r_type = ELF32_R_TYPE (irela->r_info);
931 r_indx = ELF32_R_SYM (irela->r_info);
933 if (r_type >= R_SPU_max)
935 bfd_set_error (bfd_error_bad_value);
936 goto error_ret_free_internal;
939 /* Determine the reloc target section. */
940 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
941 goto error_ret_free_internal;
943 if (sym_sec == NULL
944 || sym_sec->output_section == NULL
945 || sym_sec->output_section->owner != output_bfd)
946 continue;
948 /* Ensure no stubs for user supplied overlay manager syms. */
949 if (h != NULL
950 && (strcmp (h->root.root.string, "__ovly_load") == 0
951 || strcmp (h->root.root.string, "__ovly_return") == 0))
952 continue;
954 insn_type = non_branch;
955 if (r_type == R_SPU_REL16
956 || r_type == R_SPU_ADDR16)
958 unsigned char insn[4];
960 if (!bfd_get_section_contents (ibfd, section, insn,
961 irela->r_offset, 4))
962 goto error_ret_free_internal;
964 if (is_branch (insn) || is_hint (insn))
966 insn_type = branch;
967 if ((insn[0] & 0xfd) == 0x31)
968 insn_type = call;
972 /* We are only interested in function symbols. */
973 if (h != NULL)
975 sym_type = h->type;
976 sym_name = h->root.root.string;
978 else
980 sym_type = ELF_ST_TYPE (sym->st_info);
981 sym_name = bfd_elf_sym_name (sym_sec->owner,
982 symtab_hdr,
983 sym,
984 sym_sec);
986 if (sym_type != STT_FUNC)
988 /* It's common for people to write assembly and forget
989 to give function symbols the right type. Handle
990 calls to such symbols, but warn so that (hopefully)
991 people will fix their code. We need the symbol
992 type to be correct to distinguish function pointer
993 initialisation from other pointer initialisation. */
994 if (insn_type == call)
995 (*_bfd_error_handler) (_("warning: call to non-function"
996 " symbol %s defined in %B"),
997 sym_sec->owner, sym_name);
998 else
999 continue;
1002 if (!needs_ovl_stub (sym_name, sym_sec, section, htab,
1003 insn_type != non_branch))
1004 continue;
1006 stub_name = spu_stub_name (sym_sec, h, irela);
1007 if (stub_name == NULL)
1008 goto error_ret_free_internal;
1010 sh = (struct spu_stub_hash_entry *)
1011 bfd_hash_lookup (&htab->stub_hash_table, stub_name,
1012 TRUE, FALSE);
1013 if (sh == NULL)
1015 free (stub_name);
1016 error_ret_free_internal:
1017 if (elf_section_data (section)->relocs != internal_relocs)
1018 free (internal_relocs);
1019 error_ret_free_local:
1020 if (local_syms != NULL
1021 && (symtab_hdr->contents
1022 != (unsigned char *) local_syms))
1023 free (local_syms);
1024 return FALSE;
1027 /* If this entry isn't new, we already have a stub. */
1028 if (sh->target_section != NULL)
1030 free (stub_name);
1031 continue;
1034 sh->target_section = sym_sec;
1035 if (h != NULL)
1036 sh->target_off = h->root.u.def.value;
1037 else
1038 sh->target_off = sym->st_value;
1039 sh->target_off += irela->r_addend;
1041 stubs.count += 1;
1044 /* We're done with the internal relocs, free them. */
1045 if (elf_section_data (section)->relocs != internal_relocs)
1046 free (internal_relocs);
1049 if (local_syms != NULL
1050 && symtab_hdr->contents != (unsigned char *) local_syms)
1052 if (!info->keep_memory)
1053 free (local_syms);
1054 else
1055 symtab_hdr->contents = (unsigned char *) local_syms;
1059 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, &stubs);
1060 if (stubs.err)
1061 return FALSE;
1063 *stub = NULL;
1064 if (stubs.count == 0)
1065 return TRUE;
1067 ibfd = info->input_bfds;
1068 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1069 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1070 htab->stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1071 *stub = htab->stub;
1072 if (htab->stub == NULL
1073 || !bfd_set_section_alignment (ibfd, htab->stub, 2))
1074 return FALSE;
1076 flags = (SEC_ALLOC | SEC_LOAD
1077 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1078 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1079 *ovtab = htab->ovtab;
1080 if (htab->ovtab == NULL
1081       || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1082 return FALSE;
1084 *toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1085 if (*toe == NULL
1086 || !bfd_set_section_alignment (ibfd, *toe, 4))
1087 return FALSE;
1088 (*toe)->size = 16;
1090 /* Retrieve all the stubs and sort. */
1091 stubs.sh = bfd_malloc (stubs.count * sizeof (*stubs.sh));
1092 if (stubs.sh == NULL)
1093 return FALSE;
1094 i = stubs.count;
1095 bfd_hash_traverse (&htab->stub_hash_table, populate_stubs, &stubs);
1096 BFD_ASSERT (stubs.count == 0);
1098 stubs.count = i;
1099 qsort (stubs.sh, stubs.count, sizeof (*stubs.sh), sort_stubs);
1101 /* Now that the stubs are sorted, place them in the stub section.
1102 Stubs are grouped per overlay
1103 . ila $79,func1
1104 . br 1f
1105 . ila $79,func2
1106 . br 1f
1109 . ila $79,funcn
1110 . nop
1111 . 1:
1112 . ila $78,ovl_index
1113 . br __ovly_load */
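  /* Illustrative note on the sizing loop below: every distinct target gets
     an 8-byte SIZEOF_STUB1 entry, stubs for the same target address within
     a group share one entry, and each overlay group is terminated by one
     8-byte SIZEOF_STUB2 tail.  */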
1115 group = 0;
1116 for (i = 0; i < stubs.count; i++)
1118 if (spu_elf_section_data (stubs.sh[group]->target_section
1119 ->output_section)->ovl_index
1120 != spu_elf_section_data (stubs.sh[i]->target_section
1121 ->output_section)->ovl_index)
1123 htab->stub->size += SIZEOF_STUB2;
1124 for (; group != i; group++)
1125 stubs.sh[group]->delta
1126 = stubs.sh[i - 1]->off - stubs.sh[group]->off;
1128 if (group == i
1129 || ((stubs.sh[i - 1]->target_section->output_section->vma
1130 + stubs.sh[i - 1]->target_section->output_offset
1131 + stubs.sh[i - 1]->target_off)
1132 != (stubs.sh[i]->target_section->output_section->vma
1133 + stubs.sh[i]->target_section->output_offset
1134 + stubs.sh[i]->target_off)))
1136 stubs.sh[i]->off = htab->stub->size;
1137 htab->stub->size += SIZEOF_STUB1;
1139 else
1140 stubs.sh[i]->off = stubs.sh[i - 1]->off;
1142 if (group != i)
1143 htab->stub->size += SIZEOF_STUB2;
1144 for (; group != i; group++)
1145 stubs.sh[group]->delta = stubs.sh[i - 1]->off - stubs.sh[group]->off;
1147 /* htab->ovtab consists of two arrays.
1148 . struct {
1149 . u32 vma;
1150 . u32 size;
1151 . u32 file_off;
1152 . u32 buf;
1153 . } _ovly_table[];
1155 . struct {
1156 . u32 mapped;
1157 . } _ovly_buf_table[]; */
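  /* Illustrative note: each _ovly_table entry is 16 bytes and each
     _ovly_buf_table entry 4 bytes, which gives the size calculation below.
     "buf" is the 1-based index of the buffer an overlay occupies; "mapped"
     is written as zero here and is presumably maintained by the overlay
     manager at run time.  */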
1159 htab->ovtab->alignment_power = 4;
1160 htab->ovtab->size = htab->num_overlays * 16 + htab->num_buf * 4;
1162 return TRUE;
1165 /* Functions to handle embedded spu_ovl.o object. */
1167 static void *
1168 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1170 return stream;
1173 static file_ptr
1174 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1175 void *stream,
1176 void *buf,
1177 file_ptr nbytes,
1178 file_ptr offset)
1180 struct _ovl_stream *os;
1181 size_t count;
1182 size_t max;
1184 os = (struct _ovl_stream *) stream;
1185 max = (const char *) os->end - (const char *) os->start;
1187 if ((ufile_ptr) offset >= max)
1188 return 0;
1190 count = nbytes;
1191 if (count > max - offset)
1192 count = max - offset;
1194 memcpy (buf, (const char *) os->start + offset, count);
1195 return count;
1198 bfd_boolean
1199 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1201 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1202 "elf32-spu",
1203 ovl_mgr_open,
1204 (void *) stream,
1205 ovl_mgr_pread,
1206 NULL,
1207 NULL);
1208 return *ovl_bfd != NULL;
1211 /* Fill in the ila and br for a stub. On the last stub for a group,
1212 write the stub that sets the overlay number too. */
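/* Illustrative note on the encodings below: the ila immediate is the 18-bit
   field at bits 7-24 (mask 0x01ffff80) and holds a byte address, hence the
   << 7; the br immediate is the 16-bit field at bits 7-22 (mask 0x007fff80)
   and holds a word offset, hence << 5 rather than << 7.  */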
1214 static bfd_boolean
1215 write_one_stub (struct bfd_hash_entry *bh, void *inf)
1217 struct spu_stub_hash_entry *ent = (struct spu_stub_hash_entry *) bh;
1218 struct spu_link_hash_table *htab = inf;
1219 asection *sec = htab->stub;
1220 asection *s = ent->target_section;
1221 unsigned int ovl;
1222 bfd_vma val;
1224 val = ent->target_off + s->output_offset + s->output_section->vma;
1225 bfd_put_32 (sec->owner, ILA_79 + ((val << 7) & 0x01ffff80),
1226 sec->contents + ent->off);
1227 val = ent->delta + 4;
1228 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
1229 sec->contents + ent->off + 4);
1231 /* If this is the last stub of this group, write stub2. */
1232 if (ent->delta == 0)
1234 bfd_put_32 (sec->owner, NOP,
1235 sec->contents + ent->off + 4);
1237 ovl = spu_elf_section_data (s->output_section)->ovl_index;
1238 bfd_put_32 (sec->owner, ILA_78 + ((ovl << 7) & 0x01ffff80),
1239 sec->contents + ent->off + 8);
1241 val = (htab->ovly_load->root.u.def.section->output_section->vma
1242 + htab->ovly_load->root.u.def.section->output_offset
1243 + htab->ovly_load->root.u.def.value
1244 - (sec->output_section->vma
1245 + sec->output_offset
1246 + ent->off + 12));
1248 if (val + 0x20000 >= 0x40000)
1249 htab->stub_overflow = TRUE;
1251 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
1252 sec->contents + ent->off + 12);
1255 if (htab->emit_stub_syms)
1257 struct elf_link_hash_entry *h;
1258 size_t len1, len2;
1259 char *name;
1261 len1 = sizeof ("00000000.ovl_call.") - 1;
1262 len2 = strlen (ent->root.string);
1263 name = bfd_malloc (len1 + len2 + 1);
1264 if (name == NULL)
1265 return FALSE;
1266 memcpy (name, "00000000.ovl_call.", len1);
1267 memcpy (name + len1, ent->root.string, len2 + 1);
1268 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1269 free (name);
1270 if (h == NULL)
1271 return FALSE;
1272 if (h->root.type == bfd_link_hash_new)
1274 h->root.type = bfd_link_hash_defined;
1275 h->root.u.def.section = sec;
1276 h->root.u.def.value = ent->off;
1277 h->size = (ent->delta == 0
1278 ? SIZEOF_STUB1 + SIZEOF_STUB2 : SIZEOF_STUB1);
1279 h->type = STT_FUNC;
1280 h->ref_regular = 1;
1281 h->def_regular = 1;
1282 h->ref_regular_nonweak = 1;
1283 h->forced_local = 1;
1284 h->non_elf = 0;
1288 return TRUE;
1291 /* Define an STT_OBJECT symbol. */
1293 static struct elf_link_hash_entry *
1294 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1296 struct elf_link_hash_entry *h;
1298 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1299 if (h == NULL)
1300 return NULL;
1302 if (h->root.type != bfd_link_hash_defined
1303 || !h->def_regular)
1305 h->root.type = bfd_link_hash_defined;
1306 h->root.u.def.section = htab->ovtab;
1307 h->type = STT_OBJECT;
1308 h->ref_regular = 1;
1309 h->def_regular = 1;
1310 h->ref_regular_nonweak = 1;
1311 h->non_elf = 0;
1313 else
1315 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1316 h->root.u.def.section->owner,
1317 h->root.root.string);
1318 bfd_set_error (bfd_error_bad_value);
1319 return NULL;
1322 return h;
1325 /* Fill in all stubs and the overlay tables. */
1327 bfd_boolean
1328 spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms, asection *toe)
1330 struct spu_link_hash_table *htab = spu_hash_table (info);
1331 struct elf_link_hash_entry *h;
1332 bfd_byte *p;
1333 asection *s;
1334 bfd *obfd;
1335 unsigned int i;
1337 htab->emit_stub_syms = emit_syms;
1338 htab->stub->contents = bfd_zalloc (htab->stub->owner, htab->stub->size);
1339 if (htab->stub->contents == NULL)
1340 return FALSE;
1342 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1343 htab->ovly_load = h;
1344 BFD_ASSERT (h != NULL
1345 && (h->root.type == bfd_link_hash_defined
1346 || h->root.type == bfd_link_hash_defweak)
1347 && h->def_regular);
1349 s = h->root.u.def.section->output_section;
1350 if (spu_elf_section_data (s)->ovl_index)
1352 (*_bfd_error_handler) (_("%s in overlay section"),
1353                              h->root.root.string);
1354 bfd_set_error (bfd_error_bad_value);
1355 return FALSE;
1358 /* Write out all the stubs. */
1359 bfd_hash_traverse (&htab->stub_hash_table, write_one_stub, htab);
1361 if (htab->stub_overflow)
1363 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1364 bfd_set_error (bfd_error_bad_value);
1365 return FALSE;
1368 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1369 if (htab->ovtab->contents == NULL)
1370 return FALSE;
1372 /* Write out _ovly_table. */
1373 p = htab->ovtab->contents;
1374 obfd = htab->ovtab->output_section->owner;
1375 for (s = obfd->sections; s != NULL; s = s->next)
1377 unsigned int ovl_index = spu_elf_section_data (s)->ovl_index;
1379 if (ovl_index != 0)
1381 unsigned int lo, hi, mid;
1382 unsigned long off = (ovl_index - 1) * 16;
1383 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1384 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1385 /* file_off written later in spu_elf_modify_program_headers. */
1387 lo = 0;
1388 hi = htab->num_buf;
1389 while (lo < hi)
1391 mid = (lo + hi) >> 1;
1392 if (htab->ovl_region[2 * mid + 1]->vma
1393 + htab->ovl_region[2 * mid + 1]->size <= s->vma)
1394 lo = mid + 1;
1395 else if (htab->ovl_region[2 * mid]->vma > s->vma)
1396 hi = mid;
1397 else
1399 bfd_put_32 (htab->ovtab->owner, mid + 1, p + off + 12);
1400 break;
1403 BFD_ASSERT (lo < hi);
1407 /* Write out _ovly_buf_table. */
1408 p = htab->ovtab->contents + htab->num_overlays * 16;
1409 for (i = 0; i < htab->num_buf; i++)
1411 bfd_put_32 (htab->ovtab->owner, 0, p);
1412 p += 4;
1415 h = define_ovtab_symbol (htab, "_ovly_table");
1416 if (h == NULL)
1417 return FALSE;
1418 h->root.u.def.value = 0;
1419 h->size = htab->num_overlays * 16;
1421 h = define_ovtab_symbol (htab, "_ovly_table_end");
1422 if (h == NULL)
1423 return FALSE;
1424 h->root.u.def.value = htab->num_overlays * 16;
1425 h->size = 0;
1427 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1428 if (h == NULL)
1429 return FALSE;
1430 h->root.u.def.value = htab->num_overlays * 16;
1431 h->size = htab->num_buf * 4;
1433 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1434 if (h == NULL)
1435 return FALSE;
1436 h->root.u.def.value = htab->num_overlays * 16 + htab->num_buf * 4;
1437 h->size = 0;
1439 h = define_ovtab_symbol (htab, "_EAR_");
1440 if (h == NULL)
1441 return FALSE;
1442 h->root.u.def.section = toe;
1443 h->root.u.def.value = 0;
1444 h->size = 16;
1446 return TRUE;
1449 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1450 Search for stack adjusting insns, and return the sp delta. */
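/* Illustrative example: given a prologue such as
   .	stqd	$0,16($1)
   .	stqd	$1,-64($1)
   .	ai	$1,$1,-64
   the loop below skips the stqd insns and returns -64 on seeing the "ai"
   that adjusts $sp ($1); maybe_insert_function negates that to record a
   stack usage of 64 bytes.  */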
1452 static int
1453 find_function_stack_adjust (asection *sec, bfd_vma offset)
1455 int unrecog;
1456 int reg[128];
1458 memset (reg, 0, sizeof (reg));
1459 for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
1461 unsigned char buf[4];
1462 int rt, ra;
1463 int imm;
1465       /* Assume no relocs on stack adjusting insns. */
1466 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
1467 break;
1469 if (buf[0] == 0x24 /* stqd */)
1470 continue;
1472 rt = buf[3] & 0x7f;
1473 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
1474 /* Partly decoded immediate field. */
1475 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
1477 if (buf[0] == 0x1c /* ai */)
1479 imm >>= 7;
1480 imm = (imm ^ 0x200) - 0x200;
1481 reg[rt] = reg[ra] + imm;
1483 if (rt == 1 /* sp */)
1485 if (imm > 0)
1486 break;
1487 return reg[rt];
1490 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
1492 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
1494 reg[rt] = reg[ra] + reg[rb];
1495 if (rt == 1)
1496 return reg[rt];
1498 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1500 if (buf[0] >= 0x42 /* ila */)
1501 imm |= (buf[0] & 1) << 17;
1502 else
1504 imm &= 0xffff;
1506 if (buf[0] == 0x40 /* il */)
1508 if ((buf[1] & 0x80) == 0)
1509 goto unknown_insn;
1510 imm = (imm ^ 0x8000) - 0x8000;
1512 else if ((buf[1] & 0x80) == 0 /* ilhu */)
1513 imm <<= 16;
1515 reg[rt] = imm;
1516 continue;
1518 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
1520 reg[rt] |= imm & 0xffff;
1521 continue;
1523 else if (buf[0] == 0x04 /* ori */)
1525 imm >>= 7;
1526 imm = (imm ^ 0x200) - 0x200;
1527 reg[rt] = reg[ra] | imm;
1528 continue;
1530 else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
1531 || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
1533 /* Used in pic reg load. Say rt is trashed. */
1534 reg[rt] = 0;
1535 continue;
1537 else if (is_branch (buf))
1538 /* If we hit a branch then we must be out of the prologue. */
1539 break;
1540 unknown_insn:
1541 ++unrecog;
1544 return 0;
1547 /* qsort predicate to sort symbols by section and value. */
1549 static Elf_Internal_Sym *sort_syms_syms;
1550 static asection **sort_syms_psecs;
1552 static int
1553 sort_syms (const void *a, const void *b)
1555 Elf_Internal_Sym *const *s1 = a;
1556 Elf_Internal_Sym *const *s2 = b;
1557 asection *sec1,*sec2;
1558 bfd_signed_vma delta;
1560 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1561 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1563 if (sec1 != sec2)
1564 return sec1->index - sec2->index;
1566 delta = (*s1)->st_value - (*s2)->st_value;
1567 if (delta != 0)
1568 return delta < 0 ? -1 : 1;
1570 delta = (*s2)->st_size - (*s1)->st_size;
1571 if (delta != 0)
1572 return delta < 0 ? -1 : 1;
1574 return *s1 < *s2 ? -1 : 1;
1577 struct call_info
1579 struct function_info *fun;
1580 struct call_info *next;
1581 int is_tail;
1584 struct function_info
1586 /* List of functions called. Also branches to hot/cold part of
1587 function. */
1588 struct call_info *call_list;
1589 /* For hot/cold part of function, point to owner. */
1590 struct function_info *start;
1591 /* Symbol at start of function. */
1592 union {
1593 Elf_Internal_Sym *sym;
1594 struct elf_link_hash_entry *h;
1595 } u;
1596 /* Function section. */
1597 asection *sec;
1598 /* Address range of (this part of) function. */
1599 bfd_vma lo, hi;
1600 /* Stack usage. */
1601 int stack;
1602 /* Set if global symbol. */
1603 unsigned int global : 1;
1604 /* Set if known to be start of function (as distinct from a hunk
1605      in hot/cold section). */
1606 unsigned int is_func : 1;
1607 /* Flags used during call tree traversal. */
1608 unsigned int visit1 : 1;
1609 unsigned int non_root : 1;
1610 unsigned int visit2 : 1;
1611 unsigned int marking : 1;
1612 unsigned int visit3 : 1;
1615 struct spu_elf_stack_info
1617 int num_fun;
1618 int max_fun;
1619 /* Variable size array describing functions, one per contiguous
1620 address range belonging to a function. */
1621 struct function_info fun[1];
1624 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1625 entries for section SEC. */
1627 static struct spu_elf_stack_info *
1628 alloc_stack_info (asection *sec, int max_fun)
1630 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1631 bfd_size_type amt;
1633 amt = sizeof (struct spu_elf_stack_info);
1634 amt += (max_fun - 1) * sizeof (struct function_info);
1635 sec_data->stack_info = bfd_zmalloc (amt);
1636 if (sec_data->stack_info != NULL)
1637 sec_data->stack_info->max_fun = max_fun;
1638 return sec_data->stack_info;
1641 /* Add a new struct function_info describing a (part of a) function
1642 starting at SYM_H. Keep the array sorted by address. */
1644 static struct function_info *
1645 maybe_insert_function (asection *sec,
1646 void *sym_h,
1647 bfd_boolean global,
1648 bfd_boolean is_func)
1650 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1651 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1652 int i;
1653 bfd_vma off, size;
1655 if (sinfo == NULL)
1657 sinfo = alloc_stack_info (sec, 20);
1658 if (sinfo == NULL)
1659 return NULL;
1662 if (!global)
1664 Elf_Internal_Sym *sym = sym_h;
1665 off = sym->st_value;
1666 size = sym->st_size;
1668 else
1670 struct elf_link_hash_entry *h = sym_h;
1671 off = h->root.u.def.value;
1672 size = h->size;
1675 for (i = sinfo->num_fun; --i >= 0; )
1676 if (sinfo->fun[i].lo <= off)
1677 break;
1679 if (i >= 0)
1681 /* Don't add another entry for an alias, but do update some
1682 info. */
1683 if (sinfo->fun[i].lo == off)
1685 /* Prefer globals over local syms. */
1686 if (global && !sinfo->fun[i].global)
1688 sinfo->fun[i].global = TRUE;
1689 sinfo->fun[i].u.h = sym_h;
1691 if (is_func)
1692 sinfo->fun[i].is_func = TRUE;
1693 return &sinfo->fun[i];
1695 /* Ignore a zero-size symbol inside an existing function. */
1696 else if (sinfo->fun[i].hi > off && size == 0)
1697 return &sinfo->fun[i];
1700 if (++i < sinfo->num_fun)
1701 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1702 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1703 else if (i >= sinfo->max_fun)
1705 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1706 bfd_size_type old = amt;
1708 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
1709 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1710 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1711 sinfo = bfd_realloc (sinfo, amt);
1712 if (sinfo == NULL)
1713 return NULL;
1714 memset ((char *) sinfo + old, 0, amt - old);
1715 sec_data->stack_info = sinfo;
1717 sinfo->fun[i].is_func = is_func;
1718 sinfo->fun[i].global = global;
1719 sinfo->fun[i].sec = sec;
1720 if (global)
1721 sinfo->fun[i].u.h = sym_h;
1722 else
1723 sinfo->fun[i].u.sym = sym_h;
1724 sinfo->fun[i].lo = off;
1725 sinfo->fun[i].hi = off + size;
1726 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1727 sinfo->num_fun += 1;
1728 return &sinfo->fun[i];
1731 /* Return the name of FUN. */
1733 static const char *
1734 func_name (struct function_info *fun)
1736 asection *sec;
1737 bfd *ibfd;
1738 Elf_Internal_Shdr *symtab_hdr;
1740 while (fun->start != NULL)
1741 fun = fun->start;
1743 if (fun->global)
1744 return fun->u.h->root.root.string;
1746 sec = fun->sec;
1747 if (fun->u.sym->st_name == 0)
1749 size_t len = strlen (sec->name);
1750 char *name = bfd_malloc (len + 10);
1751 if (name == NULL)
1752 return "(null)";
1753 sprintf (name, "%s+%lx", sec->name,
1754 (unsigned long) fun->u.sym->st_value & 0xffffffff);
1755 return name;
1757 ibfd = sec->owner;
1758 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1759 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1762 /* Read the instruction at OFF in SEC. Return true iff the instruction
1763 is a nop, lnop, or stop 0 (all zero insn). */
1765 static bfd_boolean
1766 is_nop (asection *sec, bfd_vma off)
1768 unsigned char insn[4];
1770 if (off + 4 > sec->size
1771 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1772 return FALSE;
1773 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1774 return TRUE;
1775 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1776 return TRUE;
1777 return FALSE;
1780 /* Extend the range of FUN to cover nop padding up to LIMIT.
1781 Return TRUE iff some instruction other than a NOP was found. */
1783 static bfd_boolean
1784 insns_at_end (struct function_info *fun, bfd_vma limit)
1786 bfd_vma off = (fun->hi + 3) & -4;
1788 while (off < limit && is_nop (fun->sec, off))
1789 off += 4;
1790 if (off < limit)
1792 fun->hi = off;
1793 return TRUE;
1795 fun->hi = limit;
1796 return FALSE;
1799 /* Check and fix overlapping function ranges. Return TRUE iff there
1800 are gaps in the current info we have about functions in SEC. */
1802 static bfd_boolean
1803 check_function_ranges (asection *sec, struct bfd_link_info *info)
1805 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1806 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1807 int i;
1808 bfd_boolean gaps = FALSE;
1810 if (sinfo == NULL)
1811 return FALSE;
1813 for (i = 1; i < sinfo->num_fun; i++)
1814 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1816 /* Fix overlapping symbols. */
1817 const char *f1 = func_name (&sinfo->fun[i - 1]);
1818 const char *f2 = func_name (&sinfo->fun[i]);
1820 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
1821 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
1823 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
1824 gaps = TRUE;
1826 if (sinfo->num_fun == 0)
1827 gaps = TRUE;
1828 else
1830 if (sinfo->fun[0].lo != 0)
1831 gaps = TRUE;
1832 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
1834 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
1836 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
1837 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
1839 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
1840 gaps = TRUE;
1842 return gaps;
1845 /* Search current function info for a function that contains address
1846 OFFSET in section SEC. */
1848 static struct function_info *
1849 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1851 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1852 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1853 int lo, hi, mid;
1855 lo = 0;
1856 hi = sinfo->num_fun;
1857 while (lo < hi)
1859 mid = (lo + hi) / 2;
1860 if (offset < sinfo->fun[mid].lo)
1861 hi = mid;
1862 else if (offset >= sinfo->fun[mid].hi)
1863 lo = mid + 1;
1864 else
1865 return &sinfo->fun[mid];
1867 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1868 sec, offset);
1869 return NULL;
1872 /* Add CALLEE to CALLER call list if not already present. */
1874 static bfd_boolean
1875 insert_callee (struct function_info *caller, struct call_info *callee)
1877 struct call_info *p;
1878 for (p = caller->call_list; p != NULL; p = p->next)
1879 if (p->fun == callee->fun)
1881 /* Tail calls use less stack than normal calls. Retain entry
1882 for normal call over one for tail call. */
1883 if (p->is_tail > callee->is_tail)
1884 p->is_tail = callee->is_tail;
1885 return FALSE;
1887 callee->next = caller->call_list;
1888 caller->call_list = callee;
1889 return TRUE;
1892 /* Rummage through the relocs for SEC, looking for function calls.
1893 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1894 mark destination symbols on calls as being functions. Also
1895 look at branches, which may be tail calls or go to hot/cold
1896 section part of same function. */
1898 static bfd_boolean
1899 mark_functions_via_relocs (asection *sec,
1900 struct bfd_link_info *info,
1901 int call_tree)
1903 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1904 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1905 Elf_Internal_Sym *syms;
1906 void *psyms;
1907 static bfd_boolean warned;
1909 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
1910 info->keep_memory);
1911 if (internal_relocs == NULL)
1912 return FALSE;
1914 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1915 psyms = &symtab_hdr->contents;
1916 syms = *(Elf_Internal_Sym **) psyms;
1917 irela = internal_relocs;
1918 irelaend = irela + sec->reloc_count;
1919 for (; irela < irelaend; irela++)
1921 enum elf_spu_reloc_type r_type;
1922 unsigned int r_indx;
1923 asection *sym_sec;
1924 Elf_Internal_Sym *sym;
1925 struct elf_link_hash_entry *h;
1926 bfd_vma val;
1927 unsigned char insn[4];
1928 bfd_boolean is_call;
1929 struct function_info *caller;
1930 struct call_info *callee;
1932 r_type = ELF32_R_TYPE (irela->r_info);
1933 if (r_type != R_SPU_REL16
1934 && r_type != R_SPU_ADDR16)
1935 continue;
1937 r_indx = ELF32_R_SYM (irela->r_info);
1938 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
1939 return FALSE;
1941 if (sym_sec == NULL
1942 || sym_sec->output_section == NULL
1943 || sym_sec->output_section->owner != sec->output_section->owner)
1944 continue;
1946 if (!bfd_get_section_contents (sec->owner, sec, insn,
1947 irela->r_offset, 4))
1948 return FALSE;
1949 if (!is_branch (insn))
1950 continue;
1952 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1953 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1955 if (!call_tree)
1956 warned = TRUE;
1957 if (!call_tree || !warned)
1958 info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
1959 " %B(%A), stack analysis incomplete\n"),
1960 sec->owner, sec, irela->r_offset,
1961 sym_sec->owner, sym_sec);
1962 continue;
1965 is_call = (insn[0] & 0xfd) == 0x31;
1967 if (h)
1968 val = h->root.u.def.value;
1969 else
1970 val = sym->st_value;
1971 val += irela->r_addend;
1973 if (!call_tree)
1975 struct function_info *fun;
1977 if (irela->r_addend != 0)
1979 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
1980 if (fake == NULL)
1981 return FALSE;
1982 fake->st_value = val;
1983 fake->st_shndx
1984 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
1985 sym = fake;
1987 if (sym)
1988 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
1989 else
1990 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
1991 if (fun == NULL)
1992 return FALSE;
1993 if (irela->r_addend != 0
1994 && fun->u.sym != sym)
1995 free (sym);
1996 continue;
1999 caller = find_function (sec, irela->r_offset, info);
2000 if (caller == NULL)
2001 return FALSE;
2002 callee = bfd_malloc (sizeof *callee);
2003 if (callee == NULL)
2004 return FALSE;
2006 callee->fun = find_function (sym_sec, val, info);
2007 if (callee->fun == NULL)
2008 return FALSE;
2009 callee->is_tail = !is_call;
2010 if (!insert_callee (caller, callee))
2011 free (callee);
2012 else if (!is_call
2013 && !callee->fun->is_func
2014 && callee->fun->stack == 0)
2016 /* This is either a tail call or a branch from one part of
2017 the function to another, ie. hot/cold section. If the
2018 destination has been called by some other function then
2019 it is a separate function. We also assume that functions
2020 are not split across input files. */
2021 if (callee->fun->start != NULL
2022 || sec->owner != sym_sec->owner)
2024 callee->fun->start = NULL;
2025 callee->fun->is_func = TRUE;
2027 else
2028 callee->fun->start = caller;
2032 return TRUE;
2035 /* Handle something like .init or .fini, which has a piece of a function.
2036 These sections are pasted together to form a single function. */
2038 static bfd_boolean
2039 pasted_function (asection *sec, struct bfd_link_info *info)
2041 struct bfd_link_order *l;
2042 struct _spu_elf_section_data *sec_data;
2043 struct spu_elf_stack_info *sinfo;
2044 Elf_Internal_Sym *fake;
2045 struct function_info *fun, *fun_start;
2047 fake = bfd_zmalloc (sizeof (*fake));
2048 if (fake == NULL)
2049 return FALSE;
2050 fake->st_value = 0;
2051 fake->st_size = sec->size;
2052 fake->st_shndx
2053 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2054 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2055 if (!fun)
2056 return FALSE;
2058 /* Find a function immediately preceding this section. */
2059 fun_start = NULL;
2060 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2062 if (l->u.indirect.section == sec)
2064 if (fun_start != NULL)
2066 if (fun_start->start)
2067 fun_start = fun_start->start;
2068 fun->start = fun_start;
2070 return TRUE;
2072 if (l->type == bfd_indirect_link_order
2073 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2074 && (sinfo = sec_data->stack_info) != NULL
2075 && sinfo->num_fun != 0)
2076 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2079 info->callbacks->einfo (_("%A link_order not found\n"), sec);
2080 return FALSE;
2083 /* We're only interested in code sections. */
2085 static bfd_boolean
2086 interesting_section (asection *s, bfd *obfd, struct spu_link_hash_table *htab)
2088 return (s != htab->stub
2089 && s->output_section != NULL
2090 && s->output_section->owner == obfd
2091 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2092 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2093 && s->size != 0);
2096 /* Map address ranges in code sections to functions. */
2098 static bfd_boolean
2099 discover_functions (bfd *output_bfd, struct bfd_link_info *info)
2101 struct spu_link_hash_table *htab = spu_hash_table (info);
2102 bfd *ibfd;
2103 int bfd_idx;
2104 Elf_Internal_Sym ***psym_arr;
2105 asection ***sec_arr;
2106 bfd_boolean gaps = FALSE;
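/* Note: this routine works in several passes.  First, properly typed
   and sized STT_FUNC symbols are installed per input bfd.  If that
   still leaves gaps in any code section, relocations and global
   symbols are used to guess further function starts, zero-sized
   entries are extended up to the next function, and sections with no
   symbols at all (e.g. .init/.fini fragments) are handed to
   pasted_function. */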
2108 bfd_idx = 0;
2109 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2110 bfd_idx++;
2112 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2113 if (psym_arr == NULL)
2114 return FALSE;
2115 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2116 if (sec_arr == NULL)
2117 return FALSE;
2120 for (ibfd = info->input_bfds, bfd_idx = 0;
2121 ibfd != NULL;
2122 ibfd = ibfd->link_next, bfd_idx++)
2124 extern const bfd_target bfd_elf32_spu_vec;
2125 Elf_Internal_Shdr *symtab_hdr;
2126 asection *sec;
2127 size_t symcount;
2128 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2129 asection **psecs, **p;
2131 if (ibfd->xvec != &bfd_elf32_spu_vec)
2132 continue;
2134 /* Read all the symbols. */
2135 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2136 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2137 if (symcount == 0)
2138 continue;
2140 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2141 if (syms == NULL)
2143 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2144 NULL, NULL, NULL);
2145 symtab_hdr->contents = (void *) syms;
2146 if (syms == NULL)
2147 return FALSE;
2150 /* Select defined function symbols that are going to be output. */
2151 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2152 if (psyms == NULL)
2153 return FALSE;
2154 psym_arr[bfd_idx] = psyms;
2155 psecs = bfd_malloc (symcount * sizeof (*psecs));
2156 if (psecs == NULL)
2157 return FALSE;
2158 sec_arr[bfd_idx] = psecs;
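/* Note: psym_arr/sec_arr keep these per-bfd arrays alive for the
   later passes; psyms is allocated one extra slot so it can be
   terminated with a NULL pointer, which the "install all globals"
   loop below relies on. */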
2159 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2160 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2161 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2163 asection *s;
2165 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2166 if (s != NULL && interesting_section (s, output_bfd, htab))
2167 *psy++ = sy;
2169 symcount = psy - psyms;
2170 *psy = NULL;
2172 /* Sort them by section and offset within section. */
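/* (sort_syms compares symbols via the file-scope sort_syms_syms and
   sort_syms_psecs pointers set just below, since qsort passes no
   user context to its comparison function.) */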
2173 sort_syms_syms = syms;
2174 sort_syms_psecs = psecs;
2175 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2177 /* Now inspect the function symbols. */
2178 for (psy = psyms; psy < psyms + symcount; )
2180 asection *s = psecs[*psy - syms];
2181 Elf_Internal_Sym **psy2;
2183 for (psy2 = psy; ++psy2 < psyms + symcount; )
2184 if (psecs[*psy2 - syms] != s)
2185 break;
2187 if (!alloc_stack_info (s, psy2 - psy))
2188 return FALSE;
2189 psy = psy2;
2192 /* First install info about properly typed and sized functions.
2193 In an ideal world this will cover all code sections, except
2194 when partitioning functions into hot and cold sections,
2195 and the horrible pasted-together .init and .fini functions. */
2196 for (psy = psyms; psy < psyms + symcount; ++psy)
2198 sy = *psy;
2199 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2201 asection *s = psecs[sy - syms];
2202 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2203 return FALSE;
2207 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2208 if (interesting_section (sec, output_bfd, htab))
2209 gaps |= check_function_ranges (sec, info);
2212 if (gaps)
2214 /* See if we can discover more function symbols by looking at
2215 relocations. */
2216 for (ibfd = info->input_bfds, bfd_idx = 0;
2217 ibfd != NULL;
2218 ibfd = ibfd->link_next, bfd_idx++)
2220 asection *sec;
2222 if (psym_arr[bfd_idx] == NULL)
2223 continue;
2225 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2226 if (interesting_section (sec, output_bfd, htab)
2227 && sec->reloc_count != 0)
2229 if (!mark_functions_via_relocs (sec, info, FALSE))
2230 return FALSE;
2234 for (ibfd = info->input_bfds, bfd_idx = 0;
2235 ibfd != NULL;
2236 ibfd = ibfd->link_next, bfd_idx++)
2238 Elf_Internal_Shdr *symtab_hdr;
2239 asection *sec;
2240 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2241 asection **psecs;
2243 if ((psyms = psym_arr[bfd_idx]) == NULL)
2244 continue;
2246 psecs = sec_arr[bfd_idx];
2248 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2249 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2251 gaps = FALSE;
2252 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2253 if (interesting_section (sec, output_bfd, htab))
2254 gaps |= check_function_ranges (sec, info);
2255 if (!gaps)
2256 continue;
2258 /* Finally, install all globals. */
2259 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2261 asection *s;
2263 s = psecs[sy - syms];
2265 /* Global syms might be improperly typed functions. */
2266 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2267 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2269 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2270 return FALSE;
2274 /* Some of the symbols we've installed as marking the
2275 beginning of functions may have a size of zero. Extend
2276 the range of such functions to the beginning of the
2277 next symbol of interest. */
2278 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2279 if (interesting_section (sec, output_bfd, htab))
2281 struct _spu_elf_section_data *sec_data;
2282 struct spu_elf_stack_info *sinfo;
2284 sec_data = spu_elf_section_data (sec);
2285 sinfo = sec_data->stack_info;
2286 if (sinfo != NULL)
2288 int fun_idx;
2289 bfd_vma hi = sec->size;
2291 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2293 sinfo->fun[fun_idx].hi = hi;
2294 hi = sinfo->fun[fun_idx].lo;
2297 /* No symbols in this section. Must be .init or .fini
2298 or something similar. */
2299 else if (!pasted_function (sec, info))
2300 return FALSE;
2305 for (ibfd = info->input_bfds, bfd_idx = 0;
2306 ibfd != NULL;
2307 ibfd = ibfd->link_next, bfd_idx++)
2309 if (psym_arr[bfd_idx] == NULL)
2310 continue;
2312 free (psym_arr[bfd_idx]);
2313 free (sec_arr[bfd_idx]);
2316 free (psym_arr);
2317 free (sec_arr);
2319 return TRUE;
2322 /* Mark nodes in the call graph that are called by some other node. */
2324 static void
2325 mark_non_root (struct function_info *fun)
2327 struct call_info *call;
2329 fun->visit1 = TRUE;
2330 for (call = fun->call_list; call; call = call->next)
2332 call->fun->non_root = TRUE;
2333 if (!call->fun->visit1)
2334 mark_non_root (call->fun);
2338 /* Remove cycles from the call graph. */
2340 static void
2341 call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
2343 struct call_info **callp, *call;
2345 fun->visit2 = TRUE;
2346 fun->marking = TRUE;
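/* Note: visit2 records that a node has been entered at some point,
   while marking is set only while the node is on the current
   recursion stack.  A call to a function that is still marked is
   therefore a back edge, i.e. a cycle, and is removed below. */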
2348 callp = &fun->call_list;
2349 while ((call = *callp) != NULL)
2351 if (!call->fun->visit2)
2352 call_graph_traverse (call->fun, info);
2353 else if (call->fun->marking)
2355 const char *f1 = func_name (fun);
2356 const char *f2 = func_name (call->fun);
2358 info->callbacks->info (_("Stack analysis will ignore the call "
2359 "from %s to %s\n"),
2360 f1, f2);
2361 *callp = call->next;
2362 continue;
2364 callp = &call->next;
2366 fun->marking = FALSE;
2369 /* Populate call_list for each function. */
2371 static bfd_boolean
2372 build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
2374 struct spu_link_hash_table *htab = spu_hash_table (info);
2375 bfd *ibfd;
2377 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2379 extern const bfd_target bfd_elf32_spu_vec;
2380 asection *sec;
2382 if (ibfd->xvec != &bfd_elf32_spu_vec)
2383 continue;
2385 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2387 if (!interesting_section (sec, output_bfd, htab)
2388 || sec->reloc_count == 0)
2389 continue;
2391 if (!mark_functions_via_relocs (sec, info, TRUE))
2392 return FALSE;
2395 /* Transfer call info from hot/cold section part of function
2396 to main entry. */
2397 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2399 struct _spu_elf_section_data *sec_data;
2400 struct spu_elf_stack_info *sinfo;
2402 if ((sec_data = spu_elf_section_data (sec)) != NULL
2403 && (sinfo = sec_data->stack_info) != NULL)
2405 int i;
2406 for (i = 0; i < sinfo->num_fun; ++i)
2408 if (sinfo->fun[i].start != NULL)
2410 struct call_info *call = sinfo->fun[i].call_list;
2412 while (call != NULL)
2414 struct call_info *call_next = call->next;
2415 if (!insert_callee (sinfo->fun[i].start, call))
2416 free (call);
2417 call = call_next;
2419 sinfo->fun[i].call_list = NULL;
2420 sinfo->fun[i].non_root = TRUE;
2427 /* Find the call graph root(s). */
2428 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2430 extern const bfd_target bfd_elf32_spu_vec;
2431 asection *sec;
2433 if (ibfd->xvec != &bfd_elf32_spu_vec)
2434 continue;
2436 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2438 struct _spu_elf_section_data *sec_data;
2439 struct spu_elf_stack_info *sinfo;
2441 if ((sec_data = spu_elf_section_data (sec)) != NULL
2442 && (sinfo = sec_data->stack_info) != NULL)
2444 int i;
2445 for (i = 0; i < sinfo->num_fun; ++i)
2446 if (!sinfo->fun[i].visit1)
2447 mark_non_root (&sinfo->fun[i]);
2452 /* Remove cycles from the call graph. We start from the root node(s)
2453 so that we break cycles in a reasonable place. */
2454 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2456 extern const bfd_target bfd_elf32_spu_vec;
2457 asection *sec;
2459 if (ibfd->xvec != &bfd_elf32_spu_vec)
2460 continue;
2462 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2464 struct _spu_elf_section_data *sec_data;
2465 struct spu_elf_stack_info *sinfo;
2467 if ((sec_data = spu_elf_section_data (sec)) != NULL
2468 && (sinfo = sec_data->stack_info) != NULL)
2470 int i;
2471 for (i = 0; i < sinfo->num_fun; ++i)
2472 if (!sinfo->fun[i].non_root)
2473 call_graph_traverse (&sinfo->fun[i], info);
2478 return TRUE;
2481 /* Descend the call graph for FUN, accumulating total stack required. */
2483 static bfd_vma
2484 sum_stack (struct function_info *fun,
2485 struct bfd_link_info *info,
2486 int emit_stack_syms)
2488 struct call_info *call;
2489 struct function_info *max = NULL;
2490 bfd_vma max_stack = fun->stack;
2491 bfd_vma stack;
2492 const char *f1;
2494 if (fun->visit3)
2495 return max_stack;
2497 for (call = fun->call_list; call; call = call->next)
2499 stack = sum_stack (call->fun, info, emit_stack_syms);
2500 /* Include caller stack for normal calls, don't do so for
2501 tail calls. fun->stack here is local stack usage for
2502 this function. */
2503 if (!call->is_tail)
2504 stack += fun->stack;
2505 if (max_stack < stack)
2507 max_stack = stack;
2508 max = call->fun;
2512 f1 = func_name (fun);
2513 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"), f1, fun->stack, max_stack);
2515 if (fun->call_list)
2517 info->callbacks->minfo (_(" calls:\n"));
2518 for (call = fun->call_list; call; call = call->next)
2520 const char *f2 = func_name (call->fun);
2521 const char *ann1 = call->fun == max ? "*" : " ";
2522 const char *ann2 = call->is_tail ? "t" : " ";
2524 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
2528 /* Now fun->stack holds cumulative stack. */
2529 fun->stack = max_stack;
2530 fun->visit3 = TRUE;
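/* Note: the block below optionally publishes the result as an
   absolute symbol, __stack_<func> for global functions (e.g.
   __stack_main) and __stack_<section id in hex>_<func> for local
   ones.  The 18 bytes added to the malloc cover "__stack_", up to
   eight hex digits, the separating underscore and the NUL. */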
2532 if (emit_stack_syms)
2534 struct spu_link_hash_table *htab = spu_hash_table (info);
2535 char *name = bfd_malloc (18 + strlen (f1));
2536 struct elf_link_hash_entry *h;
2538 if (name != NULL)
2540 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
2541 sprintf (name, "__stack_%s", f1);
2542 else
2543 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
2545 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
2546 free (name);
2547 if (h != NULL
2548 && (h->root.type == bfd_link_hash_new
2549 || h->root.type == bfd_link_hash_undefined
2550 || h->root.type == bfd_link_hash_undefweak))
2552 h->root.type = bfd_link_hash_defined;
2553 h->root.u.def.section = bfd_abs_section_ptr;
2554 h->root.u.def.value = max_stack;
2555 h->size = 0;
2556 h->type = 0;
2557 h->ref_regular = 1;
2558 h->def_regular = 1;
2559 h->ref_regular_nonweak = 1;
2560 h->forced_local = 1;
2561 h->non_elf = 0;
2566 return max_stack;
2569 /* Provide an estimate of total stack required. */
2571 static bfd_boolean
2572 spu_elf_stack_analysis (bfd *output_bfd,
2573 struct bfd_link_info *info,
2574 int emit_stack_syms)
2576 bfd *ibfd;
2577 bfd_vma max_stack = 0;
2579 if (!discover_functions (output_bfd, info))
2580 return FALSE;
2582 if (!build_call_tree (output_bfd, info))
2583 return FALSE;
2585 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
2586 info->callbacks->minfo (_("\nStack size for functions. "
2587 "Annotations: '*' max stack, 't' tail call\n"));
2588 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2590 extern const bfd_target bfd_elf32_spu_vec;
2591 asection *sec;
2593 if (ibfd->xvec != &bfd_elf32_spu_vec)
2594 continue;
2596 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2598 struct _spu_elf_section_data *sec_data;
2599 struct spu_elf_stack_info *sinfo;
2601 if ((sec_data = spu_elf_section_data (sec)) != NULL
2602 && (sinfo = sec_data->stack_info) != NULL)
2604 int i;
2605 for (i = 0; i < sinfo->num_fun; ++i)
2607 if (!sinfo->fun[i].non_root)
2609 bfd_vma stack;
2610 const char *f1;
2612 stack = sum_stack (&sinfo->fun[i], info,
2613 emit_stack_syms);
2614 f1 = func_name (&sinfo->fun[i]);
2615 info->callbacks->info (_(" %s: 0x%v\n"),
2616 f1, stack);
2617 if (max_stack < stack)
2618 max_stack = stack;
2625 info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
2626 return TRUE;
2629 /* Perform a final link. */
2631 static bfd_boolean
2632 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2634 struct spu_link_hash_table *htab = spu_hash_table (info);
2636 if (htab->stack_analysis
2637 && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
2638 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2640 return bfd_elf_final_link (output_bfd, info);
2643 /* Called when not normally emitting relocs, i.e. !info->relocatable
2644 and !info->emitrelocations. Returns a count of special relocs
2645 that need to be emitted. */
2647 static unsigned int
2648 spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
2650 unsigned int count = 0;
2651 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
2653 for (; relocs < relend; relocs++)
2655 int r_type = ELF32_R_TYPE (relocs->r_info);
2656 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2657 ++count;
2660 return count;
2663 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
2665 static bfd_boolean
2666 spu_elf_relocate_section (bfd *output_bfd,
2667 struct bfd_link_info *info,
2668 bfd *input_bfd,
2669 asection *input_section,
2670 bfd_byte *contents,
2671 Elf_Internal_Rela *relocs,
2672 Elf_Internal_Sym *local_syms,
2673 asection **local_sections)
2675 Elf_Internal_Shdr *symtab_hdr;
2676 struct elf_link_hash_entry **sym_hashes;
2677 Elf_Internal_Rela *rel, *relend;
2678 struct spu_link_hash_table *htab;
2679 bfd_boolean ret = TRUE;
2680 bfd_boolean emit_these_relocs = FALSE;
2682 htab = spu_hash_table (info);
2683 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2684 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
2686 rel = relocs;
2687 relend = relocs + input_section->reloc_count;
2688 for (; rel < relend; rel++)
2690 int r_type;
2691 reloc_howto_type *howto;
2692 unsigned long r_symndx;
2693 Elf_Internal_Sym *sym;
2694 asection *sec;
2695 struct elf_link_hash_entry *h;
2696 const char *sym_name;
2697 bfd_vma relocation;
2698 bfd_vma addend;
2699 bfd_reloc_status_type r;
2700 bfd_boolean unresolved_reloc;
2701 bfd_boolean warned;
2702 bfd_boolean branch;
2704 r_symndx = ELF32_R_SYM (rel->r_info);
2705 r_type = ELF32_R_TYPE (rel->r_info);
2706 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2708 emit_these_relocs = TRUE;
2709 continue;
2712 howto = elf_howto_table + r_type;
2713 unresolved_reloc = FALSE;
2714 warned = FALSE;
2715 h = NULL;
2716 sym = NULL;
2717 sec = NULL;
2718 if (r_symndx < symtab_hdr->sh_info)
2720 sym = local_syms + r_symndx;
2721 sec = local_sections[r_symndx];
2722 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
2723 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2725 else
2727 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2728 r_symndx, symtab_hdr, sym_hashes,
2729 h, sec, relocation,
2730 unresolved_reloc, warned);
2731 sym_name = h->root.root.string;
2734 if (sec != NULL && elf_discarded_section (sec))
2736 /* For relocs against symbols from removed linkonce sections,
2737 or sections discarded by a linker script, we just want the
2738 section contents zeroed. Avoid any special processing. */
2739 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
2740 rel->r_info = 0;
2741 rel->r_addend = 0;
2742 continue;
2745 if (info->relocatable)
2746 continue;
2748 if (unresolved_reloc)
2750 (*_bfd_error_handler)
2751 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
2752 input_bfd,
2753 bfd_get_section_name (input_bfd, input_section),
2754 (long) rel->r_offset,
2755 howto->name,
2756 sym_name);
2757 ret = FALSE;
2760 /* If this symbol is in an overlay area, we may need to relocate
2761 to the overlay stub. */
2762 addend = rel->r_addend;
2763 branch = (is_branch (contents + rel->r_offset)
2764 || is_hint (contents + rel->r_offset));
2765 if (needs_ovl_stub (sym_name, sec, input_section, htab, branch))
2767 char *stub_name;
2768 struct spu_stub_hash_entry *sh;
2770 stub_name = spu_stub_name (sec, h, rel);
2771 if (stub_name == NULL)
2772 return FALSE;
2774 sh = (struct spu_stub_hash_entry *)
2775 bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
2776 if (sh != NULL)
2778 relocation = (htab->stub->output_section->vma
2779 + htab->stub->output_offset
2780 + sh->off);
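/* The stub was looked up by a name that (as built by spu_stub_name)
   seems to encode the reloc addend as well as the symbol, so the
   addend must not be applied a second time here. */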
2781 addend = 0;
2783 free (stub_name);
2786 r = _bfd_final_link_relocate (howto,
2787 input_bfd,
2788 input_section,
2789 contents,
2790 rel->r_offset, relocation, addend);
2792 if (r != bfd_reloc_ok)
2794 const char *msg = (const char *) 0;
2796 switch (r)
2798 case bfd_reloc_overflow:
2799 if (!((*info->callbacks->reloc_overflow)
2800 (info, (h ? &h->root : NULL), sym_name, howto->name,
2801 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
2802 return FALSE;
2803 break;
2805 case bfd_reloc_undefined:
2806 if (!((*info->callbacks->undefined_symbol)
2807 (info, sym_name, input_bfd, input_section,
2808 rel->r_offset, TRUE)))
2809 return FALSE;
2810 break;
2812 case bfd_reloc_outofrange:
2813 msg = _("internal error: out of range error");
2814 goto common_error;
2816 case bfd_reloc_notsupported:
2817 msg = _("internal error: unsupported relocation error");
2818 goto common_error;
2820 case bfd_reloc_dangerous:
2821 msg = _("internal error: dangerous error");
2822 goto common_error;
2824 default:
2825 msg = _("internal error: unknown error");
2826 /* fall through */
2828 common_error:
2829 if (!((*info->callbacks->warning)
2830 (info, msg, sym_name, input_bfd, input_section,
2831 rel->r_offset)))
2832 return FALSE;
2833 break;
2838 if (ret
2839 && emit_these_relocs
2840 && !info->relocatable
2841 && !info->emitrelocations)
2843 Elf_Internal_Rela *wrel;
2844 Elf_Internal_Shdr *rel_hdr;
2846 wrel = rel = relocs;
2847 relend = relocs + input_section->reloc_count;
2848 for (; rel < relend; rel++)
2850 int r_type;
2852 r_type = ELF32_R_TYPE (rel->r_info);
2853 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2854 *wrel++ = *rel;
2856 input_section->reloc_count = wrel - relocs;
2857 /* Backflips for _bfd_elf_link_output_relocs. */
2858 rel_hdr = &elf_section_data (input_section)->rel_hdr;
2859 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
2860 ret = 2;
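/* Returning 2 rather than TRUE tells the generic ELF linker to
   output the (now filtered) relocations for this section even
   though we are not otherwise emitting relocs. */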
2863 return ret;
2866 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
2868 static bfd_boolean
2869 spu_elf_output_symbol_hook (struct bfd_link_info *info,
2870 const char *sym_name ATTRIBUTE_UNUSED,
2871 Elf_Internal_Sym *sym,
2872 asection *sym_sec ATTRIBUTE_UNUSED,
2873 struct elf_link_hash_entry *h)
2875 struct spu_link_hash_table *htab = spu_hash_table (info);
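/* Note: _SPUEAR_ symbols presumably mark extra entry points into the
   SPU image that are referenced from outside; overlay call stubs are
   built for them earlier in this file.  Redirecting the output symbol
   value to the stub means an external caller always enters via the
   stub, so the correct overlay is loaded first. */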
2877 if (!info->relocatable
2878 && htab->num_overlays != 0
2879 && h != NULL
2880 && (h->root.type == bfd_link_hash_defined
2881 || h->root.type == bfd_link_hash_defweak)
2882 && h->def_regular
2883 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
2885 static Elf_Internal_Rela zero_rel;
2886 char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
2887 struct spu_stub_hash_entry *sh;
2889 if (stub_name == NULL)
2890 return FALSE;
2891 sh = (struct spu_stub_hash_entry *)
2892 bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
2893 free (stub_name);
2894 if (sh == NULL)
2895 return TRUE;
2896 sym->st_shndx
2897 = _bfd_elf_section_from_bfd_section (htab->stub->output_section->owner,
2898 htab->stub->output_section);
2899 sym->st_value = (htab->stub->output_section->vma
2900 + htab->stub->output_offset
2901 + sh->off);
2904 return TRUE;
2907 static int spu_plugin = 0;
2909 void
2910 spu_elf_plugin (int val)
2912 spu_plugin = val;
2915 /* Set ELF header e_type for plugins. */
2917 static void
2918 spu_elf_post_process_headers (bfd *abfd,
2919 struct bfd_link_info *info ATTRIBUTE_UNUSED)
2921 if (spu_plugin)
2923 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
2925 i_ehdrp->e_type = ET_DYN;
2929 /* We may add an extra PT_LOAD segment for .toe. We also need extra
2930 segments for overlays. */
2932 static int
2933 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
2935 struct spu_link_hash_table *htab = spu_hash_table (info);
2936 int extra = htab->num_overlays;
2937 asection *sec;
2939 if (extra)
2940 ++extra;
2942 sec = bfd_get_section_by_name (abfd, ".toe");
2943 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
2944 ++extra;
2946 return extra;
2949 /* Remove .toe section from other PT_LOAD segments and put it in
2950 a segment of its own. Put overlays in separate segments too. */
2952 static bfd_boolean
2953 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
2955 asection *toe, *s;
2956 struct elf_segment_map *m;
2957 unsigned int i;
2959 if (info == NULL)
2960 return TRUE;
2962 toe = bfd_get_section_by_name (abfd, ".toe");
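/* Note: for every PT_LOAD map carrying more than one section, the
   loop below looks for .toe or an overlay section.  When it finds
   one, the sections following it are split off into a new PT_LOAD
   map, the offending section gets a single-section map of its own,
   and any sections before it stay in the original map. */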
2963 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2964 if (m->p_type == PT_LOAD && m->count > 1)
2965 for (i = 0; i < m->count; i++)
2966 if ((s = m->sections[i]) == toe
2967 || spu_elf_section_data (s)->ovl_index != 0)
2969 struct elf_segment_map *m2;
2970 bfd_vma amt;
2972 if (i + 1 < m->count)
2974 amt = sizeof (struct elf_segment_map);
2975 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
2976 m2 = bfd_zalloc (abfd, amt);
2977 if (m2 == NULL)
2978 return FALSE;
2979 m2->count = m->count - (i + 1);
2980 memcpy (m2->sections, m->sections + i + 1,
2981 m2->count * sizeof (m->sections[0]));
2982 m2->p_type = PT_LOAD;
2983 m2->next = m->next;
2984 m->next = m2;
2986 m->count = 1;
2987 if (i != 0)
2989 m->count = i;
2990 amt = sizeof (struct elf_segment_map);
2991 m2 = bfd_zalloc (abfd, amt);
2992 if (m2 == NULL)
2993 return FALSE;
2994 m2->p_type = PT_LOAD;
2995 m2->count = 1;
2996 m2->sections[0] = s;
2997 m2->next = m->next;
2998 m->next = m2;
3000 break;
3003 return TRUE;
3006 /* Check that all loadable section VMAs lie in the range
3007 LO .. HI inclusive. */
3009 asection *
3010 spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
3012 struct elf_segment_map *m;
3013 unsigned int i;
3015 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
3016 if (m->p_type == PT_LOAD)
3017 for (i = 0; i < m->count; i++)
3018 if (m->sections[i]->size != 0
3019 && (m->sections[i]->vma < lo
3020 || m->sections[i]->vma > hi
3021 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
3022 return m->sections[i];
3024 return NULL;
3027 /* Tweak the section type of .note.spu_name. */
3029 static bfd_boolean
3030 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
3031 Elf_Internal_Shdr *hdr,
3032 asection *sec)
3034 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
3035 hdr->sh_type = SHT_NOTE;
3036 return TRUE;
3039 /* Tweak phdrs before writing them out. */
3041 static int
3042 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
3044 const struct elf_backend_data *bed;
3045 struct elf_obj_tdata *tdata;
3046 Elf_Internal_Phdr *phdr, *last;
3047 struct spu_link_hash_table *htab;
3048 unsigned int count;
3049 unsigned int i;
3051 if (info == NULL)
3052 return TRUE;
3054 bed = get_elf_backend_data (abfd);
3055 tdata = elf_tdata (abfd);
3056 phdr = tdata->phdr;
3057 count = tdata->program_header_size / bed->s->sizeof_phdr;
3058 htab = spu_hash_table (info);
3059 if (htab->num_overlays != 0)
3061 struct elf_segment_map *m;
3062 unsigned int o;
3064 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
3065 if (m->count != 0
3066 && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
3068 /* Mark this as an overlay header. */
3069 phdr[i].p_flags |= PF_OVERLAY;
3071 if (htab->ovtab != NULL && htab->ovtab->size != 0)
3073 bfd_byte *p = htab->ovtab->contents;
3074 unsigned int off = (o - 1) * 16 + 8;
3076 /* Write file_off into _ovly_table. */
3077 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
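/* The layout assumed here is a 16-byte _ovly_table entry per
   overlay, apparently:
       u32 vma;       (offset 0)
       u32 size;      (offset 4)
       u32 file_off;  (offset 8)
       u32 buf;       (offset 12)
   hence the (o - 1) * 16 + 8 offset computed above for file_off. */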
3082 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
3083 of 16. This should always be possible when using the standard
3084 linker scripts, but don't create overlapping segments if
3085 someone is playing games with linker scripts. */
3086 last = NULL;
3087 for (i = count; i-- != 0; )
3088 if (phdr[i].p_type == PT_LOAD)
3090 unsigned adjust;
3092 adjust = -phdr[i].p_filesz & 15;
3093 if (adjust != 0
3094 && last != NULL
3095 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
3096 break;
3098 adjust = -phdr[i].p_memsz & 15;
3099 if (adjust != 0
3100 && last != NULL
3101 && phdr[i].p_filesz != 0
3102 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
3103 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
3104 break;
3106 if (phdr[i].p_filesz != 0)
3107 last = &phdr[i];
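/* If the scan above ran off the start of the table (i wrapped to
   -1u), no rounded-up segment would overlap the one that follows it,
   so it is safe to apply the padding in a second pass. */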
3110 if (i == (unsigned int) -1)
3111 for (i = count; i-- != 0; )
3112 if (phdr[i].p_type == PT_LOAD)
3114 unsigned adjust;
3116 adjust = -phdr[i].p_filesz & 15;
3117 phdr[i].p_filesz += adjust;
3119 adjust = -phdr[i].p_memsz & 15;
3120 phdr[i].p_memsz += adjust;
3123 return TRUE;
3126 #define TARGET_BIG_SYM bfd_elf32_spu_vec
3127 #define TARGET_BIG_NAME "elf32-spu"
3128 #define ELF_ARCH bfd_arch_spu
3129 #define ELF_MACHINE_CODE EM_SPU
3130 /* This matches the alignment needed for DMA. */
3131 #define ELF_MAXPAGESIZE 0x80
3132 #define elf_backend_rela_normal 1
3133 #define elf_backend_can_gc_sections 1
3135 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
3136 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
3137 #define elf_info_to_howto spu_elf_info_to_howto
3138 #define elf_backend_count_relocs spu_elf_count_relocs
3139 #define elf_backend_relocate_section spu_elf_relocate_section
3140 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
3141 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
3142 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
3143 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
3144 #define bfd_elf32_bfd_link_hash_table_free spu_elf_link_hash_table_free
3146 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
3147 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
3148 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
3149 #define elf_backend_post_process_headers spu_elf_post_process_headers
3150 #define elf_backend_fake_sections spu_elf_fake_sections
3151 #define elf_backend_special_sections spu_elf_special_sections
3152 #define bfd_elf32_bfd_final_link spu_elf_final_link
3154 #include "elf32-target.h"