/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
                                           void *, asection *,
                                           bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_NONE",
         FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR10",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16_HI",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR16_LO",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR18",
         FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16, TRUE,   7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_REL16",
         FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR7",
         FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9, TRUE,   0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9",
         FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9, TRUE,   0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9I",
         FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR10I",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR16I",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,   0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_REL32",
         FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16X",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",
         FALSE, 0, -1, FALSE),
};
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    }
}
static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
                       arelent *cache_ptr,
                       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}
static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}
static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
              void *data, asection *input_section,
              bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
                                  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
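
/* Worked example (added for clarity, not part of the original source):
   the two dst_masks above are 0x0180007f (REL9) and 0x0000c07f (REL9I).
   For a 9-bit word displacement whose low nine bits are 0x1f3
   (bit8:bit7 = 1:1, low seven bits = 0x73) the shuffle above gives

     (val & 0x7f)          = 0x00000073   low seven bits, both forms
     ((val & 0x180) << 7)  = 0x0000c000   high two bits, REL9I position
     ((val & 0x180) << 16) = 0x01800000   high two bits, REL9 position

   and the howto's dst_mask then keeps only the field belonging to the
   relocation actually being applied.  */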
static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
        return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}
/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}
/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Sorted array of stubs.  */
  struct {
    struct spu_stub_hash_entry **sh;
    unsigned int count;
    int err;
  } stubs;

  /* Shortcuts to overlay sections.  */
  asection *stub;
  asection *ovtab;

  struct elf_link_hash_entry *ovly_load;
  unsigned long ovly_load_r_symndx;

  /* An array of two output sections per overlay region, chosen such that
     the first section vma is the overlay buffer vma (ie. the section has
     the lowest vma in the group that occupy the region), and the second
     section vma+size specifies the end of the region.  We keep pointers
     to sections like this because section vmas may change when laying
     them out.  */
  asection **ovl_region;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms : 1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  unsigned int stub_overflow : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};

#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
struct spu_stub_hash_entry
{
  struct bfd_hash_entry root;

  /* Destination of this stub.  */
  asection *target_section;
  bfd_vma target_off;

  /* Offset of entry in stub section.  */
  bfd_vma off;

  /* Offset from this stub to stub that loads the overlay index.  */
  bfd_vma delta;
};

/* Create an entry in a spu stub hash table.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
                   struct bfd_hash_table *table,
                   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = bfd_hash_allocate (table, sizeof (struct spu_stub_hash_entry));
      if (entry == NULL)
        return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct spu_stub_hash_entry *sh = (struct spu_stub_hash_entry *) entry;

      sh->target_section = NULL;
    }

  return entry;
}
/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
                                      _bfd_elf_link_hash_newfunc,
                                      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  /* Init the stub hash table too.  */
  if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc,
                            sizeof (struct spu_stub_hash_entry)))
    return NULL;

  memset (&htab->stubs, 0,
          sizeof (*htab) - offsetof (struct spu_link_hash_table, stubs));

  return &htab->elf.root;
}
/* Free the derived linker hash table.  */

static void
spu_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
{
  struct spu_link_hash_table *ret = (struct spu_link_hash_table *) hash;

  bfd_hash_table_free (&ret->stub_hash_table);
  _bfd_generic_link_hash_table_free (hash);
}
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
           Elf_Internal_Sym **symp,
           asection **symsecp,
           Elf_Internal_Sym **locsymsp,
           unsigned long r_symndx,
           bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
        *hp = h;

      if (symp != NULL)
        *symp = NULL;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            symsec = h->root.u.def.section;
          *symsecp = symsec;
        }
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
        {
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          if (locsyms == NULL)
            {
              size_t symcount = symtab_hdr->sh_info;

              /* If we are reading symbols into the contents, then
                 read the global syms too.  This is done to cache
                 syms for later stack analysis.  */
              if ((unsigned char **) locsymsp == &symtab_hdr->contents)
                symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
              locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
                                              NULL, NULL, NULL);
            }
          *locsymsp = locsyms;
        }
      if (locsyms == NULL)
        return FALSE;

      sym = locsyms + r_symndx;

      if (hp != NULL)
        *hp = NULL;

      if (symp != NULL)
        *symp = sym;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if ((sym->st_shndx != SHN_UNDEF
               && sym->st_shndx < SHN_LORESERVE)
              || sym->st_shndx > SHN_HIRESERVE)
            symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
          *symsecp = symsec;
        }
    }

  return TRUE;
}
/* Build a name for an entry in the stub hash table.  We can't use a
   local symbol name because ld -r might generate duplicate local symbols.  */

static char *
spu_stub_name (const asection *sym_sec,
               const struct elf_link_hash_entry *h,
               const Elf_Internal_Rela *rel)
{
  char *stub_name;
  bfd_size_type len;

  if (h)
    {
      len = strlen (h->root.root.string) + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
        return stub_name;

      sprintf (stub_name, "%s+%x",
               h->root.root.string,
               (int) rel->r_addend & 0xffffffff);
      len -= 2;
    }
  else
    {
      len = 8 + 1 + 8 + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
        return stub_name;

      sprintf (stub_name, "%x:%x+%x",
               sym_sec->id & 0xffffffff,
               (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
               (int) rel->r_addend & 0xffffffff);
      len = strlen (stub_name);
    }

  if (stub_name[len - 2] == '+'
      && stub_name[len - 1] == '0'
      && stub_name[len] == 0)
    stub_name[len - 2] = 0;

  return stub_name;
}
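
/* Examples (added for clarity, not part of the original source): a stub
   for global "foo" referenced with addend 0x20 is named "foo+20", while
   a stub for a local symbol is named from the section id, the symbol
   index and the addend, e.g. "2f:15+0", which the trimming above then
   shortens to "2f:15".  */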
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (bfd *output_bfd,
                         struct bfd_link_info *info,
                         int stack_analysis,
                         int emit_stack_syms)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
          || !bfd_set_section_alignment (ibfd, s, 4))
        return FALSE;

      name_len = strlen (bfd_get_filename (output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
        return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
        return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}
/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}
/* Identify overlays in the output bfd, and number them.  */

bfd_boolean
spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;

  if (output_bfd->section_count < 2)
    return FALSE;

  alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return FALSE;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return FALSE;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  /* Look for overlapping vmas.  Any with overlap must be overlays.
     Count them.  Also count the number of overlay regions and for
     each region save a section from that region with the lowest vma
     and another section with the highest end vma.  */
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
    {
      s = alloc_sec[i];
      if (s->vma < ovl_end)
        {
          asection *s0 = alloc_sec[i - 1];

          if (spu_elf_section_data (s0)->ovl_index == 0)
            {
              spu_elf_section_data (s0)->ovl_index = ++ovl_index;
              alloc_sec[num_buf * 2] = s0;
              alloc_sec[num_buf * 2 + 1] = s0;
              num_buf++;
            }
          spu_elf_section_data (s)->ovl_index = ++ovl_index;
          if (ovl_end < s->vma + s->size)
            {
              ovl_end = s->vma + s->size;
              alloc_sec[num_buf * 2 - 1] = s;
            }
        }
      else
        ovl_end = s->vma + s->size;
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  if (ovl_index == 0)
    {
      free (alloc_sec);
      return FALSE;
    }

  alloc_sec = bfd_realloc (alloc_sec, num_buf * 2 * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return FALSE;

  htab->ovl_region = alloc_sec;
  return TRUE;
}
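
/* Illustrative note (added for clarity, not part of the original source):
   with two overlay buffers at VMAs 0x4000 and 0x8000, each shared by
   several .ovlNN output sections, ovl_region[] ends up holding

     ovl_region[0] = lowest-vma section of buffer 0 (vma 0x4000)
     ovl_region[1] = section whose vma+size ends buffer 0
     ovl_region[2] = lowest-vma section of buffer 1 (vma 0x8000)
     ovl_region[3] = section whose vma+size ends buffer 1

   so region i spans ovl_region[2*i]->vma .. ovl_region[2*i+1]->vma+size.  */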
/* One of these per stub.  */
#define SIZEOF_STUB1 8
#define ILA_79  0x4200004f      /* ila $79,function_address */
#define BR      0x32000000      /* br stub2 */

/* One of these per overlay.  */
#define SIZEOF_STUB2 8
#define ILA_78  0x4200004e      /* ila $78,overlay_number */
#define NOP     0x40200000
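
/* Illustrative sketch (added for clarity, not part of the original source):
   the defines above imply each overlay call stub looks like

        stub:   ila     $79,target_address      ; SIZEOF_STUB1 bytes
                br      stub2
        ...
        stub2:  ila     $78,overlay_number      ; SIZEOF_STUB2 bytes, shared
                br      __ovly_load             ; by all stubs of one overlay

   __ovly_load consults _ovly_table/_ovly_buf_table, maps the overlay in if
   necessary, and branches to the address left in $79.  */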
/* Return true for all relative and absolute branch instructions.
   ...
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   ...
   bihnz 00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}
/* Return TRUE if this reloc symbol should possibly go via an overlay stub.  */

static bfd_boolean
needs_ovl_stub (const char *sym_name,
                asection *sym_sec,
                asection *input_section,
                struct spu_link_hash_table *htab,
                bfd_boolean is_branch)
{
  if (htab->num_overlays == 0)
    return FALSE;

  if (sym_sec == NULL
      || sym_sec->output_section == NULL
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return FALSE;

  /* setjmp always goes via an overlay stub, because then the return
     and hence the longjmp goes via __ovly_return.  That magically
     makes setjmp/longjmp between overlays work.  */
  if (strncmp (sym_name, "setjmp", 6) == 0
      && (sym_name[6] == '\0' || sym_name[6] == '@'))
    return TRUE;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->ovl_index == 0
      && !htab->non_overlay_stubs)
    return FALSE;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->ovl_index
      != spu_elf_section_data (input_section->output_section)->ovl_index)
    return TRUE;

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  */
  return !is_branch;
}
/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct spu_link_hash_table *htab = inf;
      static Elf_Internal_Rela zero_rel;
      char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
      struct spu_stub_hash_entry *sh;

      if (stub_name == NULL)
        {
          htab->stubs.err = 1;
          return FALSE;
        }

      sh = (struct spu_stub_hash_entry *)
        bfd_hash_lookup (&htab->stub_hash_table, stub_name, TRUE, FALSE);
      if (sh == NULL)
        {
          htab->stubs.err = 1;
          return FALSE;
        }

      /* If this entry isn't new, we already have a stub.  */
      if (sh->target_section != NULL)
        return TRUE;

      sh->target_section = h->root.u.def.section;
      sh->target_off = h->root.u.def.value;
      htab->stubs.count += 1;
    }

  return TRUE;
}
/* Called via bfd_hash_traverse to set up pointers to all symbols
   in the stub hash table.  */

static bfd_boolean
populate_stubs (struct bfd_hash_entry *bh, void *inf)
{
  struct spu_link_hash_table *htab = inf;

  htab->stubs.sh[--htab->stubs.count] = (struct spu_stub_hash_entry *) bh;
  return TRUE;
}
/* qsort predicate to sort stubs by overlay number.  */

static int
sort_stubs (const void *a, const void *b)
{
  const struct spu_stub_hash_entry *const *sa = a;
  const struct spu_stub_hash_entry *const *sb = b;
  int i;
  bfd_signed_vma d;

  i = spu_elf_section_data ((*sa)->target_section->output_section)->ovl_index;
  i -= spu_elf_section_data ((*sb)->target_section->output_section)->ovl_index;
  if (i != 0)
    return i;

  d = ((*sa)->target_section->output_section->vma
       + (*sa)->target_section->output_offset
       + (*sa)->target_off
       - (*sb)->target_section->output_section->vma
       - (*sb)->target_section->output_offset
       - (*sb)->target_off);
  if (d != 0)
    return d < 0 ? -1 : 1;

  /* Two functions at the same address.  Aliases perhaps.  */
  i = strcmp ((*sb)->root.string, (*sa)->root.string);
  return i;
}
/* Allocate space for overlay call and return stubs.  */

int
spu_elf_size_stubs (bfd *output_bfd,
                    struct bfd_link_info *info,
                    int non_overlay_stubs,
                    int stack_analysis,
                    asection **stub,
                    asection **ovtab,
                    asection **toe)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;
  unsigned int i, group;
  flagword flags;

  htab->non_overlay_stubs = non_overlay_stubs;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *section;
      Elf_Internal_Sym *local_syms = NULL;
      void *psyms;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
        continue;

      /* Arrange to read and keep global syms for later stack analysis.  */
      psyms = &local_syms;
      if (stack_analysis)
        psyms = &symtab_hdr->contents;

      /* Walk over each section attached to the input bfd.  */
      for (section = ibfd->sections; section != NULL; section = section->next)
        {
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

          /* If there aren't any relocs, then there's nothing more to do.  */
          if ((section->flags & SEC_RELOC) == 0
              || (section->flags & SEC_ALLOC) == 0
              || (section->flags & SEC_LOAD) == 0
              || section->reloc_count == 0)
            continue;

          /* If this section is a link-once section that will be
             discarded, then don't create any stubs.  */
          if (section->output_section == NULL
              || section->output_section->owner != output_bfd)
            continue;

          /* Get the relocs.  */
          internal_relocs
            = _bfd_elf_link_read_relocs (ibfd, section, NULL, NULL,
                                         info->keep_memory);
          if (internal_relocs == NULL)
            goto error_ret_free_local;

          /* Now examine each relocation.  */
          irela = internal_relocs;
          irelaend = irela + section->reloc_count;
          for (; irela < irelaend; irela++)
            {
              enum elf_spu_reloc_type r_type;
              unsigned int r_indx;
              asection *sym_sec;
              Elf_Internal_Sym *sym;
              struct elf_link_hash_entry *h;
              const char *sym_name;
              char *stub_name;
              struct spu_stub_hash_entry *sh;
              unsigned int sym_type;
              enum _insn_type { non_branch, branch, call } insn_type;

              r_type = ELF32_R_TYPE (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);

              if (r_type >= R_SPU_max)
                {
                  bfd_set_error (bfd_error_bad_value);
                  goto error_ret_free_internal;
                }

              /* Determine the reloc target section.  */
              if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
                goto error_ret_free_internal;

              if (sym_sec == NULL
                  || sym_sec->output_section == NULL
                  || sym_sec->output_section->owner != output_bfd)
                continue;

              /* Ensure no stubs for user supplied overlay manager syms.  */
              if (h != NULL
                  && (strcmp (h->root.root.string, "__ovly_load") == 0
                      || strcmp (h->root.root.string, "__ovly_return") == 0))
                continue;

              insn_type = non_branch;
              if (r_type == R_SPU_REL16
                  || r_type == R_SPU_ADDR16)
                {
                  unsigned char insn[4];

                  if (!bfd_get_section_contents (ibfd, section, insn,
                                                 irela->r_offset, 4))
                    goto error_ret_free_internal;

                  if (is_branch (insn) || is_hint (insn))
                    {
                      insn_type = branch;
                      if ((insn[0] & 0xfd) == 0x31)
                        insn_type = call;
                    }
                }

              /* We are only interested in function symbols.  */
              if (h != NULL)
                {
                  sym_type = h->type;
                  sym_name = h->root.root.string;
                }
              else
                {
                  sym_type = ELF_ST_TYPE (sym->st_info);
                  sym_name = bfd_elf_sym_name (sym_sec->owner,
                                               symtab_hdr,
                                               sym,
                                               sym_sec);
                }
              if (sym_type != STT_FUNC)
                {
                  /* It's common for people to write assembly and forget
                     to give function symbols the right type.  Handle
                     calls to such symbols, but warn so that (hopefully)
                     people will fix their code.  We need the symbol
                     type to be correct to distinguish function pointer
                     initialisation from other pointer initialisation.  */
                  if (insn_type == call)
                    (*_bfd_error_handler) (_("warning: call to non-function"
                                             " symbol %s defined in %B"),
                                           sym_sec->owner, sym_name);
                  else if (insn_type == non_branch)
                    continue;
                }

              if (!needs_ovl_stub (sym_name, sym_sec, section, htab,
                                   insn_type != non_branch))
                continue;

              stub_name = spu_stub_name (sym_sec, h, irela);
              if (stub_name == NULL)
                goto error_ret_free_internal;

              sh = (struct spu_stub_hash_entry *)
                bfd_hash_lookup (&htab->stub_hash_table, stub_name,
                                 TRUE, FALSE);
              if (sh == NULL)
                {
                  free (stub_name);
                error_ret_free_internal:
                  if (elf_section_data (section)->relocs != internal_relocs)
                    free (internal_relocs);
                error_ret_free_local:
                  if (local_syms != NULL
                      && (symtab_hdr->contents
                          != (unsigned char *) local_syms))
                    free (local_syms);
                  return 0;
                }

              /* If this entry isn't new, we already have a stub.  */
              if (sh->target_section != NULL)
                {
                  free (stub_name);
                  continue;
                }

              sh->target_section = sym_sec;
              if (h != NULL)
                sh->target_off = h->root.u.def.value;
              else
                sh->target_off = sym->st_value;
              sh->target_off += irela->r_addend;

              htab->stubs.count += 1;
            }

          /* We're done with the internal relocs, free them.  */
          if (elf_section_data (section)->relocs != internal_relocs)
            free (internal_relocs);
        }

      if (local_syms != NULL
          && symtab_hdr->contents != (unsigned char *) local_syms)
        {
          if (!info->keep_memory)
            free (local_syms);
          else
            symtab_hdr->contents = (unsigned char *) local_syms;
        }
    }

  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, htab);
  if (htab->stubs.err)
    return 0;

  if (htab->stubs.count == 0)
    return 1;

  ibfd = info->input_bfds;
  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  *stub = htab->stub;
  if (htab->stub == NULL
      || !bfd_set_section_alignment (ibfd, htab->stub, 4))
    return 0;

  flags = (SEC_ALLOC | SEC_LOAD
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
  *ovtab = htab->ovtab;
  if (htab->ovtab == NULL
      || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
    return 0;

  *toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (*toe == NULL
      || !bfd_set_section_alignment (ibfd, *toe, 4))
    return 0;

  /* Retrieve all the stubs and sort.  */
  htab->stubs.sh = bfd_malloc (htab->stubs.count * sizeof (*htab->stubs.sh));
  if (htab->stubs.sh == NULL)
    return 0;
  i = htab->stubs.count;
  bfd_hash_traverse (&htab->stub_hash_table, populate_stubs, htab);
  BFD_ASSERT (htab->stubs.count == 0);

  htab->stubs.count = i;
  qsort (htab->stubs.sh, htab->stubs.count, sizeof (*htab->stubs.sh),
         sort_stubs);

  /* Now that the stubs are sorted, place them in the stub section.
     Stubs are grouped per overlay.  */
  group = 0;
  for (i = 0; i < htab->stubs.count; i++)
    {
      if (spu_elf_section_data (htab->stubs.sh[group]->target_section
                                ->output_section)->ovl_index
          != spu_elf_section_data (htab->stubs.sh[i]->target_section
                                   ->output_section)->ovl_index)
        {
          htab->stub->size += SIZEOF_STUB2;
          for (; group != i; group++)
            htab->stubs.sh[group]->delta
              = htab->stubs.sh[i - 1]->off - htab->stubs.sh[group]->off;
        }
      if (group == i
          || ((htab->stubs.sh[i - 1]->target_section->output_section->vma
               + htab->stubs.sh[i - 1]->target_section->output_offset
               + htab->stubs.sh[i - 1]->target_off)
              != (htab->stubs.sh[i]->target_section->output_section->vma
                  + htab->stubs.sh[i]->target_section->output_offset
                  + htab->stubs.sh[i]->target_off)))
        {
          htab->stubs.sh[i]->off = htab->stub->size;
          htab->stub->size += SIZEOF_STUB1;
          if (info->emitrelocations)
            htab->stub->reloc_count += 1;
        }
      else
        htab->stubs.sh[i]->off = htab->stubs.sh[i - 1]->off;
    }

  htab->stub->size += SIZEOF_STUB2;
  if (info->emitrelocations)
    htab->stub->flags |= SEC_RELOC;
  for (; group != i; group++)
    htab->stubs.sh[group]->delta
      = htab->stubs.sh[i - 1]->off - htab->stubs.sh[group]->off;
  /* htab->ovtab consists of two arrays.
     .  struct {
     .    u32 vma;
     .    u32 size;
     .    u32 file_off;
     .    u32 buf;
     .  } _ovly_table[];
     .
     .  struct {
     .    u32 mapped;
     .  } _ovly_buf_table[];  */

  htab->ovtab->alignment_power = 4;
  htab->ovtab->size = htab->num_overlays * 16 + htab->num_buf * 4;

  return 2;
}
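
/* Worked example (added for clarity, not part of the original source): with
   16 overlays sharing 2 buffers, _ovly_table takes 16 * 16 = 256 bytes and
   _ovly_buf_table 2 * 4 = 8 bytes, so htab->ovtab->size is 264 bytes; the
   alignment_power of 4 set above aligns the section start to 16 bytes.  */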
/* Functions to handle embedded spu_ovl.o object.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}

static file_ptr
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
               void *stream,
               void *buf,
               file_ptr nbytes,
               file_ptr offset)
{
  struct _ovl_stream *os;
  size_t count, max;

  os = (struct _ovl_stream *) stream;
  max = (const char *) os->end - (const char *) os->start;

  if ((ufile_ptr) offset >= max)
    return 0;

  count = nbytes;
  if (count > max - offset)
    count = max - offset;

  memcpy (buf, (const char *) os->start + offset, count);
  return count;
}

bfd_boolean
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
                              "elf32-spu",
                              ovl_mgr_open, (void *) stream,
                              ovl_mgr_pread, NULL);
  return *ovl_bfd != NULL;
}
/* Fill in the ila and br for a stub.  On the last stub for a group,
   write the stub that sets the overlay number too.  */

static bfd_boolean
write_one_stub (struct spu_stub_hash_entry *ent, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sec = htab->stub;
  asection *s = ent->target_section;
  unsigned int ovl;
  bfd_vma val;

  val = ent->target_off + s->output_offset + s->output_section->vma;
  bfd_put_32 (sec->owner, ILA_79 + ((val << 7) & 0x01ffff80),
              sec->contents + ent->off);
  val = ent->delta + 4;
  bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
              sec->contents + ent->off + 4);

  if (info->emitrelocations)
    {
      Elf_Internal_Rela *relocs, *r;
      struct bfd_elf_section_data *elfsec_data;

      elfsec_data = elf_section_data (sec);
      relocs = elfsec_data->relocs;
      if (relocs == NULL)
        {
          bfd_size_type relsize;
          Elf_Internal_Shdr *symtab_hdr;
          struct elf_link_hash_entry **sym_hash;
          unsigned long symcount;
          bfd_size_type amt;

          relsize = sec->reloc_count * sizeof (*relocs);
          relocs = bfd_alloc (sec->owner, relsize);
          if (relocs == NULL)
            return FALSE;
          elfsec_data->relocs = relocs;
          elfsec_data->rel_hdr.sh_size
            = sec->reloc_count * sizeof (Elf32_External_Rela);
          elfsec_data->rel_hdr.sh_entsize = sizeof (Elf32_External_Rela);
          sec->reloc_count = 0;

          /* Increase the size of symbol hash array on the bfd to
             which we attached our .stub section.  This hack allows
             us to create relocs against global symbols.  */
          symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
          symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
          symcount -= symtab_hdr->sh_info;
          amt = symcount * sizeof (*sym_hash);
          sym_hash = bfd_alloc (sec->owner, amt + sizeof (*sym_hash));
          if (sym_hash == NULL)
            return FALSE;
          memcpy (sym_hash, elf_sym_hashes (sec->owner), amt);
          sym_hash[symcount] = htab->ovly_load;
          htab->ovly_load_r_symndx = symcount + symtab_hdr->sh_info;
          elf_sym_hashes (sec->owner) = sym_hash;
        }

      r = relocs + sec->reloc_count;
      sec->reloc_count += 1;
      r->r_offset = ent->off + 4;
      r->r_info = ELF32_R_INFO (0, R_SPU_REL16);
      r->r_addend = (sec->output_section->vma
                     + sec->output_offset
                     + ent->off + 4 + ent->delta);
    }

  /* If this is the last stub of this group, write stub2.  */
  if (ent->delta == 0)
    {
      bfd_put_32 (sec->owner, NOP,
                  sec->contents + ent->off + 4);

      ovl = spu_elf_section_data (s->output_section)->ovl_index;
      bfd_put_32 (sec->owner, ILA_78 + ((ovl << 7) & 0x01ffff80),
                  sec->contents + ent->off + 8);

      val = (htab->ovly_load->root.u.def.section->output_section->vma
             + htab->ovly_load->root.u.def.section->output_offset
             + htab->ovly_load->root.u.def.value
             - (sec->output_section->vma
                + sec->output_offset
                + ent->off + 12));

      if (val + 0x20000 >= 0x40000)
        htab->stub_overflow = TRUE;

      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
                  sec->contents + ent->off + 12);

      if (info->emitrelocations)
        {
          Elf_Internal_Rela *relocs, *r;
          struct bfd_elf_section_data *elfsec_data;

          elfsec_data = elf_section_data (sec);
          relocs = elfsec_data->relocs;
          /* The last branch is overwritten, so overwrite its reloc too.  */
          r = relocs + sec->reloc_count - 1;
          r->r_offset = ent->off + 12;
          r->r_info = ELF32_R_INFO (htab->ovly_load_r_symndx, R_SPU_REL16);
          r->r_addend = 0;
        }
    }

  if (htab->emit_stub_syms)
    {
      struct elf_link_hash_entry *h;
      size_t len1, len2;
      char *name;

      len1 = sizeof ("00000000.ovl_call.") - 1;
      len2 = strlen (ent->root.string);
      name = bfd_malloc (len1 + len2 + 1);
      if (name == NULL)
        return FALSE;

      memcpy (name, "00000000.ovl_call.", len1);
      memcpy (name + len1, ent->root.string, len2 + 1);
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      if (h == NULL)
        return FALSE;
      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = sec;
          h->root.u.def.value = ent->off;
          h->size = (ent->delta == 0
                     ? SIZEOF_STUB1 + SIZEOF_STUB2 : SIZEOF_STUB1);
          h->type = STT_FUNC;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
        }
    }

  return TRUE;
}
/* Define an STT_OBJECT symbol.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
    }
  else
    {
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
                             h->root.u.def.section->owner,
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}
/* Fill in all stubs and the overlay tables.  */

bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms, asection *toe)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  htab->emit_stub_syms = emit_syms;
  htab->stub->contents = bfd_zalloc (htab->stub->owner, htab->stub->size);
  if (htab->stub->contents == NULL)
    return FALSE;

  h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
  htab->ovly_load = h;
  BFD_ASSERT (h != NULL
              && (h->root.type == bfd_link_hash_defined
                  || h->root.type == bfd_link_hash_defweak)
              && h->def_regular);

  s = h->root.u.def.section->output_section;
  if (spu_elf_section_data (s)->ovl_index)
    {
      (*_bfd_error_handler) (_("%s in overlay section"),
                             h->root.u.def.section->owner);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  /* Write out all the stubs.  */
  for (i = 0; i < htab->stubs.count; i++)
    write_one_stub (htab->stubs.sh[i], info);

  if (htab->stub_overflow)
    {
      (*_bfd_error_handler) (_("overlay stub relocation overflow"));
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  /* Write out _ovly_table.  */
  p = htab->ovtab->contents;
  obfd = htab->ovtab->output_section->owner;
  for (s = obfd->sections; s != NULL; s = s->next)
    {
      unsigned int ovl_index = spu_elf_section_data (s)->ovl_index;

      if (ovl_index != 0)
        {
          unsigned int lo, hi, mid;
          unsigned long off = (ovl_index - 1) * 16;
          bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
          bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
          /* file_off written later in spu_elf_modify_program_headers.  */

          lo = 0;
          hi = htab->num_buf;
          while (lo < hi)
            {
              mid = (lo + hi) >> 1;
              if (htab->ovl_region[2 * mid + 1]->vma
                  + htab->ovl_region[2 * mid + 1]->size <= s->vma)
                lo = mid + 1;
              else if (htab->ovl_region[2 * mid]->vma > s->vma)
                hi = mid;
              else
                {
                  bfd_put_32 (htab->ovtab->owner, mid + 1, p + off + 12);
                  break;
                }
            }
          BFD_ASSERT (lo < hi);
        }
    }

  /* Write out _ovly_buf_table.  */
  p = htab->ovtab->contents + htab->num_overlays * 16;
  for (i = 0; i < htab->num_buf; i++)
    {
      bfd_put_32 (htab->ovtab->owner, 0, p);
      p += 4;
    }

  h = define_ovtab_symbol (htab, "_ovly_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = 0;
  h->size = htab->num_overlays * 16;

  h = define_ovtab_symbol (htab, "_ovly_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16;

  h = define_ovtab_symbol (htab, "_ovly_buf_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16;
  h->size = htab->num_buf * 4;

  h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + htab->num_buf * 4;

  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = toe;
  h->root.u.def.value = 0;

  return TRUE;
}
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.  */

static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
{
  int unrecog;
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusing insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
        break;

      if (buf[0] == 0x24 /* stqd */)
        continue;

      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
        {
          imm >>= 7;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] + imm;

          if (rt == 1 /* sp */)
            {
              if (imm > 0)
                break;
              return reg[rt];
            }
          continue;
        }
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
        {
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

          reg[rt] = reg[ra] + reg[rb];
          if (rt == 1)
            return reg[rt];
          continue;
        }
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
        {
          if (buf[0] >= 0x42 /* ila */)
            imm |= (buf[0] & 1) << 17;
          else
            {
              imm &= 0xffff;

              if (buf[0] == 0x40 /* il */)
                {
                  if ((buf[1] & 0x80) == 0)
                    goto unknown_insn;
                  imm = (imm ^ 0x8000) - 0x8000;
                }
              else if ((buf[1] & 0x80) == 0 /* ilhu */)
                imm <<= 16;
            }
          reg[rt] = imm;
          continue;
        }
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
        {
          reg[rt] |= imm & 0xffff;
          continue;
        }
      else if (buf[0] == 0x04 /* ori */)
        {
          imm >>= 7;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] | imm;
          continue;
        }
      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
               || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
        {
          /* Used in pic reg load.  Say rt is trashed.  */
          reg[rt] = 0;
          continue;
        }
      else if (is_branch (buf) || is_indirect_branch (buf))
        /* If we hit a branch then we must be out of the prologue.  */
        break;

    unknown_insn:
      ++unrecog;
    }

  return 0;
}
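
/* Illustrative example (added for clarity, not part of the original source):
   for a prologue along the lines of

        stqd    $lr,16($sp)
        stqd    $sp,-96($sp)
        ai      $sp,$sp,-96

   the loop above tracks "ai $1,$1,-96", so find_function_stack_adjust
   returns -96 and the caller records a stack frame of 96 bytes for the
   function (see maybe_insert_function below).  */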
/* qsort predicate to sort symbols by section and value.  */

static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;

static int
sort_syms (const void *a, const void *b)
{
  Elf_Internal_Sym *const *s1 = a;
  Elf_Internal_Sym *const *s2 = b;
  asection *sec1,*sec2;
  bfd_signed_vma delta;

  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];

  if (sec1 != sec2)
    return sec1->index - sec2->index;

  delta = (*s1)->st_value - (*s2)->st_value;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  delta = (*s2)->st_size - (*s1)->st_size;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return *s1 < *s2 ? -1 : 1;
}
struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  int is_tail;
};

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Stack usage.  */
  int stack;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section.  */
  unsigned int is_func : 1;
  /* Flags used during call tree traversal.  */
  unsigned int visit1 : 1;
  unsigned int non_root : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
};

struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
   entries for section SEC.  */

static struct spu_elf_stack_info *
alloc_stack_info (asection *sec, int max_fun)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  bfd_size_type amt;

  amt = sizeof (struct spu_elf_stack_info);
  amt += (max_fun - 1) * sizeof (struct function_info);
  sec_data->stack_info = bfd_zmalloc (amt);
  if (sec_data->stack_info != NULL)
    sec_data->stack_info->max_fun = max_fun;
  return sec_data->stack_info;
}
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.  */

static struct function_info *
maybe_insert_function (asection *sec,
                       void *sym_h,
                       bfd_boolean global,
                       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
  int i;
  bfd_vma off, size;

  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
        return NULL;
    }

  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
         info.  */
      if (sinfo->fun[i].lo == off)
        {
          /* Prefer globals over local syms.  */
          if (global && !sinfo->fun[i].global)
            {
              sinfo->fun[i].global = TRUE;
              sinfo->fun[i].u.h = sym_h;
            }
          if (is_func)
            sinfo->fun[i].is_func = TRUE;
          return &sinfo->fun[i];
        }
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
        return &sinfo->fun[i];
    }

  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
             (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  else if (i >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
        return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->stack_info = sinfo;
    }
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
/* Return the name of FUN.  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      sprintf (name, "%s+%lx", sec->name,
               (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
/* Read the instruction at OFF in SEC.  Return true iff the instruction
   is a nop, lnop, or stop 0 (all zero insn).  */

static bfd_boolean
is_nop (asection *sec, bfd_vma off)
{
  unsigned char insn[4];

  if (off + 4 > sec->size
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
    return FALSE;
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
    return TRUE;
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
    return TRUE;
  return FALSE;
}
/* Extend the range of FUN to cover nop padding up to LIMIT.
   Return TRUE iff some instruction other than a NOP was found.  */

static bfd_boolean
insns_at_end (struct function_info *fun, bfd_vma limit)
{
  bfd_vma off = (fun->hi + 3) & -4;

  while (off < limit && is_nop (fun->sec, off))
    off += 4;
  if (off < limit)
    {
      fun->hi = off;
      return TRUE;
    }
  fun->hi = limit;
  return FALSE;
}
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  if (sinfo == NULL)
    return FALSE;

  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
        /* Fix overlapping symbols.  */
        const char *f1 = func_name (&sinfo->fun[i - 1]);
        const char *f2 = func_name (&sinfo->fun[i]);

        info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
        sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      if (sinfo->fun[0].lo != 0)
        gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
        {
          const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

          info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
          sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
        }
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
        gaps = TRUE;
    }
  return gaps;
}
/* Search current function info for a function that contains address
   OFFSET in section SEC.  */

static struct function_info *
find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
  int lo, hi, mid;

  lo = 0;
  hi = sinfo->num_fun;
  while (lo < hi)
    {
      mid = (lo + hi) / 2;
      if (offset < sinfo->fun[mid].lo)
        hi = mid;
      else if (offset >= sinfo->fun[mid].hi)
        lo = mid + 1;
      else
        return &sinfo->fun[mid];
    }
  info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
                          sec, offset);
  return NULL;
}
/* Add CALLEE to CALLER call list if not already present.  */

static bfd_boolean
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info *p;
  for (p = caller->call_list; p != NULL; p = p->next)
    if (p->fun == callee->fun)
      {
        /* Tail calls use less stack than normal calls.  Retain entry
           for normal call over one for tail call.  */
        if (p->is_tail > callee->is_tail)
          p->is_tail = callee->is_tail;
        return FALSE;
      }
  callee->next = caller->call_list;
  caller->call_list = callee;
  return TRUE;
}
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */

static bfd_boolean
mark_functions_via_relocs (asection *sec,
                           struct bfd_link_info *info,
                           int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  Elf_Internal_Sym *syms;
  void *psyms;
  static bfd_boolean warned;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
                                               info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  syms = *(Elf_Internal_Sym **) psyms;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      unsigned char insn[4];
      bfd_boolean is_call;
      struct function_info *caller;
      struct call_info *callee;

      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
          && r_type != R_SPU_ADDR16)
        continue;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
        return FALSE;

      if (sym_sec == NULL
          || sym_sec->output_section == NULL
          || sym_sec->output_section->owner != sec->output_section->owner)
        continue;

      if (!bfd_get_section_contents (sec->owner, sec, insn,
                                     irela->r_offset, 4))
        return FALSE;
      if (!is_branch (insn))
        continue;

      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
          != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
        {
          if (!call_tree || !warned)
            info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
                                      " %B(%A), stack analysis incomplete\n"),
                                    sec->owner, sec, irela->r_offset,
                                    sym_sec->owner, sym_sec);
          warned = TRUE;
          continue;
        }

      is_call = (insn[0] & 0xfd) == 0x31;

      if (h)
        val = h->root.u.def.value;
      else
        val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
        {
          struct function_info *fun;

          if (irela->r_addend != 0)
            {
              Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
              if (fake == NULL)
                return FALSE;
              fake->st_value = val;
              fake->st_shndx
                = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
              sym = fake;
            }
          if (sym)
            fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
          else
            fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
          if (fun == NULL)
            return FALSE;
          if (irela->r_addend != 0
              && fun->u.sym != sym)
            free (sym);
          continue;
        }

      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
        return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
        return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
        return FALSE;
      callee->is_tail = !is_call;
      if (!insert_callee (caller, callee))
        free (callee);
      else if (!is_call
               && !callee->fun->is_func
               && callee->fun->stack == 0)
        {
          /* This is either a tail call or a branch from one part of
             the function to another, ie. hot/cold section.  If the
             destination has been called by some other function then
             it is a separate function.  We also assume that functions
             are not split across input files.  */
          if (callee->fun->start != NULL
              || sec->owner != sym_sec->owner)
            {
              callee->fun->start = NULL;
              callee->fun->is_func = TRUE;
            }
          else
            callee->fun->start = caller;
        }
    }

  return TRUE;
}
/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.  */

static bfd_boolean
pasted_function (asection *sec, struct bfd_link_info *info)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
        {
          if (fun_start != NULL)
            {
              if (fun_start->start)
                fun_start = fun_start->start;
              fun->start = fun_start;
            }
          return TRUE;
        }
      if (l->type == bfd_indirect_link_order
          && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
          && (sinfo = sec_data->stack_info) != NULL
          && sinfo->num_fun != 0)
        fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  info->callbacks->einfo (_("%A link_order not found\n"), sec);
  return FALSE;
}
/* We're only interested in code sections.  */

static bfd_boolean
interesting_section (asection *s, bfd *obfd, struct spu_link_hash_table *htab)
{
  return (s != htab->stub
          && s->output_section != NULL
          && s->output_section->owner == obfd
          && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
              == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
          && s->size != 0);
}
2180 /* Map address ranges in code sections to functions. */
2183 discover_functions (bfd
*output_bfd
, struct bfd_link_info
*info
)
2185 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2188 Elf_Internal_Sym
***psym_arr
;
2189 asection
***sec_arr
;
2190 bfd_boolean gaps
= FALSE
;
2193 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2196 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2197 if (psym_arr
== NULL
)
2199 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2200 if (sec_arr
== NULL
)
2204 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2206 ibfd
= ibfd
->link_next
, bfd_idx
++)
2208 extern const bfd_target bfd_elf32_spu_vec
;
2209 Elf_Internal_Shdr
*symtab_hdr
;
2212 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2213 asection
**psecs
, **p
;
2215 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2218 /* Read all the symbols. */
2219 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2220 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2224 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2227 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2229 symtab_hdr
->contents
= (void *) syms
;
2234 /* Select defined function symbols that are going to be output. */
2235 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2238 psym_arr
[bfd_idx
] = psyms
;
2239 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2242 sec_arr
[bfd_idx
] = psecs
;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s, output_bfd, htab))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec, output_bfd, htab))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec, output_bfd, htab)
		&& sec->reloc_count != 0)
	      {
		if (!mark_functions_via_relocs (sec, info, FALSE))
		  return FALSE;
	      }
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec, output_bfd, htab))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec, output_bfd, htab))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->stack_info;
		if (sinfo != NULL)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec, info))
		  return FALSE;
	      }
	}
    }

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  return TRUE;
}
/* Mark nodes in the call graph that are called by some other node.  */

static void
mark_non_root (struct function_info *fun)
{
  struct call_info *call;

  fun->visit1 = TRUE;
  for (call = fun->call_list; call; call = call->next)
    {
      call->fun->non_root = TRUE;
      if (!call->fun->visit1)
	mark_non_root (call->fun);
    }
}
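/* Only functions that no call ever reaches keep non_root clear, so once
   every section has been walked the entries with !non_root are the call
   graph roots, typically the entry point and any otherwise unreferenced
   functions.  */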
/* Remove cycles from the call graph.  */

static void
call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
{
  struct call_info **callp, *call;

  fun->visit2 = TRUE;
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      if (!call->fun->visit2)
	call_graph_traverse (call->fun, info);
      else if (call->fun->marking)
	{
	  const char *f1 = func_name (fun);
	  const char *f2 = func_name (call->fun);

	  info->callbacks->info (_("Stack analysis will ignore the call "
				   "from %s to %s\n"),
				 f1, f2);
	  *callp = call->next;
	  continue;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
}
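/* Illustrative example: given a cycle A -> B -> C -> A, the depth-first
   walk above reaches C while A still has its marking flag set, so the
   back edge C -> A is unlinked from C's call_list and reported, leaving
   the acyclic chain A -> B -> C for the stack summation pass.  */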
/* Populate call_list for each function.  */

static bfd_boolean
build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if (!interesting_section (sec, output_bfd, htab)
	      || sec->reloc_count == 0)
	    continue;

	  if (!mark_functions_via_relocs (sec, info, TRUE))
	    return FALSE;
	}

      /* Transfer call info from hot/cold section part of function
	 to main entry.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		{
		  if (sinfo->fun[i].start != NULL)
		    {
		      struct call_info *call = sinfo->fun[i].call_list;

		      while (call != NULL)
			{
			  struct call_info *call_next = call->next;

			  if (!insert_callee (sinfo->fun[i].start, call))
			    free (call);
			  call = call_next;
			}
		      sinfo->fun[i].call_list = NULL;
		      sinfo->fun[i].non_root = TRUE;
		    }
		}
	    }
	}
    }

  /* Find the call graph root(s).  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].visit1)
		  mark_non_root (&sinfo->fun[i]);
	    }
	}
    }

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].non_root)
		  call_graph_traverse (&sinfo->fun[i], info);
	    }
	}
    }

  return TRUE;
}
/* Descend the call graph for FUN, accumulating total stack required.  */

static bfd_vma
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   int emit_stack_syms)
{
  struct call_info *call;
  struct function_info *max = NULL;
  bfd_vma max_stack = fun->stack;
  bfd_vma stack;
  const char *f1;

  if (fun->visit3)
    return max_stack;

  for (call = fun->call_list; call; call = call->next)
    {
      stack = sum_stack (call->fun, info, emit_stack_syms);
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail)
	stack += fun->stack;
      if (max_stack < stack)
	{
	  max_stack = stack;
	  max = call->fun;
	}
    }

  f1 = func_name (fun);
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			  f1, (bfd_vma) fun->stack, max_stack);

  if (fun->call_list != NULL)
    {
      info->callbacks->minfo (_("  calls:\n"));
      for (call = fun->call_list; call; call = call->next)
	{
	  const char *f2 = func_name (call->fun);
	  const char *ann1 = call->fun == max ? "*" : " ";
	  const char *ann2 = call->is_tail ? "t" : " ";

	  info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
	}
    }

  /* Now fun->stack holds cumulative stack.  */
  fun->stack = max_stack;
  fun->visit3 = TRUE;

  if (emit_stack_syms)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name != NULL)
	{
	  if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	    sprintf (name, "__stack_%s", f1);
	  else
	    sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

	  h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
	  free (name);
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_new
		  || h->root.type == bfd_link_hash_undefined
		  || h->root.type == bfd_link_hash_undefweak))
	    {
	      h->root.type = bfd_link_hash_defined;
	      h->root.u.def.section = bfd_abs_section_ptr;
	      h->root.u.def.value = max_stack;
	      h->size = 0;
	      h->type = 0;
	      h->ref_regular = 1;
	      h->def_regular = 1;
	      h->ref_regular_nonweak = 1;
	      h->forced_local = 1;
	      h->non_elf = 0;
	    }
	}
    }

  return max_stack;
}
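/* Worked example (illustrative): if FUN uses 48 bytes of local stack,
   makes a normal call to f whose cumulative requirement is 112 bytes,
   and tail calls g whose cumulative requirement is 160 bytes, the
   candidates are 112 + 48 = 160 for f (caller frame still live) and 160
   for g (frame already gone), so fun->stack becomes 160.  With
   emit_stack_syms set, that total is published as an absolute symbol
   named "__stack_<function>" for global functions, or
   "__stack_<section id>_<function>" for local ones, as built by the
   sprintf calls above.  */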
/* Provide an estimate of total stack required.  */

static bfd_boolean
spu_elf_stack_analysis (bfd *output_bfd,
			struct bfd_link_info *info,
			int emit_stack_syms)
{
  bfd *ibfd;
  bfd_vma max_stack = 0;

  if (!discover_functions (output_bfd, info))
    return FALSE;

  if (!build_call_tree (output_bfd, info))
    return FALSE;

  info->callbacks->info (_("Stack size for call graph root nodes.\n"));
  info->callbacks->minfo (_("\nStack size for functions.  "
			    "Annotations: '*' max stack, 't' tail call\n"));
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].non_root)
		  {
		    bfd_vma stack;
		    const char *f1;

		    stack = sum_stack (&sinfo->fun[i], info,
				       emit_stack_syms);
		    f1 = func_name (&sinfo->fun[i]);
		    info->callbacks->info (_("  %s: 0x%v\n"),
					   f1, stack);
		    if (max_stack < stack)
		      max_stack = stack;
		  }
	    }
	}
    }

  info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
  return TRUE;
}
/* Perform a final link.  */

static bfd_boolean
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (htab->stack_analysis
      && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
    info->callbacks->einfo ("%X%P: stack analysis error: %E\n");

  return bfd_elf_final_link (output_bfd, info);
}
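/* The stack_analysis and emit_stack_syms flags live in the SPU link
   hash table and are presumed to be set by whatever drives this backend
   (normally the linker's SPU emulation) before the final link; by
   default no analysis is run.  */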
/* Called when not normally emitting relocs, ie. !info->relocatable
   and !info->emitrelocations.  Returns a count of special relocs
   that need to be emitted.  */

static unsigned int
spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
{
  unsigned int count = 0;
  Elf_Internal_Rela *relend = relocs + sec->reloc_count;

  for (; relocs < relend; relocs++)
    {
      int r_type = ELF32_R_TYPE (relocs->r_info);

      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	++count;
    }

  return count;
}
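/* R_SPU_PPU32 and R_SPU_PPU64 describe values that are presumably only
   resolvable on the PowerPC side, for example once the SPU image has
   been embedded in a PPU object, so they must survive a final link.
   The count returned here lets the generic linker reserve space for
   them; spu_elf_relocate_section below is what actually preserves
   them.  */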
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */

static bfd_boolean
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  bfd_boolean ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;

  htab = spu_hash_table (info);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      bfd_boolean branch;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  emit_these_relocs = TRUE;
	  continue;
	}

      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  rel->r_info = 0;
	  rel->r_addend = 0;
	  continue;
	}

      if (info->relocatable)
	continue;

      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      branch = (is_branch (contents + rel->r_offset)
		|| is_hint (contents + rel->r_offset));
      if (needs_ovl_stub (sym_name, sec, input_section, htab, branch))
	{
	  char *stub_name;
	  struct spu_stub_hash_entry *sh;

	  stub_name = spu_stub_name (sec, h, rel);
	  if (stub_name == NULL)
	    return FALSE;

	  sh = (struct spu_stub_hash_entry *)
	    bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
	  if (sh != NULL)
	    {
	      relocation = (htab->stub->output_section->vma
			    + htab->stub->output_offset
			    + sh->off);
	      addend = 0;
	    }
	  free (stub_name);
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* Fall through.  */

	    common_error:
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->relocatable
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
    }

  return ret;
}
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static bfd_boolean
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->num_overlays != 0
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      static Elf_Internal_Rela zero_rel;
      char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
      struct spu_stub_hash_entry *sh;

      if (stub_name == NULL)
	return FALSE;
      sh = (struct spu_stub_hash_entry *)
	bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
      free (stub_name);
      if (sh == NULL)
	return TRUE;
      sym->st_shndx
	= _bfd_elf_section_from_bfd_section (htab->stub->output_section->owner,
					     htab->stub->output_section);
      sym->st_value = (htab->stub->output_section->vma
		       + htab->stub->output_offset
		       + sh->off);
    }

  return TRUE;
}
static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
/* Set ELF header e_type for plugins.  */

static void
spu_elf_post_process_headers (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
}
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  int extra = htab->num_overlays;
  asection *sec;

  if (extra)
    ++extra;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		/* Split the trailing sections off into a new segment.  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		/* The section also needs splitting off the front.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }

  return TRUE;
}
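/* Illustrative example: a PT_LOAD segment mapping { .text, .toe, .data }
   is rewritten by the code above into three PT_LOAD segments { .text },
   { .toe } and { .data }; an overlay section is split out of its
   original segment in the same way.  */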
/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive.  */

asection *
spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
{
  struct elf_segment_map *m;
  unsigned int i;

  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0
	    && (m->sections[i]->vma < lo
		|| m->sections[i]->vma > hi
		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
	  return m->sections[i];

  return NULL;
}
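/* Callers are expected to pass the valid local store address range
   here; for a 256k local store that would presumably be LO = 0 and
   HI = 0x3ffff.  A non-NULL return identifies an offending section so
   the caller can report it.  */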
/* Tweak the section type of .note.spu_name.  */

static bfd_boolean
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
		       Elf_Internal_Shdr *hdr,
		       asection *sec)
{
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return TRUE;
}
/* Tweak phdrs before writing them out.  */

static bfd_boolean
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		unsigned int off = (o - 1) * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }
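  /* The arithmetic above implies that each _ovly_table entry is 16
     bytes and that the file offset field sits 8 bytes into the entry
     for overlay O (off = (O - 1) * 16 + 8); the remaining fields are
     laid out where the table itself is generated.  */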
  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
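  /* Rounding sketch: "-x & 15" is the number of bytes needed to bring x
     up to the next multiple of 16.  For p_filesz == 0x2345 the adjust
     is 0xb and the padded size is 0x2350; when x is already a multiple
     of 16 the adjust is 0.  */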
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  if (i == (unsigned int) -1)
    /* No overlaps were detected above, so the rounding is safe.  */
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
#define TARGET_BIG_SYM		bfd_elf32_spu_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_free	spu_elf_link_hash_table_free

#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
#define elf_backend_post_process_headers	spu_elf_post_process_headers
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

#include "elf32-target.h"