/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */
static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
					   void *, asection *,
					   bfd *, char **);
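
/* Each HOWTO entry below takes, in order: type, rightshift, size, bitsize,
   pc_relative, bitpos, complain_on_overflow, special_function, name,
   partial_inplace, src_mask, dst_mask, pcrel_offset.  */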
/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16, TRUE,   7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9, TRUE,   0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9, TRUE,   0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,   0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
};
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    }
}
static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
		       arelent *cache_ptr,
		       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}
static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}
static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
	&& strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
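
/* For reference: with dst_mask 0x0180007f (REL9) the two high bits of the
   9-bit word offset land in instruction bits 23-24, and with dst_mask
   0x0000c07f (REL9I) they land in bits 14-15; the low seven bits go to
   bits 0-6 in both encodings.  This follows directly from the val
   remapping and masks above.  */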
static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
	return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}
/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}
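
/* Note: spu_elf_build_stubs below defines a related "_EAR_" symbol at the
   start of the .toe section; the _EAR_* symbols kept here presumably mark
   individual entries in that table.  */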
/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Sorted array of stubs.  */
  struct {
    struct spu_stub_hash_entry **sh;
    unsigned int count;
    int err;
  } stubs;

  /* Shortcuts to overlay sections.  */
  asection *stub;
  asection *ovtab;

  struct elf_link_hash_entry *ovly_load;
  unsigned long ovly_load_r_symndx;

  /* An array of two output sections per overlay region, chosen such that
     the first section vma is the overlay buffer vma (ie. the section has
     the lowest vma in the group that occupies the region), and the second
     section vma+size specifies the end of the region.  We keep pointers
     to sections like this because section vmas may change when laying
     them out.  */
  asection **ovl_region;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms : 1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  /* Set if a stub branch was out of range.  */
  unsigned int stub_overflow : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};

#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
struct spu_stub_hash_entry
{
  struct bfd_hash_entry root;

  /* Destination of this stub.  */
  asection *target_section;
  bfd_vma target_off;

  /* Offset of entry in stub section.  */
  bfd_vma off;

  /* Offset from this stub to stub that loads the overlay index.  */
  bfd_vma delta;
};
/* Create an entry in a spu stub hash table.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table,
		   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = bfd_hash_allocate (table, sizeof (struct spu_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct spu_stub_hash_entry *sh = (struct spu_stub_hash_entry *) entry;

      sh->target_section = NULL;
      sh->target_off = 0;
      sh->off = 0;
      sh->delta = 0;
    }

  return entry;
}
/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
				      _bfd_elf_link_hash_newfunc,
				      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  /* Init the stub hash table too.  */
  if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct spu_stub_hash_entry)))
    return NULL;

  memset (&htab->stubs, 0,
	  sizeof (*htab) - offsetof (struct spu_link_hash_table, stubs));

  return &htab->elf.root;
}
/* Free the derived linker hash table.  */

static void
spu_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
{
  struct spu_link_hash_table *ret = (struct spu_link_hash_table *) hash;

  bfd_hash_table_free (&ret->stub_hash_table);
  _bfd_generic_link_hash_table_free (hash);
}
398 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
399 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
400 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
403 get_sym_h (struct elf_link_hash_entry
**hp
,
404 Elf_Internal_Sym
**symp
,
406 Elf_Internal_Sym
**locsymsp
,
407 unsigned long r_symndx
,
410 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
412 if (r_symndx
>= symtab_hdr
->sh_info
)
414 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
415 struct elf_link_hash_entry
*h
;
417 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
418 while (h
->root
.type
== bfd_link_hash_indirect
419 || h
->root
.type
== bfd_link_hash_warning
)
420 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
430 asection
*symsec
= NULL
;
431 if (h
->root
.type
== bfd_link_hash_defined
432 || h
->root
.type
== bfd_link_hash_defweak
)
433 symsec
= h
->root
.u
.def
.section
;
439 Elf_Internal_Sym
*sym
;
440 Elf_Internal_Sym
*locsyms
= *locsymsp
;
444 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
447 size_t symcount
= symtab_hdr
->sh_info
;
449 /* If we are reading symbols into the contents, then
450 read the global syms too. This is done to cache
451 syms for later stack analysis. */
452 if ((unsigned char **) locsymsp
== &symtab_hdr
->contents
)
453 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
454 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
461 sym
= locsyms
+ r_symndx
;
471 asection
*symsec
= NULL
;
472 if ((sym
->st_shndx
!= SHN_UNDEF
473 && sym
->st_shndx
< SHN_LORESERVE
)
474 || sym
->st_shndx
> SHN_HIRESERVE
)
475 symsec
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
483 /* Build a name for an entry in the stub hash table. We can't use a
484 local symbol name because ld -r might generate duplicate local symbols. */
487 spu_stub_name (const asection
*sym_sec
,
488 const struct elf_link_hash_entry
*h
,
489 const Elf_Internal_Rela
*rel
)
496 len
= strlen (h
->root
.root
.string
) + 1 + 8 + 1;
497 stub_name
= bfd_malloc (len
);
498 if (stub_name
== NULL
)
501 sprintf (stub_name
, "%s+%x",
503 (int) rel
->r_addend
& 0xffffffff);
508 len
= 8 + 1 + 8 + 1 + 8 + 1;
509 stub_name
= bfd_malloc (len
);
510 if (stub_name
== NULL
)
513 sprintf (stub_name
, "%x:%x+%x",
514 sym_sec
->id
& 0xffffffff,
515 (int) ELF32_R_SYM (rel
->r_info
) & 0xffffffff,
516 (int) rel
->r_addend
& 0xffffffff);
517 len
= strlen (stub_name
);
520 if (stub_name
[len
- 2] == '+'
521 && stub_name
[len
- 1] == '0'
522 && stub_name
[len
] == 0)
523 stub_name
[len
- 2] = 0;
528 /* Create the note section if not already present. This is done early so
529 that the linker maps the sections to the right place in the output. */
532 spu_elf_create_sections (bfd
*output_bfd
,
533 struct bfd_link_info
*info
,
538 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
540 /* Stash some options away where we can get at them later. */
541 htab
->stack_analysis
= stack_analysis
;
542 htab
->emit_stack_syms
= emit_stack_syms
;
544 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
545 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
550 /* Make SPU_PTNOTE_SPUNAME section. */
557 ibfd
= info
->input_bfds
;
558 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
559 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
561 || !bfd_set_section_alignment (ibfd
, s
, 4))
564 name_len
= strlen (bfd_get_filename (output_bfd
)) + 1;
565 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
566 size
+= (name_len
+ 3) & -4;
568 if (!bfd_set_section_size (ibfd
, s
, size
))
571 data
= bfd_zalloc (ibfd
, size
);
575 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
576 bfd_put_32 (ibfd
, name_len
, data
+ 4);
577 bfd_put_32 (ibfd
, 1, data
+ 8);
578 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
579 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
580 bfd_get_filename (output_bfd
), name_len
);
587 /* qsort predicate to sort sections by vma. */
590 sort_sections (const void *a
, const void *b
)
592 const asection
*const *s1
= a
;
593 const asection
*const *s2
= b
;
594 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
597 return delta
< 0 ? -1 : 1;
599 return (*s1
)->index
- (*s2
)->index
;
602 /* Identify overlays in the output bfd, and number them. */
605 spu_elf_find_overlays (bfd
*output_bfd
, struct bfd_link_info
*info
)
607 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
608 asection
**alloc_sec
;
609 unsigned int i
, n
, ovl_index
, num_buf
;
613 if (output_bfd
->section_count
< 2)
616 alloc_sec
= bfd_malloc (output_bfd
->section_count
* sizeof (*alloc_sec
));
617 if (alloc_sec
== NULL
)
620 /* Pick out all the alloced sections. */
621 for (n
= 0, s
= output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
622 if ((s
->flags
& SEC_ALLOC
) != 0
623 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
633 /* Sort them by vma. */
634 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
636 /* Look for overlapping vmas. Any with overlap must be overlays.
637 Count them. Also count the number of overlay regions and for
638 each region save a section from that region with the lowest vma
639 and another section with the highest end vma. */
640 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
641 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
644 if (s
->vma
< ovl_end
)
646 asection
*s0
= alloc_sec
[i
- 1];
648 if (spu_elf_section_data (s0
)->ovl_index
== 0)
650 spu_elf_section_data (s0
)->ovl_index
= ++ovl_index
;
651 alloc_sec
[num_buf
* 2] = s0
;
652 alloc_sec
[num_buf
* 2 + 1] = s0
;
655 spu_elf_section_data (s
)->ovl_index
= ++ovl_index
;
656 if (ovl_end
< s
->vma
+ s
->size
)
658 ovl_end
= s
->vma
+ s
->size
;
659 alloc_sec
[num_buf
* 2 - 1] = s
;
663 ovl_end
= s
->vma
+ s
->size
;
666 htab
->num_overlays
= ovl_index
;
667 htab
->num_buf
= num_buf
;
674 alloc_sec
= bfd_realloc (alloc_sec
, num_buf
* 2 * sizeof (*alloc_sec
));
675 if (alloc_sec
== NULL
)
678 htab
->ovl_region
= alloc_sec
;
/* One of these per stub.  */
#define SIZEOF_STUB1 8
#define ILA_79	0x4200004f	/* ila $79,function_address */
#define BR	0x32000000	/* br stub2 */

/* One of these per overlay.  */
#define SIZEOF_STUB2 8
#define ILA_78	0x4200004e	/* ila $78,overlay_number */
				/* br __ovly_load */
#define NOP	0x40200000
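
/* Taken together (see write_one_stub below) each overlay call stub is:
	ila	$79,function_address
	br	stub2
   and the last stub in an overlay group carries stub2 inline:
	ila	$79,function_address
	nop
	ila	$78,overlay_number
	br	__ovly_load
   This summary is inferred from the stub-writing code later in this file.  */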
/* Return true for all relative and absolute branch instructions.
   ...
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   ...
   bihnz 00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}
735 /* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
738 needs_ovl_stub (const char *sym_name
,
740 asection
*input_section
,
741 struct spu_link_hash_table
*htab
,
742 bfd_boolean is_branch
)
744 if (htab
->num_overlays
== 0)
748 || sym_sec
->output_section
== NULL
749 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
752 /* setjmp always goes via an overlay stub, because then the return
753 and hence the longjmp goes via __ovly_return. That magically
754 makes setjmp/longjmp between overlays work. */
755 if (strncmp (sym_name
, "setjmp", 6) == 0
756 && (sym_name
[6] == '\0' || sym_name
[6] == '@'))
759 /* Usually, symbols in non-overlay sections don't need stubs. */
760 if (spu_elf_section_data (sym_sec
->output_section
)->ovl_index
== 0
761 && !htab
->non_overlay_stubs
)
764 /* A reference from some other section to a symbol in an overlay
765 section needs a stub. */
766 if (spu_elf_section_data (sym_sec
->output_section
)->ovl_index
767 != spu_elf_section_data (input_section
->output_section
)->ovl_index
)
770 /* If this insn isn't a branch then we are possibly taking the
771 address of a function and passing it out somehow. */
775 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
779 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
781 /* Symbols starting with _SPUEAR_ need a stub because they may be
782 invoked by the PPU. */
783 if ((h
->root
.type
== bfd_link_hash_defined
784 || h
->root
.type
== bfd_link_hash_defweak
)
786 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
788 struct spu_link_hash_table
*htab
= inf
;
789 static Elf_Internal_Rela zero_rel
;
790 char *stub_name
= spu_stub_name (h
->root
.u
.def
.section
, h
, &zero_rel
);
791 struct spu_stub_hash_entry
*sh
;
793 if (stub_name
== NULL
)
799 sh
= (struct spu_stub_hash_entry
*)
800 bfd_hash_lookup (&htab
->stub_hash_table
, stub_name
, TRUE
, FALSE
);
807 /* If this entry isn't new, we already have a stub. */
808 if (sh
->target_section
!= NULL
)
814 sh
->target_section
= h
->root
.u
.def
.section
;
815 sh
->target_off
= h
->root
.u
.def
.value
;
816 htab
->stubs
.count
+= 1;
822 /* Called via bfd_hash_traverse to set up pointers to all symbols
823 in the stub hash table. */
826 populate_stubs (struct bfd_hash_entry
*bh
, void *inf
)
828 struct spu_link_hash_table
*htab
= inf
;
830 htab
->stubs
.sh
[--htab
->stubs
.count
] = (struct spu_stub_hash_entry
*) bh
;
834 /* qsort predicate to sort stubs by overlay number. */
837 sort_stubs (const void *a
, const void *b
)
839 const struct spu_stub_hash_entry
*const *sa
= a
;
840 const struct spu_stub_hash_entry
*const *sb
= b
;
844 i
= spu_elf_section_data ((*sa
)->target_section
->output_section
)->ovl_index
;
845 i
-= spu_elf_section_data ((*sb
)->target_section
->output_section
)->ovl_index
;
849 d
= ((*sa
)->target_section
->output_section
->vma
850 + (*sa
)->target_section
->output_offset
852 - (*sb
)->target_section
->output_section
->vma
853 - (*sb
)->target_section
->output_offset
854 - (*sb
)->target_off
);
856 return d
< 0 ? -1 : 1;
858 /* Two functions at the same address. Aliases perhaps. */
859 i
= strcmp ((*sb
)->root
.string
, (*sa
)->root
.string
);
864 /* Allocate space for overlay call and return stubs. */
867 spu_elf_size_stubs (bfd
*output_bfd
,
868 struct bfd_link_info
*info
,
869 int non_overlay_stubs
,
875 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
880 htab
->non_overlay_stubs
= non_overlay_stubs
;
881 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
883 extern const bfd_target bfd_elf32_spu_vec
;
884 Elf_Internal_Shdr
*symtab_hdr
;
886 Elf_Internal_Sym
*local_syms
= NULL
;
889 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
892 /* We'll need the symbol table in a second. */
893 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
894 if (symtab_hdr
->sh_info
== 0)
897 /* Arrange to read and keep global syms for later stack analysis. */
900 psyms
= &symtab_hdr
->contents
;
902 /* Walk over each section attached to the input bfd. */
903 for (section
= ibfd
->sections
; section
!= NULL
; section
= section
->next
)
905 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
907 /* If there aren't any relocs, then there's nothing more to do. */
908 if ((section
->flags
& SEC_RELOC
) == 0
909 || (section
->flags
& SEC_ALLOC
) == 0
910 || (section
->flags
& SEC_LOAD
) == 0
911 || section
->reloc_count
== 0)
914 /* If this section is a link-once section that will be
915 discarded, then don't create any stubs. */
916 if (section
->output_section
== NULL
917 || section
->output_section
->owner
!= output_bfd
)
920 /* Get the relocs. */
922 = _bfd_elf_link_read_relocs (ibfd
, section
, NULL
, NULL
,
924 if (internal_relocs
== NULL
)
925 goto error_ret_free_local
;
927 /* Now examine each relocation. */
928 irela
= internal_relocs
;
929 irelaend
= irela
+ section
->reloc_count
;
930 for (; irela
< irelaend
; irela
++)
932 enum elf_spu_reloc_type r_type
;
935 Elf_Internal_Sym
*sym
;
936 struct elf_link_hash_entry
*h
;
937 const char *sym_name
;
939 struct spu_stub_hash_entry
*sh
;
940 unsigned int sym_type
;
941 enum _insn_type
{ non_branch
, branch
, call
} insn_type
;
943 r_type
= ELF32_R_TYPE (irela
->r_info
);
944 r_indx
= ELF32_R_SYM (irela
->r_info
);
946 if (r_type
>= R_SPU_max
)
948 bfd_set_error (bfd_error_bad_value
);
949 goto error_ret_free_internal
;
952 /* Determine the reloc target section. */
953 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, ibfd
))
954 goto error_ret_free_internal
;
957 || sym_sec
->output_section
== NULL
958 || sym_sec
->output_section
->owner
!= output_bfd
)
961 /* Ensure no stubs for user supplied overlay manager syms. */
963 && (strcmp (h
->root
.root
.string
, "__ovly_load") == 0
964 || strcmp (h
->root
.root
.string
, "__ovly_return") == 0))
967 insn_type
= non_branch
;
968 if (r_type
== R_SPU_REL16
969 || r_type
== R_SPU_ADDR16
)
971 unsigned char insn
[4];
973 if (!bfd_get_section_contents (ibfd
, section
, insn
,
975 goto error_ret_free_internal
;
977 if (is_branch (insn
) || is_hint (insn
))
980 if ((insn
[0] & 0xfd) == 0x31)
985 /* We are only interested in function symbols. */
989 sym_name
= h
->root
.root
.string
;
993 sym_type
= ELF_ST_TYPE (sym
->st_info
);
994 sym_name
= bfd_elf_sym_name (sym_sec
->owner
,
999 if (sym_type
!= STT_FUNC
)
1001 /* It's common for people to write assembly and forget
1002 to give function symbols the right type. Handle
1003 calls to such symbols, but warn so that (hopefully)
1004 people will fix their code. We need the symbol
1005 type to be correct to distinguish function pointer
1006 initialisation from other pointer initialisation. */
1007 if (insn_type
== call
)
1008 (*_bfd_error_handler
) (_("warning: call to non-function"
1009 " symbol %s defined in %B"),
1010 sym_sec
->owner
, sym_name
);
1015 if (!needs_ovl_stub (sym_name
, sym_sec
, section
, htab
,
1016 insn_type
!= non_branch
))
1019 stub_name
= spu_stub_name (sym_sec
, h
, irela
);
1020 if (stub_name
== NULL
)
1021 goto error_ret_free_internal
;
1023 sh
= (struct spu_stub_hash_entry
*)
1024 bfd_hash_lookup (&htab
->stub_hash_table
, stub_name
,
1029 error_ret_free_internal
:
1030 if (elf_section_data (section
)->relocs
!= internal_relocs
)
1031 free (internal_relocs
);
1032 error_ret_free_local
:
1033 if (local_syms
!= NULL
1034 && (symtab_hdr
->contents
1035 != (unsigned char *) local_syms
))
1040 /* If this entry isn't new, we already have a stub. */
1041 if (sh
->target_section
!= NULL
)
1047 sh
->target_section
= sym_sec
;
1049 sh
->target_off
= h
->root
.u
.def
.value
;
1051 sh
->target_off
= sym
->st_value
;
1052 sh
->target_off
+= irela
->r_addend
;
1054 htab
->stubs
.count
+= 1;
1057 /* We're done with the internal relocs, free them. */
1058 if (elf_section_data (section
)->relocs
!= internal_relocs
)
1059 free (internal_relocs
);
1062 if (local_syms
!= NULL
1063 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1065 if (!info
->keep_memory
)
1068 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1072 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, htab
);
1073 if (htab
->stubs
.err
)
1077 if (htab
->stubs
.count
== 0)
1080 ibfd
= info
->input_bfds
;
1081 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1082 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1083 htab
->stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1085 if (htab
->stub
== NULL
1086 || !bfd_set_section_alignment (ibfd
, htab
->stub
, 2))
1089 flags
= (SEC_ALLOC
| SEC_LOAD
1090 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1091 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1092 *ovtab
= htab
->ovtab
;
1093 if (htab
->ovtab
== NULL
1094 || !bfd_set_section_alignment (ibfd
, htab
->stub
, 4))
1097 *toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1099 || !bfd_set_section_alignment (ibfd
, *toe
, 4))
1103 /* Retrieve all the stubs and sort. */
1104 htab
->stubs
.sh
= bfd_malloc (htab
->stubs
.count
* sizeof (*htab
->stubs
.sh
));
1105 if (htab
->stubs
.sh
== NULL
)
1107 i
= htab
->stubs
.count
;
1108 bfd_hash_traverse (&htab
->stub_hash_table
, populate_stubs
, htab
);
1109 BFD_ASSERT (htab
->stubs
.count
== 0);
1111 htab
->stubs
.count
= i
;
1112 qsort (htab
->stubs
.sh
, htab
->stubs
.count
, sizeof (*htab
->stubs
.sh
),
1115 /* Now that the stubs are sorted, place them in the stub section.
1116 Stubs are grouped per overlay
1130 for (i
= 0; i
< htab
->stubs
.count
; i
++)
1132 if (spu_elf_section_data (htab
->stubs
.sh
[group
]->target_section
1133 ->output_section
)->ovl_index
1134 != spu_elf_section_data (htab
->stubs
.sh
[i
]->target_section
1135 ->output_section
)->ovl_index
)
1137 htab
->stub
->size
+= SIZEOF_STUB2
;
1138 for (; group
!= i
; group
++)
1139 htab
->stubs
.sh
[group
]->delta
1140 = htab
->stubs
.sh
[i
- 1]->off
- htab
->stubs
.sh
[group
]->off
;
1143 || ((htab
->stubs
.sh
[i
- 1]->target_section
->output_section
->vma
1144 + htab
->stubs
.sh
[i
- 1]->target_section
->output_offset
1145 + htab
->stubs
.sh
[i
- 1]->target_off
)
1146 != (htab
->stubs
.sh
[i
]->target_section
->output_section
->vma
1147 + htab
->stubs
.sh
[i
]->target_section
->output_offset
1148 + htab
->stubs
.sh
[i
]->target_off
)))
1150 htab
->stubs
.sh
[i
]->off
= htab
->stub
->size
;
1151 htab
->stub
->size
+= SIZEOF_STUB1
;
1152 if (info
->emitrelocations
)
1153 htab
->stub
->reloc_count
+= 1;
1156 htab
->stubs
.sh
[i
]->off
= htab
->stubs
.sh
[i
- 1]->off
;
1159 htab
->stub
->size
+= SIZEOF_STUB2
;
1160 if (info
->emitrelocations
)
1161 htab
->stub
->flags
|= SEC_RELOC
;
1162 for (; group
!= i
; group
++)
1163 htab
->stubs
.sh
[group
]->delta
1164 = htab
->stubs
.sh
[i
- 1]->off
- htab
->stubs
.sh
[group
]->off
;
  /* htab->ovtab consists of two arrays.
     .	struct {
     .	  u32 vma;
     .	  u32 size;
     .	  u32 file_off;
     .	  u32 buf;
     .	} _ovly_table[];
     .
     .	struct {
     .	  u32 mapped;
     .	} _ovly_buf_table[];  */

  htab->ovtab->alignment_power = 4;
  htab->ovtab->size = htab->num_overlays * 16 + htab->num_buf * 4;
1184 /* Functions to handle embedded spu_ovl.o object. */
1187 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1193 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1199 struct _ovl_stream
*os
;
1203 os
= (struct _ovl_stream
*) stream
;
1204 max
= (const char *) os
->end
- (const char *) os
->start
;
1206 if ((ufile_ptr
) offset
>= max
)
1210 if (count
> max
- offset
)
1211 count
= max
- offset
;
1213 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1218 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1220 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1227 return *ovl_bfd
!= NULL
;
1230 /* Fill in the ila and br for a stub. On the last stub for a group,
1231 write the stub that sets the overlay number too. */
1234 write_one_stub (struct spu_stub_hash_entry
*ent
, struct bfd_link_info
*info
)
1236 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1237 asection
*sec
= htab
->stub
;
1238 asection
*s
= ent
->target_section
;
1242 val
= ent
->target_off
+ s
->output_offset
+ s
->output_section
->vma
;
1243 bfd_put_32 (sec
->owner
, ILA_79
+ ((val
<< 7) & 0x01ffff80),
1244 sec
->contents
+ ent
->off
);
1245 val
= ent
->delta
+ 4;
1246 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
1247 sec
->contents
+ ent
->off
+ 4);
1249 if (info
->emitrelocations
)
1251 Elf_Internal_Rela
*relocs
, *r
;
1252 struct bfd_elf_section_data
*elfsec_data
;
1254 elfsec_data
= elf_section_data (sec
);
1255 relocs
= elfsec_data
->relocs
;
1258 bfd_size_type relsize
;
1259 Elf_Internal_Shdr
*symtab_hdr
;
1260 struct elf_link_hash_entry
**sym_hash
;
1261 unsigned long symcount
;
1264 relsize
= sec
->reloc_count
* sizeof (*relocs
);
1265 relocs
= bfd_alloc (sec
->owner
, relsize
);
1268 elfsec_data
->relocs
= relocs
;
1269 elfsec_data
->rel_hdr
.sh_size
1270 = sec
->reloc_count
* sizeof (Elf32_External_Rela
);
1271 elfsec_data
->rel_hdr
.sh_entsize
= sizeof (Elf32_External_Rela
);
1272 sec
->reloc_count
= 0;
1274 /* Increase the size of symbol hash array on the bfd to
1275 which we attached our .stub section. This hack allows
1276 us to create relocs against global symbols. */
1277 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1278 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
1279 symcount
-= symtab_hdr
->sh_info
;
1280 amt
= symcount
* sizeof (*sym_hash
);
1281 sym_hash
= bfd_alloc (sec
->owner
, amt
+ sizeof (*sym_hash
));
1282 if (sym_hash
== NULL
)
1284 memcpy (sym_hash
, elf_sym_hashes (sec
->owner
), amt
);
1285 sym_hash
[symcount
] = htab
->ovly_load
;
1286 htab
->ovly_load_r_symndx
= symcount
+ symtab_hdr
->sh_info
;
1287 elf_sym_hashes (sec
->owner
) = sym_hash
;
1289 r
= relocs
+ sec
->reloc_count
;
1290 sec
->reloc_count
+= 1;
1291 r
->r_offset
= ent
->off
+ 4;
1292 r
->r_info
= ELF32_R_INFO (0, R_SPU_REL16
);
1293 r
->r_addend
= (sec
->output_section
->vma
1294 + sec
->output_offset
1299 /* If this is the last stub of this group, write stub2. */
1300 if (ent
->delta
== 0)
1302 bfd_put_32 (sec
->owner
, NOP
,
1303 sec
->contents
+ ent
->off
+ 4);
1305 ovl
= spu_elf_section_data (s
->output_section
)->ovl_index
;
1306 bfd_put_32 (sec
->owner
, ILA_78
+ ((ovl
<< 7) & 0x01ffff80),
1307 sec
->contents
+ ent
->off
+ 8);
1309 val
= (htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
1310 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
1311 + htab
->ovly_load
->root
.u
.def
.value
1312 - (sec
->output_section
->vma
1313 + sec
->output_offset
1316 if (val
+ 0x20000 >= 0x40000)
1317 htab
->stub_overflow
= TRUE
;
1319 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
1320 sec
->contents
+ ent
->off
+ 12);
1322 if (info
->emitrelocations
)
1324 Elf_Internal_Rela
*relocs
, *r
;
1325 struct bfd_elf_section_data
*elfsec_data
;
1327 elfsec_data
= elf_section_data (sec
);
1328 relocs
= elfsec_data
->relocs
;
1329 /* The last branch is overwritten, so overwrite its reloc too. */
1330 r
= relocs
+ sec
->reloc_count
- 1;
1331 r
->r_offset
= ent
->off
+ 12;
1332 r
->r_info
= ELF32_R_INFO (htab
->ovly_load_r_symndx
, R_SPU_REL16
);
1337 if (htab
->emit_stub_syms
)
1339 struct elf_link_hash_entry
*h
;
1343 len1
= sizeof ("00000000.ovl_call.") - 1;
1344 len2
= strlen (ent
->root
.string
);
1345 name
= bfd_malloc (len1
+ len2
+ 1);
1348 memcpy (name
, "00000000.ovl_call.", len1
);
1349 memcpy (name
+ len1
, ent
->root
.string
, len2
+ 1);
1350 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1354 if (h
->root
.type
== bfd_link_hash_new
)
1356 h
->root
.type
= bfd_link_hash_defined
;
1357 h
->root
.u
.def
.section
= sec
;
1358 h
->root
.u
.def
.value
= ent
->off
;
1359 h
->size
= (ent
->delta
== 0
1360 ? SIZEOF_STUB1
+ SIZEOF_STUB2
: SIZEOF_STUB1
);
1364 h
->ref_regular_nonweak
= 1;
1365 h
->forced_local
= 1;
1373 /* Define an STT_OBJECT symbol. */
1375 static struct elf_link_hash_entry
*
1376 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1378 struct elf_link_hash_entry
*h
;
1380 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1384 if (h
->root
.type
!= bfd_link_hash_defined
1387 h
->root
.type
= bfd_link_hash_defined
;
1388 h
->root
.u
.def
.section
= htab
->ovtab
;
1389 h
->type
= STT_OBJECT
;
1392 h
->ref_regular_nonweak
= 1;
1397 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1398 h
->root
.u
.def
.section
->owner
,
1399 h
->root
.root
.string
);
1400 bfd_set_error (bfd_error_bad_value
);
1407 /* Fill in all stubs and the overlay tables. */
1410 spu_elf_build_stubs (struct bfd_link_info
*info
, int emit_syms
, asection
*toe
)
1412 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1413 struct elf_link_hash_entry
*h
;
1419 htab
->emit_stub_syms
= emit_syms
;
1420 htab
->stub
->contents
= bfd_zalloc (htab
->stub
->owner
, htab
->stub
->size
);
1421 if (htab
->stub
->contents
== NULL
)
1424 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1425 htab
->ovly_load
= h
;
1426 BFD_ASSERT (h
!= NULL
1427 && (h
->root
.type
== bfd_link_hash_defined
1428 || h
->root
.type
== bfd_link_hash_defweak
)
1431 s
= h
->root
.u
.def
.section
->output_section
;
1432 if (spu_elf_section_data (s
)->ovl_index
)
1434 (*_bfd_error_handler
) (_("%s in overlay section"),
1435 h
->root
.u
.def
.section
->owner
);
1436 bfd_set_error (bfd_error_bad_value
);
1440 /* Write out all the stubs. */
1441 for (i
= 0; i
< htab
->stubs
.count
; i
++)
1442 write_one_stub (htab
->stubs
.sh
[i
], info
);
1444 if (htab
->stub_overflow
)
1446 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1447 bfd_set_error (bfd_error_bad_value
);
1451 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1452 if (htab
->ovtab
->contents
== NULL
)
1455 /* Write out _ovly_table. */
1456 p
= htab
->ovtab
->contents
;
1457 obfd
= htab
->ovtab
->output_section
->owner
;
1458 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1460 unsigned int ovl_index
= spu_elf_section_data (s
)->ovl_index
;
1464 unsigned int lo
, hi
, mid
;
1465 unsigned long off
= (ovl_index
- 1) * 16;
1466 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1467 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1468 /* file_off written later in spu_elf_modify_program_headers. */
1474 mid
= (lo
+ hi
) >> 1;
1475 if (htab
->ovl_region
[2 * mid
+ 1]->vma
1476 + htab
->ovl_region
[2 * mid
+ 1]->size
<= s
->vma
)
1478 else if (htab
->ovl_region
[2 * mid
]->vma
> s
->vma
)
1482 bfd_put_32 (htab
->ovtab
->owner
, mid
+ 1, p
+ off
+ 12);
1486 BFD_ASSERT (lo
< hi
);
1490 /* Write out _ovly_buf_table. */
1491 p
= htab
->ovtab
->contents
+ htab
->num_overlays
* 16;
1492 for (i
= 0; i
< htab
->num_buf
; i
++)
1494 bfd_put_32 (htab
->ovtab
->owner
, 0, p
);
1498 h
= define_ovtab_symbol (htab
, "_ovly_table");
1501 h
->root
.u
.def
.value
= 0;
1502 h
->size
= htab
->num_overlays
* 16;
1504 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1507 h
->root
.u
.def
.value
= htab
->num_overlays
* 16;
1510 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1513 h
->root
.u
.def
.value
= htab
->num_overlays
* 16;
1514 h
->size
= htab
->num_buf
* 4;
1516 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1519 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + htab
->num_buf
* 4;
1522 h
= define_ovtab_symbol (htab
, "_EAR_");
1525 h
->root
.u
.def
.section
= toe
;
1526 h
->root
.u
.def
.value
= 0;
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.  */
1536 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1541 memset (reg
, 0, sizeof (reg
));
1542 for (unrecog
= 0; offset
+ 4 <= sec
->size
&& unrecog
< 32; offset
+= 4)
1544 unsigned char buf
[4];
1548 /* Assume no relocs on stack adjusing insns. */
1549 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1552 if (buf
[0] == 0x24 /* stqd */)
1556 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1557 /* Partly decoded immediate field. */
1558 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1560 if (buf
[0] == 0x1c /* ai */)
1563 imm
= (imm
^ 0x200) - 0x200;
1564 reg
[rt
] = reg
[ra
] + imm
;
1566 if (rt
== 1 /* sp */)
1573 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1575 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1577 reg
[rt
] = reg
[ra
] + reg
[rb
];
1581 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1583 if (buf
[0] >= 0x42 /* ila */)
1584 imm
|= (buf
[0] & 1) << 17;
1589 if (buf
[0] == 0x40 /* il */)
1591 if ((buf
[1] & 0x80) == 0)
1593 imm
= (imm
^ 0x8000) - 0x8000;
1595 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1601 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1603 reg
[rt
] |= imm
& 0xffff;
1606 else if (buf
[0] == 0x04 /* ori */)
1609 imm
= (imm
^ 0x200) - 0x200;
1610 reg
[rt
] = reg
[ra
] | imm
;
1613 else if ((buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1614 || (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */))
1616 /* Used in pic reg load. Say rt is trashed. */
1620 else if (is_branch (buf
) || is_indirect_branch (buf
))
1621 /* If we hit a branch then we must be out of the prologue. */
1630 /* qsort predicate to sort symbols by section and value. */
1632 static Elf_Internal_Sym
*sort_syms_syms
;
1633 static asection
**sort_syms_psecs
;
1636 sort_syms (const void *a
, const void *b
)
1638 Elf_Internal_Sym
*const *s1
= a
;
1639 Elf_Internal_Sym
*const *s2
= b
;
1640 asection
*sec1
,*sec2
;
1641 bfd_signed_vma delta
;
1643 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1644 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1647 return sec1
->index
- sec2
->index
;
1649 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1651 return delta
< 0 ? -1 : 1;
1653 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1655 return delta
< 0 ? -1 : 1;
1657 return *s1
< *s2
? -1 : 1;
1662 struct function_info
*fun
;
1663 struct call_info
*next
;
1667 struct function_info
1669 /* List of functions called. Also branches to hot/cold part of
1671 struct call_info
*call_list
;
1672 /* For hot/cold part of function, point to owner. */
1673 struct function_info
*start
;
1674 /* Symbol at start of function. */
1676 Elf_Internal_Sym
*sym
;
1677 struct elf_link_hash_entry
*h
;
1679 /* Function section. */
1681 /* Address range of (this part of) function. */
1685 /* Set if global symbol. */
1686 unsigned int global
: 1;
1687 /* Set if known to be start of function (as distinct from a hunk
1688 in hot/cold section. */
1689 unsigned int is_func
: 1;
1690 /* Flags used during call tree traversal. */
1691 unsigned int visit1
: 1;
1692 unsigned int non_root
: 1;
1693 unsigned int visit2
: 1;
1694 unsigned int marking
: 1;
1695 unsigned int visit3
: 1;
1698 struct spu_elf_stack_info
1702 /* Variable size array describing functions, one per contiguous
1703 address range belonging to a function. */
1704 struct function_info fun
[1];
1707 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1708 entries for section SEC. */
1710 static struct spu_elf_stack_info
*
1711 alloc_stack_info (asection
*sec
, int max_fun
)
1713 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1716 amt
= sizeof (struct spu_elf_stack_info
);
1717 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1718 sec_data
->stack_info
= bfd_zmalloc (amt
);
1719 if (sec_data
->stack_info
!= NULL
)
1720 sec_data
->stack_info
->max_fun
= max_fun
;
1721 return sec_data
->stack_info
;
1724 /* Add a new struct function_info describing a (part of a) function
1725 starting at SYM_H. Keep the array sorted by address. */
1727 static struct function_info
*
1728 maybe_insert_function (asection
*sec
,
1731 bfd_boolean is_func
)
1733 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1734 struct spu_elf_stack_info
*sinfo
= sec_data
->stack_info
;
1740 sinfo
= alloc_stack_info (sec
, 20);
1747 Elf_Internal_Sym
*sym
= sym_h
;
1748 off
= sym
->st_value
;
1749 size
= sym
->st_size
;
1753 struct elf_link_hash_entry
*h
= sym_h
;
1754 off
= h
->root
.u
.def
.value
;
1758 for (i
= sinfo
->num_fun
; --i
>= 0; )
1759 if (sinfo
->fun
[i
].lo
<= off
)
1764 /* Don't add another entry for an alias, but do update some
1766 if (sinfo
->fun
[i
].lo
== off
)
1768 /* Prefer globals over local syms. */
1769 if (global
&& !sinfo
->fun
[i
].global
)
1771 sinfo
->fun
[i
].global
= TRUE
;
1772 sinfo
->fun
[i
].u
.h
= sym_h
;
1775 sinfo
->fun
[i
].is_func
= TRUE
;
1776 return &sinfo
->fun
[i
];
1778 /* Ignore a zero-size symbol inside an existing function. */
1779 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1780 return &sinfo
->fun
[i
];
1783 if (++i
< sinfo
->num_fun
)
1784 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1785 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1786 else if (i
>= sinfo
->max_fun
)
1788 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1789 bfd_size_type old
= amt
;
1791 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1792 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1793 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1794 sinfo
= bfd_realloc (sinfo
, amt
);
1797 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1798 sec_data
->stack_info
= sinfo
;
1800 sinfo
->fun
[i
].is_func
= is_func
;
1801 sinfo
->fun
[i
].global
= global
;
1802 sinfo
->fun
[i
].sec
= sec
;
1804 sinfo
->fun
[i
].u
.h
= sym_h
;
1806 sinfo
->fun
[i
].u
.sym
= sym_h
;
1807 sinfo
->fun
[i
].lo
= off
;
1808 sinfo
->fun
[i
].hi
= off
+ size
;
1809 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1810 sinfo
->num_fun
+= 1;
1811 return &sinfo
->fun
[i
];
1814 /* Return the name of FUN. */
1817 func_name (struct function_info
*fun
)
1821 Elf_Internal_Shdr
*symtab_hdr
;
1823 while (fun
->start
!= NULL
)
1827 return fun
->u
.h
->root
.root
.string
;
1830 if (fun
->u
.sym
->st_name
== 0)
1832 size_t len
= strlen (sec
->name
);
1833 char *name
= bfd_malloc (len
+ 10);
1836 sprintf (name
, "%s+%lx", sec
->name
,
1837 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1841 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1842 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1845 /* Read the instruction at OFF in SEC. Return true iff the instruction
1846 is a nop, lnop, or stop 0 (all zero insn). */
1849 is_nop (asection
*sec
, bfd_vma off
)
1851 unsigned char insn
[4];
1853 if (off
+ 4 > sec
->size
1854 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1856 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1858 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1863 /* Extend the range of FUN to cover nop padding up to LIMIT.
1864 Return TRUE iff some instruction other than a NOP was found. */
1867 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1869 bfd_vma off
= (fun
->hi
+ 3) & -4;
1871 while (off
< limit
&& is_nop (fun
->sec
, off
))
1882 /* Check and fix overlapping function ranges. Return TRUE iff there
1883 are gaps in the current info we have about functions in SEC. */
1886 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1888 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1889 struct spu_elf_stack_info
*sinfo
= sec_data
->stack_info
;
1891 bfd_boolean gaps
= FALSE
;
1896 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1897 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1899 /* Fix overlapping symbols. */
1900 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1901 const char *f2
= func_name (&sinfo
->fun
[i
]);
1903 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1904 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1906 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
1909 if (sinfo
->num_fun
== 0)
1913 if (sinfo
->fun
[0].lo
!= 0)
1915 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
1917 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
1919 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
1920 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
1922 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
1928 /* Search current function info for a function that contains address
1929 OFFSET in section SEC. */
1931 static struct function_info
*
1932 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
1934 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1935 struct spu_elf_stack_info
*sinfo
= sec_data
->stack_info
;
1939 hi
= sinfo
->num_fun
;
1942 mid
= (lo
+ hi
) / 2;
1943 if (offset
< sinfo
->fun
[mid
].lo
)
1945 else if (offset
>= sinfo
->fun
[mid
].hi
)
1948 return &sinfo
->fun
[mid
];
1950 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
1955 /* Add CALLEE to CALLER call list if not already present. */
1958 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
1960 struct call_info
*p
;
1961 for (p
= caller
->call_list
; p
!= NULL
; p
= p
->next
)
1962 if (p
->fun
== callee
->fun
)
1964 /* Tail calls use less stack than normal calls. Retain entry
1965 for normal call over one for tail call. */
1966 if (p
->is_tail
> callee
->is_tail
)
1967 p
->is_tail
= callee
->is_tail
;
1970 callee
->next
= caller
->call_list
;
1971 caller
->call_list
= callee
;
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */
1982 mark_functions_via_relocs (asection
*sec
,
1983 struct bfd_link_info
*info
,
1986 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1987 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1988 Elf_Internal_Sym
*syms
;
1990 static bfd_boolean warned
;
1992 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
1994 if (internal_relocs
== NULL
)
1997 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1998 psyms
= &symtab_hdr
->contents
;
1999 syms
= *(Elf_Internal_Sym
**) psyms
;
2000 irela
= internal_relocs
;
2001 irelaend
= irela
+ sec
->reloc_count
;
2002 for (; irela
< irelaend
; irela
++)
2004 enum elf_spu_reloc_type r_type
;
2005 unsigned int r_indx
;
2007 Elf_Internal_Sym
*sym
;
2008 struct elf_link_hash_entry
*h
;
2010 unsigned char insn
[4];
2011 bfd_boolean is_call
;
2012 struct function_info
*caller
;
2013 struct call_info
*callee
;
2015 r_type
= ELF32_R_TYPE (irela
->r_info
);
2016 if (r_type
!= R_SPU_REL16
2017 && r_type
!= R_SPU_ADDR16
)
2020 r_indx
= ELF32_R_SYM (irela
->r_info
);
2021 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2025 || sym_sec
->output_section
== NULL
2026 || sym_sec
->output_section
->owner
!= sec
->output_section
->owner
)
2029 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2030 irela
->r_offset
, 4))
2032 if (!is_branch (insn
))
2035 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2036 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2040 if (!call_tree
|| !warned
)
2041 info
->callbacks
->einfo (_("%B(%A+0x%v): call to non-code section"
2042 " %B(%A), stack analysis incomplete\n"),
2043 sec
->owner
, sec
, irela
->r_offset
,
2044 sym_sec
->owner
, sym_sec
);
2048 is_call
= (insn
[0] & 0xfd) == 0x31;
2051 val
= h
->root
.u
.def
.value
;
2053 val
= sym
->st_value
;
2054 val
+= irela
->r_addend
;
2058 struct function_info
*fun
;
2060 if (irela
->r_addend
!= 0)
2062 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2065 fake
->st_value
= val
;
2067 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2071 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2073 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2076 if (irela
->r_addend
!= 0
2077 && fun
->u
.sym
!= sym
)
2082 caller
= find_function (sec
, irela
->r_offset
, info
);
2085 callee
= bfd_malloc (sizeof *callee
);
2089 callee
->fun
= find_function (sym_sec
, val
, info
);
2090 if (callee
->fun
== NULL
)
2092 callee
->is_tail
= !is_call
;
2093 if (!insert_callee (caller
, callee
))
2096 && !callee
->fun
->is_func
2097 && callee
->fun
->stack
== 0)
2099 /* This is either a tail call or a branch from one part of
2100 the function to another, ie. hot/cold section. If the
2101 destination has been called by some other function then
2102 it is a separate function. We also assume that functions
2103 are not split across input files. */
2104 if (callee
->fun
->start
!= NULL
2105 || sec
->owner
!= sym_sec
->owner
)
2107 callee
->fun
->start
= NULL
;
2108 callee
->fun
->is_func
= TRUE
;
2111 callee
->fun
->start
= caller
;
2118 /* Handle something like .init or .fini, which has a piece of a function.
2119 These sections are pasted together to form a single function. */
2122 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2124 struct bfd_link_order
*l
;
2125 struct _spu_elf_section_data
*sec_data
;
2126 struct spu_elf_stack_info
*sinfo
;
2127 Elf_Internal_Sym
*fake
;
2128 struct function_info
*fun
, *fun_start
;
2130 fake
= bfd_zmalloc (sizeof (*fake
));
2134 fake
->st_size
= sec
->size
;
2136 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2137 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2141 /* Find a function immediately preceding this section. */
2143 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2145 if (l
->u
.indirect
.section
== sec
)
2147 if (fun_start
!= NULL
)
2149 if (fun_start
->start
)
2150 fun_start
= fun_start
->start
;
2151 fun
->start
= fun_start
;
2155 if (l
->type
== bfd_indirect_link_order
2156 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2157 && (sinfo
= sec_data
->stack_info
) != NULL
2158 && sinfo
->num_fun
!= 0)
2159 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2162 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2166 /* We're only interested in code sections. */
2169 interesting_section (asection
*s
, bfd
*obfd
, struct spu_link_hash_table
*htab
)
2171 return (s
!= htab
->stub
2172 && s
->output_section
!= NULL
2173 && s
->output_section
->owner
== obfd
2174 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2175 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2179 /* Map address ranges in code sections to functions. */
2182 discover_functions (bfd
*output_bfd
, struct bfd_link_info
*info
)
2184 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2187 Elf_Internal_Sym
***psym_arr
;
2188 asection
***sec_arr
;
2189 bfd_boolean gaps
= FALSE
;
2192 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2195 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2196 if (psym_arr
== NULL
)
2198 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2199 if (sec_arr
== NULL
)
2203 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2205 ibfd
= ibfd
->link_next
, bfd_idx
++)
2207 extern const bfd_target bfd_elf32_spu_vec
;
2208 Elf_Internal_Shdr
*symtab_hdr
;
2211 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2212 asection
**psecs
, **p
;
2214 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2217 /* Read all the symbols. */
2218 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2219 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2223 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2226 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2228 symtab_hdr
->contents
= (void *) syms
;
2233 /* Select defined function symbols that are going to be output. */
2234 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2237 psym_arr
[bfd_idx
] = psyms
;
2238 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2241 sec_arr
[bfd_idx
] = psecs
;
2242 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2243 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2244 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2248 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2249 if (s
!= NULL
&& interesting_section (s
, output_bfd
, htab
))
2252 symcount
= psy
- psyms
;
2255 /* Sort them by section and offset within section. */
2256 sort_syms_syms
= syms
;
2257 sort_syms_psecs
= psecs
;
2258 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2260 /* Now inspect the function symbols. */
2261 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2263 asection
*s
= psecs
[*psy
- syms
];
2264 Elf_Internal_Sym
**psy2
;
2266 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2267 if (psecs
[*psy2
- syms
] != s
)
2270 if (!alloc_stack_info (s
, psy2
- psy
))
2275 /* First install info about properly typed and sized functions.
2276 In an ideal world this will cover all code sections, except
2277 when partitioning functions into hot and cold sections,
2278 and the horrible pasted together .init and .fini functions. */
2279 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2282 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2284 asection
*s
= psecs
[sy
- syms
];
2285 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2290 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2291 if (interesting_section (sec
, output_bfd
, htab
))
2292 gaps
|= check_function_ranges (sec
, info
);
  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec, output_bfd, htab)
		&& sec->reloc_count != 0)
	      {
		if (!mark_functions_via_relocs (sec, info, FALSE))
		  return FALSE;
	      }
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec, output_bfd, htab))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec, output_bfd, htab))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->stack_info;
		if (sinfo != NULL)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec, info))
		  return FALSE;
	      }
	}
    }

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
/* Mark nodes in the call graph that are called by some other node.  */

static void
mark_non_root (struct function_info *fun)
{
  struct call_info *call;

  fun->visit1 = TRUE;
  for (call = fun->call_list; call; call = call->next)
    {
      call->fun->non_root = TRUE;
      if (!call->fun->visit1)
	mark_non_root (call->fun);
    }
}
/* Remove cycles from the call graph.  */

static void
call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
{
  struct call_info **callp, *call;

  fun->visit2 = TRUE;
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      if (!call->fun->visit2)
	call_graph_traverse (call->fun, info);
      else if (call->fun->marking)
	{
	  const char *f1 = func_name (fun);
	  const char *f2 = func_name (call->fun);

	  info->callbacks->info (_("Stack analysis will ignore the call "
				   "from %s to %s\n"),
				 f1, f2);
	  *callp = call->next;
	  continue;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
}
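
/* For example, given call edges main -> a, a -> b and b -> a, the
   traversal above reaches the b -> a edge while "a" still has its
   "marking" flag set, so that edge is dropped from b's call list and
   the cycle is broken there rather than at an arbitrary point.  (The
   function names are purely illustrative.)  */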
/* Populate call_list for each function.  */

static bfd_boolean
build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if (!interesting_section (sec, output_bfd, htab)
	      || sec->reloc_count == 0)
	    continue;

	  if (!mark_functions_via_relocs (sec, info, TRUE))
	    return FALSE;
	}

      /* Transfer call info from hot/cold section part of function
	 to main entry.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		if (sinfo->fun[i].start != NULL)
		  {
		    struct call_info *call = sinfo->fun[i].call_list;

		    while (call != NULL)
		      {
			struct call_info *call_next = call->next;

			if (!insert_callee (sinfo->fun[i].start, call))
			  free (call);
			call = call_next;
		      }
		    sinfo->fun[i].call_list = NULL;
		    sinfo->fun[i].non_root = TRUE;
		  }
	    }
	}
    }
  /* Find the call graph root(s).  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].visit1)
		  mark_non_root (&sinfo->fun[i]);
	    }
	}
    }
  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].non_root)
		  call_graph_traverse (&sinfo->fun[i], info);
	    }
	}
    }

  return TRUE;
}
/* Descend the call graph for FUN, accumulating total stack required.  */

static bfd_vma
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   int emit_stack_syms)
{
  struct call_info *call;
  struct function_info *max = NULL;
  bfd_vma max_stack = fun->stack;
  bfd_vma stack;
  const char *f1;

  if (fun->visit3)
    return max_stack;

  for (call = fun->call_list; call; call = call->next)
    {
      stack = sum_stack (call->fun, info, emit_stack_syms);
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail)
	stack += fun->stack;
      if (max_stack < stack)
	{
	  max_stack = stack;
	  max = call->fun;
	}
    }

  f1 = func_name (fun);
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			  f1, (bfd_vma) fun->stack, max_stack);

  if (fun->call_list != NULL)
    {
      info->callbacks->minfo (_(" calls:\n"));
      for (call = fun->call_list; call; call = call->next)
	{
	  const char *f2 = func_name (call->fun);
	  const char *ann1 = call->fun == max ? "*" : " ";
	  const char *ann2 = call->is_tail ? "t" : " ";

	  info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
	}
    }

  /* Now fun->stack holds cumulative stack.  */
  fun->stack = max_stack;
  fun->visit3 = TRUE;

  if (emit_stack_syms)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
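
      /* For example, a global function "foo" is given the absolute
	 symbol "__stack_foo", while a local "foo" defined in the
	 section with id 0x2a is given "__stack_2a_foo"; in both cases
	 the symbol's value is the cumulative stack estimate computed
	 above.  (The names here are only illustrative.)  */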
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = max_stack;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	}
    }

  return max_stack;
}
/* Provide an estimate of total stack required.  */

static bfd_boolean
spu_elf_stack_analysis (bfd *output_bfd,
			struct bfd_link_info *info,
			int emit_stack_syms)
{
  bfd *ibfd;
  bfd_vma max_stack = 0;

  if (!discover_functions (output_bfd, info))
    return FALSE;

  if (!build_call_tree (output_bfd, info))
    return FALSE;

  info->callbacks->info (_("Stack size for call graph root nodes.\n"));
  info->callbacks->minfo (_("\nStack size for functions. "
			    "Annotations: '*' max stack, 't' tail call\n"));
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].non_root)
		  {
		    bfd_vma stack;
		    const char *f1;

		    stack = sum_stack (&sinfo->fun[i], info,
				       emit_stack_syms);
		    f1 = func_name (&sinfo->fun[i]);
		    info->callbacks->info (_(" %s: 0x%v\n"),
					   f1, stack);
		    if (max_stack < stack)
		      max_stack = stack;
		  }
	    }
	}
    }

  info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
  return TRUE;
}
/* Perform a final link.  */

static bfd_boolean
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (htab->stack_analysis
      && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
    info->callbacks->einfo ("%X%P: stack analysis error: %E\n");

  return bfd_elf_final_link (output_bfd, info);
}
/* Called when not normally emitting relocs, ie. !info->relocatable
   and !info->emitrelocations.  Returns a count of special relocs
   that need to be emitted.  */

static unsigned int
spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
{
  unsigned int count = 0;
  Elf_Internal_Rela *relend = relocs + sec->reloc_count;

  for (; relocs < relend; relocs++)
    {
      int r_type = ELF32_R_TYPE (relocs->r_info);

      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	++count;
    }

  return count;
}
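
/* The R_SPU_PPU32 and R_SPU_PPU64 relocs counted here are not applied by
   spu_elf_relocate_section below; instead it compacts the section's reloc
   array down to just these entries so that they are carried through to the
   output file even in a normal final link.  */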
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */

static bfd_boolean
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  bfd_boolean ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;

  htab = spu_hash_table (info);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      bfd_boolean branch;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  emit_these_relocs = TRUE;
	  continue;
	}

      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  continue;
	}

      if (info->relocatable)
	continue;

      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      branch = (is_branch (contents + rel->r_offset)
		|| is_hint (contents + rel->r_offset));
      if (needs_ovl_stub (sym_name, sec, input_section, htab, branch))
	{
	  char *stub_name;
	  struct spu_stub_hash_entry *sh;

	  stub_name = spu_stub_name (sec, h, rel);
	  if (stub_name == NULL)
	    return FALSE;

	  sh = (struct spu_stub_hash_entry *)
	    bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
	  if (sh != NULL)
	    {
	      relocation = (htab->stub->output_section->vma
			    + htab->stub->output_offset
			    + sh->off);
	      addend = 0;
	    }
	  free (stub_name);
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* Fall through.  */

	    common_error:
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->relocatable
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
    }

  return ret;
}
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static bfd_boolean
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->num_overlays != 0
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      static Elf_Internal_Rela zero_rel;
      char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
      struct spu_stub_hash_entry *sh;

      if (stub_name == NULL)
	return FALSE;
      sh = (struct spu_stub_hash_entry *)
	bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
      free (stub_name);
      if (sh != NULL)
	{
	  sym->st_shndx
	    = _bfd_elf_section_from_bfd_section (htab->stub->output_section->owner,
						 htab->stub->output_section);
	  sym->st_value = (htab->stub->output_section->vma
			   + htab->stub->output_offset
			   + sh->off);
	}
    }

  return TRUE;
}
static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
/* Set ELF header e_type for plugins.  */

static void
spu_elf_post_process_headers (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
}
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  int extra = htab->num_overlays;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		/* Move the sections following S into a new PT_LOAD.  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		/* Give S a PT_LOAD of its own, after the sections that
		   precede it.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }

  return TRUE;
}
/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive.  */

asection *
spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
{
  struct elf_segment_map *m;
  unsigned int i;

  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0
	    && (m->sections[i]->vma < lo
		|| m->sections[i]->vma > hi
		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
	  return m->sections[i];

  return NULL;
}
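
/* For example, with LO == 0 and HI == 0x3ffff, a loadable section placed
   at vma 0x3ff00 with size 0x200 is returned, because its last byte at
   0x400ff lies beyond HI.  (The numbers are only illustrative.)  */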
/* Tweak the section type of .note.spu_name.  */

static bfd_boolean
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
		       Elf_Internal_Shdr *hdr,
		       asection *sec)
{
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return TRUE;
}
/* Tweak phdrs before writing them out.  */

static bfd_boolean
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		unsigned int off = (o - 1) * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }
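
  /* Note: each _ovly_table entry occupies 16 bytes, and the word written
     above is the third 32-bit field of its entry, which is why overlay O's
     file offset lands at byte (O - 1) * 16 + 8 of .ovtab.  */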
  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  if (i == (unsigned int) -1)
    /* No overlap found above, so it is safe to round up.  */
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
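
/* For example, a PT_LOAD segment with p_filesz == 0x123 gets
   adjust = -0x123 & 15 == 13, growing p_filesz to 0x130; a size that is
   already a multiple of 16 gets adjust == 0 and is left alone.  */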
#define TARGET_BIG_SYM		bfd_elf32_spu_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_free	spu_elf_link_hash_table_free

#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
#define elf_backend_post_process_headers	spu_elf_post_process_headers
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

#include "elf32-target.h"