Fix RELOC_FOR_GLOBAL_SYMBOLS macro so that it can cope with user defined symbols...
[binutils-gdb.git] / bfd / elfnn-kvx.c
blob516ce2fb988a3c78f6df5af0217c457861f289c8
1 /* KVX-specific support for NN-bit ELF.
2 Copyright (C) 2009-2024 Free Software Foundation, Inc.
3 Contributed by Kalray SA.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 #include "sysdep.h"
22 #include "bfd.h"
23 #include "libiberty.h"
24 #include "libbfd.h"
25 #include "elf-bfd.h"
26 #include "bfdlink.h"
27 #include "objalloc.h"
28 #include "elf/kvx.h"
29 #include "elfxx-kvx.h"
31 #define ARCH_SIZE NN
33 #if ARCH_SIZE == 64
34 #define LOG_FILE_ALIGN 3
35 #endif
37 #if ARCH_SIZE == 32
38 #define LOG_FILE_ALIGN 2
39 #endif
41 #define IS_KVX_TLS_RELOC(R_TYPE) \
42 ((R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_LO10 \
43 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_UP27 \
44 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_LO10 \
45 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_UP27 \
46 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_EX6 \
47 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10 \
48 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27 \
49 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10 \
50 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27 \
51 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6 \
52 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_LO10 \
53 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_UP27 \
54 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_LO10 \
55 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_UP27 \
56 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_EX6 \
57 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_LO10 \
58 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_UP27 \
59 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_LO10 \
60 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_UP27 \
61 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_EX6 \
62 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_LO10 \
63 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_UP27 \
64 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_LO10 \
65 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_UP27 \
66 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_EX6 \
69 #define IS_KVX_TLS_RELAX_RELOC(R_TYPE) 0
71 #define ELIMINATE_COPY_RELOCS 0
73 /* Return size of a relocation entry. HTAB is the bfd's
74 elf_kvx_link_hash_entry. */
75 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
77 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
78 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
79 #define PLT_ENTRY_SIZE (32)
81 #define PLT_SMALL_ENTRY_SIZE (4*4)
83 /* Encoding of the nop instruction */
84 #define INSN_NOP 0x00f0037f
86 #define kvx_compute_jump_table_size(htab) \
87 (((htab)->root.srelplt == NULL) ? 0 \
88 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
90 static const bfd_byte elfNN_kvx_small_plt0_entry[PLT_ENTRY_SIZE] =
92 /* FIXME KVX: no first entry, not used yet */
96 /* Per function entry in a procedure linkage table looks like this
97 if the distance between the PLTGOT and the PLT is < 4GB use
98 these PLT entries. */
99 static const bfd_byte elfNN_kvx_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
101 0x10, 0x00, 0xc4, 0x0f, /* get $r16 = $pc ;; */
102 #if ARCH_SIZE == 32
103 0x10, 0x00, 0x40, 0xb0, /* lwz $r16 = 0[$r16] ;; */
104 #else
105 0x10, 0x00, 0x40, 0xb8, /* ld $r16 = 0[$r16] ;; */
106 #endif
107 0x00, 0x00, 0x00, 0x18, /* upper 27 bits for LSU */
108 0x10, 0x00, 0xd8, 0x0f, /* igoto $r16 ;; */
111 /* Long stub use 43bits format of make. */
112 static const uint32_t elfNN_kvx_long_branch_stub[] =
114 0xe0400000, /* make $r16 = LO10<emm43> EX6<imm43> */
115 0x00000000, /* UP27<imm43> ;; */
116 0x0fd80010, /* igoto "r16 ;; */
119 #define elf_info_to_howto elfNN_kvx_info_to_howto
120 #define elf_info_to_howto_rel elfNN_kvx_info_to_howto
122 #define KVX_ELF_ABI_VERSION 0
124 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
125 #define ALL_ONES (~ (bfd_vma) 0)
127 /* Indexed by the bfd interal reloc enumerators.
128 Therefore, the table needs to be synced with BFD_RELOC_KVX_*
129 in reloc.c. */
131 #define KVX_KV3_V1_KV3_V2_KV4_V1
132 #include "elfxx-kvx-relocs.h"
133 #undef KVX_KV3_V1_KV3_V2_KV4_V1
135 /* Given HOWTO, return the bfd internal relocation enumerator. */
137 static bfd_reloc_code_real_type
138 elfNN_kvx_bfd_reloc_from_howto (reloc_howto_type *howto)
140 const int size = (int) ARRAY_SIZE (elf_kvx_howto_table);
141 const ptrdiff_t offset = howto - elf_kvx_howto_table;
143 if (offset >= 0 && offset < size)
144 return BFD_RELOC_KVX_RELOC_START + offset + 1;
146 return BFD_RELOC_KVX_RELOC_START + 1;
149 /* Given R_TYPE, return the bfd internal relocation enumerator. */
151 static bfd_reloc_code_real_type
152 elfNN_kvx_bfd_reloc_from_type (bfd *abfd ATTRIBUTE_UNUSED, unsigned int r_type)
154 static bool initialized_p = false;
155 /* Indexed by R_TYPE, values are offsets in the howto_table. */
156 static unsigned int offsets[R_KVX_end];
158 if (!initialized_p)
160 unsigned int i;
162 for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
163 offsets[elf_kvx_howto_table[i].type] = i;
165 initialized_p = true;
168 /* PR 17512: file: b371e70a. */
169 if (r_type >= R_KVX_end)
171 bfd_set_error (bfd_error_bad_value);
172 return BFD_RELOC_KVX_RELOC_END;
175 return (BFD_RELOC_KVX_RELOC_START + 1) + offsets[r_type];
178 struct elf_kvx_reloc_map
180 bfd_reloc_code_real_type from;
181 bfd_reloc_code_real_type to;
184 /* Map bfd generic reloc to KVX-specific reloc. */
185 static const struct elf_kvx_reloc_map elf_kvx_reloc_map[] =
187 {BFD_RELOC_NONE, BFD_RELOC_KVX_NONE},
189 /* Basic data relocations. */
190 {BFD_RELOC_CTOR, BFD_RELOC_KVX_NN},
191 {BFD_RELOC_64, BFD_RELOC_KVX_64},
192 {BFD_RELOC_32, BFD_RELOC_KVX_32},
193 {BFD_RELOC_16, BFD_RELOC_KVX_16},
194 {BFD_RELOC_8, BFD_RELOC_KVX_8},
196 {BFD_RELOC_64_PCREL, BFD_RELOC_KVX_64_PCREL},
197 {BFD_RELOC_32_PCREL, BFD_RELOC_KVX_32_PCREL},
200 /* Given the bfd internal relocation enumerator in CODE, return the
201 corresponding howto entry. */
203 static reloc_howto_type *
204 elfNN_kvx_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
206 unsigned int i;
208 /* Convert bfd generic reloc to KVX-specific reloc. */
209 if (code < BFD_RELOC_KVX_RELOC_START || code > BFD_RELOC_KVX_RELOC_END)
210 for (i = 0; i < ARRAY_SIZE (elf_kvx_reloc_map) ; i++)
211 if (elf_kvx_reloc_map[i].from == code)
213 code = elf_kvx_reloc_map[i].to;
214 break;
217 if (code > BFD_RELOC_KVX_RELOC_START && code < BFD_RELOC_KVX_RELOC_END)
218 return &elf_kvx_howto_table[code - (BFD_RELOC_KVX_RELOC_START + 1)];
220 return NULL;
223 static reloc_howto_type *
224 elfNN_kvx_howto_from_type (bfd *abfd, unsigned int r_type)
226 bfd_reloc_code_real_type val;
227 reloc_howto_type *howto;
229 #if ARCH_SIZE == 32
230 if (r_type > 256)
232 bfd_set_error (bfd_error_bad_value);
233 return NULL;
235 #endif
237 val = elfNN_kvx_bfd_reloc_from_type (abfd, r_type);
238 howto = elfNN_kvx_howto_from_bfd_reloc (val);
240 if (howto != NULL)
241 return howto;
243 bfd_set_error (bfd_error_bad_value);
244 return NULL;
247 static bool
248 elfNN_kvx_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
249 Elf_Internal_Rela *elf_reloc)
251 unsigned int r_type;
253 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
254 bfd_reloc->howto = elfNN_kvx_howto_from_type (abfd, r_type);
256 if (bfd_reloc->howto == NULL)
258 /* xgettext:c-format */
259 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
260 abfd, r_type);
261 return false;
263 return true;
266 static reloc_howto_type *
267 elfNN_kvx_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
268 bfd_reloc_code_real_type code)
270 reloc_howto_type *howto = elfNN_kvx_howto_from_bfd_reloc (code);
272 if (howto != NULL)
273 return howto;
275 bfd_set_error (bfd_error_bad_value);
276 return NULL;
279 static reloc_howto_type *
280 elfNN_kvx_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
281 const char *r_name)
283 unsigned int i;
285 for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
286 if (elf_kvx_howto_table[i].name != NULL
287 && strcasecmp (elf_kvx_howto_table[i].name, r_name) == 0)
288 return &elf_kvx_howto_table[i];
290 return NULL;
293 #define TARGET_LITTLE_SYM kvx_elfNN_vec
294 #define TARGET_LITTLE_NAME "elfNN-kvx"
296 /* The linker script knows the section names for placement.
297 The entry_names are used to do simple name mangling on the stubs.
298 Given a function name, and its type, the stub can be found. The
299 name can be changed. The only requirement is the %s be present. */
300 #define STUB_ENTRY_NAME "__%s_veneer"
302 /* The name of the dynamic interpreter. This is put in the .interp
303 section. */
304 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
307 /* PCREL 27 is signed-extended and scaled by 4 */
308 #define KVX_MAX_FWD_CALL_OFFSET \
309 (((1 << 26) - 1) << 2)
310 #define KVX_MAX_BWD_CALL_OFFSET \
311 (-((1 << 26) << 2))
313 /* Check that the destination of the call is within the PCREL27
314 range. */
315 static int
316 kvx_valid_call_p (bfd_vma value, bfd_vma place)
318 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
319 return (offset <= KVX_MAX_FWD_CALL_OFFSET
320 && offset >= KVX_MAX_BWD_CALL_OFFSET);
323 /* Section name for stubs is the associated section name plus this
324 string. */
325 #define STUB_SUFFIX ".stub"
327 enum elf_kvx_stub_type
329 kvx_stub_none,
330 kvx_stub_long_branch,
333 struct elf_kvx_stub_hash_entry
335 /* Base hash table entry structure. */
336 struct bfd_hash_entry root;
338 /* The stub section. */
339 asection *stub_sec;
341 /* Offset within stub_sec of the beginning of this stub. */
342 bfd_vma stub_offset;
344 /* Given the symbol's value and its section we can determine its final
345 value when building the stubs (so the stub knows where to jump). */
346 bfd_vma target_value;
347 asection *target_section;
349 enum elf_kvx_stub_type stub_type;
351 /* The symbol table entry, if any, that this was derived from. */
352 struct elf_kvx_link_hash_entry *h;
354 /* Destination symbol type */
355 unsigned char st_type;
357 /* Where this stub is being called from, or, in the case of combined
358 stub sections, the first input section in the group. */
359 asection *id_sec;
361 /* The name for the local symbol at the start of this stub. The
362 stub name in the hash table has to be unique; this does not, so
363 it can be friendlier. */
364 char *output_name;
367 /* Used to build a map of a section. This is required for mixed-endian
368 code/data. */
370 typedef struct elf_elf_section_map
372 bfd_vma vma;
373 char type;
375 elf_kvx_section_map;
378 typedef struct _kvx_elf_section_data
380 struct bfd_elf_section_data elf;
381 unsigned int mapcount;
382 unsigned int mapsize;
383 elf_kvx_section_map *map;
385 _kvx_elf_section_data;
387 #define elf_kvx_section_data(sec) \
388 ((_kvx_elf_section_data *) elf_section_data (sec))
390 struct elf_kvx_local_symbol
392 unsigned int got_type;
393 bfd_signed_vma got_refcount;
394 bfd_vma got_offset;
397 struct elf_kvx_obj_tdata
399 struct elf_obj_tdata root;
401 /* local symbol descriptors */
402 struct elf_kvx_local_symbol *locals;
404 /* Zero to warn when linking objects with incompatible enum sizes. */
405 int no_enum_size_warning;
407 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
408 int no_wchar_size_warning;
411 #define elf_kvx_tdata(bfd) \
412 ((struct elf_kvx_obj_tdata *) (bfd)->tdata.any)
414 #define elf_kvx_locals(bfd) (elf_kvx_tdata (bfd)->locals)
416 #define is_kvx_elf(bfd) \
417 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
418 && elf_tdata (bfd) != NULL \
419 && elf_object_id (bfd) == KVX_ELF_DATA)
421 static bool
422 elfNN_kvx_mkobject (bfd *abfd)
424 return bfd_elf_allocate_object (abfd, sizeof (struct elf_kvx_obj_tdata),
425 KVX_ELF_DATA);
428 #define elf_kvx_hash_entry(ent) \
429 ((struct elf_kvx_link_hash_entry *)(ent))
431 #define GOT_UNKNOWN 0
432 #define GOT_NORMAL 1
434 #define GOT_TLS_GD 2
435 #define GOT_TLS_IE 4
436 #define GOT_TLS_LD 8
438 /* KVX ELF linker hash entry. */
439 struct elf_kvx_link_hash_entry
441 struct elf_link_hash_entry root;
443 /* Since PLT entries have variable size, we need to record the
444 index into .got.plt instead of recomputing it from the PLT
445 offset. */
446 bfd_signed_vma plt_got_offset;
448 /* Bit mask representing the type of GOT entry(s) if any required by
449 this symbol. */
450 unsigned int got_type;
452 /* A pointer to the most recently used stub hash entry against this
453 symbol. */
454 struct elf_kvx_stub_hash_entry *stub_cache;
457 /* Get the KVX elf linker hash table from a link_info structure. */
458 #define elf_kvx_hash_table(info) \
459 ((struct elf_kvx_link_hash_table *) ((info)->hash))
461 #define kvx_stub_hash_lookup(table, string, create, copy) \
462 ((struct elf_kvx_stub_hash_entry *) \
463 bfd_hash_lookup ((table), (string), (create), (copy)))
465 /* KVX ELF linker hash table. */
466 struct elf_kvx_link_hash_table
468 /* The main hash table. */
469 struct elf_link_hash_table root;
471 /* Nonzero to force PIC branch veneers. */
472 int pic_veneer;
474 /* The number of bytes in the initial entry in the PLT. */
475 bfd_size_type plt_header_size;
477 /* The number of bytes in the subsequent PLT etries. */
478 bfd_size_type plt_entry_size;
480 /* The bytes of the subsequent PLT entry. */
481 const bfd_byte *plt_entry;
483 /* Short-cuts to get to dynamic linker sections. */
484 asection *sdynbss;
485 asection *srelbss;
487 /* Small local sym cache. */
488 struct sym_cache sym_cache;
490 /* For convenience in allocate_dynrelocs. */
491 bfd *obfd;
493 /* The amount of space used by the reserved portion of the sgotplt
494 section, plus whatever space is used by the jump slots. */
495 bfd_vma sgotplt_jump_table_size;
497 /* The stub hash table. */
498 struct bfd_hash_table stub_hash_table;
500 /* Linker stub bfd. */
501 bfd *stub_bfd;
503 /* Linker call-backs. */
504 asection *(*add_stub_section) (const char *, asection *);
505 void (*layout_sections_again) (void);
507 /* Array to keep track of which stub sections have been created, and
508 information on stub grouping. */
509 struct map_stub
511 /* This is the section to which stubs in the group will be
512 attached. */
513 asection *link_sec;
514 /* The stub section. */
515 asection *stub_sec;
516 } *stub_group;
518 /* Assorted information used by elfNN_kvx_size_stubs. */
519 unsigned int bfd_count;
520 unsigned int top_index;
521 asection **input_list;
524 /* Create an entry in an KVX ELF linker hash table. */
526 static struct bfd_hash_entry *
527 elfNN_kvx_link_hash_newfunc (struct bfd_hash_entry *entry,
528 struct bfd_hash_table *table,
529 const char *string)
531 struct elf_kvx_link_hash_entry *ret =
532 (struct elf_kvx_link_hash_entry *) entry;
534 /* Allocate the structure if it has not already been allocated by a
535 subclass. */
536 if (ret == NULL)
537 ret = bfd_hash_allocate (table,
538 sizeof (struct elf_kvx_link_hash_entry));
539 if (ret == NULL)
540 return (struct bfd_hash_entry *) ret;
542 /* Call the allocation method of the superclass. */
543 ret = ((struct elf_kvx_link_hash_entry *)
544 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
545 table, string));
546 if (ret != NULL)
548 ret->got_type = GOT_UNKNOWN;
549 ret->plt_got_offset = (bfd_vma) - 1;
550 ret->stub_cache = NULL;
553 return (struct bfd_hash_entry *) ret;
556 /* Initialize an entry in the stub hash table. */
558 static struct bfd_hash_entry *
559 stub_hash_newfunc (struct bfd_hash_entry *entry,
560 struct bfd_hash_table *table, const char *string)
562 /* Allocate the structure if it has not already been allocated by a
563 subclass. */
564 if (entry == NULL)
566 entry = bfd_hash_allocate (table,
567 sizeof (struct
568 elf_kvx_stub_hash_entry));
569 if (entry == NULL)
570 return entry;
573 /* Call the allocation method of the superclass. */
574 entry = bfd_hash_newfunc (entry, table, string);
575 if (entry != NULL)
577 struct elf_kvx_stub_hash_entry *eh;
579 /* Initialize the local fields. */
580 eh = (struct elf_kvx_stub_hash_entry *) entry;
581 eh->stub_sec = NULL;
582 eh->stub_offset = 0;
583 eh->target_value = 0;
584 eh->target_section = NULL;
585 eh->stub_type = kvx_stub_none;
586 eh->h = NULL;
587 eh->id_sec = NULL;
590 return entry;
593 /* Copy the extra info we tack onto an elf_link_hash_entry. */
595 static void
596 elfNN_kvx_copy_indirect_symbol (struct bfd_link_info *info,
597 struct elf_link_hash_entry *dir,
598 struct elf_link_hash_entry *ind)
600 struct elf_kvx_link_hash_entry *edir, *eind;
602 edir = (struct elf_kvx_link_hash_entry *) dir;
603 eind = (struct elf_kvx_link_hash_entry *) ind;
605 if (ind->root.type == bfd_link_hash_indirect)
607 /* Copy over PLT info. */
608 if (dir->got.refcount <= 0)
610 edir->got_type = eind->got_type;
611 eind->got_type = GOT_UNKNOWN;
615 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
618 /* Destroy a KVX elf linker hash table. */
620 static void
621 elfNN_kvx_link_hash_table_free (bfd *obfd)
623 struct elf_kvx_link_hash_table *ret
624 = (struct elf_kvx_link_hash_table *) obfd->link.hash;
626 bfd_hash_table_free (&ret->stub_hash_table);
627 _bfd_elf_link_hash_table_free (obfd);
630 /* Create a KVX elf linker hash table. */
632 static struct bfd_link_hash_table *
633 elfNN_kvx_link_hash_table_create (bfd *abfd)
635 struct elf_kvx_link_hash_table *ret;
636 bfd_size_type amt = sizeof (struct elf_kvx_link_hash_table);
638 ret = bfd_zmalloc (amt);
639 if (ret == NULL)
640 return NULL;
642 if (!_bfd_elf_link_hash_table_init
643 (&ret->root, abfd, elfNN_kvx_link_hash_newfunc,
644 sizeof (struct elf_kvx_link_hash_entry), KVX_ELF_DATA))
646 free (ret);
647 return NULL;
650 ret->plt_header_size = PLT_ENTRY_SIZE;
651 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
652 ret->plt_entry = elfNN_kvx_small_plt_entry;
654 ret->obfd = abfd;
656 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
657 sizeof (struct elf_kvx_stub_hash_entry)))
659 _bfd_elf_link_hash_table_free (abfd);
660 return NULL;
663 ret->root.root.hash_table_free = elfNN_kvx_link_hash_table_free;
665 return &ret->root.root;
668 static bfd_reloc_status_type
669 kvx_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
670 bfd_vma offset, bfd_vma value)
672 reloc_howto_type *howto;
674 howto = elfNN_kvx_howto_from_type (input_bfd, r_type);
675 r_type = elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type);
676 return _bfd_kvx_elf_put_addend (input_bfd,
677 input_section->contents + offset, r_type,
678 howto, value);
681 /* Determine the type of stub needed, if any, for a call. */
683 static enum elf_kvx_stub_type
684 kvx_type_of_stub (asection *input_sec,
685 const Elf_Internal_Rela *rel,
686 asection *sym_sec,
687 unsigned char st_type,
688 bfd_vma destination)
690 bfd_vma location;
691 bfd_signed_vma branch_offset;
692 unsigned int r_type;
693 enum elf_kvx_stub_type stub_type = kvx_stub_none;
695 if (st_type != STT_FUNC
696 && (sym_sec == input_sec))
697 return stub_type;
699 /* Determine where the call point is. */
700 location = (input_sec->output_offset
701 + input_sec->output_section->vma + rel->r_offset);
703 branch_offset = (bfd_signed_vma) (destination - location);
705 r_type = ELFNN_R_TYPE (rel->r_info);
707 /* We don't want to redirect any old unconditional jump in this way,
708 only one which is being used for a sibcall, where it is
709 acceptable for the R16 and R17 registers to be clobbered. */
710 if (r_type == R_KVX_PCREL27
711 && (branch_offset > KVX_MAX_FWD_CALL_OFFSET
712 || branch_offset < KVX_MAX_BWD_CALL_OFFSET))
714 stub_type = kvx_stub_long_branch;
717 return stub_type;
720 /* Build a name for an entry in the stub hash table. */
722 static char *
723 elfNN_kvx_stub_name (const asection *input_section,
724 const asection *sym_sec,
725 const struct elf_kvx_link_hash_entry *hash,
726 const Elf_Internal_Rela *rel)
728 char *stub_name;
729 bfd_size_type len;
731 if (hash)
733 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
734 stub_name = bfd_malloc (len);
735 if (stub_name != NULL)
736 snprintf (stub_name, len, "%08x_%s+%" PRIx64 "x",
737 (unsigned int) input_section->id,
738 hash->root.root.root.string,
739 (uint64_t) rel->r_addend);
741 else
743 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
744 stub_name = bfd_malloc (len);
745 if (stub_name != NULL)
746 snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64 "x",
747 (unsigned int) input_section->id,
748 (unsigned int) sym_sec->id,
749 (unsigned int) ELFNN_R_SYM (rel->r_info),
750 (uint64_t) rel->r_addend);
753 return stub_name;
756 /* Return true if symbol H should be hashed in the `.gnu.hash' section. For
757 executable PLT slots where the executable never takes the address of those
758 functions, the function symbols are not added to the hash table. */
760 static bool
761 elf_kvx_hash_symbol (struct elf_link_hash_entry *h)
763 if (h->plt.offset != (bfd_vma) -1
764 && !h->def_regular
765 && !h->pointer_equality_needed)
766 return false;
768 return _bfd_elf_hash_symbol (h);
772 /* Look up an entry in the stub hash. Stub entries are cached because
773 creating the stub name takes a bit of time. */
775 static struct elf_kvx_stub_hash_entry *
776 elfNN_kvx_get_stub_entry (const asection *input_section,
777 const asection *sym_sec,
778 struct elf_link_hash_entry *hash,
779 const Elf_Internal_Rela *rel,
780 struct elf_kvx_link_hash_table *htab)
782 struct elf_kvx_stub_hash_entry *stub_entry;
783 struct elf_kvx_link_hash_entry *h =
784 (struct elf_kvx_link_hash_entry *) hash;
785 const asection *id_sec;
787 if ((input_section->flags & SEC_CODE) == 0)
788 return NULL;
790 /* If this input section is part of a group of sections sharing one
791 stub section, then use the id of the first section in the group.
792 Stub names need to include a section id, as there may well be
793 more than one stub used to reach say, printf, and we need to
794 distinguish between them. */
795 id_sec = htab->stub_group[input_section->id].link_sec;
797 if (h != NULL && h->stub_cache != NULL
798 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
800 stub_entry = h->stub_cache;
802 else
804 char *stub_name;
806 stub_name = elfNN_kvx_stub_name (id_sec, sym_sec, h, rel);
807 if (stub_name == NULL)
808 return NULL;
810 stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table,
811 stub_name, false, false);
812 if (h != NULL)
813 h->stub_cache = stub_entry;
815 free (stub_name);
818 return stub_entry;
822 /* Create a stub section. */
824 static asection *
825 _bfd_kvx_create_stub_section (asection *section,
826 struct elf_kvx_link_hash_table *htab)
829 size_t namelen;
830 bfd_size_type len;
831 char *s_name;
833 namelen = strlen (section->name);
834 len = namelen + sizeof (STUB_SUFFIX);
835 s_name = bfd_alloc (htab->stub_bfd, len);
836 if (s_name == NULL)
837 return NULL;
839 memcpy (s_name, section->name, namelen);
840 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
841 return (*htab->add_stub_section) (s_name, section);
845 /* Find or create a stub section for a link section.
847 Fix or create the stub section used to collect stubs attached to
848 the specified link section. */
850 static asection *
851 _bfd_kvx_get_stub_for_link_section (asection *link_section,
852 struct elf_kvx_link_hash_table *htab)
854 if (htab->stub_group[link_section->id].stub_sec == NULL)
855 htab->stub_group[link_section->id].stub_sec
856 = _bfd_kvx_create_stub_section (link_section, htab);
857 return htab->stub_group[link_section->id].stub_sec;
861 /* Find or create a stub section in the stub group for an input
862 section. */
864 static asection *
865 _bfd_kvx_create_or_find_stub_sec (asection *section,
866 struct elf_kvx_link_hash_table *htab)
868 asection *link_sec = htab->stub_group[section->id].link_sec;
869 return _bfd_kvx_get_stub_for_link_section (link_sec, htab);
873 /* Add a new stub entry in the stub group associated with an input
874 section to the stub hash. Not all fields of the new stub entry are
875 initialised. */
877 static struct elf_kvx_stub_hash_entry *
878 _bfd_kvx_add_stub_entry_in_group (const char *stub_name,
879 asection *section,
880 struct elf_kvx_link_hash_table *htab)
882 asection *link_sec;
883 asection *stub_sec;
884 struct elf_kvx_stub_hash_entry *stub_entry;
886 link_sec = htab->stub_group[section->id].link_sec;
887 stub_sec = _bfd_kvx_create_or_find_stub_sec (section, htab);
889 /* Enter this entry into the linker stub hash table. */
890 stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table, stub_name,
891 true, false);
892 if (stub_entry == NULL)
894 /* xgettext:c-format */
895 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
896 section->owner, stub_name);
897 return NULL;
900 stub_entry->stub_sec = stub_sec;
901 stub_entry->stub_offset = 0;
902 stub_entry->id_sec = link_sec;
904 return stub_entry;
907 static bool
908 kvx_build_one_stub (struct bfd_hash_entry *gen_entry,
909 void *in_arg)
911 struct elf_kvx_stub_hash_entry *stub_entry;
912 asection *stub_sec;
913 bfd *stub_bfd;
914 bfd_byte *loc;
915 bfd_vma sym_value;
916 unsigned int template_size;
917 const uint32_t *template;
918 unsigned int i;
919 struct bfd_link_info *info;
921 /* Massage our args to the form they really have. */
922 stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
924 info = (struct bfd_link_info *) in_arg;
926 /* Fail if the target section could not be assigned to an output
927 section. The user should fix his linker script. */
928 if (stub_entry->target_section->output_section == NULL
929 && info->non_contiguous_regions)
930 info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
931 "Retry without "
932 "--enable-non-contiguous-regions.\n"),
933 stub_entry->target_section);
935 stub_sec = stub_entry->stub_sec;
937 /* Make a note of the offset within the stubs for this entry. */
938 stub_entry->stub_offset = stub_sec->size;
939 loc = stub_sec->contents + stub_entry->stub_offset;
941 stub_bfd = stub_sec->owner;
943 /* This is the address of the stub destination. */
944 sym_value = (stub_entry->target_value
945 + stub_entry->target_section->output_offset
946 + stub_entry->target_section->output_section->vma);
948 switch (stub_entry->stub_type)
950 case kvx_stub_long_branch:
951 template = elfNN_kvx_long_branch_stub;
952 template_size = sizeof (elfNN_kvx_long_branch_stub);
953 break;
954 default:
955 abort ();
958 for (i = 0; i < (template_size / sizeof template[0]); i++)
960 bfd_putl32 (template[i], loc);
961 loc += 4;
964 stub_sec->size += template_size;
966 switch (stub_entry->stub_type)
968 case kvx_stub_long_branch:
969 /* The stub uses a make insn with 43bits immediate.
970 We need to apply 3 relocations:
971 BFD_RELOC_KVX_S43_LO10,
972 BFD_RELOC_KVX_S43_UP27,
973 BFD_RELOC_KVX_S43_EX6. */
974 if (kvx_relocate (R_KVX_S43_LO10, stub_bfd, stub_sec,
975 stub_entry->stub_offset, sym_value) != bfd_reloc_ok)
976 BFD_FAIL ();
977 if (kvx_relocate (R_KVX_S43_EX6, stub_bfd, stub_sec,
978 stub_entry->stub_offset, sym_value) != bfd_reloc_ok)
979 BFD_FAIL ();
980 if (kvx_relocate (R_KVX_S43_UP27, stub_bfd, stub_sec,
981 stub_entry->stub_offset + 4, sym_value) != bfd_reloc_ok)
982 BFD_FAIL ();
983 break;
984 default:
985 abort ();
988 return true;
991 /* As above, but don't actually build the stub. Just bump offset so
992 we know stub section sizes. */
994 static bool
995 kvx_size_one_stub (struct bfd_hash_entry *gen_entry,
996 void *in_arg ATTRIBUTE_UNUSED)
998 struct elf_kvx_stub_hash_entry *stub_entry;
999 int size;
1001 /* Massage our args to the form they really have. */
1002 stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
1004 switch (stub_entry->stub_type)
1006 case kvx_stub_long_branch:
1007 size = sizeof (elfNN_kvx_long_branch_stub);
1008 break;
1009 default:
1010 abort ();
1013 stub_entry->stub_sec->size += size;
1014 return true;
1017 /* External entry points for sizing and building linker stubs. */
1019 /* Set up various things so that we can make a list of input sections
1020 for each output section included in the link. Returns -1 on error,
1021 0 when no stubs will be needed, and 1 on success. */
1024 elfNN_kvx_setup_section_lists (bfd *output_bfd,
1025 struct bfd_link_info *info)
1027 bfd *input_bfd;
1028 unsigned int bfd_count;
1029 unsigned int top_id, top_index;
1030 asection *section;
1031 asection **input_list, **list;
1032 bfd_size_type amt;
1033 struct elf_kvx_link_hash_table *htab =
1034 elf_kvx_hash_table (info);
1036 if (!is_elf_hash_table ((const struct bfd_link_hash_table *)htab))
1037 return 0;
1039 /* Count the number of input BFDs and find the top input section id. */
1040 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
1041 input_bfd != NULL; input_bfd = input_bfd->link.next)
1043 bfd_count += 1;
1044 for (section = input_bfd->sections;
1045 section != NULL; section = section->next)
1047 if (top_id < section->id)
1048 top_id = section->id;
1051 htab->bfd_count = bfd_count;
1053 amt = sizeof (struct map_stub) * (top_id + 1);
1054 htab->stub_group = bfd_zmalloc (amt);
1055 if (htab->stub_group == NULL)
1056 return -1;
1058 /* We can't use output_bfd->section_count here to find the top output
1059 section index as some sections may have been removed, and
1060 _bfd_strip_section_from_output doesn't renumber the indices. */
1061 for (section = output_bfd->sections, top_index = 0;
1062 section != NULL; section = section->next)
1064 if (top_index < section->index)
1065 top_index = section->index;
1068 htab->top_index = top_index;
1069 amt = sizeof (asection *) * (top_index + 1);
1070 input_list = bfd_malloc (amt);
1071 htab->input_list = input_list;
1072 if (input_list == NULL)
1073 return -1;
1075 /* For sections we aren't interested in, mark their entries with a
1076 value we can check later. */
1077 list = input_list + top_index;
1079 *list = bfd_abs_section_ptr;
1080 while (list-- != input_list);
1082 for (section = output_bfd->sections;
1083 section != NULL; section = section->next)
1085 if ((section->flags & SEC_CODE) != 0)
1086 input_list[section->index] = NULL;
1089 return 1;
1092 /* Used by elfNN_kvx_next_input_section and group_sections. */
1093 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
1095 /* The linker repeatedly calls this function for each input section,
1096 in the order that input sections are linked into output sections.
1097 Build lists of input sections to determine groupings between which
1098 we may insert linker stubs. */
1100 void
1101 elfNN_kvx_next_input_section (struct bfd_link_info *info, asection *isec)
1103 struct elf_kvx_link_hash_table *htab =
1104 elf_kvx_hash_table (info);
1106 if (isec->output_section->index <= htab->top_index)
1108 asection **list = htab->input_list + isec->output_section->index;
1110 if (*list != bfd_abs_section_ptr)
1112 /* Steal the link_sec pointer for our list. */
1113 /* This happens to make the list in reverse order,
1114 which is what we want. */
1115 PREV_SEC (isec) = *list;
1116 *list = isec;
1121 /* See whether we can group stub sections together. Grouping stub
1122 sections may result in fewer stubs. More importantly, we need to
1123 put all .init* and .fini* stubs at the beginning of the .init or
1124 .fini output sections respectively, because glibc splits the
1125 _init and _fini functions into multiple parts. Putting a stub in
1126 the middle of a function is not a good idea. */
1128 static void
1129 group_sections (struct elf_kvx_link_hash_table *htab,
1130 bfd_size_type stub_group_size,
1131 bool stubs_always_after_branch)
1133 asection **list = htab->input_list;
1137 asection *tail = *list;
1138 asection *head;
1140 if (tail == bfd_abs_section_ptr)
1141 continue;
1143 /* Reverse the list: we must avoid placing stubs at the
1144 beginning of the section because the beginning of the text
1145 section may be required for an interrupt vector in bare metal
1146 code. */
1147 #define NEXT_SEC PREV_SEC
1148 head = NULL;
1149 while (tail != NULL)
1151 /* Pop from tail. */
1152 asection *item = tail;
1153 tail = PREV_SEC (item);
1155 /* Push on head. */
1156 NEXT_SEC (item) = head;
1157 head = item;
1160 while (head != NULL)
1162 asection *curr;
1163 asection *next;
1164 bfd_vma stub_group_start = head->output_offset;
1165 bfd_vma end_of_next;
1167 curr = head;
1168 while (NEXT_SEC (curr) != NULL)
1170 next = NEXT_SEC (curr);
1171 end_of_next = next->output_offset + next->size;
1172 if (end_of_next - stub_group_start >= stub_group_size)
1173 /* End of NEXT is too far from start, so stop. */
1174 break;
1175 /* Add NEXT to the group. */
1176 curr = next;
1179 /* OK, the size from the start to the start of CURR is less
1180 than stub_group_size and thus can be handled by one stub
1181 section. (Or the head section is itself larger than
1182 stub_group_size, in which case we may be toast.)
1183 We should really be keeping track of the total size of
1184 stubs added here, as stubs contribute to the final output
1185 section size. */
1188 next = NEXT_SEC (head);
1189 /* Set up this stub group. */
1190 htab->stub_group[head->id].link_sec = curr;
1192 while (head != curr && (head = next) != NULL);
1194 /* But wait, there's more! Input sections up to stub_group_size
1195 bytes after the stub section can be handled by it too. */
1196 if (!stubs_always_after_branch)
1198 stub_group_start = curr->output_offset + curr->size;
1200 while (next != NULL)
1202 end_of_next = next->output_offset + next->size;
1203 if (end_of_next - stub_group_start >= stub_group_size)
1204 /* End of NEXT is too far from stubs, so stop. */
1205 break;
1206 /* Add NEXT to the stub group. */
1207 head = next;
1208 next = NEXT_SEC (head);
1209 htab->stub_group[head->id].link_sec = curr;
1212 head = next;
1215 while (list++ != htab->input_list + htab->top_index);
1217 free (htab->input_list);
1220 static void
1221 _bfd_kvx_resize_stubs (struct elf_kvx_link_hash_table *htab)
1223 asection *section;
1225 /* OK, we've added some stubs. Find out the new size of the
1226 stub sections. */
1227 for (section = htab->stub_bfd->sections;
1228 section != NULL; section = section->next)
1230 /* Ignore non-stub sections. */
1231 if (!strstr (section->name, STUB_SUFFIX))
1232 continue;
1233 section->size = 0;
1236 bfd_hash_traverse (&htab->stub_hash_table, kvx_size_one_stub, htab);
1239 /* Satisfy the ELF linker by filling in some fields in our fake bfd. */
1241 bool
1242 kvx_elfNN_init_stub_bfd (struct bfd_link_info *info,
1243 bfd *stub_bfd)
1245 struct elf_kvx_link_hash_table *htab;
1247 elf_elfheader (stub_bfd)->e_ident[EI_CLASS] = ELFCLASSNN;
1249 /* Always hook our dynamic sections into the first bfd, which is the
1250 linker created stub bfd. This ensures that the GOT header is at
1251 the start of the output TOC section. */
1252 htab = elf_kvx_hash_table (info);
1253 if (htab == NULL)
1254 return false;
1256 return true;
1259 /* Determine and set the size of the stub section for a final link.
1261 The basic idea here is to examine all the relocations looking for
1262 PC-relative calls to a target that is unreachable with a 27bits
1263 immediate (found in call and goto). */
1265 bool
1266 elfNN_kvx_size_stubs (bfd *output_bfd,
1267 bfd *stub_bfd,
1268 struct bfd_link_info *info,
1269 bfd_signed_vma group_size,
1270 asection * (*add_stub_section) (const char *,
1271 asection *),
1272 void (*layout_sections_again) (void))
1274 bfd_size_type stub_group_size;
1275 bool stubs_always_before_branch;
1276 bool stub_changed = false;
1277 struct elf_kvx_link_hash_table *htab = elf_kvx_hash_table (info);
1279 /* Propagate mach to stub bfd, because it may not have been
1280 finalized when we created stub_bfd. */
1281 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
1282 bfd_get_mach (output_bfd));
1284 /* Stash our params away. */
1285 htab->stub_bfd = stub_bfd;
1286 htab->add_stub_section = add_stub_section;
1287 htab->layout_sections_again = layout_sections_again;
1288 stubs_always_before_branch = group_size < 0;
1289 if (group_size < 0)
1290 stub_group_size = -group_size;
1291 else
1292 stub_group_size = group_size;
1294 if (stub_group_size == 1)
1296 /* Default values. */
1297 /* KVX branch range is +-256MB. The value used is 1MB less. */
1298 stub_group_size = 255 * 1024 * 1024;
1301 group_sections (htab, stub_group_size, stubs_always_before_branch);
1303 (*htab->layout_sections_again) ();
1305 while (1)
1307 bfd *input_bfd;
1309 for (input_bfd = info->input_bfds;
1310 input_bfd != NULL; input_bfd = input_bfd->link.next)
1312 Elf_Internal_Shdr *symtab_hdr;
1313 asection *section;
1314 Elf_Internal_Sym *local_syms = NULL;
1316 if (!is_kvx_elf (input_bfd)
1317 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
1318 continue;
1320 /* We'll need the symbol table in a second. */
1321 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
1322 if (symtab_hdr->sh_info == 0)
1323 continue;
1325 /* Walk over each section attached to the input bfd. */
1326 for (section = input_bfd->sections;
1327 section != NULL; section = section->next)
1329 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1331 /* If there aren't any relocs, then there's nothing more
1332 to do. */
1333 if ((section->flags & SEC_RELOC) == 0
1334 || section->reloc_count == 0
1335 || (section->flags & SEC_CODE) == 0)
1336 continue;
1338 /* If this section is a link-once section that will be
1339 discarded, then don't create any stubs. */
1340 if (section->output_section == NULL
1341 || section->output_section->owner != output_bfd)
1342 continue;
1344 /* Get the relocs. */
1345 internal_relocs
1346 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
1347 NULL, info->keep_memory);
1348 if (internal_relocs == NULL)
1349 goto error_ret_free_local;
1351 /* Now examine each relocation. */
1352 irela = internal_relocs;
1353 irelaend = irela + section->reloc_count;
1354 for (; irela < irelaend; irela++)
1356 unsigned int r_type, r_indx;
1357 enum elf_kvx_stub_type stub_type;
1358 struct elf_kvx_stub_hash_entry *stub_entry;
1359 asection *sym_sec;
1360 bfd_vma sym_value;
1361 bfd_vma destination;
1362 struct elf_kvx_link_hash_entry *hash;
1363 const char *sym_name;
1364 char *stub_name;
1365 const asection *id_sec;
1366 unsigned char st_type;
1367 bfd_size_type len;
1369 r_type = ELFNN_R_TYPE (irela->r_info);
1370 r_indx = ELFNN_R_SYM (irela->r_info);
1372 if (r_type >= (unsigned int) R_KVX_end)
1374 bfd_set_error (bfd_error_bad_value);
1375 error_ret_free_internal:
1376 if (elf_section_data (section)->relocs == NULL)
1377 free (internal_relocs);
1378 goto error_ret_free_local;
1381 /* Only look for stubs on unconditional branch and
1382 branch and link instructions. */
1383 /* This catches CALL and GOTO insn */
1384 if (r_type != (unsigned int) R_KVX_PCREL27)
1385 continue;
1387 /* Now determine the call target, its name, value,
1388 section. */
1389 sym_sec = NULL;
1390 sym_value = 0;
1391 destination = 0;
1392 hash = NULL;
1393 sym_name = NULL;
1394 if (r_indx < symtab_hdr->sh_info)
1396 /* It's a local symbol. */
1397 Elf_Internal_Sym *sym;
1398 Elf_Internal_Shdr *hdr;
1400 if (local_syms == NULL)
1402 local_syms
1403 = (Elf_Internal_Sym *) symtab_hdr->contents;
1404 if (local_syms == NULL)
1405 local_syms
1406 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
1407 symtab_hdr->sh_info, 0,
1408 NULL, NULL, NULL);
1409 if (local_syms == NULL)
1410 goto error_ret_free_internal;
1413 sym = local_syms + r_indx;
1414 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
1415 sym_sec = hdr->bfd_section;
1416 if (!sym_sec)
1417 /* This is an undefined symbol. It can never
1418 be resolved. */
1419 continue;
1421 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
1422 sym_value = sym->st_value;
1423 destination = (sym_value + irela->r_addend
1424 + sym_sec->output_offset
1425 + sym_sec->output_section->vma);
1426 st_type = ELF_ST_TYPE (sym->st_info);
1427 sym_name
1428 = bfd_elf_string_from_elf_section (input_bfd,
1429 symtab_hdr->sh_link,
1430 sym->st_name);
1432 else
1434 int e_indx;
1436 e_indx = r_indx - symtab_hdr->sh_info;
1437 hash = ((struct elf_kvx_link_hash_entry *)
1438 elf_sym_hashes (input_bfd)[e_indx]);
1440 while (hash->root.root.type == bfd_link_hash_indirect
1441 || hash->root.root.type == bfd_link_hash_warning)
1442 hash = ((struct elf_kvx_link_hash_entry *)
1443 hash->root.root.u.i.link);
1445 if (hash->root.root.type == bfd_link_hash_defined
1446 || hash->root.root.type == bfd_link_hash_defweak)
1448 struct elf_kvx_link_hash_table *globals =
1449 elf_kvx_hash_table (info);
1450 sym_sec = hash->root.root.u.def.section;
1451 sym_value = hash->root.root.u.def.value;
1452 /* For a destination in a shared library,
1453 use the PLT stub as target address to
1454 decide whether a branch stub is
1455 needed. */
1456 if (globals->root.splt != NULL && hash != NULL
1457 && hash->root.plt.offset != (bfd_vma) - 1)
1459 sym_sec = globals->root.splt;
1460 sym_value = hash->root.plt.offset;
1461 if (sym_sec->output_section != NULL)
1462 destination = (sym_value
1463 + sym_sec->output_offset
1464 + sym_sec->output_section->vma);
1466 else if (sym_sec->output_section != NULL)
1467 destination = (sym_value + irela->r_addend
1468 + sym_sec->output_offset
1469 + sym_sec->output_section->vma);
1471 else if (hash->root.root.type == bfd_link_hash_undefined
1472 || (hash->root.root.type
1473 == bfd_link_hash_undefweak))
1475 /* For a shared library, use the PLT stub as
1476 target address to decide whether a long
1477 branch stub is needed.
1478 For absolute code, they cannot be handled. */
1479 struct elf_kvx_link_hash_table *globals =
1480 elf_kvx_hash_table (info);
1482 if (globals->root.splt != NULL && hash != NULL
1483 && hash->root.plt.offset != (bfd_vma) - 1)
1485 sym_sec = globals->root.splt;
1486 sym_value = hash->root.plt.offset;
1487 if (sym_sec->output_section != NULL)
1488 destination = (sym_value
1489 + sym_sec->output_offset
1490 + sym_sec->output_section->vma);
1492 else
1493 continue;
1495 else
1497 bfd_set_error (bfd_error_bad_value);
1498 goto error_ret_free_internal;
1500 st_type = ELF_ST_TYPE (hash->root.type);
1501 sym_name = hash->root.root.root.string;
1504 /* Determine what (if any) linker stub is needed. */
1505 stub_type = kvx_type_of_stub (section, irela, sym_sec,
1506 st_type, destination);
1507 if (stub_type == kvx_stub_none)
1508 continue;
1510 /* Support for grouping stub sections. */
1511 id_sec = htab->stub_group[section->id].link_sec;
1513 /* Get the name of this stub. */
1514 stub_name = elfNN_kvx_stub_name (id_sec, sym_sec, hash,
1515 irela);
1516 if (!stub_name)
1517 goto error_ret_free_internal;
1519 stub_entry =
1520 kvx_stub_hash_lookup (&htab->stub_hash_table,
1521 stub_name, false, false);
1522 if (stub_entry != NULL)
1524 /* The proper stub has already been created. */
1525 free (stub_name);
1526 /* Always update this stub's target since it may have
1527 changed after layout. */
1528 stub_entry->target_value = sym_value + irela->r_addend;
1529 continue;
1532 stub_entry = _bfd_kvx_add_stub_entry_in_group
1533 (stub_name, section, htab);
1534 if (stub_entry == NULL)
1536 free (stub_name);
1537 goto error_ret_free_internal;
1540 stub_entry->target_value = sym_value + irela->r_addend;
1541 stub_entry->target_section = sym_sec;
1542 stub_entry->stub_type = stub_type;
1543 stub_entry->h = hash;
1544 stub_entry->st_type = st_type;
1546 if (sym_name == NULL)
1547 sym_name = "unnamed";
1548 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
1549 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
1550 if (stub_entry->output_name == NULL)
1552 free (stub_name);
1553 goto error_ret_free_internal;
1556 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
1557 sym_name);
1559 stub_changed = true;
1562 /* We're done with the internal relocs, free them. */
1563 if (elf_section_data (section)->relocs == NULL)
1564 free (internal_relocs);
1568 if (!stub_changed)
1569 break;
1571 _bfd_kvx_resize_stubs (htab);
1573 /* Ask the linker to do its stuff. */
1574 (*htab->layout_sections_again) ();
1575 stub_changed = false;
1578 return true;
1580 error_ret_free_local:
1581 return false;
1585 /* Build all the stubs associated with the current output file. The
1586 stubs are kept in a hash table attached to the main linker hash
1587 table. We also set up the .plt entries for statically linked PIC
1588 functions here. This function is called via kvx_elf_finish in the
1589 linker. */
1591 bool
1592 elfNN_kvx_build_stubs (struct bfd_link_info *info)
1594 asection *stub_sec;
1595 struct bfd_hash_table *table;
1596 struct elf_kvx_link_hash_table *htab;
1598 htab = elf_kvx_hash_table (info);
1600 for (stub_sec = htab->stub_bfd->sections;
1601 stub_sec != NULL; stub_sec = stub_sec->next)
1603 bfd_size_type size;
1605 /* Ignore non-stub sections. */
1606 if (!strstr (stub_sec->name, STUB_SUFFIX))
1607 continue;
1609 /* Allocate memory to hold the linker stubs. */
1610 size = stub_sec->size;
1611 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
1612 if (stub_sec->contents == NULL && size != 0)
1613 return false;
1614 stub_sec->size = 0;
1617 /* Build the stubs as directed by the stub hash table. */
1618 table = &htab->stub_hash_table;
1619 bfd_hash_traverse (table, kvx_build_one_stub, info);
1621 return true;
1624 static bfd_vma
1625 kvx_calculate_got_entry_vma (struct elf_link_hash_entry *h,
1626 struct elf_kvx_link_hash_table
1627 *globals, struct bfd_link_info *info,
1628 bfd_vma value, bfd *output_bfd,
1629 bool *unresolved_reloc_p)
1631 bfd_vma off = (bfd_vma) - 1;
1632 asection *basegot = globals->root.sgot;
1633 bool dyn = globals->root.dynamic_sections_created;
1635 if (h != NULL)
1637 BFD_ASSERT (basegot != NULL);
1638 off = h->got.offset;
1639 BFD_ASSERT (off != (bfd_vma) - 1);
1640 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
1641 || (bfd_link_pic (info)
1642 && SYMBOL_REFERENCES_LOCAL (info, h))
1643 || (ELF_ST_VISIBILITY (h->other)
1644 && h->root.type == bfd_link_hash_undefweak))
1646 /* This is actually a static link, or it is a -Bsymbolic link
1647 and the symbol is defined locally. We must initialize this
1648 entry in the global offset table. Since the offset must
1649 always be a multiple of 8 (4 in the case of ILP32), we use
1650 the least significant bit to record whether we have
1651 initialized it already.
1652 When doing a dynamic link, we create a .rel(a).got relocation
1653 entry to initialize the value. This is done in the
1654 finish_dynamic_symbol routine. */
1655 if ((off & 1) != 0)
1656 off &= ~1;
1657 else
1659 bfd_put_NN (output_bfd, value, basegot->contents + off);
1660 h->got.offset |= 1;
1663 else
1664 *unresolved_reloc_p = false;
1667 return off;
1670 static unsigned int
1671 kvx_reloc_got_type (bfd_reloc_code_real_type r_type)
1673 switch (r_type)
1675 /* Extracted with:
1676 awk 'match ($0, /HOWTO.*R_(KVX.*_GOT(OFF)?(64)?_.*),/,ary) \
1677 {print "case BFD_RELOC_" ary[1] ":";}' elfxx-kvxc.def */
1678 case BFD_RELOC_KVX_S37_GOTOFF_LO10:
1679 case BFD_RELOC_KVX_S37_GOTOFF_UP27:
1681 case BFD_RELOC_KVX_S37_GOT_LO10:
1682 case BFD_RELOC_KVX_S37_GOT_UP27:
1684 case BFD_RELOC_KVX_S43_GOTOFF_LO10:
1685 case BFD_RELOC_KVX_S43_GOTOFF_UP27:
1686 case BFD_RELOC_KVX_S43_GOTOFF_EX6:
1688 case BFD_RELOC_KVX_S43_GOT_LO10:
1689 case BFD_RELOC_KVX_S43_GOT_UP27:
1690 case BFD_RELOC_KVX_S43_GOT_EX6:
1691 return GOT_NORMAL;
1693 case BFD_RELOC_KVX_S37_TLS_GD_LO10:
1694 case BFD_RELOC_KVX_S37_TLS_GD_UP27:
1695 case BFD_RELOC_KVX_S43_TLS_GD_LO10:
1696 case BFD_RELOC_KVX_S43_TLS_GD_UP27:
1697 case BFD_RELOC_KVX_S43_TLS_GD_EX6:
1698 return GOT_TLS_GD;
1700 case BFD_RELOC_KVX_S37_TLS_LD_LO10:
1701 case BFD_RELOC_KVX_S37_TLS_LD_UP27:
1702 case BFD_RELOC_KVX_S43_TLS_LD_LO10:
1703 case BFD_RELOC_KVX_S43_TLS_LD_UP27:
1704 case BFD_RELOC_KVX_S43_TLS_LD_EX6:
1705 return GOT_TLS_LD;
1707 case BFD_RELOC_KVX_S37_TLS_IE_LO10:
1708 case BFD_RELOC_KVX_S37_TLS_IE_UP27:
1709 case BFD_RELOC_KVX_S43_TLS_IE_LO10:
1710 case BFD_RELOC_KVX_S43_TLS_IE_UP27:
1711 case BFD_RELOC_KVX_S43_TLS_IE_EX6:
1712 return GOT_TLS_IE;
1714 default:
1715 break;
1717 return GOT_UNKNOWN;
1720 static bool
1721 kvx_can_relax_tls (bfd *input_bfd ATTRIBUTE_UNUSED,
1722 struct bfd_link_info *info ATTRIBUTE_UNUSED,
1723 bfd_reloc_code_real_type r_type ATTRIBUTE_UNUSED,
1724 struct elf_link_hash_entry *h ATTRIBUTE_UNUSED,
1725 unsigned long r_symndx ATTRIBUTE_UNUSED)
1727 if (! IS_KVX_TLS_RELAX_RELOC (r_type))
1728 return false;
1730 /* Relaxing hook. Disabled on KVX. */
1731 /* See elfnn-aarch64.c */
1732 return true;
1735 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
1736 enumerator. */
1738 static bfd_reloc_code_real_type
1739 kvx_tls_transition (bfd *input_bfd,
1740 struct bfd_link_info *info,
1741 unsigned int r_type,
1742 struct elf_link_hash_entry *h,
1743 unsigned long r_symndx)
1745 bfd_reloc_code_real_type bfd_r_type
1746 = elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type);
1748 if (! kvx_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
1749 return bfd_r_type;
1751 return bfd_r_type;
1754 /* Return the base VMA address which should be subtracted from real addresses
1755 when resolving R_KVX_*_TLS_GD_* and R_KVX_*_TLS_LD_* relocation. */
1757 static bfd_vma
1758 dtpoff_base (struct bfd_link_info *info)
1760 /* If tls_sec is NULL, we should have signalled an error already. */
1761 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
1762 return elf_hash_table (info)->tls_sec->vma;
1765 /* Return the base VMA address which should be subtracted from real addresses
1766 when resolving R_KVX_*_TLS_IE_* and R_KVX_*_TLS_LE_* relocations. */
1768 static bfd_vma
1769 tpoff_base (struct bfd_link_info *info)
1771 struct elf_link_hash_table *htab = elf_hash_table (info);
1773 /* If tls_sec is NULL, we should have signalled an error already. */
1774 BFD_ASSERT (htab->tls_sec != NULL);
1776 bfd_vma base = align_power ((bfd_vma) 0,
1777 htab->tls_sec->alignment_power);
1778 return htab->tls_sec->vma - base;
1781 static bfd_vma *
1782 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
1783 unsigned long r_symndx)
1785 /* Calculate the address of the GOT entry for symbol
1786 referred to in h. */
1787 if (h != NULL)
1788 return &h->got.offset;
1789 else
1791 /* local symbol */
1792 struct elf_kvx_local_symbol *l;
1794 l = elf_kvx_locals (input_bfd);
1795 return &l[r_symndx].got_offset;
1799 static void
1800 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
1801 unsigned long r_symndx)
1803 bfd_vma *p;
1804 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
1805 *p |= 1;
1808 static int
1809 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
1810 unsigned long r_symndx)
1812 bfd_vma value;
1813 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1814 return value & 1;
1817 static bfd_vma
1818 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
1819 unsigned long r_symndx)
1821 bfd_vma value;
1822 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1823 value &= ~1;
1824 return value;
1827 /* N_ONES produces N one bits, without overflowing machine arithmetic. */
1828 #define N_ONES(n) (((((bfd_vma) 1 << ((n) -1)) - 1) << 1) | 1)
1830 /* This is a copy/paste + modification from
1831 reloc.c:_bfd_relocate_contents. Relocations are applied to 32bits
1832 words, so all overflow checks will overflow for values above
1833 32bits. */
1834 static bfd_reloc_status_type
1835 check_signed_overflow (enum complain_overflow complain_on_overflow,
1836 bfd_reloc_code_real_type bfd_r_type, bfd *input_bfd,
1837 bfd_vma relocation)
1839 bfd_reloc_status_type flag = bfd_reloc_ok;
1840 bfd_vma addrmask, fieldmask, signmask, ss;
1841 bfd_vma a, b, sum;
1842 bfd_vma x = 0;
1844 /* These usually come from howto struct. As we don't check for
1845 values fitting in bitfields or in subpart of words, we set all
1846 these to values to check as if the field is starting from first
1847 bit. */
1848 unsigned int rightshift = 0;
1849 unsigned int bitpos = 0;
1850 unsigned int bitsize = 0;
1851 bfd_vma src_mask = -1;
1853 /* Only regular symbol relocations are checked here. Others
1854 relocations (GOT, TLS) could be checked if the need is
1855 confirmed. At the moment, we keep previous behavior
1856 (ie. unchecked) for those. */
1857 switch (bfd_r_type)
1859 case BFD_RELOC_KVX_S37_LO10:
1860 case BFD_RELOC_KVX_S37_UP27:
1861 bitsize = 37;
1862 break;
1864 case BFD_RELOC_KVX_S32_LO5:
1865 case BFD_RELOC_KVX_S32_UP27:
1866 bitsize = 32;
1867 break;
1869 case BFD_RELOC_KVX_S43_LO10:
1870 case BFD_RELOC_KVX_S43_UP27:
1871 case BFD_RELOC_KVX_S43_EX6:
1872 bitsize = 43;
1873 break;
1875 case BFD_RELOC_KVX_S64_LO10:
1876 case BFD_RELOC_KVX_S64_UP27:
1877 case BFD_RELOC_KVX_S64_EX27:
1878 bitsize = 64;
1879 break;
1881 default:
1882 return bfd_reloc_ok;
1885 /* direct copy/paste from reloc.c below */
1887 /* Get the values to be added together. For signed and unsigned
1888 relocations, we assume that all values should be truncated to
1889 the size of an address. For bitfields, all the bits matter.
1890 See also bfd_check_overflow. */
1891 fieldmask = N_ONES (bitsize);
1892 signmask = ~fieldmask;
1893 addrmask = (N_ONES (bfd_arch_bits_per_address (input_bfd))
1894 | (fieldmask << rightshift));
1895 a = (relocation & addrmask) >> rightshift;
1896 b = (x & src_mask & addrmask) >> bitpos;
1897 addrmask >>= rightshift;
1899 switch (complain_on_overflow)
1901 case complain_overflow_signed:
1902 /* If any sign bits are set, all sign bits must be set.
1903 That is, A must be a valid negative address after
1904 shifting. */
1905 signmask = ~(fieldmask >> 1);
1906 /* Fall thru */
1908 case complain_overflow_bitfield:
1909 /* Much like the signed check, but for a field one bit
1910 wider. We allow a bitfield to represent numbers in the
1911 range -2**n to 2**n-1, where n is the number of bits in the
1912 field. Note that when bfd_vma is 32 bits, a 32-bit reloc
1913 can't overflow, which is exactly what we want. */
1914 ss = a & signmask;
1915 if (ss != 0 && ss != (addrmask & signmask))
1916 flag = bfd_reloc_overflow;
1918 /* We only need this next bit of code if the sign bit of B
1919 is below the sign bit of A. This would only happen if
1920 SRC_MASK had fewer bits than BITSIZE. Note that if
1921 SRC_MASK has more bits than BITSIZE, we can get into
1922 trouble; we would need to verify that B is in range, as
1923 we do for A above. */
1924 ss = ((~src_mask) >> 1) & src_mask;
1925 ss >>= bitpos;
1927 /* Set all the bits above the sign bit. */
1928 b = (b ^ ss) - ss;
1930 /* Now we can do the addition. */
1931 sum = a + b;
1933 /* See if the result has the correct sign. Bits above the
1934 sign bit are junk now; ignore them. If the sum is
1935 positive, make sure we did not have all negative inputs;
1936 if the sum is negative, make sure we did not have all
1937 positive inputs. The test below looks only at the sign
1938 bits, and it really just
1939 SIGN (A) == SIGN (B) && SIGN (A) != SIGN (SUM)
1941 We mask with addrmask here to explicitly allow an address
1942 wrap-around. The Linux kernel relies on it, and it is
1943 the only way to write assembler code which can run when
1944 loaded at a location 0x80000000 away from the location at
1945 which it is linked. */
1946 if (((~(a ^ b)) & (a ^ sum)) & signmask & addrmask)
1947 flag = bfd_reloc_overflow;
1948 break;
1950 case complain_overflow_unsigned:
1951 /* Checking for an unsigned overflow is relatively easy:
1952 trim the addresses and add, and trim the result as well.
1953 Overflow is normally indicated when the result does not
1954 fit in the field. However, we also need to consider the
1955 case when, e.g., fieldmask is 0x7fffffff or smaller, an
1956 input is 0x80000000, and bfd_vma is only 32 bits; then we
1957 will get sum == 0, but there is an overflow, since the
1958 inputs did not fit in the field. Instead of doing a
1959 separate test, we can check for this by or-ing in the
1960 operands when testing for the sum overflowing its final
1961 field. */
1962 sum = (a + b) & addrmask;
1963 if ((a | b | sum) & signmask)
1964 flag = bfd_reloc_overflow;
1965 break;
1967 default:
1968 abort ();
1970 return flag;
1973 /* Perform a relocation as part of a final link. */
1974 static bfd_reloc_status_type
1975 elfNN_kvx_final_link_relocate (reloc_howto_type *howto,
1976 bfd *input_bfd,
1977 bfd *output_bfd,
1978 asection *input_section,
1979 bfd_byte *contents,
1980 Elf_Internal_Rela *rel,
1981 bfd_vma value,
1982 struct bfd_link_info *info,
1983 asection *sym_sec,
1984 struct elf_link_hash_entry *h,
1985 bool *unresolved_reloc_p,
1986 bool save_addend,
1987 bfd_vma *saved_addend,
1988 Elf_Internal_Sym *sym)
1990 Elf_Internal_Shdr *symtab_hdr;
1991 unsigned int r_type = howto->type;
1992 bfd_reloc_code_real_type bfd_r_type
1993 = elfNN_kvx_bfd_reloc_from_howto (howto);
1994 bfd_reloc_code_real_type new_bfd_r_type;
1995 unsigned long r_symndx;
1996 bfd_byte *hit_data = contents + rel->r_offset;
1997 bfd_vma place, off;
1998 bfd_vma addend;
1999 struct elf_kvx_link_hash_table *globals;
2000 bool weak_undef_p;
2001 asection *base_got;
2002 bfd_reloc_status_type rret = bfd_reloc_ok;
2003 bool resolved_to_zero;
2004 globals = elf_kvx_hash_table (info);
2006 symtab_hdr = &elf_symtab_hdr (input_bfd);
2008 BFD_ASSERT (is_kvx_elf (input_bfd));
2010 r_symndx = ELFNN_R_SYM (rel->r_info);
2012 /* It is possible to have linker relaxations on some TLS access
2013 models. Update our information here. */
2014 new_bfd_r_type = kvx_tls_transition (input_bfd, info, r_type, h, r_symndx);
2015 if (new_bfd_r_type != bfd_r_type)
2017 bfd_r_type = new_bfd_r_type;
2018 howto = elfNN_kvx_howto_from_bfd_reloc (bfd_r_type);
2019 BFD_ASSERT (howto != NULL);
2020 r_type = howto->type;
2023 place = input_section->output_section->vma
2024 + input_section->output_offset + rel->r_offset;
2026 /* Get addend, accumulating the addend for consecutive relocs
2027 which refer to the same offset. */
2028 addend = saved_addend ? *saved_addend : 0;
2029 addend += rel->r_addend;
2031 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
2032 : bfd_is_und_section (sym_sec));
2033 resolved_to_zero = (h != NULL
2034 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
2036 switch (bfd_r_type)
2038 case BFD_RELOC_KVX_NN:
2039 #if ARCH_SIZE == 64
2040 case BFD_RELOC_KVX_32:
2041 #endif
2042 case BFD_RELOC_KVX_S37_LO10:
2043 case BFD_RELOC_KVX_S37_UP27:
2045 case BFD_RELOC_KVX_S32_LO5:
2046 case BFD_RELOC_KVX_S32_UP27:
2048 case BFD_RELOC_KVX_S43_LO10:
2049 case BFD_RELOC_KVX_S43_UP27:
2050 case BFD_RELOC_KVX_S43_EX6:
2052 case BFD_RELOC_KVX_S64_LO10:
2053 case BFD_RELOC_KVX_S64_UP27:
2054 case BFD_RELOC_KVX_S64_EX27:
2055 /* When generating a shared library or PIE, these relocations
2056 are copied into the output file to be resolved at run time. */
2057 if (bfd_link_pic (info)
2058 && (input_section->flags & SEC_ALLOC)
2059 && (h == NULL
2060 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2061 && !resolved_to_zero)
2062 || h->root.type != bfd_link_hash_undefweak))
2064 Elf_Internal_Rela outrel;
2065 bfd_byte *loc;
2066 bool skip, relocate;
2067 asection *sreloc;
2069 *unresolved_reloc_p = false;
2071 skip = false;
2072 relocate = false;
2074 outrel.r_addend = addend;
2075 outrel.r_offset =
2076 _bfd_elf_section_offset (output_bfd, info, input_section,
2077 rel->r_offset);
2078 if (outrel.r_offset == (bfd_vma) - 1)
2079 skip = true;
2080 else if (outrel.r_offset == (bfd_vma) - 2)
2082 skip = true;
2083 relocate = true;
2086 outrel.r_offset += (input_section->output_section->vma
2087 + input_section->output_offset);
2089 if (skip)
2090 memset (&outrel, 0, sizeof outrel);
2091 else if (h != NULL
2092 && h->dynindx != -1
2093 && (!bfd_link_pic (info) || !info->symbolic
2094 || !h->def_regular))
2095 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
2096 else if (bfd_r_type == BFD_RELOC_KVX_32
2097 || bfd_r_type == BFD_RELOC_KVX_64)
2099 int symbol;
2101 /* On SVR4-ish systems, the dynamic loader cannot
2102 relocate the text and data segments independently,
2103 so the symbol does not matter. */
2104 symbol = 0;
2105 outrel.r_info = ELFNN_R_INFO (symbol, R_KVX_RELATIVE);
2106 outrel.r_addend += value;
2108 else if (bfd_link_pic (info) && info->symbolic)
2110 goto skip_because_pic;
2112 else
2114 /* We may endup here from bad input code trying to
2115 insert relocation on symbols within code. We do not
2116 want that currently, and such code should use GOT +
2117 KVX_32/64 reloc that translate in KVX_RELATIVE. */
2118 const char *name;
2119 if (h && h->root.root.string)
2120 name = h->root.root.string;
2121 else
2122 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2123 NULL);
2125 (*_bfd_error_handler)
2126 /* xgettext:c-format */
2127 (_("%pB(%pA+%#" PRIx64 "): "
2128 "unresolvable %s relocation in section `%s'"),
2129 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2130 name);
2131 return bfd_reloc_notsupported;
2134 sreloc = elf_section_data (input_section)->sreloc;
2135 if (sreloc == NULL || sreloc->contents == NULL)
2136 return bfd_reloc_notsupported;
2138 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
2139 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
2141 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
2143 /* Sanity to check that we have previously allocated
2144 sufficient space in the relocation section for the
2145 number of relocations we actually want to emit. */
2146 abort ();
2149 /* If this reloc is against an external symbol, we do not want to
2150 fiddle with the addend. Otherwise, we need to include the symbol
2151 value so that it becomes an addend for the dynamic reloc. */
2152 if (!relocate)
2153 return bfd_reloc_ok;
2155 rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2156 input_bfd, value + addend);
2157 if (rret != bfd_reloc_ok)
2158 return rret;
2160 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2161 contents, rel->r_offset, value,
2162 addend);
2165 skip_because_pic:
2166 rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2167 input_bfd, value + addend);
2168 if (rret != bfd_reloc_ok)
2169 return rret;
2171 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2172 contents, rel->r_offset, value,
2173 addend);
2174 break;
2176 case BFD_RELOC_KVX_PCREL17:
2177 case BFD_RELOC_KVX_PCREL27:
2179 /* BCU insn are always first in a bundle, so there is no need
2180 to correct the address using offset within bundle. */
2182 asection *splt = globals->root.splt;
2183 bool via_plt_p =
2184 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
2186 /* A call to an undefined weak symbol is converted to a jump to
2187 the next instruction unless a PLT entry will be created.
2188 The jump to the next instruction is optimized as a NOP.
2189 Do the same for local undefined symbols. */
2190 if (weak_undef_p && ! via_plt_p)
2192 bfd_putl32 (INSN_NOP, hit_data);
2193 return bfd_reloc_ok;
2196 /* If the call goes through a PLT entry, make sure to
2197 check distance to the right destination address. */
2198 if (via_plt_p)
2199 value = (splt->output_section->vma
2200 + splt->output_offset + h->plt.offset);
2202 /* Check if a stub has to be inserted because the destination
2203 is too far away. */
2204 struct elf_kvx_stub_hash_entry *stub_entry = NULL;
2206 /* If the target symbol is global and marked as a function the
2207 relocation applies a function call or a tail call. In this
2208 situation we can veneer out of range branches. The veneers
2209 use R16 and R17 hence cannot be used arbitrary out of range
2210 branches that occur within the body of a function. */
2212 /* Check if a stub has to be inserted because the destination
2213 is too far away. */
2214 if (! kvx_valid_call_p (value, place))
2216 /* The target is out of reach, so redirect the branch to
2217 the local stub for this function. */
2218 stub_entry = elfNN_kvx_get_stub_entry (input_section,
2219 sym_sec, h,
2220 rel, globals);
2221 if (stub_entry != NULL)
2222 value = (stub_entry->stub_offset
2223 + stub_entry->stub_sec->output_offset
2224 + stub_entry->stub_sec->output_section->vma);
2225 /* We have redirected the destination to stub entry address,
2226 so ignore any addend record in the original rela entry. */
2227 addend = 0;
2230 *unresolved_reloc_p = false;
2232 /* FALLTHROUGH */
2234 /* PCREL 32 are used in dwarf2 table for exception handling */
2235 case BFD_RELOC_KVX_32_PCREL:
2236 case BFD_RELOC_KVX_S64_PCREL_LO10:
2237 case BFD_RELOC_KVX_S64_PCREL_UP27:
2238 case BFD_RELOC_KVX_S64_PCREL_EX27:
2239 case BFD_RELOC_KVX_S37_PCREL_LO10:
2240 case BFD_RELOC_KVX_S37_PCREL_UP27:
2241 case BFD_RELOC_KVX_S43_PCREL_LO10:
2242 case BFD_RELOC_KVX_S43_PCREL_UP27:
2243 case BFD_RELOC_KVX_S43_PCREL_EX6:
2244 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2245 contents, rel->r_offset, value,
2246 addend);
2247 break;
2249 case BFD_RELOC_KVX_S37_TLS_LE_LO10:
2250 case BFD_RELOC_KVX_S37_TLS_LE_UP27:
2252 case BFD_RELOC_KVX_S43_TLS_LE_LO10:
2253 case BFD_RELOC_KVX_S43_TLS_LE_UP27:
2254 case BFD_RELOC_KVX_S43_TLS_LE_EX6:
2255 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2256 contents, rel->r_offset,
2257 value - tpoff_base (info), addend);
2258 break;
2260 case BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10:
2261 case BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27:
2263 case BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10:
2264 case BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27:
2265 case BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6:
2266 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2267 contents, rel->r_offset,
2268 value - dtpoff_base (info), addend);
2270 case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2271 case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2273 case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2274 case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2275 case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2277 case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2278 case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2280 case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2281 case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2282 case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2284 case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2285 case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2287 case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2288 case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2289 case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2291 if (globals->root.sgot == NULL)
2292 return bfd_reloc_notsupported;
2293 value = symbol_got_offset (input_bfd, h, r_symndx);
2295 _bfd_final_link_relocate (howto, input_bfd, input_section,
2296 contents, rel->r_offset, value, addend);
2297 *unresolved_reloc_p = false;
2298 break;
2300 case BFD_RELOC_KVX_S37_GOTADDR_UP27:
2301 case BFD_RELOC_KVX_S37_GOTADDR_LO10:
2303 case BFD_RELOC_KVX_S43_GOTADDR_UP27:
2304 case BFD_RELOC_KVX_S43_GOTADDR_EX6:
2305 case BFD_RELOC_KVX_S43_GOTADDR_LO10:
2307 case BFD_RELOC_KVX_S64_GOTADDR_UP27:
2308 case BFD_RELOC_KVX_S64_GOTADDR_EX27:
2309 case BFD_RELOC_KVX_S64_GOTADDR_LO10:
2311 if (globals->root.sgot == NULL)
2312 BFD_ASSERT (h != NULL);
2314 value = globals->root.sgot->output_section->vma
2315 + globals->root.sgot->output_offset;
2317 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2318 contents, rel->r_offset, value,
2319 addend);
2321 break;
2323 case BFD_RELOC_KVX_S37_GOTOFF_LO10:
2324 case BFD_RELOC_KVX_S37_GOTOFF_UP27:
2326 case BFD_RELOC_KVX_32_GOTOFF:
2327 case BFD_RELOC_KVX_64_GOTOFF:
2329 case BFD_RELOC_KVX_S43_GOTOFF_LO10:
2330 case BFD_RELOC_KVX_S43_GOTOFF_UP27:
2331 case BFD_RELOC_KVX_S43_GOTOFF_EX6:
2334 asection *basegot = globals->root.sgot;
2335 /* BFD_ASSERT(h == NULL); */
2336 BFD_ASSERT(globals->root.sgot != NULL);
2337 value -= basegot->output_section->vma + basegot->output_offset;
2338 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2339 contents, rel->r_offset, value,
2340 addend);
2342 break;
2344 case BFD_RELOC_KVX_S37_GOT_LO10:
2345 case BFD_RELOC_KVX_S37_GOT_UP27:
2347 case BFD_RELOC_KVX_32_GOT:
2348 case BFD_RELOC_KVX_64_GOT:
2350 case BFD_RELOC_KVX_S43_GOT_LO10:
2351 case BFD_RELOC_KVX_S43_GOT_UP27:
2352 case BFD_RELOC_KVX_S43_GOT_EX6:
2354 if (globals->root.sgot == NULL)
2355 BFD_ASSERT (h != NULL);
2357 if (h != NULL)
2359 value = kvx_calculate_got_entry_vma (h, globals, info, value,
2360 output_bfd,
2361 unresolved_reloc_p);
2362 #ifdef UGLY_DEBUG
2363 printf("GOT_LO/HI for %s, value %x\n", h->root.root.string, value);
2364 #endif
2366 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2367 contents, rel->r_offset, value,
2368 addend);
2370 else
2372 #ifdef UGLY_DEBUG
2373 printf("GOT_LO/HI with h NULL, initial value %x\n", value);
2374 #endif
2375 struct elf_kvx_local_symbol *locals = elf_kvx_locals (input_bfd);
2377 if (locals == NULL)
2379 int howto_index = bfd_r_type - BFD_RELOC_KVX_RELOC_START;
2380 _bfd_error_handler
2381 /* xgettext:c-format */
2382 (_("%pB: local symbol descriptor table be NULL when applying "
2383 "relocation %s against local symbol"),
2384 input_bfd, elf_kvx_howto_table[howto_index].name);
2385 abort ();
2388 off = symbol_got_offset (input_bfd, h, r_symndx);
2389 base_got = globals->root.sgot;
2390 bfd_vma got_entry_addr = (base_got->output_section->vma
2391 + base_got->output_offset + off);
2393 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2395 bfd_put_64 (output_bfd, value, base_got->contents + off);
2397 if (bfd_link_pic (info))
2399 asection *s;
2400 Elf_Internal_Rela outrel;
2402 /* For PIC executables and shared libraries we need
2403 to relocate the GOT entry at run time. */
2404 s = globals->root.srelgot;
2405 if (s == NULL)
2406 abort ();
2408 outrel.r_offset = got_entry_addr;
2409 outrel.r_info = ELFNN_R_INFO (0, R_KVX_RELATIVE);
2410 outrel.r_addend = value;
2411 elf_append_rela (output_bfd, s, &outrel);
2414 symbol_got_offset_mark (input_bfd, h, r_symndx);
2417 /* Update the relocation value to GOT entry addr as we have
2418 transformed the direct data access into an indirect data
2419 access through GOT. */
2420 value = got_entry_addr;
2422 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2423 contents, rel->r_offset, off, 0);
2425 break;
2427 default:
2428 return bfd_reloc_notsupported;
2431 if (saved_addend)
2432 *saved_addend = value;
2434 /* Only apply the final relocation in a sequence. */
2435 if (save_addend)
2436 return bfd_reloc_continue;
2438 return _bfd_kvx_elf_put_addend (input_bfd, hit_data, bfd_r_type,
2439 howto, value);
2444 /* Relocate a KVX ELF section. */
2446 static int
2447 elfNN_kvx_relocate_section (bfd *output_bfd,
2448 struct bfd_link_info *info,
2449 bfd *input_bfd,
2450 asection *input_section,
2451 bfd_byte *contents,
2452 Elf_Internal_Rela *relocs,
2453 Elf_Internal_Sym *local_syms,
2454 asection **local_sections)
2456 Elf_Internal_Shdr *symtab_hdr;
2457 struct elf_link_hash_entry **sym_hashes;
2458 Elf_Internal_Rela *rel;
2459 Elf_Internal_Rela *relend;
2460 const char *name;
2461 struct elf_kvx_link_hash_table *globals;
2462 bool save_addend = false;
2463 bfd_vma addend = 0;
2465 globals = elf_kvx_hash_table (info);
2467 symtab_hdr = &elf_symtab_hdr (input_bfd);
2468 sym_hashes = elf_sym_hashes (input_bfd);
2470 rel = relocs;
2471 relend = relocs + input_section->reloc_count;
2472 for (; rel < relend; rel++)
2474 unsigned int r_type;
2475 bfd_reloc_code_real_type bfd_r_type;
2476 reloc_howto_type *howto;
2477 unsigned long r_symndx;
2478 Elf_Internal_Sym *sym;
2479 asection *sec;
2480 struct elf_link_hash_entry *h;
2481 bfd_vma relocation;
2482 bfd_reloc_status_type r;
2483 arelent bfd_reloc;
2484 char sym_type;
2485 bool unresolved_reloc = false;
2486 char *error_message = NULL;
2488 r_symndx = ELFNN_R_SYM (rel->r_info);
2489 r_type = ELFNN_R_TYPE (rel->r_info);
2491 bfd_reloc.howto = elfNN_kvx_howto_from_type (input_bfd, r_type);
2492 howto = bfd_reloc.howto;
2494 if (howto == NULL)
2495 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2497 bfd_r_type = elfNN_kvx_bfd_reloc_from_howto (howto);
2499 h = NULL;
2500 sym = NULL;
2501 sec = NULL;
2503 if (r_symndx < symtab_hdr->sh_info) /* A local symbol. */
2505 sym = local_syms + r_symndx;
2506 sym_type = ELFNN_ST_TYPE (sym->st_info);
2507 sec = local_sections[r_symndx];
2509 /* An object file might have a reference to a local
2510 undefined symbol. This is a draft object file, but we
2511 should at least do something about it. */
2512 if (r_type != R_KVX_NONE
2513 && r_type != R_KVX_S37_GOTADDR_LO10
2514 && r_type != R_KVX_S37_GOTADDR_UP27
2515 && r_type != R_KVX_S64_GOTADDR_LO10
2516 && r_type != R_KVX_S64_GOTADDR_UP27
2517 && r_type != R_KVX_S64_GOTADDR_EX27
2518 && r_type != R_KVX_S43_GOTADDR_LO10
2519 && r_type != R_KVX_S43_GOTADDR_UP27
2520 && r_type != R_KVX_S43_GOTADDR_EX6
2521 && bfd_is_und_section (sec)
2522 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
2523 (*info->callbacks->undefined_symbol)
2524 (info, bfd_elf_string_from_elf_section
2525 (input_bfd, symtab_hdr->sh_link, sym->st_name),
2526 input_bfd, input_section, rel->r_offset, true);
2528 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2530 else
2532 bool warned, ignored;
2534 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2535 r_symndx, symtab_hdr, sym_hashes,
2536 h, sec, relocation,
2537 unresolved_reloc, warned, ignored);
2539 sym_type = h->type;
2542 if (sec != NULL && discarded_section (sec))
2543 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
2544 rel, 1, relend, howto, 0, contents);
2546 if (bfd_link_relocatable (info))
2547 continue;
2549 if (h != NULL)
2550 name = h->root.root.string;
2551 else
2553 name = (bfd_elf_string_from_elf_section
2554 (input_bfd, symtab_hdr->sh_link, sym->st_name));
2555 if (name == NULL || *name == '\0')
2556 name = bfd_section_name (sec);
2559 if (r_symndx != 0
2560 && r_type != R_KVX_NONE
2561 && (h == NULL
2562 || h->root.type == bfd_link_hash_defined
2563 || h->root.type == bfd_link_hash_defweak)
2564 && IS_KVX_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
2566 (*_bfd_error_handler)
2567 ((sym_type == STT_TLS
2568 /* xgettext:c-format */
2569 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
2570 /* xgettext:c-format */
2571 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
2572 input_bfd,
2573 input_section, (uint64_t) rel->r_offset, howto->name, name);
2576 /* Original aarch64 has relaxation handling for TLS here. */
2577 r = bfd_reloc_continue;
2579 /* There may be multiple consecutive relocations for the
2580 same offset. In that case we are supposed to treat the
2581 output of each relocation as the addend for the next. */
2582 if (rel + 1 < relend
2583 && rel->r_offset == rel[1].r_offset
2584 && ELFNN_R_TYPE (rel[1].r_info) != R_KVX_NONE)
2586 save_addend = true;
2587 else
2588 save_addend = false;
2590 if (r == bfd_reloc_continue)
2591 r = elfNN_kvx_final_link_relocate (howto, input_bfd, output_bfd,
2592 input_section, contents, rel,
2593 relocation, info, sec,
2594 h, &unresolved_reloc,
2595 save_addend, &addend, sym);
2597 switch (elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type))
2599 case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2600 case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2602 case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2603 case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2604 case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2606 case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2607 case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2609 case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2610 case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2611 case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2613 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2615 bool need_relocs = false;
2616 bfd_byte *loc;
2617 int indx;
2618 bfd_vma off;
2620 off = symbol_got_offset (input_bfd, h, r_symndx);
2621 indx = h && h->dynindx != -1 ? h->dynindx : 0;
2623 need_relocs =
2624 (bfd_link_pic (info) || indx != 0) &&
2625 (h == NULL
2626 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2627 || h->root.type != bfd_link_hash_undefweak);
2629 BFD_ASSERT (globals->root.srelgot != NULL);
2631 if (need_relocs)
2633 Elf_Internal_Rela rela;
2634 rela.r_info = ELFNN_R_INFO (indx, R_KVX_64_DTPMOD);
2635 rela.r_addend = 0;
2636 rela.r_offset = globals->root.sgot->output_section->vma +
2637 globals->root.sgot->output_offset + off;
2639 loc = globals->root.srelgot->contents;
2640 loc += globals->root.srelgot->reloc_count++
2641 * RELOC_SIZE (htab);
2642 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
2644 bfd_reloc_code_real_type real_type =
2645 elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type);
2647 if (real_type == BFD_RELOC_KVX_S37_TLS_LD_LO10
2648 || real_type == BFD_RELOC_KVX_S37_TLS_LD_UP27
2649 || real_type == BFD_RELOC_KVX_S43_TLS_LD_LO10
2650 || real_type == BFD_RELOC_KVX_S43_TLS_LD_UP27
2651 || real_type == BFD_RELOC_KVX_S43_TLS_LD_EX6)
2653 /* For local dynamic, don't generate DTPOFF in any case.
2654 Initialize the DTPOFF slot into zero, so we get module
2655 base address when invoke runtime TLS resolver. */
2656 bfd_put_NN (output_bfd, 0,
2657 globals->root.sgot->contents + off
2658 + GOT_ENTRY_SIZE);
2660 else if (indx == 0)
2662 bfd_put_NN (output_bfd,
2663 relocation - dtpoff_base (info),
2664 globals->root.sgot->contents + off
2665 + GOT_ENTRY_SIZE);
2667 else
2669 /* This TLS symbol is global. We emit a
2670 relocation to fixup the tls offset at load
2671 time. */
2672 rela.r_info =
2673 ELFNN_R_INFO (indx, R_KVX_64_DTPOFF);
2674 rela.r_addend = 0;
2675 rela.r_offset =
2676 (globals->root.sgot->output_section->vma
2677 + globals->root.sgot->output_offset + off
2678 + GOT_ENTRY_SIZE);
2680 loc = globals->root.srelgot->contents;
2681 loc += globals->root.srelgot->reloc_count++
2682 * RELOC_SIZE (globals);
2683 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
2684 bfd_put_NN (output_bfd, (bfd_vma) 0,
2685 globals->root.sgot->contents + off
2686 + GOT_ENTRY_SIZE);
2689 else
2691 bfd_put_NN (output_bfd, (bfd_vma) 1,
2692 globals->root.sgot->contents + off);
2693 bfd_put_NN (output_bfd,
2694 relocation - dtpoff_base (info),
2695 globals->root.sgot->contents + off
2696 + GOT_ENTRY_SIZE);
2699 symbol_got_offset_mark (input_bfd, h, r_symndx);
2701 break;
2703 case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2704 case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2706 case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2707 case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2708 case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2709 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2711 bool need_relocs = false;
2712 bfd_byte *loc;
2713 int indx;
2714 bfd_vma off;
2716 off = symbol_got_offset (input_bfd, h, r_symndx);
2718 indx = h && h->dynindx != -1 ? h->dynindx : 0;
2720 need_relocs =
2721 (bfd_link_pic (info) || indx != 0) &&
2722 (h == NULL
2723 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2724 || h->root.type != bfd_link_hash_undefweak);
2726 BFD_ASSERT (globals->root.srelgot != NULL);
2728 if (need_relocs)
2730 Elf_Internal_Rela rela;
2732 if (indx == 0)
2733 rela.r_addend = relocation - dtpoff_base (info);
2734 else
2735 rela.r_addend = 0;
2737 rela.r_info = ELFNN_R_INFO (indx, R_KVX_64_TPOFF);
2738 rela.r_offset = globals->root.sgot->output_section->vma +
2739 globals->root.sgot->output_offset + off;
2741 loc = globals->root.srelgot->contents;
2742 loc += globals->root.srelgot->reloc_count++
2743 * RELOC_SIZE (htab);
2745 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
2747 bfd_put_NN (output_bfd, rela.r_addend,
2748 globals->root.sgot->contents + off);
2750 else
2751 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
2752 globals->root.sgot->contents + off);
2754 symbol_got_offset_mark (input_bfd, h, r_symndx);
2756 break;
2758 default:
2759 break;
2762 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
2763 because such sections are not SEC_ALLOC and thus ld.so will
2764 not process them. */
2765 if (unresolved_reloc
2766 && !((input_section->flags & SEC_DEBUGGING) != 0
2767 && h->def_dynamic)
2768 && _bfd_elf_section_offset (output_bfd, info, input_section,
2769 +rel->r_offset) != (bfd_vma) - 1)
2771 (*_bfd_error_handler)
2772 /* xgettext:c-format */
2773 (_("%pB(%pA+%#" PRIx64 "): "
2774 "unresolvable %s relocation against symbol `%s'"),
2775 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2776 h->root.root.string);
2777 return false;
2780 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
2782 switch (r)
2784 case bfd_reloc_overflow:
2785 (*info->callbacks->reloc_overflow)
2786 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
2787 input_bfd, input_section, rel->r_offset);
2789 /* Original aarch64 code had a check for alignement correctness */
2790 break;
2792 case bfd_reloc_undefined:
2793 (*info->callbacks->undefined_symbol)
2794 (info, name, input_bfd, input_section, rel->r_offset, true);
2795 break;
2797 case bfd_reloc_outofrange:
2798 error_message = _("out of range");
2799 goto common_error;
2801 case bfd_reloc_notsupported:
2802 error_message = _("unsupported relocation");
2803 goto common_error;
2805 case bfd_reloc_dangerous:
2806 /* error_message should already be set. */
2807 goto common_error;
2809 default:
2810 error_message = _("unknown error");
2811 /* Fall through. */
2813 common_error:
2814 BFD_ASSERT (error_message != NULL);
2815 (*info->callbacks->reloc_dangerous)
2816 (info, error_message, input_bfd, input_section, rel->r_offset);
2817 break;
2821 if (!save_addend)
2822 addend = 0;
2825 return true;
2828 /* Set the right machine number. */
2830 static bool
2831 elfNN_kvx_object_p (bfd *abfd)
2833 /* must be coherent with default arch in cpu-kvx.c */
2834 int e_set = bfd_mach_kv3_1;
2836 if (elf_elfheader (abfd)->e_machine == EM_KVX)
2838 int e_core = elf_elfheader (abfd)->e_flags & ELF_KVX_CORE_MASK;
2839 switch(e_core)
2841 #if ARCH_SIZE == 64
2842 case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1_64; break;
2843 case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2_64; break;
2844 case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1_64; break;
2845 #else
2846 case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1; break;
2847 case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2; break;
2848 case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1; break;
2849 #endif
2850 default:
2851 (*_bfd_error_handler)(_("%s: Bad ELF id: `%d'"),
2852 abfd->filename, e_core);
2855 return bfd_default_set_arch_mach (abfd, bfd_arch_kvx, e_set);
2858 /* Function to keep KVX specific flags in the ELF header. */
2860 static bool
2861 elfNN_kvx_set_private_flags (bfd *abfd, flagword flags)
2863 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
2866 else
2868 elf_elfheader (abfd)->e_flags = flags;
2869 elf_flags_init (abfd) = true;
2872 return true;
2875 /* Merge backend specific data from an object file to the output
2876 object file when linking. */
2878 static bool
2879 elfNN_kvx_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
2881 bfd *obfd = info->output_bfd;
2882 flagword out_flags;
2883 flagword in_flags;
2884 bool flags_compatible = true;
2885 asection *sec;
2887 /* Check if we have the same endianess. */
2888 if (!_bfd_generic_verify_endian_match (ibfd, info))
2889 return false;
2891 if (!is_kvx_elf (ibfd) || !is_kvx_elf (obfd))
2892 return true;
2894 /* The input BFD must have had its flags initialised. */
2895 /* The following seems bogus to me -- The flags are initialized in
2896 the assembler but I don't think an elf_flags_init field is
2897 written into the object. */
2898 /* BFD_ASSERT (elf_flags_init (ibfd)); */
2900 if (bfd_get_arch_size (ibfd) != bfd_get_arch_size (obfd))
2902 const char *msg;
2904 if (bfd_get_arch_size (ibfd) == 32
2905 && bfd_get_arch_size (obfd) == 64)
2906 msg = _("%s: compiled as 32-bit object and %s is 64-bit");
2907 else if (bfd_get_arch_size (ibfd) == 64
2908 && bfd_get_arch_size (obfd) == 32)
2909 msg = _("%s: compiled as 64-bit object and %s is 32-bit");
2910 else
2911 msg = _("%s: object size does not match that of target %s");
2913 (*_bfd_error_handler) (msg, bfd_get_filename (ibfd),
2914 bfd_get_filename (obfd));
2915 bfd_set_error (bfd_error_wrong_format);
2916 return false;
2919 in_flags = elf_elfheader (ibfd)->e_flags;
2920 out_flags = elf_elfheader (obfd)->e_flags;
2922 if (!elf_flags_init (obfd))
2924 /* If the input is the default architecture and had the default
2925 flags then do not bother setting the flags for the output
2926 architecture, instead allow future merges to do this. If no
2927 future merges ever set these flags then they will retain their
2928 uninitialised values, which surprise surprise, correspond
2929 to the default values. */
2930 if (bfd_get_arch_info (ibfd)->the_default
2931 && elf_elfheader (ibfd)->e_flags == 0)
2932 return true;
2934 elf_flags_init (obfd) = true;
2935 elf_elfheader (obfd)->e_flags = in_flags;
2937 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
2938 && bfd_get_arch_info (obfd)->the_default)
2939 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
2940 bfd_get_mach (ibfd));
2942 return true;
2945 /* Identical flags must be compatible. */
2946 if (in_flags == out_flags)
2947 return true;
2949 /* Check to see if the input BFD actually contains any sections. If
2950 not, its flags may not have been initialised either, but it
2951 cannot actually cause any incompatiblity. Do not short-circuit
2952 dynamic objects; their section list may be emptied by
2953 elf_link_add_object_symbols.
2955 Also check to see if there are no code sections in the input.
2956 In this case there is no need to check for code specific flags.
2957 XXX - do we need to worry about floating-point format compatability
2958 in data sections ? */
2959 if (!(ibfd->flags & DYNAMIC))
2961 bool null_input_bfd = true;
2962 bool only_data_sections = true;
2964 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2966 if ((bfd_section_flags (sec)
2967 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
2968 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
2969 only_data_sections = false;
2971 null_input_bfd = false;
2972 break;
2975 if (null_input_bfd || only_data_sections)
2976 return true;
2978 return flags_compatible;
2981 /* Display the flags field. */
2983 static bool
2984 elfNN_kvx_print_private_bfd_data (bfd *abfd, void *ptr)
2986 FILE *file = (FILE *) ptr;
2987 unsigned long flags;
2989 BFD_ASSERT (abfd != NULL && ptr != NULL);
2991 /* Print normal ELF private data. */
2992 _bfd_elf_print_private_bfd_data (abfd, ptr);
2994 flags = elf_elfheader (abfd)->e_flags;
2995 /* Ignore init flag - it may not be set, despite the flags field
2996 containing valid data. */
2998 /* xgettext:c-format */
2999 fprintf (file, _("Private flags = 0x%lx : "), elf_elfheader (abfd)->e_flags);
3000 if((flags & ELF_KVX_ABI_64B_ADDR_BIT) == ELF_KVX_ABI_64B_ADDR_BIT)
3002 if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3003 fprintf (file, _("Coolidge (kv3) V1 64 bits"));
3004 else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3005 fprintf (file, _("Coolidge (kv3) V2 64 bits"));
3006 else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3007 fprintf (file, _("Coolidge (kv4) V1 64 bits"));
3009 else
3011 if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3012 fprintf (file, _("Coolidge (kv3) V1 32 bits"));
3013 else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3014 fprintf (file, _("Coolidge (kv3) V2 32 bits"));
3015 else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3016 fprintf (file, _("Coolidge (kv4) V1 32 bits"));
3019 fputc ('\n', file);
3021 return true;
3024 /* Adjust a symbol defined by a dynamic object and referenced by a
3025 regular object. The current definition is in some section of the
3026 dynamic object, but we're not including those sections. We have to
3027 change the definition to something the rest of the link can
3028 understand. */
3030 static bool
3031 elfNN_kvx_adjust_dynamic_symbol (struct bfd_link_info *info,
3032 struct elf_link_hash_entry *h)
3034 struct elf_kvx_link_hash_table *htab;
3035 asection *s;
3037 /* If this is a function, put it in the procedure linkage table. We
3038 will fill in the contents of the procedure linkage table later,
3039 when we know the address of the .got section. */
3040 if (h->type == STT_FUNC || h->needs_plt)
3042 if (h->plt.refcount <= 0
3043 || ((SYMBOL_CALLS_LOCAL (info, h)
3044 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3045 && h->root.type == bfd_link_hash_undefweak))))
3047 /* This case can occur if we saw a CALL26 reloc in
3048 an input file, but the symbol wasn't referred to
3049 by a dynamic object or all references were
3050 garbage collected. In which case we can end up
3051 resolving. */
3052 h->plt.offset = (bfd_vma) - 1;
3053 h->needs_plt = 0;
3056 return true;
3058 else
3059 /* Otherwise, reset to -1. */
3060 h->plt.offset = (bfd_vma) - 1;
3063 /* If this is a weak symbol, and there is a real definition, the
3064 processor independent code will have arranged for us to see the
3065 real definition first, and we can just use the same value. */
3066 if (h->is_weakalias)
3068 struct elf_link_hash_entry *def = weakdef (h);
3069 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
3070 h->root.u.def.section = def->root.u.def.section;
3071 h->root.u.def.value = def->root.u.def.value;
3072 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
3073 h->non_got_ref = def->non_got_ref;
3074 return true;
3077 /* If we are creating a shared library, we must presume that the
3078 only references to the symbol are via the global offset table.
3079 For such cases we need not do anything here; the relocations will
3080 be handled correctly by relocate_section. */
3081 if (bfd_link_pic (info))
3082 return true;
3084 /* If there are no references to this symbol that do not use the
3085 GOT, we don't need to generate a copy reloc. */
3086 if (!h->non_got_ref)
3087 return true;
3089 /* If -z nocopyreloc was given, we won't generate them either. */
3090 if (info->nocopyreloc)
3092 h->non_got_ref = 0;
3093 return true;
3096 /* We must allocate the symbol in our .dynbss section, which will
3097 become part of the .bss section of the executable. There will be
3098 an entry for this symbol in the .dynsym section. The dynamic
3099 object will contain position independent code, so all references
3100 from the dynamic object to this symbol will go through the global
3101 offset table. The dynamic linker will use the .dynsym entry to
3102 determine the address it must put in the global offset table, so
3103 both the dynamic object and the regular object will refer to the
3104 same memory location for the variable. */
3106 htab = elf_kvx_hash_table (info);
3108 /* We must generate a R_KVX_COPY reloc to tell the dynamic linker
3109 to copy the initial value out of the dynamic object and into the
3110 runtime process image. */
3111 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
3113 htab->srelbss->size += RELOC_SIZE (htab);
3114 h->needs_copy = 1;
3117 s = htab->sdynbss;
3119 return _bfd_elf_adjust_dynamic_copy (info, h, s);
3122 static bool
3123 elfNN_kvx_allocate_local_symbols (bfd *abfd, unsigned number)
3125 struct elf_kvx_local_symbol *locals;
3126 locals = elf_kvx_locals (abfd);
3127 if (locals == NULL)
3129 locals = (struct elf_kvx_local_symbol *)
3130 bfd_zalloc (abfd, number * sizeof (struct elf_kvx_local_symbol));
3131 if (locals == NULL)
3132 return false;
3133 elf_kvx_locals (abfd) = locals;
3135 return true;
3138 /* Create the .got section to hold the global offset table. */
3140 static bool
3141 kvx_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
3143 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
3144 flagword flags;
3145 asection *s;
3146 struct elf_link_hash_entry *h;
3147 struct elf_link_hash_table *htab = elf_hash_table (info);
3149 /* This function may be called more than once. */
3150 s = bfd_get_linker_section (abfd, ".got");
3151 if (s != NULL)
3152 return true;
3154 flags = bed->dynamic_sec_flags;
3156 s = bfd_make_section_anyway_with_flags (abfd,
3157 (bed->rela_plts_and_copies_p
3158 ? ".rela.got" : ".rel.got"),
3159 (bed->dynamic_sec_flags
3160 | SEC_READONLY));
3161 if (s == NULL
3162 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3164 return false;
3165 htab->srelgot = s;
3167 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
3168 if (s == NULL
3169 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3170 return false;
3171 htab->sgot = s;
3172 htab->sgot->size += GOT_ENTRY_SIZE;
3174 if (bed->want_got_sym)
3176 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
3177 (or .got.plt) section. We don't do this in the linker script
3178 because we don't want to define the symbol if we are not creating
3179 a global offset table. */
3180 h = _bfd_elf_define_linkage_sym (abfd, info, s,
3181 "_GLOBAL_OFFSET_TABLE_");
3182 elf_hash_table (info)->hgot = h;
3183 if (h == NULL)
3184 return false;
3187 if (bed->want_got_plt)
3189 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
3190 if (s == NULL
3191 || !bfd_set_section_alignment (s,
3192 bed->s->log_file_align))
3193 return false;
3194 htab->sgotplt = s;
3197 /* The first bit of the global offset table is the header. */
3198 s->size += bed->got_header_size;
3200 /* we still need to handle got content when doing static link with PIC */
3201 if (bfd_link_executable (info) && !bfd_link_pic (info)) {
3202 htab->dynobj = abfd;
3205 return true;
3208 /* Look through the relocs for a section during the first phase. */
3210 static bool
3211 elfNN_kvx_check_relocs (bfd *abfd, struct bfd_link_info *info,
3212 asection *sec, const Elf_Internal_Rela *relocs)
3214 Elf_Internal_Shdr *symtab_hdr;
3215 struct elf_link_hash_entry **sym_hashes;
3216 const Elf_Internal_Rela *rel;
3217 const Elf_Internal_Rela *rel_end;
3218 asection *sreloc;
3220 struct elf_kvx_link_hash_table *htab;
3222 if (bfd_link_relocatable (info))
3223 return true;
3225 BFD_ASSERT (is_kvx_elf (abfd));
3227 htab = elf_kvx_hash_table (info);
3228 sreloc = NULL;
3230 symtab_hdr = &elf_symtab_hdr (abfd);
3231 sym_hashes = elf_sym_hashes (abfd);
3233 rel_end = relocs + sec->reloc_count;
3234 for (rel = relocs; rel < rel_end; rel++)
3236 struct elf_link_hash_entry *h;
3237 unsigned int r_symndx;
3238 unsigned int r_type;
3239 bfd_reloc_code_real_type bfd_r_type;
3240 Elf_Internal_Sym *isym;
3242 r_symndx = ELFNN_R_SYM (rel->r_info);
3243 r_type = ELFNN_R_TYPE (rel->r_info);
3245 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
3247 /* xgettext:c-format */
3248 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
3249 return false;
3252 if (r_symndx < symtab_hdr->sh_info)
3254 /* A local symbol. */
3255 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3256 abfd, r_symndx);
3257 if (isym == NULL)
3258 return false;
3260 h = NULL;
3262 else
3264 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
3265 while (h->root.type == bfd_link_hash_indirect
3266 || h->root.type == bfd_link_hash_warning)
3267 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3270 /* Could be done earlier, if h were already available. */
3271 bfd_r_type = kvx_tls_transition (abfd, info, r_type, h, r_symndx);
3273 if (h != NULL)
3275 /* Create the ifunc sections for static executables. If we
3276 never see an indirect function symbol nor we are building
3277 a static executable, those sections will be empty and
3278 won't appear in output. */
3279 switch (bfd_r_type)
3281 default:
3282 break;
3285 /* It is referenced by a non-shared object. */
3286 h->ref_regular = 1;
3289 switch (bfd_r_type)
3292 case BFD_RELOC_KVX_S43_LO10:
3293 case BFD_RELOC_KVX_S43_UP27:
3294 case BFD_RELOC_KVX_S43_EX6:
3296 case BFD_RELOC_KVX_S37_LO10:
3297 case BFD_RELOC_KVX_S37_UP27:
3299 case BFD_RELOC_KVX_S64_LO10:
3300 case BFD_RELOC_KVX_S64_UP27:
3301 case BFD_RELOC_KVX_S64_EX27:
3303 case BFD_RELOC_KVX_32:
3304 case BFD_RELOC_KVX_64:
3306 /* We don't need to handle relocs into sections not going into
3307 the "real" output. */
3308 if ((sec->flags & SEC_ALLOC) == 0)
3309 break;
3311 if (h != NULL)
3313 if (!bfd_link_pic (info))
3314 h->non_got_ref = 1;
3316 h->plt.refcount += 1;
3317 h->pointer_equality_needed = 1;
3320 /* No need to do anything if we're not creating a shared
3321 object. */
3322 if (! bfd_link_pic (info))
3323 break;
3326 struct elf_dyn_relocs *p;
3327 struct elf_dyn_relocs **head;
3329 /* We must copy these reloc types into the output file.
3330 Create a reloc section in dynobj and make room for
3331 this reloc. */
3332 if (sreloc == NULL)
3334 if (htab->root.dynobj == NULL)
3335 htab->root.dynobj = abfd;
3337 sreloc = _bfd_elf_make_dynamic_reloc_section
3338 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true);
3340 if (sreloc == NULL)
3341 return false;
3344 /* If this is a global symbol, we count the number of
3345 relocations we need for this symbol. */
3346 if (h != NULL)
3348 head = &h->dyn_relocs;
3350 else
3352 /* Track dynamic relocs needed for local syms too.
3353 We really need local syms available to do this
3354 easily. Oh well. */
3356 asection *s;
3357 void **vpp;
3359 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3360 abfd, r_symndx);
3361 if (isym == NULL)
3362 return false;
3364 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3365 if (s == NULL)
3366 s = sec;
3368 /* Beware of type punned pointers vs strict aliasing
3369 rules. */
3370 vpp = &(elf_section_data (s)->local_dynrel);
3371 head = (struct elf_dyn_relocs **) vpp;
3374 p = *head;
3375 if (p == NULL || p->sec != sec)
3377 bfd_size_type amt = sizeof *p;
3378 p = ((struct elf_dyn_relocs *)
3379 bfd_zalloc (htab->root.dynobj, amt));
3380 if (p == NULL)
3381 return false;
3382 p->next = *head;
3383 *head = p;
3384 p->sec = sec;
3387 p->count += 1;
3390 break;
3392 case BFD_RELOC_KVX_S37_GOT_LO10:
3393 case BFD_RELOC_KVX_S37_GOT_UP27:
3395 case BFD_RELOC_KVX_S37_GOTOFF_LO10:
3396 case BFD_RELOC_KVX_S37_GOTOFF_UP27:
3398 case BFD_RELOC_KVX_S43_GOT_LO10:
3399 case BFD_RELOC_KVX_S43_GOT_UP27:
3400 case BFD_RELOC_KVX_S43_GOT_EX6:
3402 case BFD_RELOC_KVX_S43_GOTOFF_LO10:
3403 case BFD_RELOC_KVX_S43_GOTOFF_UP27:
3404 case BFD_RELOC_KVX_S43_GOTOFF_EX6:
3406 case BFD_RELOC_KVX_S37_TLS_GD_LO10:
3407 case BFD_RELOC_KVX_S37_TLS_GD_UP27:
3409 case BFD_RELOC_KVX_S43_TLS_GD_LO10:
3410 case BFD_RELOC_KVX_S43_TLS_GD_UP27:
3411 case BFD_RELOC_KVX_S43_TLS_GD_EX6:
3413 case BFD_RELOC_KVX_S37_TLS_IE_LO10:
3414 case BFD_RELOC_KVX_S37_TLS_IE_UP27:
3416 case BFD_RELOC_KVX_S43_TLS_IE_LO10:
3417 case BFD_RELOC_KVX_S43_TLS_IE_UP27:
3418 case BFD_RELOC_KVX_S43_TLS_IE_EX6:
3420 case BFD_RELOC_KVX_S37_TLS_LD_LO10:
3421 case BFD_RELOC_KVX_S37_TLS_LD_UP27:
3423 case BFD_RELOC_KVX_S43_TLS_LD_LO10:
3424 case BFD_RELOC_KVX_S43_TLS_LD_UP27:
3425 case BFD_RELOC_KVX_S43_TLS_LD_EX6:
3427 unsigned got_type;
3428 unsigned old_got_type;
3430 got_type = kvx_reloc_got_type (bfd_r_type);
3432 if (h)
3434 h->got.refcount += 1;
3435 old_got_type = elf_kvx_hash_entry (h)->got_type;
3437 else
3439 struct elf_kvx_local_symbol *locals;
3441 if (!elfNN_kvx_allocate_local_symbols
3442 (abfd, symtab_hdr->sh_info))
3443 return false;
3445 locals = elf_kvx_locals (abfd);
3446 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
3447 locals[r_symndx].got_refcount += 1;
3448 old_got_type = locals[r_symndx].got_type;
3451 /* We will already have issued an error message if there
3452 is a TLS/non-TLS mismatch, based on the symbol type.
3453 So just combine any TLS types needed. */
3454 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
3455 && got_type != GOT_NORMAL)
3456 got_type |= old_got_type;
3458 /* If the symbol is accessed by both IE and GD methods, we
3459 are able to relax. Turn off the GD flag, without
3460 messing up with any other kind of TLS types that may be
3461 involved. */
3462 /* Disabled untested and unused TLS */
3463 /* if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type)) */
3464 /* got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD); */
3466 if (old_got_type != got_type)
3468 if (h != NULL)
3469 elf_kvx_hash_entry (h)->got_type = got_type;
3470 else
3472 struct elf_kvx_local_symbol *locals;
3473 locals = elf_kvx_locals (abfd);
3474 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
3475 locals[r_symndx].got_type = got_type;
3479 if (htab->root.dynobj == NULL)
3480 htab->root.dynobj = abfd;
3481 if (! kvx_elf_create_got_section (htab->root.dynobj, info))
3482 return false;
3483 break;
3486 case BFD_RELOC_KVX_S64_GOTADDR_LO10:
3487 case BFD_RELOC_KVX_S64_GOTADDR_UP27:
3488 case BFD_RELOC_KVX_S64_GOTADDR_EX27:
3490 case BFD_RELOC_KVX_S43_GOTADDR_LO10:
3491 case BFD_RELOC_KVX_S43_GOTADDR_UP27:
3492 case BFD_RELOC_KVX_S43_GOTADDR_EX6:
3494 case BFD_RELOC_KVX_S37_GOTADDR_LO10:
3495 case BFD_RELOC_KVX_S37_GOTADDR_UP27:
3497 if (htab->root.dynobj == NULL)
3498 htab->root.dynobj = abfd;
3499 if (! kvx_elf_create_got_section (htab->root.dynobj, info))
3500 return false;
3501 break;
3503 case BFD_RELOC_KVX_PCREL27:
3504 case BFD_RELOC_KVX_PCREL17:
3505 /* If this is a local symbol then we resolve it
3506 directly without creating a PLT entry. */
3507 if (h == NULL)
3508 continue;
3510 h->needs_plt = 1;
3511 if (h->plt.refcount <= 0)
3512 h->plt.refcount = 1;
3513 else
3514 h->plt.refcount += 1;
3515 break;
3517 default:
3518 break;
3522 return true;
3525 static bool
3526 elfNN_kvx_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
3528 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
3530 if (!_bfd_elf_init_file_header (abfd, link_info))
3531 return false;
3533 i_ehdrp = elf_elfheader (abfd);
3534 i_ehdrp->e_ident[EI_ABIVERSION] = KVX_ELF_ABI_VERSION;
3535 return true;
3538 static enum elf_reloc_type_class
3539 elfNN_kvx_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
3540 const asection *rel_sec ATTRIBUTE_UNUSED,
3541 const Elf_Internal_Rela *rela)
3543 switch ((int) ELFNN_R_TYPE (rela->r_info))
3545 case R_KVX_RELATIVE:
3546 return reloc_class_relative;
3547 case R_KVX_JMP_SLOT:
3548 return reloc_class_plt;
3549 case R_KVX_COPY:
3550 return reloc_class_copy;
3551 default:
3552 return reloc_class_normal;
3556 /* A structure used to record a list of sections, independently
3557 of the next and prev fields in the asection structure. */
3558 typedef struct section_list
3560 asection *sec;
3561 struct section_list *next;
3562 struct section_list *prev;
3564 section_list;
3566 typedef struct
3568 void *finfo;
3569 struct bfd_link_info *info;
3570 asection *sec;
3571 int sec_shndx;
3572 int (*func) (void *, const char *, Elf_Internal_Sym *,
3573 asection *, struct elf_link_hash_entry *);
3574 } output_arch_syminfo;
3576 /* Output a single local symbol for a generated stub. */
3578 static bool
3579 elfNN_kvx_output_stub_sym (output_arch_syminfo *osi, const char *name,
3580 bfd_vma offset, bfd_vma size)
3582 Elf_Internal_Sym sym;
3584 sym.st_value = (osi->sec->output_section->vma
3585 + osi->sec->output_offset + offset);
3586 sym.st_size = size;
3587 sym.st_other = 0;
3588 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
3589 sym.st_shndx = osi->sec_shndx;
3590 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
3593 static bool
3594 kvx_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
3596 struct elf_kvx_stub_hash_entry *stub_entry;
3597 asection *stub_sec;
3598 bfd_vma addr;
3599 char *stub_name;
3600 output_arch_syminfo *osi;
3602 /* Massage our args to the form they really have. */
3603 stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
3604 osi = (output_arch_syminfo *) in_arg;
3606 stub_sec = stub_entry->stub_sec;
3608 /* Ensure this stub is attached to the current section being
3609 processed. */
3610 if (stub_sec != osi->sec)
3611 return true;
3613 addr = (bfd_vma) stub_entry->stub_offset;
3615 stub_name = stub_entry->output_name;
3617 switch (stub_entry->stub_type)
3619 case kvx_stub_long_branch:
3620 if (!elfNN_kvx_output_stub_sym
3621 (osi, stub_name, addr, sizeof (elfNN_kvx_long_branch_stub)))
3622 return false;
3623 break;
3625 default:
3626 abort ();
3629 return true;
3632 /* Output mapping symbols for linker generated sections. */
3634 static bool
3635 elfNN_kvx_output_arch_local_syms (bfd *output_bfd,
3636 struct bfd_link_info *info,
3637 void *finfo,
3638 int (*func) (void *, const char *,
3639 Elf_Internal_Sym *,
3640 asection *,
3641 struct elf_link_hash_entry *))
3643 output_arch_syminfo osi;
3644 struct elf_kvx_link_hash_table *htab;
3646 htab = elf_kvx_hash_table (info);
3648 osi.finfo = finfo;
3649 osi.info = info;
3650 osi.func = func;
3652 /* Long calls stubs. */
3653 if (htab->stub_bfd && htab->stub_bfd->sections)
3655 asection *stub_sec;
3657 for (stub_sec = htab->stub_bfd->sections;
3658 stub_sec != NULL; stub_sec = stub_sec->next)
3660 /* Ignore non-stub sections. */
3661 if (!strstr (stub_sec->name, STUB_SUFFIX))
3662 continue;
3664 osi.sec = stub_sec;
3666 osi.sec_shndx = _bfd_elf_section_from_bfd_section
3667 (output_bfd, osi.sec->output_section);
3669 bfd_hash_traverse (&htab->stub_hash_table, kvx_map_one_stub,
3670 &osi);
3674 /* Finally, output mapping symbols for the PLT. */
3675 if (!htab->root.splt || htab->root.splt->size == 0)
3676 return true;
3678 osi.sec_shndx = _bfd_elf_section_from_bfd_section
3679 (output_bfd, htab->root.splt->output_section);
3680 osi.sec = htab->root.splt;
3682 return true;
3686 /* Allocate target specific section data. */
3688 static bool
3689 elfNN_kvx_new_section_hook (bfd *abfd, asection *sec)
3691 if (!sec->used_by_bfd)
3693 _kvx_elf_section_data *sdata;
3694 bfd_size_type amt = sizeof (*sdata);
3696 sdata = bfd_zalloc (abfd, amt);
3697 if (sdata == NULL)
3698 return false;
3699 sec->used_by_bfd = sdata;
3702 return _bfd_elf_new_section_hook (abfd, sec);
3705 /* Create dynamic sections. This is different from the ARM backend in that
3706 the got, plt, gotplt and their relocation sections are all created in the
3707 standard part of the bfd elf backend. */
3709 static bool
3710 elfNN_kvx_create_dynamic_sections (bfd *dynobj,
3711 struct bfd_link_info *info)
3713 struct elf_kvx_link_hash_table *htab;
3715 /* We need to create .got section. */
3716 if (!kvx_elf_create_got_section (dynobj, info))
3717 return false;
3719 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3720 return false;
3722 htab = elf_kvx_hash_table (info);
3723 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3724 if (!bfd_link_pic (info))
3725 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
3727 if (!htab->sdynbss || (!bfd_link_pic (info) && !htab->srelbss))
3728 abort ();
3730 return true;
3734 /* Allocate space in .plt, .got and associated reloc sections for
3735 dynamic relocs. */
3737 static bool
3738 elfNN_kvx_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
3740 struct bfd_link_info *info;
3741 struct elf_kvx_link_hash_table *htab;
3742 struct elf_dyn_relocs *p;
3744 /* An example of a bfd_link_hash_indirect symbol is versioned
3745 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
3746 -> __gxx_personality_v0(bfd_link_hash_defined)
3748 There is no need to process bfd_link_hash_indirect symbols here
3749 because we will also be presented with the concrete instance of
3750 the symbol and elfNN_kvx_copy_indirect_symbol () will have been
3751 called to copy all relevant data from the generic to the concrete
3752 symbol instance. */
3753 if (h->root.type == bfd_link_hash_indirect)
3754 return true;
3756 if (h->root.type == bfd_link_hash_warning)
3757 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3759 info = (struct bfd_link_info *) inf;
3760 htab = elf_kvx_hash_table (info);
3762 if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
3764 /* Make sure this symbol is output as a dynamic symbol.
3765 Undefined weak syms won't yet be marked as dynamic. */
3766 if (h->dynindx == -1 && !h->forced_local)
3768 if (!bfd_elf_link_record_dynamic_symbol (info, h))
3769 return false;
3772 if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
3774 asection *s = htab->root.splt;
3776 /* If this is the first .plt entry, make room for the special
3777 first entry. */
3778 if (s->size == 0)
3779 s->size += htab->plt_header_size;
3781 h->plt.offset = s->size;
3783 /* If this symbol is not defined in a regular file, and we are
3784 not generating a shared library, then set the symbol to this
3785 location in the .plt. This is required to make function
3786 pointers compare as equal between the normal executable and
3787 the shared library. */
3788 if (!bfd_link_pic (info) && !h->def_regular)
3790 h->root.u.def.section = s;
3791 h->root.u.def.value = h->plt.offset;
3794 /* Make room for this entry. For now we only create the
3795 small model PLT entries. We later need to find a way
3796 of relaxing into these from the large model PLT entries. */
3797 s->size += PLT_SMALL_ENTRY_SIZE;
3799 /* We also need to make an entry in the .got.plt section, which
3800 will be placed in the .got section by the linker script. */
3801 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
3803 /* We also need to make an entry in the .rela.plt section. */
3804 htab->root.srelplt->size += RELOC_SIZE (htab);
3806 /* We need to ensure that all GOT entries that serve the PLT
3807 are consecutive with the special GOT slots [0] [1] and
3808 [2]. Any addtional relocations must be placed after the
3809 PLT related entries. We abuse the reloc_count such that
3810 during sizing we adjust reloc_count to indicate the
3811 number of PLT related reserved entries. In subsequent
3812 phases when filling in the contents of the reloc entries,
3813 PLT related entries are placed by computing their PLT
3814 index (0 .. reloc_count). While other none PLT relocs are
3815 placed at the slot indicated by reloc_count and
3816 reloc_count is updated. */
3818 htab->root.srelplt->reloc_count++;
3820 else
3822 h->plt.offset = (bfd_vma) - 1;
3823 h->needs_plt = 0;
3826 else
3828 h->plt.offset = (bfd_vma) - 1;
3829 h->needs_plt = 0;
3832 if (h->got.refcount > 0)
3834 bool dyn;
3835 unsigned got_type = elf_kvx_hash_entry (h)->got_type;
3837 h->got.offset = (bfd_vma) - 1;
3839 dyn = htab->root.dynamic_sections_created;
3841 /* Make sure this symbol is output as a dynamic symbol.
3842 Undefined weak syms won't yet be marked as dynamic. */
3843 if (dyn && h->dynindx == -1 && !h->forced_local)
3845 if (!bfd_elf_link_record_dynamic_symbol (info, h))
3846 return false;
3849 if (got_type == GOT_UNKNOWN)
3851 (*_bfd_error_handler)
3852 (_("relocation against `%s' has faulty GOT type "),
3853 (h) ? h->root.root.string : "a local symbol");
3854 bfd_set_error (bfd_error_bad_value);
3855 return false;
3857 else if (got_type == GOT_NORMAL)
3859 h->got.offset = htab->root.sgot->size;
3860 htab->root.sgot->size += GOT_ENTRY_SIZE;
3861 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3862 || h->root.type != bfd_link_hash_undefweak)
3863 && (bfd_link_pic (info)
3864 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3866 htab->root.srelgot->size += RELOC_SIZE (htab);
3869 else
3871 int indx;
3873 /* Any of these will require 2 GOT slots because
3874 * they use __tls_get_addr() */
3875 if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
3877 h->got.offset = htab->root.sgot->size;
3878 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
3881 if (got_type & GOT_TLS_IE)
3883 h->got.offset = htab->root.sgot->size;
3884 htab->root.sgot->size += GOT_ENTRY_SIZE;
3887 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3888 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3889 || h->root.type != bfd_link_hash_undefweak)
3890 && (bfd_link_pic (info)
3891 || indx != 0
3892 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3894 /* Only the GD case requires 2 relocations. */
3895 if (got_type & GOT_TLS_GD)
3896 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
3898 /* LD needs a DTPMOD reloc, IE needs a DTPOFF. */
3899 if (got_type & (GOT_TLS_LD | GOT_TLS_IE))
3900 htab->root.srelgot->size += RELOC_SIZE (htab);
3904 else
3906 h->got.offset = (bfd_vma) - 1;
3909 if (h->dyn_relocs == NULL)
3910 return true;
3912 /* In the shared -Bsymbolic case, discard space allocated for
3913 dynamic pc-relative relocs against symbols which turn out to be
3914 defined in regular objects. For the normal shared case, discard
3915 space for pc-relative relocs that have become local due to symbol
3916 visibility changes. */
3918 if (bfd_link_pic (info))
3920 /* Relocs that use pc_count are those that appear on a call
3921 insn, or certain REL relocs that can generated via assembly.
3922 We want calls to protected symbols to resolve directly to the
3923 function rather than going via the plt. If people want
3924 function pointer comparisons to work as expected then they
3925 should avoid writing weird assembly. */
3926 if (SYMBOL_CALLS_LOCAL (info, h))
3928 struct elf_dyn_relocs **pp;
3930 for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
3932 p->count -= p->pc_count;
3933 p->pc_count = 0;
3934 if (p->count == 0)
3935 *pp = p->next;
3936 else
3937 pp = &p->next;
3941 /* Also discard relocs on undefined weak syms with non-default
3942 visibility. */
3943 if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
3945 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3946 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
3947 h->dyn_relocs = NULL;
3949 /* Make sure undefined weak symbols are output as a dynamic
3950 symbol in PIEs. */
3951 else if (h->dynindx == -1
3952 && !h->forced_local
3953 && !bfd_elf_link_record_dynamic_symbol (info, h))
3954 return false;
3958 else if (ELIMINATE_COPY_RELOCS)
3960 /* For the non-shared case, discard space for relocs against
3961 symbols which turn out to need copy relocs or are not
3962 dynamic. */
3964 if (!h->non_got_ref
3965 && ((h->def_dynamic
3966 && !h->def_regular)
3967 || (htab->root.dynamic_sections_created
3968 && (h->root.type == bfd_link_hash_undefweak
3969 || h->root.type == bfd_link_hash_undefined))))
3971 /* Make sure this symbol is output as a dynamic symbol.
3972 Undefined weak syms won't yet be marked as dynamic. */
3973 if (h->dynindx == -1
3974 && !h->forced_local
3975 && !bfd_elf_link_record_dynamic_symbol (info, h))
3976 return false;
3978 /* If that succeeded, we know we'll be keeping all the
3979 relocs. */
3980 if (h->dynindx != -1)
3981 goto keep;
3984 h->dyn_relocs = NULL;
3986 keep:;
3989 /* Finally, allocate space. */
3990 for (p = h->dyn_relocs; p != NULL; p = p->next)
3992 asection *sreloc;
3994 sreloc = elf_section_data (p->sec)->sreloc;
3996 BFD_ASSERT (sreloc != NULL);
3998 sreloc->size += p->count * RELOC_SIZE (htab);
4001 return true;
4004 /* Find any dynamic relocs that apply to read-only sections. */
4006 static bool
4007 kvx_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
4009 struct elf_dyn_relocs * p;
4011 for (p = h->dyn_relocs; p != NULL; p = p->next)
4013 asection *s = p->sec;
4015 if (s != NULL && (s->flags & SEC_READONLY) != 0)
4017 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4019 info->flags |= DF_TEXTREL;
4020 info->callbacks->minfo (_("%pB: dynamic relocation against `%pT' in "
4021 "read-only section `%pA'\n"),
4022 s->owner, h->root.root.string, s);
4024 /* Not an error, just cut short the traversal. */
4025 return false;
4028 return true;
4031 /* This is the most important function of all . Innocuosly named
4032 though ! */
4033 static bool
4034 elfNN_kvx_late_size_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
4035 struct bfd_link_info *info)
4037 struct elf_kvx_link_hash_table *htab;
4038 bfd *dynobj;
4039 asection *s;
4040 bool relocs;
4041 bfd *ibfd;
4043 htab = elf_kvx_hash_table ((info));
4044 dynobj = htab->root.dynobj;
4045 if (dynobj == NULL)
4046 return true;
4048 if (htab->root.dynamic_sections_created)
4050 if (bfd_link_executable (info) && !info->nointerp)
4052 s = bfd_get_linker_section (dynobj, ".interp");
4053 if (s == NULL)
4054 abort ();
4055 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
4056 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
4060 /* Set up .got offsets for local syms, and space for local dynamic
4061 relocs. */
4062 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4064 struct elf_kvx_local_symbol *locals = NULL;
4065 Elf_Internal_Shdr *symtab_hdr;
4066 asection *srel;
4067 unsigned int i;
4069 if (!is_kvx_elf (ibfd))
4070 continue;
4072 for (s = ibfd->sections; s != NULL; s = s->next)
4074 struct elf_dyn_relocs *p;
4076 for (p = (struct elf_dyn_relocs *)
4077 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
4079 if (!bfd_is_abs_section (p->sec)
4080 && bfd_is_abs_section (p->sec->output_section))
4082 /* Input section has been discarded, either because
4083 it is a copy of a linkonce section or due to
4084 linker script /DISCARD/, so we'll be discarding
4085 the relocs too. */
4087 else if (p->count != 0)
4089 srel = elf_section_data (p->sec)->sreloc;
4090 srel->size += p->count * RELOC_SIZE (htab);
4091 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
4092 info->flags |= DF_TEXTREL;
4097 locals = elf_kvx_locals (ibfd);
4098 if (!locals)
4099 continue;
4101 symtab_hdr = &elf_symtab_hdr (ibfd);
4102 srel = htab->root.srelgot;
4103 for (i = 0; i < symtab_hdr->sh_info; i++)
4105 locals[i].got_offset = (bfd_vma) - 1;
4106 if (locals[i].got_refcount > 0)
4108 unsigned got_type = locals[i].got_type;
4109 if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
4111 locals[i].got_offset = htab->root.sgot->size;
4112 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
4115 if (got_type & (GOT_NORMAL | GOT_TLS_IE ))
4117 locals[i].got_offset = htab->root.sgot->size;
4118 htab->root.sgot->size += GOT_ENTRY_SIZE;
4121 if (got_type == GOT_UNKNOWN)
4125 if (bfd_link_pic (info))
4127 if (got_type & GOT_TLS_GD)
4128 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
4130 if (got_type & GOT_TLS_IE
4131 || got_type & GOT_TLS_LD
4132 || got_type & GOT_NORMAL)
4133 htab->root.srelgot->size += RELOC_SIZE (htab);
4136 else
4138 locals[i].got_refcount = (bfd_vma) - 1;
4144 /* Allocate global sym .plt and .got entries, and space for global
4145 sym dynamic relocs. */
4146 elf_link_hash_traverse (&htab->root, elfNN_kvx_allocate_dynrelocs,
4147 info);
4149 /* For every jump slot reserved in the sgotplt, reloc_count is
4150 incremented. However, when we reserve space for TLS descriptors,
4151 it's not incremented, so in order to compute the space reserved
4152 for them, it suffices to multiply the reloc count by the jump
4153 slot size. */
4155 if (htab->root.srelplt)
4156 htab->sgotplt_jump_table_size = kvx_compute_jump_table_size (htab);
4158 /* We now have determined the sizes of the various dynamic sections.
4159 Allocate memory for them. */
4160 relocs = false;
4161 for (s = dynobj->sections; s != NULL; s = s->next)
4163 if ((s->flags & SEC_LINKER_CREATED) == 0)
4164 continue;
4166 if (s == htab->root.splt
4167 || s == htab->root.sgot
4168 || s == htab->root.sgotplt
4169 || s == htab->root.iplt
4170 || s == htab->root.igotplt || s == htab->sdynbss)
4172 /* Strip this section if we don't need it; see the
4173 comment below. */
4175 else if (startswith (bfd_section_name (s), ".rela"))
4177 if (s->size != 0 && s != htab->root.srelplt)
4178 relocs = true;
4180 /* We use the reloc_count field as a counter if we need
4181 to copy relocs into the output file. */
4182 if (s != htab->root.srelplt)
4183 s->reloc_count = 0;
4185 else
4187 /* It's not one of our sections, so don't allocate space. */
4188 continue;
4191 if (s->size == 0)
4193 /* If we don't need this section, strip it from the
4194 output file. This is mostly to handle .rela.bss and
4195 .rela.plt. We must create both sections in
4196 create_dynamic_sections, because they must be created
4197 before the linker maps input sections to output
4198 sections. The linker does that before
4199 adjust_dynamic_symbol is called, and it is that
4200 function which decides whether anything needs to go
4201 into these sections. */
4203 s->flags |= SEC_EXCLUDE;
4204 continue;
4207 if ((s->flags & SEC_HAS_CONTENTS) == 0)
4208 continue;
4210 /* Allocate memory for the section contents. We use bfd_zalloc
4211 here in case unused entries are not reclaimed before the
4212 section's contents are written out. This should not happen,
4213 but this way if it does, we get a R_KVX_NONE reloc instead
4214 of garbage. */
4215 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
4216 if (s->contents == NULL)
4217 return false;
4220 if (htab->root.dynamic_sections_created)
4222 /* Add some entries to the .dynamic section. We fill in the
4223 values later, in elfNN_kvx_finish_dynamic_sections, but we
4224 must add the entries now so that we get the correct size for
4225 the .dynamic section. The DT_DEBUG entry is filled in by the
4226 dynamic linker and used by the debugger. */
4227 #define add_dynamic_entry(TAG, VAL) \
4228 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
4230 if (bfd_link_executable (info))
4232 if (!add_dynamic_entry (DT_DEBUG, 0))
4233 return false;
4236 if (htab->root.splt->size != 0)
4238 if (!add_dynamic_entry (DT_PLTGOT, 0)
4239 || !add_dynamic_entry (DT_PLTRELSZ, 0)
4240 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
4241 || !add_dynamic_entry (DT_JMPREL, 0))
4242 return false;
4245 if (relocs)
4247 if (!add_dynamic_entry (DT_RELA, 0)
4248 || !add_dynamic_entry (DT_RELASZ, 0)
4249 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
4250 return false;
4252 /* If any dynamic relocs apply to a read-only section,
4253 then we need a DT_TEXTREL entry. */
4254 if ((info->flags & DF_TEXTREL) == 0)
4255 elf_link_hash_traverse (&htab->root, kvx_readonly_dynrelocs,
4256 info);
4258 if ((info->flags & DF_TEXTREL) != 0)
4260 if (!add_dynamic_entry (DT_TEXTREL, 0))
4261 return false;
4265 #undef add_dynamic_entry
4267 return true;
4270 static inline void
4271 elf_kvx_update_plt_entry (bfd *output_bfd,
4272 bfd_reloc_code_real_type r_type,
4273 bfd_byte *plt_entry, bfd_vma value)
4275 reloc_howto_type *howto = elfNN_kvx_howto_from_bfd_reloc (r_type);
4276 BFD_ASSERT(howto != NULL);
4277 _bfd_kvx_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
4280 static void
4281 elfNN_kvx_create_small_pltn_entry (struct elf_link_hash_entry *h,
4282 struct elf_kvx_link_hash_table *htab,
4283 bfd *output_bfd)
4285 bfd_byte *plt_entry;
4286 bfd_vma plt_index;
4287 bfd_vma got_offset;
4288 bfd_vma gotplt_entry_address;
4289 bfd_vma plt_entry_address;
4290 Elf_Internal_Rela rela;
4291 bfd_byte *loc;
4292 asection *plt, *gotplt, *relplt;
4294 plt = htab->root.splt;
4295 gotplt = htab->root.sgotplt;
4296 relplt = htab->root.srelplt;
4298 /* Get the index in the procedure linkage table which
4299 corresponds to this symbol. This is the index of this symbol
4300 in all the symbols for which we are making plt entries. The
4301 first entry in the procedure linkage table is reserved.
4303 Get the offset into the .got table of the entry that
4304 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4305 bytes. The first three are reserved for the dynamic linker.
4307 For static executables, we don't reserve anything. */
4309 if (plt == htab->root.splt)
4311 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
4312 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
4314 else
4316 plt_index = h->plt.offset / htab->plt_entry_size;
4317 got_offset = plt_index * GOT_ENTRY_SIZE;
4320 plt_entry = plt->contents + h->plt.offset;
4321 plt_entry_address = plt->output_section->vma
4322 + plt->output_offset + h->plt.offset;
4323 gotplt_entry_address = gotplt->output_section->vma +
4324 gotplt->output_offset + got_offset;
4326 /* Copy in the boiler-plate for the PLTn entry. */
4327 memcpy (plt_entry, elfNN_kvx_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
4329 /* Patch the loading of the GOT entry, relative to the PLT entry
4330 address. */
4332 /* Use 37bits offset for both 32 and 64bits mode.
4333 Fill the LO10 of of lw $r9 = 0[$r14]. */
4334 elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_LO10,
4335 plt_entry+4,
4336 gotplt_entry_address - plt_entry_address);
4338 /* Fill the UP27 of of lw $r9 = 0[$r14]. */
4339 elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_UP27,
4340 plt_entry+8,
4341 gotplt_entry_address - plt_entry_address);
4343 rela.r_offset = gotplt_entry_address;
4345 /* Fill in the entry in the .rela.plt section. */
4346 rela.r_info = ELFNN_R_INFO (h->dynindx, R_KVX_JMP_SLOT);
4347 rela.r_addend = 0;
4349 /* Compute the relocation entry to used based on PLT index and do
4350 not adjust reloc_count. The reloc_count has already been adjusted
4351 to account for this entry. */
4352 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
4353 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4356 /* Size sections even though they're not dynamic. We use it to setup
4357 _TLS_MODULE_BASE_, if needed. */
4359 static bool
4360 elfNN_kvx_early_size_sections (bfd *output_bfd, struct bfd_link_info *info)
4362 asection *tls_sec;
4364 if (bfd_link_relocatable (info))
4365 return true;
4367 tls_sec = elf_hash_table (info)->tls_sec;
4369 if (tls_sec)
4371 struct elf_link_hash_entry *tlsbase;
4373 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
4374 "_TLS_MODULE_BASE_", true, true, false);
4376 if (tlsbase)
4378 struct bfd_link_hash_entry *h = NULL;
4379 const struct elf_backend_data *bed =
4380 get_elf_backend_data (output_bfd);
4382 if (!(_bfd_generic_link_add_one_symbol
4383 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
4384 tls_sec, 0, NULL, false, bed->collect, &h)))
4385 return false;
4387 tlsbase->type = STT_TLS;
4388 tlsbase = (struct elf_link_hash_entry *) h;
4389 tlsbase->def_regular = 1;
4390 tlsbase->other = STV_HIDDEN;
4391 (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
4395 return true;
4398 /* Finish up dynamic symbol handling. We set the contents of various
4399 dynamic sections here. */
4400 static bool
4401 elfNN_kvx_finish_dynamic_symbol (bfd *output_bfd,
4402 struct bfd_link_info *info,
4403 struct elf_link_hash_entry *h,
4404 Elf_Internal_Sym *sym)
4406 struct elf_kvx_link_hash_table *htab;
4407 htab = elf_kvx_hash_table (info);
4409 if (h->plt.offset != (bfd_vma) - 1)
4411 asection *plt = NULL, *gotplt = NULL, *relplt = NULL;
4413 /* This symbol has an entry in the procedure linkage table. Set
4414 it up. */
4416 if (htab->root.splt != NULL)
4418 plt = htab->root.splt;
4419 gotplt = htab->root.sgotplt;
4420 relplt = htab->root.srelplt;
4423 /* This symbol has an entry in the procedure linkage table. Set
4424 it up. */
4425 if ((h->dynindx == -1
4426 && !((h->forced_local || bfd_link_executable (info))
4427 && h->def_regular
4428 && h->type == STT_GNU_IFUNC))
4429 || plt == NULL
4430 || gotplt == NULL
4431 || relplt == NULL)
4432 abort ();
4434 elfNN_kvx_create_small_pltn_entry (h, htab, output_bfd);
4435 if (!h->def_regular)
4437 /* Mark the symbol as undefined, rather than as defined in
4438 the .plt section. */
4439 sym->st_shndx = SHN_UNDEF;
4440 /* If the symbol is weak we need to clear the value.
4441 Otherwise, the PLT entry would provide a definition for
4442 the symbol even if the symbol wasn't defined anywhere,
4443 and so the symbol would never be NULL. Leave the value if
4444 there were any relocations where pointer equality matters
4445 (this is a clue for the dynamic linker, to make function
4446 pointer comparisons work between an application and shared
4447 library). */
4448 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
4449 sym->st_value = 0;
4453 if (h->got.offset != (bfd_vma) - 1
4454 && elf_kvx_hash_entry (h)->got_type == GOT_NORMAL)
4456 Elf_Internal_Rela rela;
4457 bfd_byte *loc;
4459 /* This symbol has an entry in the global offset table. Set it
4460 up. */
4461 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
4462 abort ();
4464 rela.r_offset = (htab->root.sgot->output_section->vma
4465 + htab->root.sgot->output_offset
4466 + (h->got.offset & ~(bfd_vma) 1));
4468 #ifdef UGLY_DEBUG
4469 printf("setting rela at offset 0x%x(0x%x + 0x%x + 0x%x) for %s\n",
4470 rela.r_offset,
4471 htab->root.sgot->output_section->vma,
4472 htab->root.sgot->output_offset,
4473 h->got.offset,
4474 h->root.root.string);
4475 #endif
4477 if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
4479 if (!h->def_regular)
4480 return false;
4482 /* in case of PLT related GOT entry, it is not clear who is
4483 supposed to set the LSB of GOT entry...
4484 kvx_calculate_got_entry_vma() would be a good candidate,
4485 but it is not called currently
4486 So we are commenting it ATM. */
4487 // BFD_ASSERT ((h->got.offset & 1) != 0);
4488 rela.r_info = ELFNN_R_INFO (0, R_KVX_RELATIVE);
4489 rela.r_addend = (h->root.u.def.value
4490 + h->root.u.def.section->output_section->vma
4491 + h->root.u.def.section->output_offset);
4493 else
4495 BFD_ASSERT ((h->got.offset & 1) == 0);
4496 bfd_put_NN (output_bfd, (bfd_vma) 0,
4497 htab->root.sgot->contents + h->got.offset);
4498 rela.r_info = ELFNN_R_INFO (h->dynindx, R_KVX_GLOB_DAT);
4499 rela.r_addend = 0;
4502 loc = htab->root.srelgot->contents;
4503 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
4504 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4507 if (h->needs_copy)
4509 Elf_Internal_Rela rela;
4510 bfd_byte *loc;
4512 /* This symbol needs a copy reloc. Set it up. */
4514 if (h->dynindx == -1
4515 || (h->root.type != bfd_link_hash_defined
4516 && h->root.type != bfd_link_hash_defweak)
4517 || htab->srelbss == NULL)
4518 abort ();
4520 rela.r_offset = (h->root.u.def.value
4521 + h->root.u.def.section->output_section->vma
4522 + h->root.u.def.section->output_offset);
4523 rela.r_info = ELFNN_R_INFO (h->dynindx, R_KVX_COPY);
4524 rela.r_addend = 0;
4525 loc = htab->srelbss->contents;
4526 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
4527 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4530 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
4531 be NULL for local symbols. */
4532 if (sym != NULL
4533 && (h == elf_hash_table (info)->hdynamic
4534 || h == elf_hash_table (info)->hgot))
4535 sym->st_shndx = SHN_ABS;
4537 return true;
4540 static void
4541 elfNN_kvx_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
4542 struct elf_kvx_link_hash_table *htab)
4544 memcpy (htab->root.splt->contents, elfNN_kvx_small_plt0_entry,
4545 PLT_ENTRY_SIZE);
4546 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
4547 PLT_ENTRY_SIZE;
4550 static bool
4551 elfNN_kvx_finish_dynamic_sections (bfd *output_bfd,
4552 struct bfd_link_info *info)
4554 struct elf_kvx_link_hash_table *htab;
4555 bfd *dynobj;
4556 asection *sdyn;
4558 htab = elf_kvx_hash_table (info);
4559 dynobj = htab->root.dynobj;
4560 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
4562 if (htab->root.dynamic_sections_created)
4564 ElfNN_External_Dyn *dyncon, *dynconend;
4566 if (sdyn == NULL || htab->root.sgot == NULL)
4567 abort ();
4569 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
4570 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
4571 for (; dyncon < dynconend; dyncon++)
4573 Elf_Internal_Dyn dyn;
4574 asection *s;
4576 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
4578 switch (dyn.d_tag)
4580 default:
4581 continue;
4583 case DT_PLTGOT:
4584 s = htab->root.sgotplt;
4585 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4586 break;
4588 case DT_JMPREL:
4589 s = htab->root.srelplt;
4590 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4591 break;
4593 case DT_PLTRELSZ:
4594 s = htab->root.srelplt;
4595 dyn.d_un.d_val = s->size;
4596 break;
4598 case DT_RELASZ:
4599 /* The procedure linkage table relocs (DT_JMPREL) should
4600 not be included in the overall relocs (DT_RELA).
4601 Therefore, we override the DT_RELASZ entry here to
4602 make it not include the JMPREL relocs. Since the
4603 linker script arranges for .rela.plt to follow all
4604 other relocation sections, we don't have to worry
4605 about changing the DT_RELA entry. */
4606 if (htab->root.srelplt != NULL)
4608 s = htab->root.srelplt;
4609 dyn.d_un.d_val -= s->size;
4611 break;
4614 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
4619 /* Fill in the special first entry in the procedure linkage table. */
4620 if (htab->root.splt && htab->root.splt->size > 0)
4622 elfNN_kvx_init_small_plt0_entry (output_bfd, htab);
4624 elf_section_data (htab->root.splt->output_section)->
4625 this_hdr.sh_entsize = htab->plt_entry_size;
4628 if (htab->root.sgotplt)
4630 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
4632 (*_bfd_error_handler)
4633 (_("discarded output section: `%pA'"), htab->root.sgotplt);
4634 return false;
4637 /* Fill in the first three entries in the global offset table. */
4638 if (htab->root.sgotplt->size > 0)
4640 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
4642 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
4643 bfd_put_NN (output_bfd,
4644 (bfd_vma) 0,
4645 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
4646 bfd_put_NN (output_bfd,
4647 (bfd_vma) 0,
4648 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
4651 if (htab->root.sgot)
4653 if (htab->root.sgot->size > 0)
4655 bfd_vma addr =
4656 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
4657 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
4661 elf_section_data (htab->root.sgotplt->output_section)->
4662 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
4665 if (htab->root.sgot && htab->root.sgot->size > 0)
4666 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
4667 = GOT_ENTRY_SIZE;
4669 return true;
4672 /* Return address for Ith PLT stub in section PLT, for relocation REL
4673 or (bfd_vma) -1 if it should not be included. */
4675 static bfd_vma
4676 elfNN_kvx_plt_sym_val (bfd_vma i, const asection *plt,
4677 const arelent *rel ATTRIBUTE_UNUSED)
4679 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
4682 #define ELF_ARCH bfd_arch_kvx
4683 #define ELF_MACHINE_CODE EM_KVX
4684 #define ELF_MAXPAGESIZE 0x10000
4685 #define ELF_MINPAGESIZE 0x1000
4686 #define ELF_COMMONPAGESIZE 0x1000
4688 #define bfd_elfNN_bfd_link_hash_table_create \
4689 elfNN_kvx_link_hash_table_create
4691 #define bfd_elfNN_bfd_merge_private_bfd_data \
4692 elfNN_kvx_merge_private_bfd_data
4694 #define bfd_elfNN_bfd_print_private_bfd_data \
4695 elfNN_kvx_print_private_bfd_data
4697 #define bfd_elfNN_bfd_reloc_type_lookup \
4698 elfNN_kvx_reloc_type_lookup
4700 #define bfd_elfNN_bfd_reloc_name_lookup \
4701 elfNN_kvx_reloc_name_lookup
4703 #define bfd_elfNN_bfd_set_private_flags \
4704 elfNN_kvx_set_private_flags
4706 #define bfd_elfNN_mkobject \
4707 elfNN_kvx_mkobject
4709 #define bfd_elfNN_new_section_hook \
4710 elfNN_kvx_new_section_hook
4712 #define elf_backend_adjust_dynamic_symbol \
4713 elfNN_kvx_adjust_dynamic_symbol
4715 #define elf_backend_early_size_sections \
4716 elfNN_kvx_early_size_sections
4718 #define elf_backend_check_relocs \
4719 elfNN_kvx_check_relocs
4721 #define elf_backend_copy_indirect_symbol \
4722 elfNN_kvx_copy_indirect_symbol
4724 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
4725 to them in our hash. */
4726 #define elf_backend_create_dynamic_sections \
4727 elfNN_kvx_create_dynamic_sections
4729 #define elf_backend_init_index_section \
4730 _bfd_elf_init_2_index_sections
4732 #define elf_backend_finish_dynamic_sections \
4733 elfNN_kvx_finish_dynamic_sections
4735 #define elf_backend_finish_dynamic_symbol \
4736 elfNN_kvx_finish_dynamic_symbol
4738 #define elf_backend_object_p \
4739 elfNN_kvx_object_p
4741 #define elf_backend_output_arch_local_syms \
4742 elfNN_kvx_output_arch_local_syms
4744 #define elf_backend_plt_sym_val \
4745 elfNN_kvx_plt_sym_val
4747 #define elf_backend_init_file_header \
4748 elfNN_kvx_init_file_header
4750 #define elf_backend_init_process_headers \
4751 elfNN_kvx_init_process_headers
4753 #define elf_backend_relocate_section \
4754 elfNN_kvx_relocate_section
4756 #define elf_backend_reloc_type_class \
4757 elfNN_kvx_reloc_type_class
4759 #define elf_backend_late_size_sections \
4760 elfNN_kvx_late_size_sections
4762 #define elf_backend_can_refcount 1
4763 #define elf_backend_can_gc_sections 1
4764 #define elf_backend_plt_readonly 1
4765 #define elf_backend_want_got_plt 1
4766 #define elf_backend_want_plt_sym 0
4767 #define elf_backend_may_use_rel_p 0
4768 #define elf_backend_may_use_rela_p 1
4769 #define elf_backend_default_use_rela_p 1
4770 #define elf_backend_rela_normal 1
4771 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
4772 #define elf_backend_default_execstack 0
4773 #define elf_backend_extern_protected_data 1
4774 #define elf_backend_hash_symbol elf_kvx_hash_symbol
4776 #include "elfNN-target.h"