/* Inline functions for dynamic linking.
   Copyright (C) 1995-2005, 2006, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
/* This macro is used as a callback from elf_machine_rel{a,} when a
   static TLS reloc is about to be performed.  Since (in dl-load.c) we
   permit dynamic loading of objects that might use such relocs, we
   have to check whether each use is actually doable.  If the object
   whose TLS segment the reference resolves to was allocated space in
   the static TLS block at startup, then it's ok.  Otherwise, we make
   an attempt to allocate it in surplus space on the fly.  If that
   can't be done, we fall back to the error that DF_STATIC_TLS is
   intended to produce.  */
/* Ensure SYM_MAP has a static TLS offset, allocating one on the fly if
   necessary; _dl_allocate_static_tls diagnoses failure.  Wrapped in
   do/while so the macro is a single statement and the `if' cannot
   capture a following `else'.  */
#define CHECK_STATIC_TLS(map, sym_map)					      \
    do {								      \
      if (__builtin_expect ((sym_map)->l_tls_offset == NO_TLS_OFFSET	      \
			    || ((sym_map)->l_tls_offset			      \
				== FORCED_DYNAMIC_TLS_OFFSET), 0))	      \
	_dl_allocate_static_tls (sym_map);				      \
    } while (0)
/* Expression form of the check above: nonzero if SYM_MAP already has (or
   can be given on the fly) a static TLS offset, zero if the object was
   forced to dynamic TLS or the on-the-fly allocation fails.  */
#define TRY_STATIC_TLS(map, sym_map)					      \
  (__builtin_expect ((sym_map)->l_tls_offset				      \
		     != FORCED_DYNAMIC_TLS_OFFSET, 1)			      \
   && (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1)	      \
       || _dl_try_allocate_static_tls (sym_map) == 0))
43 int internal_function
_dl_try_allocate_static_tls (struct link_map
*map
);
/* We pass reloc_addr as a pointer to void, as opposed to a pointer to
   ElfW(Addr), because not all architectures can assume that the
   relocated address is properly aligned, whereas the compiler is
   entitled to assume that a pointer to a type is properly aligned for
   the type.  Even if we cast the pointer back to some other type with
   less strict alignment requirements, the compiler might still
   remember that the pointer was originally more aligned, thereby
   optimizing away alignment tests or using word instructions for
   copying memory, breaking the very code written to handle the
   unaligned case.  */
59 # if ! ELF_MACHINE_NO_REL
60 auto inline void __attribute__((always_inline
))
61 elf_machine_rel (struct link_map
*map
, const ElfW(Rel
) *reloc
,
62 const ElfW(Sym
) *sym
, const struct r_found_version
*version
,
63 void *const reloc_addr
);
64 auto inline void __attribute__((always_inline
))
65 elf_machine_rel_relative (ElfW(Addr
) l_addr
, const ElfW(Rel
) *reloc
,
66 void *const reloc_addr
);
68 # if ! ELF_MACHINE_NO_RELA
69 auto inline void __attribute__((always_inline
))
70 elf_machine_rela (struct link_map
*map
, const ElfW(Rela
) *reloc
,
71 const ElfW(Sym
) *sym
, const struct r_found_version
*version
,
72 void *const reloc_addr
);
73 auto inline void __attribute__((always_inline
))
74 elf_machine_rela_relative (ElfW(Addr
) l_addr
, const ElfW(Rela
) *reloc
,
75 void *const reloc_addr
);
77 # if ELF_MACHINE_NO_RELA || defined ELF_MACHINE_PLT_REL
78 auto inline void __attribute__((always_inline
))
79 elf_machine_lazy_rel (struct link_map
*map
,
80 ElfW(Addr
) l_addr
, const ElfW(Rel
) *reloc
);
82 auto inline void __attribute__((always_inline
))
83 elf_machine_lazy_rel (struct link_map
*map
,
84 ElfW(Addr
) l_addr
, const ElfW(Rela
) *reloc
);
88 #include <dl-machine.h>
/* l_info index for a version tag.  Guarded: the `# define' spelling shows
   this nests under a conditional, and <dl-machine.h> (included above) may
   supply its own definition — an unconditional define would redefine it.  */
#ifndef VERSYMIDX
# define VERSYMIDX(sym)	(DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGIDX (sym))
#endif
95 /* Read the dynamic section at DYN and fill in INFO with indices DT_*. */
101 inline void __attribute__ ((unused
, always_inline
))
102 elf_get_dynamic_info (struct link_map
*l
, ElfW(Dyn
) *temp
)
104 ElfW(Dyn
) *dyn
= l
->l_ld
;
107 #ifndef RTLD_BOOTSTRAP
114 while (dyn
->d_tag
!= DT_NULL
)
116 if (dyn
->d_tag
< DT_NUM
)
117 info
[dyn
->d_tag
] = dyn
;
118 else if (dyn
->d_tag
>= DT_LOPROC
&&
119 dyn
->d_tag
< DT_LOPROC
+ DT_THISPROCNUM
)
120 info
[dyn
->d_tag
- DT_LOPROC
+ DT_NUM
] = dyn
;
121 else if ((Elf32_Word
) DT_VERSIONTAGIDX (dyn
->d_tag
) < DT_VERSIONTAGNUM
)
122 info
[VERSYMIDX (dyn
->d_tag
)] = dyn
;
123 else if ((Elf32_Word
) DT_EXTRATAGIDX (dyn
->d_tag
) < DT_EXTRANUM
)
124 info
[DT_EXTRATAGIDX (dyn
->d_tag
) + DT_NUM
+ DT_THISPROCNUM
125 + DT_VERSIONTAGNUM
] = dyn
;
126 else if ((Elf32_Word
) DT_VALTAGIDX (dyn
->d_tag
) < DT_VALNUM
)
127 info
[DT_VALTAGIDX (dyn
->d_tag
) + DT_NUM
+ DT_THISPROCNUM
128 + DT_VERSIONTAGNUM
+ DT_EXTRANUM
] = dyn
;
129 else if ((Elf32_Word
) DT_ADDRTAGIDX (dyn
->d_tag
) < DT_ADDRNUM
)
130 info
[DT_ADDRTAGIDX (dyn
->d_tag
) + DT_NUM
+ DT_THISPROCNUM
131 + DT_VERSIONTAGNUM
+ DT_EXTRANUM
+ DT_VALNUM
] = dyn
;
135 #define DL_RO_DYN_TEMP_CNT 8
137 #ifndef DL_RO_DYN_SECTION
138 /* Don't adjust .dynamic unnecessarily. */
141 ElfW(Addr
) l_addr
= l
->l_addr
;
144 # define ADJUST_DYN_INFO(tag) \
146 if (info[tag] != NULL) \
150 temp[cnt].d_tag = info[tag]->d_tag; \
151 temp[cnt].d_un.d_ptr = info[tag]->d_un.d_ptr + l_addr; \
152 info[tag] = temp + cnt++; \
155 info[tag]->d_un.d_ptr += l_addr; \
159 ADJUST_DYN_INFO (DT_HASH
);
160 ADJUST_DYN_INFO (DT_PLTGOT
);
161 ADJUST_DYN_INFO (DT_STRTAB
);
162 ADJUST_DYN_INFO (DT_SYMTAB
);
163 # if ! ELF_MACHINE_NO_RELA
164 ADJUST_DYN_INFO (DT_RELA
);
166 # if ! ELF_MACHINE_NO_REL
167 ADJUST_DYN_INFO (DT_REL
);
169 ADJUST_DYN_INFO (DT_JMPREL
);
170 ADJUST_DYN_INFO (VERSYMIDX (DT_VERSYM
));
171 ADJUST_DYN_INFO (DT_ADDRTAGIDX (DT_GNU_HASH
) + DT_NUM
+ DT_THISPROCNUM
172 + DT_VERSIONTAGNUM
+ DT_EXTRANUM
+ DT_VALNUM
);
173 # undef ADJUST_DYN_INFO
174 assert (cnt
<= DL_RO_DYN_TEMP_CNT
);
177 if (info
[DT_PLTREL
] != NULL
)
179 #if ELF_MACHINE_NO_RELA
180 assert (info
[DT_PLTREL
]->d_un
.d_val
== DT_REL
);
181 #elif ELF_MACHINE_NO_REL
182 assert (info
[DT_PLTREL
]->d_un
.d_val
== DT_RELA
);
184 assert (info
[DT_PLTREL
]->d_un
.d_val
== DT_REL
185 || info
[DT_PLTREL
]->d_un
.d_val
== DT_RELA
);
188 #if ! ELF_MACHINE_NO_RELA
189 if (info
[DT_RELA
] != NULL
)
190 assert (info
[DT_RELAENT
]->d_un
.d_val
== sizeof (ElfW(Rela
)));
192 # if ! ELF_MACHINE_NO_REL
193 if (info
[DT_REL
] != NULL
)
194 assert (info
[DT_RELENT
]->d_un
.d_val
== sizeof (ElfW(Rel
)));
196 #ifdef RTLD_BOOTSTRAP
197 /* Only the bind now flags are allowed. */
198 assert (info
[VERSYMIDX (DT_FLAGS_1
)] == NULL
199 || info
[VERSYMIDX (DT_FLAGS_1
)]->d_un
.d_val
== DF_1_NOW
);
200 assert (info
[DT_FLAGS
] == NULL
201 || info
[DT_FLAGS
]->d_un
.d_val
== DF_BIND_NOW
);
202 /* Flags must not be set for ld.so. */
203 assert (info
[DT_RUNPATH
] == NULL
);
204 assert (info
[DT_RPATH
] == NULL
);
206 if (info
[DT_FLAGS
] != NULL
)
208 /* Flags are used. Translate to the old form where available.
209 Since these l_info entries are only tested for NULL pointers it
210 is ok if they point to the DT_FLAGS entry. */
211 l
->l_flags
= info
[DT_FLAGS
]->d_un
.d_val
;
213 if (l
->l_flags
& DF_SYMBOLIC
)
214 info
[DT_SYMBOLIC
] = info
[DT_FLAGS
];
215 if (l
->l_flags
& DF_TEXTREL
)
216 info
[DT_TEXTREL
] = info
[DT_FLAGS
];
217 if (l
->l_flags
& DF_BIND_NOW
)
218 info
[DT_BIND_NOW
] = info
[DT_FLAGS
];
220 if (info
[VERSYMIDX (DT_FLAGS_1
)] != NULL
)
222 l
->l_flags_1
= info
[VERSYMIDX (DT_FLAGS_1
)]->d_un
.d_val
;
224 if (l
->l_flags_1
& DF_1_NOW
)
225 info
[DT_BIND_NOW
] = info
[VERSYMIDX (DT_FLAGS_1
)];
227 if (info
[DT_RUNPATH
] != NULL
)
228 /* If both RUNPATH and RPATH are given, the latter is ignored. */
229 info
[DT_RPATH
] = NULL
;
/* Nonzero while the dynamic linker relocates itself at startup.  The two
   alternative definitions must be separated by #else: unconditional they
   redefine each other.  */
# ifdef RTLD_BOOTSTRAP
#  define ELF_DURING_STARTUP (1)
# else
#  define ELF_DURING_STARTUP (0)
# endif
/* Get the definitions of `elf_dynamic_do_rel' and `elf_dynamic_do_rela'.
   These functions are almost identical, so we use cpp magic to avoid
   duplicating their code.  It cannot be done in a more general function
   because we must be able to completely inline.  */

/* On some machines, notably SPARC, DT_REL* includes DT_JMPREL in its
   range.  Note that according to the ELF spec, this is completely legal!
   But conditionally define things so that on machines we know this will
   not happen we do something more optimal.  */
# ifdef ELF_MACHINE_PLTREL_OVERLAP
/* The PLT relocations lie inside the DT_REL/DT_RELA range, so the range
   is split into three pieces and only the PLT piece may be lazy.  The
   do/while wrapper, braces and the #else separating the two variants
   were missing and are restored here.  */
#  define _ELF_DYNAMIC_DO_RELOC(RELOC, reloc, map, do_lazy, test_rel)	      \
  do									      \
    {									      \
      struct { ElfW(Addr) start, size; int lazy; } ranges[3];		      \
      int ranges_index;							      \
									      \
      ranges[0].lazy = ranges[2].lazy = 0;				      \
      ranges[1].lazy = 1;						      \
      ranges[0].size = ranges[1].size = ranges[2].size = 0;		      \
									      \
      if ((map)->l_info[DT_##RELOC])					      \
	{								      \
	  ranges[0].start = D_PTR ((map), l_info[DT_##RELOC]);		      \
	  ranges[0].size = (map)->l_info[DT_##RELOC##SZ]->d_un.d_val;	      \
	}								      \
									      \
      if ((do_lazy)							      \
	  && (map)->l_info[DT_PLTREL]					      \
	  && (!test_rel || (map)->l_info[DT_PLTREL]->d_un.d_val == DT_##RELOC)) \
	{								      \
	  ranges[1].start = D_PTR ((map), l_info[DT_JMPREL]);		      \
	  ranges[1].size = (map)->l_info[DT_PLTRELSZ]->d_un.d_val;	      \
	  ranges[2].start = ranges[1].start + ranges[1].size;		      \
	  ranges[2].size = ranges[0].start + ranges[0].size - ranges[2].start; \
	  ranges[0].size = ranges[1].start - ranges[0].start;		      \
	}								      \
									      \
      for (ranges_index = 0; ranges_index < 3; ++ranges_index)		      \
	elf_dynamic_do_##reloc ((map),					      \
				ranges[ranges_index].start,		      \
				ranges[ranges_index].size,		      \
				ranges[ranges_index].lazy);		      \
    } while (0)
# else
/* Common case: at most two ranges; during startup, or when the PLT
   relocations directly follow the non-PLT ones and no laziness is
   wanted, both are processed as a single combined range.  */
#  define _ELF_DYNAMIC_DO_RELOC(RELOC, reloc, map, do_lazy, test_rel)	      \
  do									      \
    {									      \
      struct { ElfW(Addr) start, size; int lazy; } ranges[2];		      \
      ranges[0].lazy = 0;						      \
      ranges[0].size = ranges[1].size = 0;				      \
      ranges[0].start = 0;						      \
									      \
      if ((map)->l_info[DT_##RELOC])					      \
	{								      \
	  ranges[0].start = D_PTR ((map), l_info[DT_##RELOC]);		      \
	  ranges[0].size = (map)->l_info[DT_##RELOC##SZ]->d_un.d_val;	      \
	}								      \
      if ((map)->l_info[DT_PLTREL]					      \
	  && (!test_rel || (map)->l_info[DT_PLTREL]->d_un.d_val == DT_##RELOC)) \
	{								      \
	  ElfW(Addr) start = D_PTR ((map), l_info[DT_JMPREL]);		      \
									      \
	  if (! ELF_DURING_STARTUP					      \
	      && ((do_lazy)						      \
		  /* This test does not only detect whether the relocation \
		     sections are in the right order, it also checks whether \
		     there is a DT_REL/DT_RELA section.  */		      \
		  || ranges[0].start + ranges[0].size != start))	      \
	    {								      \
	      ranges[1].start = start;					      \
	      ranges[1].size = (map)->l_info[DT_PLTRELSZ]->d_un.d_val;	      \
	      ranges[1].lazy = (do_lazy);				      \
	    }								      \
	  else								      \
	    {								      \
	      /* Combine processing the sections.  */			      \
	      assert (ranges[0].start + ranges[0].size == start);	      \
	      ranges[0].size += (map)->l_info[DT_PLTRELSZ]->d_un.d_val;	      \
	    }								      \
	}								      \
									      \
      if (ELF_DURING_STARTUP)						      \
	elf_dynamic_do_##reloc ((map), ranges[0].start, ranges[0].size, 0);   \
      else								      \
	{								      \
	  int ranges_index;						      \
	  for (ranges_index = 0; ranges_index < 2; ++ranges_index)	      \
	    elf_dynamic_do_##reloc ((map),				      \
				    ranges[ranges_index].start,		      \
				    ranges[ranges_index].size,		      \
				    ranges[ranges_index].lazy);		      \
	}								      \
    } while (0)
# endif
/* Nonzero when both REL and RELA are possible on this machine, so
   DT_PLTREL must be consulted; zero when only one format exists.  The
   two definitions need the restored #else between them.  */
# if ELF_MACHINE_NO_REL || ELF_MACHINE_NO_RELA
#  define _ELF_CHECK_REL 0
# else
#  define _ELF_CHECK_REL 1
# endif
/* Process MAP's REL relocations, lazily when LAZY is nonzero; a no-op on
   machines without REL.  The empty variant must live in an #else branch,
   otherwise it redefines the real one.  */
# if ! ELF_MACHINE_NO_REL
#  define ELF_DYNAMIC_DO_REL(map, lazy) \
  _ELF_DYNAMIC_DO_RELOC (REL, rel, map, lazy, _ELF_CHECK_REL)
# else
#  define ELF_DYNAMIC_DO_REL(map, lazy) /* Nothing to do.  */
# endif
/* Process MAP's RELA relocations, lazily when LAZY is nonzero; a no-op on
   machines without RELA.  The empty variant must live in an #else branch,
   otherwise it redefines the real one.  */
# if ! ELF_MACHINE_NO_RELA
#  define ELF_DYNAMIC_DO_RELA(map, lazy) \
  _ELF_DYNAMIC_DO_RELOC (RELA, rela, map, lazy, _ELF_CHECK_REL)
# else
#  define ELF_DYNAMIC_DO_RELA(map, lazy) /* Nothing to do.  */
# endif
/* This can't just be an inline function because GCC is too dumb
   to inline functions containing inlines themselves.  Restored do/while
   wrapper: the body declares `edr_lazy', so without a block the macro
   is not valid where a statement is expected.  */
# define ELF_DYNAMIC_RELOCATE(map, lazy, consider_profile) \
  do {									      \
    int edr_lazy = elf_machine_runtime_setup ((map), (lazy),		      \
					      (consider_profile));	      \
    ELF_DYNAMIC_DO_REL ((map), edr_lazy);				      \
    ELF_DYNAMIC_DO_RELA ((map), edr_lazy);				      \
  } while (0)