/* Map in a shared object's segments from the file.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _DL_LOAD_H
#define _DL_LOAD_H 1

#include <link.h>
#include <sys/mman.h>


/* On some systems, no flag bits are given to specify file mapping.  */
#ifndef MAP_FILE
# define MAP_FILE 0
#endif

/* The right way to map in the shared library files is MAP_COPY, which
   makes a virtual copy of the data at the time of the mmap call; this
   guarantees the mapped pages will be consistent even if the file is
   overwritten.  Some losing VM systems like Linux's lack MAP_COPY.  All we
   get is MAP_PRIVATE, which copies each page when it is modified; this
   means if the file is overwritten, we may at some point get some pages
   from the new version after starting with pages from the old version.

   To make up for the lack and avoid the overwriting problem,
   what Linux does have is MAP_DENYWRITE.  This prevents anyone
   from modifying the file while we have it mapped.  */
#ifndef MAP_COPY
# ifdef MAP_DENYWRITE
#  define MAP_COPY (MAP_PRIVATE | MAP_DENYWRITE)
# else
#  define MAP_COPY MAP_PRIVATE
# endif
#endif
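
/* For illustration only: the flags above are meant to be combined in the
   mmap call that maps a segment's file-backed pages, roughly

     void *mapat = mmap ((void *) (l->l_addr + c->mapstart),
                         c->mapend - c->mapstart, c->prot,
                         MAP_COPY | MAP_FILE | MAP_FIXED,
                         fd, c->mapoff);

   so the object's pages stay consistent even if the file changes on disk.
   The real call lives in dl-map-segments.h; l, c and fd above are only
   stand-ins for a link map, a load command and the object's file
   descriptor.  */
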
/* Some systems link their relocatable objects at a base address other
   than 0.  We want to know that base address so that we can subtract it
   from the segment addresses during mapping.  This results in more
   efficient address space usage.  Defaults to zero for almost all
   systems.  */
#ifndef MAP_BASE_ADDR
# define MAP_BASE_ADDR(l) 0
#endif

/* Handle situations where we have a preferred location in memory for
   the shared objects.  */
#ifdef ELF_PREFERRED_ADDRESS_DATA
ELF_PREFERRED_ADDRESS_DATA;
#endif
#ifndef ELF_PREFERRED_ADDRESS
# define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) (mapstartpref)
#endif
#ifndef ELF_FIXED_ADDRESS
# define ELF_FIXED_ADDRESS(loader, mapstart) ((void) 0)
#endif
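
/* For illustration: when the first PT_LOAD of an ET_DYN object is about to
   be mapped, an implementation can combine the hooks above to compute an
   mmap placement hint, along the lines of

     ElfW(Addr) mappref
       = (ELF_PREFERRED_ADDRESS (loader, maplength, c->mapstart)
          - MAP_BASE_ADDR (l));

   so ports with a preferred load area or a nonzero link-time base address
   can steer the mapping; with the defaults above this is simply the first
   segment's own start address.  Here loader, maplength and l reuse the
   parameter names from this header, while mappref and c are stand-ins for
   illustration.  */
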
/* This structure describes one PT_LOAD command.
   Its details have been expanded out and converted.  */
struct loadcmd
{
  ElfW(Addr) mapstart, mapend, dataend, allocend;
  ElfW(Off) mapoff;
  int prot;                             /* PROT_* bits.  */
};
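
/* For illustration: the "expanded out and converted" details are computed
   from the PT_LOAD program header roughly as

     c->mapstart = ph->p_vaddr & ~(pagesize - 1);
     c->mapend = (ph->p_vaddr + ph->p_filesz + pagesize - 1)
                 & ~(pagesize - 1);
     c->dataend = ph->p_vaddr + ph->p_filesz;
     c->allocend = ph->p_vaddr + ph->p_memsz;
     c->mapoff = ph->p_offset & ~(pagesize - 1);
     c->prot = ((ph->p_flags & PF_R ? PROT_READ : 0)
                | (ph->p_flags & PF_W ? PROT_WRITE : 0)
                | (ph->p_flags & PF_X ? PROT_EXEC : 0));

   i.e. start addresses and file offsets are rounded down to a page
   boundary, the end of the file data is rounded up, and the segment flags
   become PROT_* bits.  ph and pagesize are stand-in names; the actual
   conversion is done by the caller in dl-load.c.  */
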
/* This is a subroutine of _dl_map_segments.  It should be called for each
   load command, some time after L->l_addr has been set correctly.  It is
   responsible for setting up the l_text_end and l_phdr fields.  */
static void __always_inline
_dl_postprocess_loadcmd (struct link_map *l, const ElfW(Ehdr) *header,
                         const struct loadcmd *c)
{
  if (c->prot & PROT_EXEC)
    l->l_text_end = l->l_addr + c->mapend;

  if (l->l_phdr == 0
      && c->mapoff <= header->e_phoff
      && ((size_t) (c->mapend - c->mapstart + c->mapoff)
          >= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
    /* Found the program header in this segment.  */
    l->l_phdr = (void *) (uintptr_t) (c->mapstart + header->e_phoff
                                      - c->mapoff);
}
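
/* For illustration, the caller in dl-map-segments.h invokes this once per
   load command after the segment has been mapped, roughly

     for (const struct loadcmd *c = loadcmds; c < &loadcmds[nloadcmds]; ++c)
       {
         ... map the segment described by *c ...
         _dl_postprocess_loadcmd (l, header, c);
       }

   so that l_text_end ends up past an executable segment and l_phdr points
   at the program header table once some segment's file extent covers it.
   The loop shown is a sketch, not the exact implementation.  */
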
/* This is a subroutine of _dl_map_object_from_fd.  It is responsible
   for filling in several fields in *L: l_map_start, l_map_end, l_addr,
   l_contiguous, l_text_end, l_phdr.  On successful return, all the
   segments are mapped (or copied, or whatever) from the file into their
   final places in the address space, with the correct page permissions,
   and any bss-like regions already zeroed.  It returns a null pointer
   on success, or an error message string (to be translated) on error
   (having also set errno).

   The file <dl-map-segments.h> defines this function.  The canonical
   implementation in elf/dl-map-segments.h might be replaced by a sysdeps
   version.  */
static const char *_dl_map_segments (struct link_map *l, int fd,
                                     const ElfW(Ehdr) *header, int type,
                                     const struct loadcmd loadcmds[],
                                     size_t nloadcmds,
                                     const size_t maplength,
                                     bool has_holes,
                                     struct link_map *loader);
/* All the error message strings _dl_map_segments might return are
   listed here so that different implementations in different sysdeps
   dl-map-segments.h files all use consistent strings that are
   guaranteed to have translations.  */
#define DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT \
  N_("failed to map segment from shared object")
#define DL_MAP_SEGMENTS_ERROR_MPROTECT \
  N_("cannot change memory protections")
#define DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL \
  N_("cannot map zero-fill pages")
#endif /* dl-load.h */