/* Map in a shared object's segments.  NaCl version.
   Copyright (C) 2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dl-load.h>
#include <errno.h>
#include <stdbool.h>
#include <unistd.h>
#include <libc-internal.h>
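
/* <dl-load.h> provides struct loadcmd, _dl_postprocess_loadcmd, and
   the DL_MAP_SEGMENTS_ERROR_* message strings used below; ALIGN_UP
   comes from <libc-internal.h>.  */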

/* This is basically pread, but with iteration after short reads.  */
static bool
read_in_data (int fd, void *data, size_t len, off_t pos)
{
  if (__glibc_unlikely (__lseek (fd, pos, SEEK_SET) == (off_t) -1))
    return true;
  while (len > 0)
    {
      ssize_t n = __read (fd, data, len);
      if (__glibc_unlikely (n < 0))
        return true;
      if (__glibc_unlikely (n == 0))
        {
          errno = EFTYPE;
          return true;
        }
      data += n;
      len -= n;
    }
  return false;
}
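
/* Hitting EOF before LEN bytes arrive means the file is smaller than
   its program headers claim, hence EFTYPE.  read_in_data reports
   failure by returning true with errno set; the callers below turn
   that into a DL_MAP_SEGMENTS_ERROR_* string.  */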

static const char *
_dl_map_segments (struct link_map *l, int fd,
                  const ElfW(Ehdr) *header, int type,
                  const struct loadcmd loadcmds[], size_t nloadcmds,
                  const size_t maplength, bool has_holes,
                  struct link_map *loader)
{
  if (__builtin_expect (type, ET_DYN) == ET_DYN)
    {
      /* This is a position-independent shared object.  Let the system
         choose where to place it.

         As a refinement, sometimes we have an address that we would
         prefer to map such objects at; but this is only a preference,
         the OS can do whatever it likes.  */
      ElfW(Addr) mappref
        = (ELF_PREFERRED_ADDRESS (loader, maplength,
                                  loadcmds[0].mapstart & GLRO(dl_use_load_bias))
           - MAP_BASE_ADDR (l));
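
      /* ELF_PREFERRED_ADDRESS and MAP_BASE_ADDR are per-port hook
         macros; with their generic fallback definitions this reduces
         to loadcmds[0].mapstart & GLRO(dl_use_load_bias), which is 0
         (no preference) unless use of the link-time addresses was
         requested via LD_USE_LOAD_BIAS.  */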

      uintptr_t mapstart;
      if (__glibc_likely (loadcmds[0].prot & PROT_EXEC))
        {
          /* When there is a code segment, we must use the
             allocate_code_data interface to choose a location.  */

          uintptr_t code_size = loadcmds[0].allocend - loadcmds[0].mapstart;
          uintptr_t data_offset;
          size_t data_size;

          if (__glibc_likely (nloadcmds > 1))
            {
              data_offset = loadcmds[1].mapstart - loadcmds[0].mapstart;
              data_size = ALIGN_UP (maplength - data_offset,
                                    GLRO(dl_pagesize));
            }
          else
            {
              data_offset = 0;
              data_size = 0;
            }
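
          /* Worked example (illustrative numbers only, assuming a
             0x10000-byte page size): a code segment of 0x20000 bytes
             at vaddr 0 and a data segment at vaddr 0x30000 with
             maplength 0x38000 give code_size = 0x20000,
             data_offset = 0x30000, and
             data_size = ALIGN_UP (0x8000, 0x10000) = 0x10000.  */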

          int error = __nacl_irt_code_data_alloc.allocate_code_data
            (mappref, code_size, data_offset, data_size, &mapstart);
          if (__glibc_unlikely (error))
            {
              errno = error;
              return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
            }
        }
      else
        {
          /* With no code pages involved, plain mmap works fine.  */
          void *mapped = __mmap ((void *) mappref, maplength,
                                 PROT_NONE, MAP_ANON, -1, 0);
          if (__glibc_unlikely (mapped == MAP_FAILED))
            return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
          mapstart = (uintptr_t) mapped;
        }

      l->l_addr = mapstart - loadcmds[0].mapstart;
    }
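
  /* l_addr is the load bias: the difference between the addresses in
     the program headers and the addresses actually mapped.  For
     ET_EXEC objects it is left at zero, so each segment keeps its
     link-time address.  */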

  /* Remember which part of the address space this object uses.  */
  l->l_map_start = loadcmds[0].mapstart + l->l_addr;
  l->l_map_end = l->l_map_start + maplength;
  l->l_contiguous = !has_holes;
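
  /* l_contiguous promises that every page in [l_map_start, l_map_end)
     belongs to this object, which lets address lookups skip checking
     the individual segments.  */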

  /* Now actually map (or read) in each segment.  */
  for (const struct loadcmd *c = loadcmds; c < &loadcmds[nloadcmds]; ++c)
    if (__glibc_likely (c->mapend > c->mapstart))
      {
        /* Unlike POSIX mmap, NaCl's mmap does not reliably handle COW
           faults in the remainder of the final partial page.  So to get
           the expected behavior for the unaligned boundary between data
           and bss, it's necessary to allocate the final partial page of
           data as anonymous memory rather than mapping it from the file.  */

        size_t maplen = c->mapend - c->mapstart;
        if (c->mapend > c->dataend && c->allocend > c->dataend)
          maplen = (c->dataend & -GLRO(dl_pagesize)) - c->mapstart;
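
        /* Illustration (assumed numbers, 0x10000-byte pages): with
           mapstart = 0x30000, dataend = 0x45678, and mapend = 0x50000,
           maplen shrinks from 0x20000 to (0x40000 - 0x30000) = 0x10000
           bytes, leaving the page that straddles the data/bss boundary
           to the anonymous-memory code below.  */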

        /* Map the segment contents from the file.  */
        if (__glibc_unlikely (__mmap ((void *) (l->l_addr + c->mapstart),
                                      maplen, c->prot,
                                      MAP_FIXED|MAP_COPY|MAP_FILE,
                                      fd, c->mapoff)
                              == MAP_FAILED))
          {
            switch (errno)
              {
              case EINVAL:
              case ENOTSUP:
              case ENOSYS:
                break;
              default:
                return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
              }

            /* No mmap support for this file.  */
            if (c->prot & PROT_EXEC)
              {
                /* Read the data into a temporary buffer.  */
                const size_t len = c->mapend - c->mapstart;
                void *data = __mmap (NULL, len, PROT_READ | PROT_WRITE,
                                     MAP_ANON|MAP_PRIVATE, -1, 0);
                if (__glibc_unlikely (data == MAP_FAILED))
                  return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
                if (read_in_data (fd, data, len, c->mapoff))
                  return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
                /* Now validate and install the code.  */
                int error = __nacl_irt_dyncode.dyncode_create
                  ((void *) (l->l_addr + c->mapstart), data, len);
                __munmap (data, len);
                if (__glibc_unlikely (error))
                  {
                    errno = error;
                    return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
                  }
              }
            else
              {
                /* Allocate the pages.  */
                if (__mmap ((void *) (l->l_addr + c->mapstart),
                            c->mapend - c->mapstart, c->prot | PROT_WRITE,
                            MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0)
                    == MAP_FAILED)
                  return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
                /* Now read in the data.  */
                if (read_in_data (fd, (void *) (l->l_addr + c->mapstart),
                                  c->dataend - c->mapstart, c->mapoff))
                  return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
                /* Now that we've filled the pages, reset the page
                   protections to what they should be.  */
                if (!(c->prot & PROT_WRITE)
                    && __mprotect ((void *) (l->l_addr + c->mapstart),
                                   c->mapend - c->mapstart, c->prot) < 0)
                  return DL_MAP_SEGMENTS_ERROR_MPROTECT;
              }
          }
        else if (c->allocend > c->dataend)
          {
            /* Extra zero pages should appear at the end of this segment,
               after the data mapped from the file.  */

            uintptr_t allocend = c->mapend;
            if (c->mapend > c->dataend)
              {
                /* The final data page was partial.  So we didn't map it in.
                   Instead, we must allocate an anonymous page to fill.  */
                if (c->prot & PROT_WRITE)
                  /* Do the whole allocation right here.  */
                  allocend = c->allocend;
                if (__mmap ((void *) (l->l_addr + c->mapstart + maplen),
                            allocend - (c->mapstart + maplen), c->prot,
                            MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0)
                    == MAP_FAILED)
                  return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
                if (read_in_data (fd,
                                  (void *) (l->l_addr + c->mapstart + maplen),
                                  c->dataend & (GLRO(dl_pagesize) - 1),
                                  c->mapoff + maplen))
                  return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
                /* Now that we've filled the page, reset its
                   protections to what they should be.  */
                if (!(c->prot & PROT_WRITE)
                    && __mprotect ((void *) (l->l_addr + c->mapstart + maplen),
                                   c->mapend - (c->mapstart + maplen),
                                   c->prot) < 0)
                  return DL_MAP_SEGMENTS_ERROR_MPROTECT;
              }

            /* Now allocate the pure zero-fill pages.  Note that
               ALLOCEND is an absolute address like C->allocend (both
               of its assignments above set it from a load-command
               address), so it is not offset by C->mapstart here.  */
            if (allocend < c->allocend
                && (__mmap ((void *) (l->l_addr + allocend),
                            c->allocend - allocend, c->prot,
                            MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0)
                    == MAP_FAILED))
              return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
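
            /* Continuing the illustration above, for a writable
               segment with allocend = 0x60000: the anonymous mapping
               covers [l_addr + 0x40000, l_addr + 0x60000),
               read_in_data restores the 0x5678 leftover file bytes
               (0x45678 & 0xffff) at its start, and the rest is already
               zero, so the final zero-fill mmap is skipped.  */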
          }

        _dl_postprocess_loadcmd (l, header, c);
      }

  /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
     fixed.  */
  ELF_FIXED_ADDRESS (loader, c->mapstart);

  return NULL;
}
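
/* A NULL return means all segments are in place.  Any other return
   value is one of the DL_MAP_SEGMENTS_ERROR_* message strings, with
   errno set; the generic caller (_dl_map_object_from_fd) reports it
   via _dl_signal_error.  */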