sel_ldr: Remove support for rodata segment at start of executable
[nativeclient.git] / service_runtime / sel_mem.c
/*
 * Copyright 2008, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* NaCl Simple/secure ELF loader (NaCl SEL) memory map. */

#include "native_client/include/portability.h"

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>

#include "native_client/service_runtime/sel_mem.h"
#include "native_client/service_runtime/sel_util.h"
#include "native_client/service_runtime/nacl_check.h"
#include "native_client/service_runtime/nacl_config.h"
#include "native_client/service_runtime/nacl_log.h"
#include "native_client/service_runtime/nacl_memory_object.h"

#define START_ENTRIES       5   /* tramp+text, rodata, data, bss, stack */
#define REMOVE_MARKED_DEBUG 0

/*
 * The memory map structure is a simple array of memory regions which
 * may have different access protections.  We do not yet merge regions
 * with the same access protections together to reduce the region
 * number, but may do so in the future.
 *
 * Regions are described by (relative) starting page number, the
 * number of pages, and the protection that the pages should have.
 *
 * Takes ownership of NaClMemObj.
 */
struct NaClVmmapEntry *NaClVmmapEntryMake(uintptr_t          page_num,
                                          size_t             npages,
                                          int                prot,
                                          struct NaClMemObj  *nmop)
{
  struct NaClVmmapEntry *entry;

  NaClLog(4,
          "NaClVmmapEntryMake(0x%"PRIxPTR",0x%"PRIxS",0x%x,0x%"PRIxPTR")\n",
          page_num, npages, prot, (uintptr_t) nmop);
  entry = (struct NaClVmmapEntry *) malloc(sizeof *entry);
  if (NULL == entry) {
    return 0;
  }
  NaClLog(4, "entry: 0x%"PRIxPTR"\n", (uintptr_t) entry);
  entry->page_num = page_num;
  entry->npages = npages;
  entry->prot = prot;
  entry->nmop = nmop;
  entry->removed = 0;
  return entry;
}

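/*
 * Frees |entry|.  If the entry owns a NaClMemObj, that object is
 * destroyed and freed as well.
 */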
void NaClVmmapEntryFree(struct NaClVmmapEntry *entry)
{
  NaClLog(4,
          ("NaClVmmapEntryFree(0x%08"PRIxPTR"): (0x%"PRIxPTR",0x%"PRIxS","
           "0x%x,0x%"PRIxPTR")\n"),
          (uintptr_t) entry,
          entry->page_num, entry->npages, entry->prot, (uintptr_t) entry->nmop);
  if (NULL != entry->nmop) {
    NaClMemObjDtor(entry->nmop);
    free(entry->nmop);
  }
  free(entry);
}

/*
 * Print debug.
 */
void NaClVmentryPrint(void                  *state,
                      struct NaClVmmapEntry *vmep)
{
  printf("page num 0x%06x\n", (uint32_t) vmep->page_num);
  printf("num pages %d\n", (uint32_t) vmep->npages);
  printf("prot bits %x\n", vmep->prot);
  fflush(stdout);
}

void NaClVmmapDebug(struct NaClVmmap *self,
                    char             *msg)
{
  puts(msg);
  NaClVmmapVisit(self, NaClVmentryPrint, (void *) 0);
  fflush(stdout);
}

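/*
 * Initializes |self| as an empty map with room for START_ENTRIES
 * entries.  Returns 1 on success, 0 if allocation fails.
 */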
int NaClVmmapCtor(struct NaClVmmap *self)
{
  self->size = START_ENTRIES;
  self->vmentry = calloc(self->size, sizeof *self->vmentry);
  if (!self->vmentry) {
    return 0;
  }
  self->nvalid = 0;
  self->is_sorted = 1;
  return 1;
}

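/*
 * Frees every entry in the map, then the entry array itself.
 */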
void NaClVmmapDtor(struct NaClVmmap *self)
{
  size_t i;

  for (i = 0; i < self->nvalid; ++i) {
    NaClVmmapEntryFree(self->vmentry[i]);
  }
  free(self->vmentry);
  self->vmentry = 0;
}

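/*
 * Typical lifecycle, as an illustrative sketch only (the page numbers
 * and |prot| value below are hypothetical; a NULL nmop means the pages
 * are backed by the system paging file):
 *
 *   struct NaClVmmap map;
 *
 *   if (!NaClVmmapCtor(&map)) {
 *     ... handle allocation failure ...
 *   }
 *   NaClVmmapAdd(&map, start_page, npages, prot, NULL);
 *   ...
 *   if (NULL != NaClVmmapFindPage(&map, pnum)) {
 *     ... page pnum is mapped ...
 *   }
 *   NaClVmmapDtor(&map);
 */
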
/*
 * Comparison function for qsort.  Should never encounter a
 * removed/invalid entry.
 */
static int NaClVmmapCmpEntries(void const *vleft,
                               void const *vright)
{
  struct NaClVmmapEntry const *const *left =
      (struct NaClVmmapEntry const *const *) vleft;
  struct NaClVmmapEntry const *const *right =
      (struct NaClVmmapEntry const *const *) vright;

  return ((*left)->page_num - (*right)->page_num);
}

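/*
 * Compacts the entry array in place: entries whose |removed| flag is
 * set are freed, and live entries from the end of the array are moved
 * into the resulting holes.  Leaves the array unsorted.
 */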
static void NaClVmmapRemoveMarked(struct NaClVmmap *self)
{
  size_t i;
  size_t last;

  if (0 == self->nvalid)
    return;

#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "Before RemoveMarked");
#endif
  /*
   * Linearly scan with a move-end-to-front strategy to get rid of
   * marked-to-be-removed entries.
   */

  /* forall j in [0, self->nvalid): NULL != self->vmentry[j] */
  for (last = self->nvalid; last > 0 && self->vmentry[--last]->removed; ) {
    NaClVmmapEntryFree(self->vmentry[last]);
    self->vmentry[last] = NULL;
  }
  if (last == 0 && self->vmentry[0]->removed) {
    NaClLog(LOG_FATAL, "No valid entries in VM map\n");
  }

  /* forall j in [0, last]: NULL != self->vmentry[j] */

  /* 0 <= last < self->nvalid && !self->vmentry[last]->removed */
  CHECK(0 <= last && last < self->nvalid);
  CHECK(!self->vmentry[last]->removed);
  /* forall j in (last, self->nvalid): NULL == self->vmentry[j] */

  /* loop invar: forall j in [0, i): !self->vmentry[j]->removed */
  for (i = 0; i < last; ++i) {
    if (!self->vmentry[i]->removed) {
      continue;
    }
    /* self->vmentry[i]->removed */

    NaClVmmapEntryFree(self->vmentry[i]);
    self->vmentry[i] = self->vmentry[last];
    self->vmentry[last] = NULL;

    /* forall j in [last, self->nvalid): NULL == self->vmentry[j] */
    /* forall j in [0, i]: !self->vmentry[j]->removed */

    while (--last > i && self->vmentry[last]->removed) {
      NaClVmmapEntryFree(self->vmentry[last]);
      self->vmentry[last] = NULL;
    }
    /*
     * since !self->vmentry[i]->removed, we are guaranteed that
     * !self->vmentry[last]->removed when the while loop terminates.
     *
     * forall j in (last, self->nvalid):
     *   NULL == self->vmentry[j]->removed
     */
  }
  /* i == last */
  /* forall j in [0, last]: !self->vmentry[j]->removed */
  /* forall j in (last, self->nvalid): NULL == self->vmentry[j] */
  self->nvalid = last + 1;

  self->is_sorted = 0;
#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "After RemoveMarked");
#endif
}

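/*
 * Drops entries marked for removal, then sorts the remaining entries
 * by starting page number.  Does nothing if the map is already sorted.
 */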
void NaClVmmapMakeSorted(struct NaClVmmap *self)
{
  if (self->is_sorted)
    return;

  NaClVmmapRemoveMarked(self);

  qsort(self->vmentry,
        self->nvalid,
        sizeof *self->vmentry,
        NaClVmmapCmpEntries);
  self->is_sorted = 1;
#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "After Sort");
#endif
}

/*
 * Adds an entry.  Does not sort.
 */
int NaClVmmapAdd(struct NaClVmmap   *self,
                 uintptr_t          page_num,
                 size_t             npages,
                 int                prot,
                 struct NaClMemObj  *nmop)
{
  struct NaClVmmapEntry *entry;

  NaClLog(2,
          ("NaClVmmapAdd(0x%08"PRIxPTR", 0x%"PRIxPTR", 0x%"PRIxS", 0x%x,"
           " 0x%08"PRIxPTR")\n"),
          (uintptr_t) self, page_num, npages, prot, (uintptr_t) nmop);
  if (self->nvalid == self->size) {
    int                   new_size = 2 * self->size;
    struct NaClVmmapEntry **new_map;

    new_map = realloc(self->vmentry, new_size * sizeof *new_map);
    if (NULL == new_map) {
      return 0;
    }
    self->vmentry = new_map;
    self->size = new_size;
  }
  /* self->nvalid < self->size */
  entry = NaClVmmapEntryMake(page_num, npages, prot, nmop);
  if (NULL == entry) {
    /* allocation failed; leave the map unchanged */
    return 0;
  }
  self->vmentry[self->nvalid] = entry;
  self->is_sorted = 0;
  ++self->nvalid;

  return 1;
}

/*
 * Update the virtual memory map.  Deletion is handled by a remove
 * flag, since a NULL nmop just means that the memory is backed by the
 * system paging file.
 */
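/*
 * Overlap handling, illustrated (the page numbers below are
 * hypothetical, not from the original source).  Suppose an existing
 * entry covers pages [10, 20):
 *
 *   - update over [12, 15): the entry is split into [10, 12) and
 *     [15, 20), with the new range falling in between;
 *   - update over [15, 25): the entry is trimmed to [10, 15);
 *   - update over [5, 12):  the entry is trimmed to [12, 20) and its
 *     backing-store offset advanced accordingly;
 *   - update over [5, 25):  the entry is marked removed.
 *
 * If |remove| is zero, a new entry for [page_num, page_num + npages)
 * is then added.
 */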
void NaClVmmapUpdate(struct NaClVmmap   *self,
                     uintptr_t          page_num,
                     size_t             npages,
                     int                prot,
                     struct NaClMemObj  *nmop,
                     int                remove)
{
  /* update existing entries or create new entry as needed */
  size_t    i;
  uintptr_t new_region_end_page = page_num + npages;

  NaClLog(2,
          ("NaClVmmapUpdate(0x%08"PRIxPTR", 0x%"PRIxPTR", 0x%"PRIxS","
           " 0x%x, 0x%08"PRIxPTR")\n"),
          (uintptr_t) self, page_num, npages, prot, (uintptr_t) nmop);
  NaClVmmapMakeSorted(self);

  CHECK(npages > 0);

  for (i = 0; i < self->nvalid; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t             ent_end_page = ent->page_num + ent->npages;
    off_t                 additional_offset =
        (new_region_end_page - ent->page_num) << NACL_PAGESHIFT;

    if (ent->page_num < page_num && new_region_end_page < ent_end_page) {
      /*
       * Split existing mapping into two parts, with new mapping in
       * the middle.
       */
      if (!NaClVmmapAdd(self,
                        new_region_end_page,
                        ent_end_page - new_region_end_page,
                        ent->prot,
                        NaClMemObjSplit(ent->nmop, additional_offset))) {
        NaClLog(LOG_FATAL, "NaClVmmapUpdate: could not split entry\n");
      }
      ent->npages = page_num - ent->page_num;
      break;
    } else if (ent->page_num < page_num && page_num < ent_end_page) {
      /* New mapping overlaps end of existing mapping. */
      ent->npages = page_num - ent->page_num;
    } else if (ent->page_num < new_region_end_page &&
               new_region_end_page < ent_end_page) {
      /* New mapping overlaps start of existing mapping. */

      NaClMemObjIncOffset(ent->nmop, additional_offset);

      ent->page_num = new_region_end_page;
      ent->npages = ent_end_page - new_region_end_page;
      break;
    } else if (page_num <= ent->page_num &&
               ent_end_page <= new_region_end_page) {
      /* New mapping covers all of the existing mapping. */
      ent->removed = 1;
    } else {
      /* No overlap */
      assert(new_region_end_page <= ent->page_num || ent_end_page <= page_num);
    }
  }

  if (!remove) {
    if (!NaClVmmapAdd(self, page_num, npages, prot, nmop))
      NaClLog(LOG_FATAL, "NaClVmmapUpdate: could not add entry\n");
  }

  NaClVmmapRemoveMarked(self);
}

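/*
 * bsearch comparator: the key carries only a page number; the result
 * says whether that page lies before, inside, or after the entry's
 * page range.
 */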
static int NaClVmmapContainCmpEntries(void const *vkey,
                                      void const *vent)
{
  struct NaClVmmapEntry const *const *key =
      (struct NaClVmmapEntry const *const *) vkey;
  struct NaClVmmapEntry const *const *ent =
      (struct NaClVmmapEntry const *const *) vent;

  NaClLog(5, "key->page_num = 0x%05"PRIxPTR"\n", (*key)->page_num);

  NaClLog(5, "entry->page_num = 0x%05"PRIxPTR"\n", (*ent)->page_num);
  NaClLog(5, "entry->npages = 0x%"PRIxS"\n", (*ent)->npages);

  if ((*key)->page_num < (*ent)->page_num) return -1;
  if ((*key)->page_num < (*ent)->page_num + (*ent)->npages) return 0;
  return 1;
}

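/*
 * Returns the entry whose page range contains |pnum|, or NULL if the
 * page is not mapped.  Sorts the map as a side effect.
 */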
struct NaClVmmapEntry const *NaClVmmapFindPage(struct NaClVmmap *self,
                                               uintptr_t        pnum)
{
  struct NaClVmmapEntry key;
  struct NaClVmmapEntry *kptr;
  struct NaClVmmapEntry *const *result_ptr;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  kptr = &key;
  result_ptr = ((struct NaClVmmapEntry *const *)
                bsearch(&kptr,
                        self->vmentry,
                        self->nvalid,
                        sizeof self->vmentry[0],
                        NaClVmmapContainCmpEntries));
  return result_ptr ? *result_ptr : NULL;
}

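/*
 * Like NaClVmmapFindPage, but fills in the caller-provided iterator
 * |space|; the iterator is at end if no entry contains |pnum|.
 */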
struct NaClVmmapIter *NaClVmmapFindPageIter(struct NaClVmmap      *self,
                                            uintptr_t             pnum,
                                            struct NaClVmmapIter  *space)
{
  struct NaClVmmapEntry key;
  struct NaClVmmapEntry *kptr;
  struct NaClVmmapEntry **result_ptr;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  kptr = &key;
  result_ptr = ((struct NaClVmmapEntry **)
                bsearch(&kptr,
                        self->vmentry,
                        self->nvalid,
                        sizeof self->vmentry[0],
                        NaClVmmapContainCmpEntries));
  space->vmmap = self;
  if (NULL == result_ptr) {
    space->entry_ix = self->nvalid;
  } else {
    space->entry_ix = result_ptr - self->vmentry;
  }
  return space;
}

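/*
 * Iterator protocol: NaClVmmapIterAtEnd tests whether the iterator has
 * run off the end of the map, NaClVmmapIterStar dereferences it, and
 * NaClVmmapIterIncr advances it to the next entry.
 */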
int NaClVmmapIterAtEnd(struct NaClVmmapIter *nvip)
{
  return nvip->entry_ix >= nvip->vmmap->nvalid;
}

/*
 * IterStar only permissible if not AtEnd.
 */
struct NaClVmmapEntry *NaClVmmapIterStar(struct NaClVmmapIter *nvip)
{
  return nvip->vmmap->vmentry[nvip->entry_ix];
}

void NaClVmmapIterIncr(struct NaClVmmapIter *nvip)
{
  ++nvip->entry_ix;
}

/*
 * Iterator becomes invalid after Erase.  We could have a version that
 * keeps the iterator valid by copying forward, but it is unclear
 * whether that is needed.
 */
void NaClVmmapIterErase(struct NaClVmmapIter *nvip)
{
  struct NaClVmmap *nvp;

  nvp = nvip->vmmap;
  free(nvp->vmentry[nvip->entry_ix]);
  nvp->vmentry[nvip->entry_ix] = nvp->vmentry[--nvp->nvalid];
  nvp->is_sorted = 0;
}

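/*
 * Applies |fn| to every entry, in ascending page order.
 */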
void NaClVmmapVisit(struct NaClVmmap *self,
                    void             (*fn)(void                  *state,
                                           struct NaClVmmapEntry *entry),
                    void             *state)
{
  int i;
  int nentries;

  NaClVmmapMakeSorted(self);
  for (i = 0, nentries = self->nvalid; i < nentries; ++i) {
    (*fn)(state, self->vmentry[i]);
  }
}

/*
 * Linear search, from high addresses down.
 */
uintptr_t NaClVmmapFindSpace(struct NaClVmmap *self,
                             size_t           num_pages)
{
  size_t                i;
  struct NaClVmmapEntry *vmep;
  uintptr_t             end_page;
  uintptr_t             start_page;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);
  for (i = self->nvalid; --i > 0; ) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;  /* end page from previous */
    start_page = self->vmentry[i]->page_num;   /* start page from current */
    if (start_page - end_page >= num_pages) {
      return start_page - num_pages;
    }
  }
  return 0;
  /*
   * in user addresses, page 0 is always trampoline, and user
   * addresses are contained in system addresses, so returning a
   * system page number of 0 can serve as error indicator: it is at
   * worst the trampoline page, and likely to be below it.
   */
}

/*
 * Linear search, from high addresses down.  For mmap, so the starting
 * address of the region found must be NACL_MAP_PAGESIZE aligned.
 *
 * For general mmap it is better to use as high an address as
 * possible, since the stack size for the main thread is currently
 * fixed, and the heap is the only thing that grows.
 */
uintptr_t NaClVmmapFindMapSpace(struct NaClVmmap  *self,
                                size_t            num_pages)
{
  size_t                i;
  struct NaClVmmapEntry *vmep;
  uintptr_t             end_page;
  uintptr_t             start_page;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);

  for (i = self->nvalid; --i > 0; ) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;  /* end page from previous */
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);

    start_page = self->vmentry[i]->page_num;   /* start page from current */
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {

      start_page = NaClTruncPageNumDownToMapMultiple(start_page);

      if (start_page <= end_page) {
        continue;
      }
    }
    if (start_page - end_page >= num_pages) {
      return start_page - num_pages;
    }
  }
  return 0;
  /*
   * in user addresses, page 0 is always trampoline, and user
   * addresses are contained in system addresses, so returning a
   * system page number of 0 can serve as error indicator: it is at
   * worst the trampoline page, and likely to be below it.
   */
}

/*
 * Linear search, from uaddr up.
 */
uintptr_t NaClVmmapFindMapSpaceAboveHint(struct NaClVmmap *self,
                                         uintptr_t        uaddr,
                                         size_t           num_pages)
{
  size_t                nvalid;
  size_t                i;
  struct NaClVmmapEntry *vmep;
  uintptr_t             usr_page;
  uintptr_t             start_page;
  uintptr_t             end_page;

  NaClVmmapMakeSorted(self);

  usr_page = uaddr >> NACL_PAGESHIFT;
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);

  nvalid = self->nvalid;

  for (i = 1; i < nvalid; ++i) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);

    start_page = self->vmentry[i]->page_num;
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {

      start_page = NaClTruncPageNumDownToMapMultiple(start_page);

      if (start_page <= end_page) {
        continue;
      }
    }
    if (end_page <= usr_page && usr_page < start_page) {
      end_page = usr_page;
    }
    if (usr_page <= end_page && (start_page - end_page) >= num_pages) {
      /* found a gap at or after uaddr that's big enough */
      return end_page;
    }
  }
  return 0;
}

/* Returns whether the given range contains any executable mappings. */
int NaClVmmapContainsExecutablePages(struct NaClVmmap *self, uintptr_t page_num,
                                     size_t npages)
{
  int i;
  NaClVmmapRemoveMarked(self);
  for (i = 0; i < self->nvalid; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    if ((ent->prot & NACL_ABI_PROT_EXEC) != 0 &&
        !(page_num + npages <= ent->page_num ||
          ent->page_num + ent->npages <= page_num))
      return 1;
  }
  return 0;
}