ada: Disable PIE mode during the build of the Ada front-end
[official-gcc.git] / libffi / src / closures.c
blobf7bead67b54ebc08f99d07505374693f34624916
1 /* -----------------------------------------------------------------------
2 closures.c - Copyright (c) 2019 Anthony Green
3 Copyright (c) 2007, 2009, 2010 Red Hat, Inc.
4 Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc
5 Copyright (c) 2011 Plausible Labs Cooperative, Inc.
7 Code to allocate and deallocate memory for closures.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 ``Software''), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice shall be included
18 in all copies or substantial portions of the Software.
20 THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
24 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
25 WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
27 DEALINGS IN THE SOFTWARE.
28 ----------------------------------------------------------------------- */
30 #if defined __linux__ && !defined _GNU_SOURCE
31 #define _GNU_SOURCE 1
32 #endif
34 #include <fficonfig.h>
35 #include <ffi.h>
36 #include <ffi_common.h>
37 #include <tramp.h>
39 #ifdef __NetBSD__
40 #include <sys/param.h>
41 #endif
43 #if __NetBSD_Version__ - 0 >= 799007200
44 /* NetBSD with PROT_MPROTECT */
45 #include <sys/mman.h>
47 #include <stddef.h>
48 #include <unistd.h>
49 #ifdef HAVE_SYS_MEMFD_H
50 #include <sys/memfd.h>
51 #endif
/* Bookkeeping space prepended to every allocation: a size_t holding the
   rounded mapping size plus a pointer to the code mapping, padded up to
   max_align_t so the closure data that follows stays maximally aligned.  */
static const size_t overhead =
  (sizeof(max_align_t) > sizeof(void *) + sizeof(size_t)) ?
    sizeof(max_align_t)
    : sizeof(void *) + sizeof(size_t);

/* Pointer arithmetic via uintptr_t; D may be negative (see ffi_closure_free).  */
#define ADD_TO_POINTER(p, d) ((void *)((uintptr_t)(p) + (d)))
/* Allocate closure memory as two mappings of the same pages: a writable
   data mapping (returned) and an RX code mapping (stored in *CODE), for
   NetBSD kernels with PAX mprotect / PROT_MPROTECT.  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  static size_t page_size;
  size_t rounded_size;
  void *codeseg, *dataseg;
  int prot;

  /* Expect that PAX mprotect is active and a separate code mapping is necessary. */
  if (!code)
    return NULL;

  /* Obtain system page size. */
  if (!page_size)
    page_size = sysconf(_SC_PAGESIZE);

  /* Round allocation size up to the next page, keeping in mind the size field and pointer to code map. */
  rounded_size = (size + overhead + page_size - 1) & ~(page_size - 1);

  /* Primary mapping is RW, but request permission to switch to PROT_EXEC later. */
  prot = PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC);
  dataseg = mmap(NULL, rounded_size, prot, MAP_ANON | MAP_PRIVATE, -1, 0);
  if (dataseg == MAP_FAILED)
    return NULL;

  /* Create secondary mapping (NetBSD MAP_REMAPDUP duplicates the pages at a
     new address) and switch it to RX. */
  codeseg = mremap(dataseg, rounded_size, NULL, rounded_size, MAP_REMAPDUP);
  if (codeseg == MAP_FAILED) {
    munmap(dataseg, rounded_size);
    return NULL;
  }
  if (mprotect(codeseg, rounded_size, PROT_READ | PROT_EXEC) == -1) {
    munmap(codeseg, rounded_size);
    munmap(dataseg, rounded_size);
    return NULL;
  }

  /* Remember allocation size and location of the secondary mapping for ffi_closure_free. */
  memcpy(dataseg, &rounded_size, sizeof(rounded_size));
  memcpy(ADD_TO_POINTER(dataseg, sizeof(size_t)), &codeseg, sizeof(void *));
  *code = ADD_TO_POINTER(codeseg, overhead);
  return ADD_TO_POINTER(dataseg, overhead);
}
/* Release both mappings created by ffi_closure_alloc.  PTR is the writable
   address previously returned; the mapping size and the code-mapping address
   are read back from the bookkeeping header in front of it.  */
void
ffi_closure_free (void *ptr)
{
  void *codeseg, *dataseg;
  size_t rounded_size;

  /* Step back over the bookkeeping header to the start of the data mapping. */
  dataseg = ADD_TO_POINTER(ptr, -overhead);
  memcpy(&rounded_size, dataseg, sizeof(rounded_size));
  memcpy(&codeseg, ADD_TO_POINTER(dataseg, sizeof(size_t)), sizeof(void *));
  munmap(dataseg, rounded_size);
  munmap(codeseg, rounded_size);
}
/* Static trampolines are not implemented in this NetBSD code path;
   always report absence.  */
int
ffi_tramp_is_present (__attribute__((unused)) void *ptr)
{
  return 0;
}
122 #else /* !NetBSD with PROT_MPROTECT */
124 #if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE
125 # if __linux__ && !defined(__ANDROID__)
126 /* This macro indicates it may be forbidden to map anonymous memory
127 with both write and execute permission. Code compiled when this
128 option is defined will attempt to map such pages once, but if it
129 fails, it falls back to creating a temporary file in a writable and
130 executable filesystem and mapping pages from it into separate
131 locations in the virtual memory space, one location writable and
132 another executable. */
133 # define FFI_MMAP_EXEC_WRIT 1
134 # define HAVE_MNTENT 1
135 # endif
136 # if defined(_WIN32) || defined(__OS2__)
137 /* Windows systems may have Data Execution Protection (DEP) enabled,
138 which requires the use of VirtualMalloc/VirtualFree to alloc/free
139 executable memory. */
140 # define FFI_MMAP_EXEC_WRIT 1
141 # endif
142 #endif
144 #if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX
145 # if defined(__linux__) && !defined(__ANDROID__)
146 /* When defined to 1 check for SELinux and if SELinux is active,
147 don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that
148 might cause audit messages. */
149 # define FFI_MMAP_EXEC_SELINUX 1
150 # endif
151 #endif
153 #if FFI_CLOSURES
155 #if FFI_EXEC_TRAMPOLINE_TABLE
157 #ifdef __MACH__
159 #include <mach/mach.h>
160 #include <pthread.h>
161 #ifdef HAVE_PTRAUTH
162 #include <ptrauth.h>
163 #endif
164 #include <stdio.h>
165 #include <stdlib.h>
/* Page of trampoline entry points provided by architecture-specific
   assembly elsewhere in the project.  */
extern void *ffi_closure_trampoline_table_page;

typedef struct ffi_trampoline_table ffi_trampoline_table;
typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry;

struct ffi_trampoline_table
{
  /* contiguous writable and executable pages */
  vm_address_t config_page;

  /* free list tracking */
  uint16_t free_count;
  ffi_trampoline_table_entry *free_list;
  ffi_trampoline_table_entry *free_list_pool;

  /* doubly-linked list of all live tables (head: ffi_trampoline_tables) */
  ffi_trampoline_table *prev;
  ffi_trampoline_table *next;
};

struct ffi_trampoline_table_entry
{
  /* executable entry point for this trampoline */
  void *(*trampoline) (void);
  /* next free entry while this one sits on the free list */
  ffi_trampoline_table_entry *next;
};
/* Total number of trampolines that fit in one trampoline table */
#define FFI_TRAMPOLINE_COUNT (PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE)

/* Serializes all access to the table list and free lists below.  */
static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER;
/* Head of the list of allocated trampoline tables.  */
static ffi_trampoline_table *ffi_trampoline_tables = NULL;
198 static ffi_trampoline_table *
199 ffi_trampoline_table_alloc (void)
201 ffi_trampoline_table *table;
202 vm_address_t config_page;
203 vm_address_t trampoline_page;
204 vm_address_t trampoline_page_template;
205 vm_prot_t cur_prot;
206 vm_prot_t max_prot;
207 kern_return_t kt;
208 uint16_t i;
210 /* Allocate two pages -- a config page and a placeholder page */
211 config_page = 0x0;
212 kt = vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2,
213 VM_FLAGS_ANYWHERE);
214 if (kt != KERN_SUCCESS)
215 return NULL;
217 /* Remap the trampoline table on top of the placeholder page */
218 trampoline_page = config_page + PAGE_MAX_SIZE;
220 #ifdef HAVE_PTRAUTH
221 trampoline_page_template = (vm_address_t)(uintptr_t)ptrauth_auth_data((void *)&ffi_closure_trampoline_table_page, ptrauth_key_function_pointer, 0);
222 #else
223 trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page;
224 #endif
226 #ifdef __arm__
227 /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */
228 trampoline_page_template &= ~1UL;
229 #endif
230 kt = vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0,
231 VM_FLAGS_OVERWRITE, mach_task_self (), trampoline_page_template,
232 FALSE, &cur_prot, &max_prot, VM_INHERIT_SHARE);
233 if (kt != KERN_SUCCESS || !(cur_prot & VM_PROT_EXECUTE))
235 vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE * 2);
236 return NULL;
239 /* We have valid trampoline and config pages */
240 table = calloc (1, sizeof (ffi_trampoline_table));
241 table->free_count = FFI_TRAMPOLINE_COUNT;
242 table->config_page = config_page;
244 /* Create and initialize the free list */
245 table->free_list_pool =
246 calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry));
248 for (i = 0; i < table->free_count; i++)
250 ffi_trampoline_table_entry *entry = &table->free_list_pool[i];
251 entry->trampoline =
252 (void *) (trampoline_page + (i * FFI_TRAMPOLINE_SIZE));
253 #ifdef HAVE_PTRAUTH
254 entry->trampoline = ptrauth_sign_unauthenticated(entry->trampoline, ptrauth_key_function_pointer, 0);
255 #endif
257 if (i < table->free_count - 1)
258 entry->next = &table->free_list_pool[i + 1];
261 table->free_list = table->free_list_pool;
263 return table;
/* Unlink TABLE from the global table list and release its pages and
   free-list storage.  Caller must hold ffi_trampoline_lock.  */
static void
ffi_trampoline_table_free (ffi_trampoline_table *table)
{
  /* Remove from the list */
  if (table->prev != NULL)
    table->prev->next = table->next;

  if (table->next != NULL)
    table->next->prev = table->prev;

  /* Deallocate pages */
  vm_deallocate (mach_task_self (), table->config_page, PAGE_MAX_SIZE * 2);

  /* Deallocate free list */
  free (table->free_list_pool);
  free (table);
}
/* Allocate a closure plus a trampoline-table entry.  Returns the writable
   closure and stores the executable trampoline address in *CODE.  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  /* Create the closure */
  ffi_closure *closure = malloc (size);
  if (closure == NULL)
    return NULL;

  pthread_mutex_lock (&ffi_trampoline_lock);

  /* Check for an active trampoline table with available entries. */
  ffi_trampoline_table *table = ffi_trampoline_tables;
  if (table == NULL || table->free_list == NULL)
    {
      table = ffi_trampoline_table_alloc ();
      if (table == NULL)
	{
	  pthread_mutex_unlock (&ffi_trampoline_lock);
	  free (closure);
	  return NULL;
	}

      /* Insert the new table at the top of the list */
      table->next = ffi_trampoline_tables;
      if (table->next != NULL)
	table->next->prev = table;

      ffi_trampoline_tables = table;
    }

  /* Claim the free entry */
  ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list;
  ffi_trampoline_tables->free_list = entry->next;
  ffi_trampoline_tables->free_count--;
  entry->next = NULL;

  pthread_mutex_unlock (&ffi_trampoline_lock);

  /* Initialize the return values */
  *code = entry->trampoline;
  closure->trampoline_table = table;
  closure->trampoline_table_entry = entry;

  return closure;
}
/* Return the closure's trampoline entry to its table's free list and free
   the closure itself.  A fully-free non-head table is deallocated; otherwise
   a non-head table is moved to the head of the list (simple MRU policy).  */
void
ffi_closure_free (void *ptr)
{
  ffi_closure *closure = ptr;

  pthread_mutex_lock (&ffi_trampoline_lock);

  /* Fetch the table and entry references */
  ffi_trampoline_table *table = closure->trampoline_table;
  ffi_trampoline_table_entry *entry = closure->trampoline_table_entry;

  /* Return the entry to the free list */
  entry->next = table->free_list;
  table->free_list = entry;
  table->free_count++;

  /* If all trampolines within this table are free, and at least one other table exists, deallocate
   * the table */
  if (table->free_count == FFI_TRAMPOLINE_COUNT
      && ffi_trampoline_tables != table)
    {
      ffi_trampoline_table_free (table);
    }
  else if (ffi_trampoline_tables != table)
    {
      /* Otherwise, bump this table to the top of the list */
      table->prev = NULL;
      table->next = ffi_trampoline_tables;
      if (ffi_trampoline_tables != NULL)
	ffi_trampoline_tables->prev = table;

      ffi_trampoline_tables = table;
    }

  pthread_mutex_unlock (&ffi_trampoline_lock);

  /* Free the closure */
  free (closure);
}
370 #endif
372 // Per-target implementation; It's unclear what can reasonable be shared between two OS/architecture implementations.
374 #elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */
376 #define USE_LOCKS 1
377 #define USE_DL_PREFIX 1
378 #ifdef __GNUC__
379 #ifndef USE_BUILTIN_FFS
380 #define USE_BUILTIN_FFS 1
381 #endif
382 #endif
384 /* We need to use mmap, not sbrk. */
385 #define HAVE_MORECORE 0
387 /* We could, in theory, support mremap, but it wouldn't buy us anything. */
388 #define HAVE_MREMAP 0
390 /* We have no use for this, so save some code and data. */
391 #define NO_MALLINFO 1
393 /* We need all allocations to be in regular segments, otherwise we
394 lose track of the corresponding code address. */
395 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
397 /* Don't allocate more than a page unless needed. */
398 #define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize)
400 #include <sys/types.h>
401 #include <sys/stat.h>
402 #include <fcntl.h>
403 #include <errno.h>
404 #ifndef _MSC_VER
405 #include <unistd.h>
406 #endif
407 #include <string.h>
408 #include <stdio.h>
409 #if !defined(_WIN32)
410 #ifdef HAVE_MNTENT
411 #include <mntent.h>
412 #endif /* HAVE_MNTENT */
413 #include <sys/param.h>
414 #include <pthread.h>
416 /* We don't want sys/mman.h to be included after we redefine mmap and
417 dlmunmap. */
418 #include <sys/mman.h>
419 #define LACKS_SYS_MMAN_H 1
421 #if FFI_MMAP_EXEC_SELINUX
422 #include <sys/statfs.h>
423 #include <stdlib.h>
/* Cached result of selinux_enabled_check(); -1 means "not checked yet".  */
static int selinux_enabled = -1;

/* Detect an active SELinux: either /selinux is a selinuxfs mount (checked
   by filesystem magic 0xf97cff8c) or a "selinuxfs" entry appears in
   /proc/mounts.  Returns 1 if found, 0 otherwise.  */
static int
selinux_enabled_check (void)
{
  struct statfs sfs;
  FILE *f;
  char *buf = NULL;
  size_t len = 0;

  if (statfs ("/selinux", &sfs) >= 0
      && (unsigned int) sfs.f_type == 0xf97cff8cU)
    return 1;
  f = fopen ("/proc/mounts", "r");
  if (f == NULL)
    return 0;
  while (getline (&buf, &len, f) >= 0)
    {
      /* Second whitespace-separated field of each line is the fs type.  */
      char *p = strchr (buf, ' ');
      if (p == NULL)
        break;
      p = strchr (p + 1, ' ');
      if (p == NULL)
        break;
      if (strncmp (p + 1, "selinuxfs ", 10) == 0)
        {
          free (buf);
          fclose (f);
          return 1;
        }
    }
  free (buf);
  fclose (f);
  return 0;
}

/* Lazily evaluate and cache the SELinux check.  */
#define is_selinux_enabled() (selinux_enabled >= 0 ? selinux_enabled \
			      : (selinux_enabled = selinux_enabled_check ()))
464 #else
466 #define is_selinux_enabled() 0
468 #endif /* !FFI_MMAP_EXEC_SELINUX */
470 /* On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. */
471 #ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX
472 #include <stdlib.h>
/* Cached result of emutramp_enabled_check(); -1 means "not checked yet".  */
static int emutramp_enabled = -1;

/* Parse the "PaX:" line of /proc/self/status and report whether trampoline
   emulation ('E' in the second flag position) is enabled for this process.  */
static int
emutramp_enabled_check (void)
{
  char *buf = NULL;
  size_t len = 0;
  FILE *f;
  int ret;
  f = fopen ("/proc/self/status", "r");
  if (f == NULL)
    return 0;
  ret = 0;

  while (getline (&buf, &len, f) != -1)
    if (!strncmp (buf, "PaX:", 4))
      {
	char emutramp;
	if (sscanf (buf, "%*s %*c%c", &emutramp) == 1)
	  ret = (emutramp == 'E');
	break;
      }
  free (buf);
  fclose (f);
  return ret;
}

/* Lazily evaluate and cache the PaX emutramp check.  */
#define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \
			       : (emutramp_enabled = emutramp_enabled_check ()))
#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
505 #elif defined (__CYGWIN__) || defined(__INTERIX)
507 #include <sys/mman.h>
509 /* Cygwin is Linux-like, but not quite that Linux-like. */
510 #define is_selinux_enabled() 0
512 #endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */
514 #ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX
515 #define is_emutramp_enabled() 0
516 #endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
518 /* Declare all functions defined in dlmalloc.c as static. */
519 static void *dlmalloc(size_t);
520 static void dlfree(void*);
521 static void *dlcalloc(size_t, size_t) MAYBE_UNUSED;
522 static void *dlrealloc(void *, size_t) MAYBE_UNUSED;
523 static void *dlmemalign(size_t, size_t) MAYBE_UNUSED;
524 static void *dlvalloc(size_t) MAYBE_UNUSED;
525 static int dlmallopt(int, int) MAYBE_UNUSED;
526 static size_t dlmalloc_footprint(void) MAYBE_UNUSED;
527 static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED;
528 static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED;
529 static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED;
530 static void *dlpvalloc(size_t) MAYBE_UNUSED;
531 static int dlmalloc_trim(size_t) MAYBE_UNUSED;
532 static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED;
533 static void dlmalloc_stats(void) MAYBE_UNUSED;
535 #if !(defined(_WIN32) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
536 /* Use these for mmap and munmap within dlmalloc.c. */
537 static void *dlmmap(void *, size_t, int, int, int, off_t);
538 static int dlmunmap(void *, size_t);
539 #endif /* !(defined(_WIN32) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
541 #define mmap dlmmap
542 #define munmap dlmunmap
544 #include "dlmalloc.c"
546 #undef mmap
547 #undef munmap
549 #if !(defined(_WIN32) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
/* A mutex used to synchronize access to *exec* variables in this file.  */
static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER;

/* A file descriptor of a temporary file from which we'll map
   executable pages; -1 until first use.  */
static int execfd = -1;

/* The amount of space already allocated from the temporary file.  */
static size_t execsize = 0;
561 #ifdef HAVE_MEMFD_CREATE
/* Create an anonymous memfd-backed file.  No unlink is needed: the file
   never appears on any filesystem and vanishes when the last descriptor
   is closed.  Returns the descriptor, or -1 on failure.  */
static int
open_temp_exec_file_memfd (const char *name)
{
  return memfd_create (name, MFD_CLOEXEC);
}
570 #endif
/* Create and open a unique temporary file from template NAME, then unlink
   it immediately so it disappears once the descriptor is closed.  FLAGS is
   honored only when mkostemp is available.  Returns the fd or -1.  */
static int
open_temp_exec_file_name (char *name, int flags)
{
#ifdef HAVE_MKOSTEMP
  int fd = mkostemp (name, flags);
#else
  int fd = mkstemp (name);
#endif

  if (fd == -1)
    return -1;

  unlink (name);
  return fd;
}
/* Open a temporary file in the named directory.  Prefers the Linux
   O_TMPFILE path (an unnamed file, never visible in the directory);
   falls back to a mkstemp-style template when the kernel or filesystem
   rejects O_TMPFILE.  */
static int
open_temp_exec_file_dir (const char *dir)
{
  static const char suffix[] = "/ffiXXXXXX";
  int lendir, flags;
  char *tempname;
#ifdef O_TMPFILE
  int fd;
#endif

#ifdef O_CLOEXEC
  flags = O_CLOEXEC;
#else
  flags = 0;
#endif

#ifdef O_TMPFILE
  fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700);
  /* If the running system does not support the O_TMPFILE flag then retry without it. */
  if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) {
    return fd;
  } else {
    errno = 0;
  }
#endif

  lendir = (int) strlen (dir);
  tempname = __builtin_alloca (lendir + sizeof (suffix));

  /* NOTE(review): __builtin_alloca does not report failure by returning
     NULL, so this check appears to be defensive only.  */
  if (!tempname)
    return -1;

  /* Build "<dir>/ffiXXXXXX"; suffix's copy includes the terminating NUL.  */
  memcpy (tempname, dir, lendir);
  memcpy (tempname + lendir, suffix, sizeof (suffix));

  return open_temp_exec_file_name (tempname, flags);
}
/* Open a temporary file in the directory named by environment variable
   ENVVAR; fail with -1 when the variable is unset.  */
static int
open_temp_exec_file_env (const char *envvar)
{
  const char *dir = getenv (envvar);

  return dir ? open_temp_exec_file_dir (dir) : -1;
}
642 #ifdef HAVE_MNTENT
/* Open a temporary file in an executable and writable mount point
   listed in the mounts file.  Subsequent calls with the same mounts
   keep searching for mount points in the same file.  Providing NULL
   as the mounts file closes the file.  Not thread-safe on its own:
   callers serialize via open_temp_exec_file_mutex.  */
static int
open_temp_exec_file_mnt (const char *mounts)
{
  /* Persistent scan state so repeated calls continue where we left off.  */
  static const char *last_mounts;
  static FILE *last_mntent;

  if (mounts != last_mounts)
    {
      if (last_mntent)
	endmntent (last_mntent);

      last_mounts = mounts;

      if (mounts)
	last_mntent = setmntent (mounts, "r");
      else
	last_mntent = NULL;
    }

  if (!last_mntent)
    return -1;

  for (;;)
    {
      int fd;
      struct mntent mnt;
      char buf[MAXPATHLEN * 3];

      if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)) == NULL)
	return -1;

      /* Skip read-only or noexec mounts and directories we cannot write.  */
      if (hasmntopt (&mnt, "ro")
	  || hasmntopt (&mnt, "noexec")
	  || access (mnt.mnt_dir, W_OK))
	continue;

      fd = open_temp_exec_file_dir (mnt.mnt_dir);

      if (fd != -1)
	return fd;
    }
}
689 #endif /* HAVE_MNTENT */
/* Instructions to look for a location to hold a temporary file that
   can be mapped in for execution.  Tried in order; entries with
   repeat != 0 may yield multiple candidates across calls.  */
static struct
{
  int (*func)(const char *);
  const char *arg;
  int repeat;
} open_temp_exec_file_opts[] = {
#ifdef HAVE_MEMFD_CREATE
  { open_temp_exec_file_memfd, "libffi", 0 },
#endif
  { open_temp_exec_file_env, "LIBFFI_TMPDIR", 0 },
  { open_temp_exec_file_env, "TMPDIR", 0 },
  { open_temp_exec_file_dir, "/tmp", 0 },
  { open_temp_exec_file_dir, "/var/tmp", 0 },
  { open_temp_exec_file_dir, "/dev/shm", 0 },
  { open_temp_exec_file_env, "HOME", 0 },
#ifdef HAVE_MNTENT
  { open_temp_exec_file_mnt, "/etc/mtab", 1 },
  { open_temp_exec_file_mnt, "/proc/mounts", 1 },
#endif /* HAVE_MNTENT */
};

/* Current index into open_temp_exec_file_opts.  */
static int open_temp_exec_file_opts_idx = 0;
/* Reset a current multi-call func, then advances to the next entry.
   If we're at the last, go back to the first and return nonzero,
   otherwise return zero.  */
static int
open_temp_exec_file_opts_next (void)
{
  /* A repeatable strategy holds open state; calling with NULL closes it.  */
  if (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
    open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func (NULL);

  open_temp_exec_file_opts_idx++;
  if (open_temp_exec_file_opts_idx
      == (sizeof (open_temp_exec_file_opts)
	  / sizeof (*open_temp_exec_file_opts)))
    {
      /* Wrapped around: signal the caller that every option was tried.  */
      open_temp_exec_file_opts_idx = 0;
      return 1;
    }

  return 0;
}
/* Return a file descriptor of a temporary zero-sized file in a
   writable and executable filesystem.  Iterates the strategy table
   until one succeeds or all have been exhausted (then returns -1).  */
static int
open_temp_exec_file (void)
{
  int fd;

  do
    {
      fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func
	(open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg);

      /* Advance unless the current strategy is repeatable and just
	 produced a descriptor (it may have more candidates).  */
      if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat
	  || fd == -1)
	{
	  if (open_temp_exec_file_opts_next ())
	    break;  /* Every strategy failed.  */
	}
    }
  while (fd == -1);

  return fd;
}
/* We need to allocate space in a file that will be backing a writable
   mapping.  Several problems exist with the usual approaches:
   - fallocate() is Linux-only
   - posix_fallocate() is not available on all platforms
   - ftruncate() does not allocate space on filesystems with sparse files
   Failure to allocate the space will cause SIGBUS to be thrown when
   the mapping is subsequently written to.  */
static int
allocate_space (int fd, off_t offset, off_t len)
{
  static size_t page_size;

  /* Obtain system page size. */
  if (!page_size)
    page_size = sysconf(_SC_PAGESIZE);

  /* NOTE(review): OFFSET is not used here — writes go to the current file
     position, which callers arrange to be the end of the file.  */
  unsigned char buf[page_size];
  memset (buf, 0, page_size);

  while (len > 0)
    {
      /* len > 0 here, so the signed/unsigned comparison is safe.  */
      off_t to_write = (len < page_size) ? len : page_size;
      if (write (fd, buf, to_write) < to_write)
        return -1;
      len -= to_write;
    }

  return 0;
}
/* Map in a chunk of memory from the temporary exec file into separate
   locations in the virtual memory address space, one writable and one
   executable.  Returns the address of the writable portion, after
   storing an offset to the corresponding executable portion at the
   last word of the requested chunk.  Caller must hold
   open_temp_exec_file_mutex.  */
static void *
dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset)
{
  void *ptr;

  if (execfd == -1)
    {
      open_temp_exec_file_opts_idx = 0;
    retry_open:
      execfd = open_temp_exec_file ();
      if (execfd == -1)
	return MFAIL;
    }

  /* Always append to the file; the incoming OFFSET is ignored.  */
  offset = execsize;

  if (allocate_space (execfd, offset, length))
    return MFAIL;

  /* Both mappings must share the file's pages.  */
  flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS);
  flags |= MAP_SHARED;

  /* First map the executable view.  */
  ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC,
	      flags, execfd, offset);
  if (ptr == MFAIL)
    {
      /* A brand-new file that cannot be mapped executable: try the next
	 temp-file strategy instead.  */
      if (!offset)
	{
	  close (execfd);
	  goto retry_open;
	}
      /* Roll the file back to its previous size.  */
      if (ftruncate (execfd, offset) != 0)
      {
	/* Fixme : Error logs can be added here. Returning an error for
	 * ftruncte() will not add any advantage as it is being
	 * validating in the error case. */
      }

      return MFAIL;
    }
  else if (!offset
	   && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
    open_temp_exec_file_opts_next ();

  /* Now map the writable view of the same pages.  */
  start = mmap (start, length, prot, flags, execfd, offset);

  if (start == MFAIL)
    {
      munmap (ptr, length);
      if (ftruncate (execfd, offset) != 0)
      {
	/* Fixme : Error logs can be added here. Returning an error for
	 * ftruncte() will not add any advantage as it is being
	 * validating in the error case. */
      }
      return start;
    }

  /* Record the writable->executable delta at the end of the chunk.  */
  mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start;

  execsize += length;

  return start;
}
/* Map in a writable and executable chunk of memory if possible.
   Failing that, fall back to dlmmap_locked.  Called by dlmalloc
   (via the mmap macro) with a fixed argument pattern, asserted below.  */
static void *
dlmmap (void *start, size_t length, int prot,
	int flags, int fd, off_t offset)
{
  void *ptr;

  assert (start == NULL && length % malloc_getpagesize == 0
	  && prot == (PROT_READ | PROT_WRITE)
	  && flags == (MAP_PRIVATE | MAP_ANONYMOUS)
	  && fd == -1 && offset == 0);

  /* Static trampolines: closure memory never needs to be executable.  */
  if (execfd == -1 && ffi_tramp_is_supported ())
    {
      ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset);
      return ptr;
    }

  /* PaX emutramp: the kernel emulates trampolines, no PROT_EXEC needed.  */
  if (execfd == -1 && is_emutramp_enabled ())
    {
      ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset);
      return ptr;
    }

  if (execfd == -1 && !is_selinux_enabled ())
    {
      /* Try a single RWX anonymous mapping.  */
      ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset);

      if (ptr != MFAIL || (errno != EPERM && errno != EACCES))
	/* Cool, no need to mess with separate segments.  */
	return ptr;

      /* If MREMAP_DUP is ever introduced and implemented, try mmap
	 with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with
	 MREMAP_DUP and prot at this point.  */
    }

  /* Take the lock while the exec temp file may still be chosen/created.  */
  if (execsize == 0 || execfd == -1)
    {
      pthread_mutex_lock (&open_temp_exec_file_mutex);
      ptr = dlmmap_locked (start, length, prot, flags, offset);
      pthread_mutex_unlock (&open_temp_exec_file_mutex);

      return ptr;
    }

  return dlmmap_locked (start, length, prot, flags, offset);
}
/* Release memory at the given address, as well as the corresponding
   executable page if it's separate.  */
static int
dlmunmap (void *start, size_t length)
{
  /* We don't bother decreasing execsize or truncating the file, since
     we can't quite tell whether we're unmapping the end of the file.
     We don't expect frequent deallocation anyway.  If we did, we
     could locate pages in the file by writing to the pages being
     deallocated and checking that the file contents change.
     Yuck.  */
  msegmentptr seg = segment_holding (gm, start);
  void *code;

  /* If a distinct executable view exists, unmap it first.  */
  if (seg && (code = add_segment_exec_offset (start, seg)) != start)
    {
      int ret = munmap (code, length);
      if (ret)
	return ret;
    }

  return munmap (start, length);
}
#if FFI_CLOSURE_FREE_CODE
/* Return segment holding given code address, by searching each segment's
   executable-view address range; NULL if no segment contains ADDR. */
static msegmentptr
segment_holding_code (mstate m, char* addr)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= add_segment_exec_offset (sp->base, sp)
	&& addr < add_segment_exec_offset (sp->base, sp) + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
#endif
952 #endif /* !(defined(_WIN32) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
/* Allocate a chunk of memory with the given size.  Returns a pointer
   to the writable address, and sets *CODE to the executable
   corresponding virtual address.  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  void *ptr, *ftramp;

  if (!code)
    return NULL;

  ptr = FFI_CLOSURE_PTR (dlmalloc (size));

  if (ptr)
    {
      msegmentptr seg = segment_holding (gm, ptr);

      /* Default: executable view at the segment's recorded offset.  */
      *code = add_segment_exec_offset (ptr, seg);
      if (!ffi_tramp_is_supported ())
	return ptr;

      /* Static trampolines: hand out a trampoline address instead.  */
      ftramp = ffi_tramp_alloc (0);
      if (ftramp == NULL)
	{
	  dlfree (FFI_RESTORE_PTR (ptr));
	  return NULL;
	}
      *code = ffi_tramp_get_addr (ftramp);
      ((ffi_closure *) ptr)->ftramp = ftramp;
    }

  return ptr;
}
/* Translate a writable closure address into its executable counterpart.  */
void *
ffi_data_to_code_pointer (void *data)
{
  msegmentptr seg = segment_holding (gm, data);
  /* We expect closures to be allocated with ffi_closure_alloc(), in
     which case seg will be non-NULL.  However, some users take on the
     burden of managing this memory themselves, in which case this
     we'll just return data. */
  if (seg)
    {
      if (!ffi_tramp_is_supported ())
	return add_segment_exec_offset (data, seg);
      return ffi_tramp_get_addr (((ffi_closure *) data)->ftramp);
    }
  else
    return data;
}
/* Release a chunk of memory allocated with ffi_closure_alloc.  If
   FFI_CLOSURE_FREE_CODE is nonzero, the given address can be the
   writable or the executable address given.  Otherwise, only the
   writable address can be provided here.  */
void
ffi_closure_free (void *ptr)
{
#if FFI_CLOSURE_FREE_CODE
  msegmentptr seg = segment_holding_code (gm, ptr);

  /* Convert an executable address back to its writable twin.  */
  if (seg)
    ptr = sub_segment_exec_offset (ptr, seg);
#endif
  if (ffi_tramp_is_supported ())
    ffi_tramp_free (((ffi_closure *) ptr)->ftramp);

  dlfree (FFI_RESTORE_PTR (ptr));
}
/* Report whether PTR is a closure we allocated (lives in a dlmalloc
   segment) and static trampolines are in use for it.  */
int
ffi_tramp_is_present (void *ptr)
{
  msegmentptr seg = segment_holding (gm, ptr);
  return seg != NULL && ffi_tramp_is_supported();
}
1032 # else /* ! FFI_MMAP_EXEC_WRIT */
1034 /* On many systems, memory returned by malloc is writable and
1035 executable, so just use it. */
1037 #include <stdlib.h>
/* Allocate closure memory with plain malloc; on these targets malloc'd
   memory is executable, so the writable and code addresses coincide.  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  void *ptr;

  if (code == NULL)
    return NULL;

  ptr = FFI_CLOSURE_PTR (malloc (size));
  *code = ptr;
  return ptr;
}
/* Free closure memory obtained from the malloc-based ffi_closure_alloc.  */
void
ffi_closure_free (void *ptr)
{
  void *raw = FFI_RESTORE_PTR (ptr);
  free (raw);
}
/* Writable and executable addresses are identical on these targets,
   so the data pointer is already the code pointer.  */
void *
ffi_data_to_code_pointer (void *data)
{
  void *code = data;
  return code;
}
/* Static trampolines are not used in the malloc-based fallback;
   always report absence.  */
int
ffi_tramp_is_present (__attribute__((unused)) void *ptr)
{
  return 0;
}
1066 # endif /* ! FFI_MMAP_EXEC_WRIT */
1067 #endif /* FFI_CLOSURES */
1069 #endif /* NetBSD with PROT_MPROTECT */