exp2l: Work around a NetBSD 10.0/i386 bug.
[gnulib.git] / lib / stackvma.c
blobe93f939556a6154c268a962819de8d5ab9ddfb2a
1 /* Determine the virtual memory area of a given address.
2 Copyright (C) 2002-2024 Free Software Foundation, Inc.
3 Copyright (C) 2003-2006 Paolo Bonzini <bonzini@gnu.org>
5 This program is free software: you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <https://www.gnu.org/licenses/>. */
18 /* Written by Bruno Haible and Paolo Bonzini. */
20 #include <config.h>
22 /* On Solaris in 32-bit mode, when gnulib module 'largefile' is in use,
23 prevent a compilation error
24 "Cannot use procfs in the large file compilation environment"
25 while also preventing <sys/types.h> from not defining off_t.
26 On Android, when targeting Android 4.4 or older with a GCC toolchain,
27 prevent a compilation error
28 "error: call to 'mmap' declared with attribute error: mmap is not
29 available with _FILE_OFFSET_BITS=64 when using GCC until android-21.
30 Either raise your minSdkVersion, disable _FILE_OFFSET_BITS=64, or
31 switch to Clang."
32 The files that we access in this compilation unit are less than 2 GB
33 large. */
34 #if defined __sun && !defined _LP64 && _FILE_OFFSET_BITS == 64
35 # undef _FILE_OFFSET_BITS
36 # define _FILE_OFFSET_BITS 32
37 #endif
38 #ifdef __ANDROID__
39 # undef _FILE_OFFSET_BITS
40 #endif
42 /* Specification. */
43 #include "stackvma.h"
45 #include <stdio.h>
46 #include <stdlib.h>
48 /* =========================== stackvma-simple.c =========================== */
50 #if defined __linux__ || defined __ANDROID__ \
51 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
52 || defined __NetBSD__ \
53 || (defined __APPLE__ && defined __MACH__) \
54 || defined _AIX || defined __sgi || defined __sun \
55 || defined __CYGWIN__ || defined __HAIKU__
57 /* This file contains the proximity test function for the simple cases, where
58 the OS has an API for enumerating the mapped ranges of virtual memory. */
# if STACK_DIRECTION < 0

/* Info about the gap between this VMA and the previous one.
   addr must be < vma->start.  */
static int
simple_is_near_this (uintptr_t addr, struct vma_struct *vma)
{
  /* "Near" means: addr lies in the half of the gap that is adjacent to
     this VMA.  */
  uintptr_t dist_to_vma = vma->start - addr;
  uintptr_t gap_size = vma->start - vma->prev_end;
  return dist_to_vma <= gap_size / 2;
}

# endif
# if STACK_DIRECTION > 0

/* Info about the gap between this VMA and the next one.
   addr must be > vma->end - 1.  */
static int
simple_is_near_this (uintptr_t addr, struct vma_struct *vma)
{
  /* "Near" means: addr lies in the half of the gap that is adjacent to
     this VMA.  */
  uintptr_t dist_to_vma = addr - vma->end;
  uintptr_t gap_size = vma->next_start - vma->end;
  return dist_to_vma < gap_size / 2;
}

# endif
83 #endif
85 /* =========================== stackvma-rofile.c =========================== */
86 /* Buffered read-only streams. */
88 #if defined __linux__ || defined __ANDROID__ \
89 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
90 || defined __NetBSD__ \
91 || defined __CYGWIN__
93 # include <errno.h> /* errno, EINTR */
94 # include <fcntl.h> /* open, O_RDONLY */
95 # include <stddef.h> /* size_t */
96 # include <unistd.h> /* getpagesize, lseek, read, close */
97 # include <sys/types.h>
98 # include <sys/mman.h> /* mmap, munmap */
100 # if defined __linux__ || defined __ANDROID__
101 # include <limits.h> /* PATH_MAX */
102 # endif
104 /* Buffered read-only streams.
105 We cannot use <stdio.h> here, because fopen() calls malloc(), and a malloc()
106 call may have been interrupted.
107 Also, we cannot use multiple read() calls, because if the buffer size is
108 smaller than the file's contents:
109 - On NetBSD, the second read() call would return 0, thus making the file
110 appear truncated.
111 - On DragonFly BSD, the first read() call would fail with errno = EFBIG.
112 - On all platforms, if some other thread is doing memory allocations or
113 deallocations between two read() calls, there is a high risk that the
114 result of these two read() calls don't fit together, and as a
115 consequence we will parse garbage and either omit some VMAs or return
116 VMAs with nonsensical addresses.
117 So use mmap(), and ignore the resulting VMA.
118 The stack-allocated buffer cannot be too large, because this can be called
119 when we are in the context of an alternate stack of just SIGSTKSZ bytes. */
121 # if defined __linux__ || defined __ANDROID__
122 /* On Linux, if the file does not entirely fit into the buffer, the read()
123 function stops before the line that would come out truncated. The
124 maximum size of such a line is 73 + PATH_MAX bytes. To be sure that we
125 have read everything, we must verify that at least that many bytes are
126 left when read() returned. */
127 # define MIN_LEFTOVER (73 + PATH_MAX)
128 # else
129 # define MIN_LEFTOVER 1
130 # endif
132 # if MIN_LEFTOVER < 1024
133 # define STACK_ALLOCATED_BUFFER_SIZE 1024
134 # else
135 /* There is no point in using a stack-allocated buffer if it is too small
136 anyway. */
137 # define STACK_ALLOCATED_BUFFER_SIZE 1
138 # endif
140 struct rofile
142 size_t position;
143 size_t filled;
144 int eof_seen;
145 /* These fields deal with allocation of the buffer. */
146 char *buffer;
147 char *auxmap;
148 size_t auxmap_length;
149 uintptr_t auxmap_start;
150 uintptr_t auxmap_end;
151 char stack_allocated_buffer[STACK_ALLOCATED_BUFFER_SIZE];
154 /* Open a read-only file stream. */
155 static int
156 rof_open (struct rofile *rof, const char *filename)
158 int fd;
159 uintptr_t pagesize;
160 size_t size;
162 fd = open (filename, O_RDONLY);
163 if (fd < 0)
164 return -1;
165 rof->position = 0;
166 rof->eof_seen = 0;
167 /* Try the static buffer first. */
168 pagesize = 0;
169 rof->buffer = rof->stack_allocated_buffer;
170 size = sizeof (rof->stack_allocated_buffer);
171 rof->auxmap = NULL;
172 rof->auxmap_start = 0;
173 rof->auxmap_end = 0;
174 for (;;)
176 /* Attempt to read the contents in a single system call. */
177 if (size > MIN_LEFTOVER)
179 int n = read (fd, rof->buffer, size);
180 if (n < 0 && errno == EINTR)
181 goto retry;
182 # if defined __DragonFly__
183 if (!(n < 0 && errno == EFBIG))
184 # endif
186 if (n <= 0)
187 /* Empty file. */
188 goto fail1;
189 if (n + MIN_LEFTOVER <= size)
191 /* The buffer was sufficiently large. */
192 rof->filled = n;
193 # if defined __linux__ || defined __ANDROID__
194 /* On Linux, the read() call may stop even if the buffer was
195 large enough. We need the equivalent of full_read(). */
196 for (;;)
198 n = read (fd, rof->buffer + rof->filled, size - rof->filled);
199 if (n < 0 && errno == EINTR)
200 goto retry;
201 if (n < 0)
202 /* Some error. */
203 goto fail1;
204 if (n + MIN_LEFTOVER > size - rof->filled)
205 /* Allocate a larger buffer. */
206 break;
207 if (n == 0)
209 /* Reached the end of file. */
210 close (fd);
211 return 0;
213 rof->filled += n;
215 # else
216 close (fd);
217 return 0;
218 # endif
222 /* Allocate a larger buffer. */
223 if (pagesize == 0)
225 pagesize = getpagesize ();
226 size = pagesize;
227 while (size <= MIN_LEFTOVER)
228 size = 2 * size;
230 else
232 size = 2 * size;
233 if (size == 0)
234 /* Wraparound. */
235 goto fail1;
236 if (rof->auxmap != NULL)
237 munmap (rof->auxmap, rof->auxmap_length);
239 rof->auxmap = (void *) mmap ((void *) 0, size, PROT_READ | PROT_WRITE,
240 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
241 if (rof->auxmap == (void *) -1)
243 close (fd);
244 return -1;
246 rof->auxmap_length = size;
247 rof->auxmap_start = (uintptr_t) rof->auxmap;
248 rof->auxmap_end = rof->auxmap_start + size;
249 rof->buffer = (char *) rof->auxmap;
250 retry:
251 /* Restart. */
252 if (lseek (fd, 0, SEEK_SET) < 0)
254 close (fd);
255 fd = open (filename, O_RDONLY);
256 if (fd < 0)
257 goto fail2;
260 fail1:
261 close (fd);
262 fail2:
263 if (rof->auxmap != NULL)
264 munmap (rof->auxmap, rof->auxmap_length);
265 return -1;
268 /* Return the next byte from a read-only file stream without consuming it,
269 or -1 at EOF. */
270 static int
271 rof_peekchar (struct rofile *rof)
273 if (rof->position == rof->filled)
275 rof->eof_seen = 1;
276 return -1;
278 return (unsigned char) rof->buffer[rof->position];
281 /* Return the next byte from a read-only file stream, or -1 at EOF. */
282 static int
283 rof_getchar (struct rofile *rof)
285 int c = rof_peekchar (rof);
286 if (c >= 0)
287 rof->position++;
288 return c;
/* Parse an unsigned hexadecimal number from a read-only file stream.
   Upon success, store it in *VALUEP and return 0.
   Return -1 if no hexadecimal digit is present at the current position.  */
static int
rof_scanf_lx (struct rofile *rof, uintptr_t *valuep)
{
  uintptr_t value = 0;
  unsigned int numdigits = 0;

  for (;;)
    {
      int c = rof_peekchar (rof);
      int digit;

      if (c >= '0' && c <= '9')
        digit = c - '0';
      else if (c >= 'A' && c <= 'F')
        digit = c - 'A' + 10;
      else if (c >= 'a' && c <= 'f')
        digit = c - 'a' + 10;
      else
        break;
      value = (value << 4) + digit;
      rof_getchar (rof);
      numdigits++;
    }
  if (numdigits == 0)
    return -1;
  *valuep = value;
  return 0;
}
317 /* Close a read-only file stream. */
318 static void
319 rof_close (struct rofile *rof)
321 if (rof->auxmap != NULL)
322 munmap (rof->auxmap, rof->auxmap_length);
325 #endif
327 /* ========================== stackvma-vma-iter.c ========================== */
328 /* Iterate through the virtual memory areas of the current process,
329 by reading from the /proc file system. */
331 /* This code is a simplified copy (no handling of protection flags) of the
332 code in gnulib's lib/vma-iter.c. */
334 #if defined __linux__ || defined __ANDROID__ \
335 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
336 || defined __NetBSD__ \
337 || defined __CYGWIN__
339 /* Forward declarations. */
340 struct callback_locals;
341 static int callback (struct callback_locals *locals, uintptr_t start, uintptr_t end);
343 # if defined __linux__ || defined __ANDROID__ || (defined __FreeBSD_kernel__ && !defined __FreeBSD__) || defined __CYGWIN__
344 /* GNU/kFreeBSD mounts /proc as linprocfs, which looks like a Linux /proc
345 file system. */
347 static int
348 vma_iterate_proc (struct callback_locals *locals)
350 struct rofile rof;
352 /* Open the current process' maps file. It describes one VMA per line. */
353 if (rof_open (&rof, "/proc/self/maps") >= 0)
355 uintptr_t auxmap_start = rof.auxmap_start;
356 uintptr_t auxmap_end = rof.auxmap_end;
358 for (;;)
360 uintptr_t start, end;
361 int c;
363 /* Parse one line. First start and end. */
364 if (!(rof_scanf_lx (&rof, &start) >= 0
365 && rof_getchar (&rof) == '-'
366 && rof_scanf_lx (&rof, &end) >= 0))
367 break;
368 while (c = rof_getchar (&rof), c != -1 && c != '\n')
371 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
373 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
374 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
375 if (start < auxmap_start)
376 if (callback (locals, start, auxmap_start))
377 break;
378 if (auxmap_end - 1 < end - 1)
379 if (callback (locals, auxmap_end, end))
380 break;
382 else
384 if (callback (locals, start, end))
385 break;
388 rof_close (&rof);
389 return 0;
392 return -1;
395 # elif defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__
397 static int
398 vma_iterate_proc (struct callback_locals *locals)
400 struct rofile rof;
402 /* Open the current process' maps file. It describes one VMA per line.
403 On FreeBSD:
404 Cf. <https://www.freebsd.org/cgi/cvsweb.cgi/src/sys/fs/procfs/procfs_map.c?annotate=HEAD>
405 On NetBSD, there are two such files:
406 - /proc/curproc/map in near-FreeBSD syntax,
407 - /proc/curproc/maps in Linux syntax.
408 Cf. <http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/miscfs/procfs/procfs_map.c?rev=HEAD> */
409 if (rof_open (&rof, "/proc/curproc/map") >= 0)
411 uintptr_t auxmap_start = rof.auxmap_start;
412 uintptr_t auxmap_end = rof.auxmap_end;
414 for (;;)
416 uintptr_t start, end;
417 int c;
419 /* Parse one line. First start. */
420 if (!(rof_getchar (&rof) == '0'
421 && rof_getchar (&rof) == 'x'
422 && rof_scanf_lx (&rof, &start) >= 0))
423 break;
424 while (c = rof_peekchar (&rof), c == ' ' || c == '\t')
425 rof_getchar (&rof);
426 /* Then end. */
427 if (!(rof_getchar (&rof) == '0'
428 && rof_getchar (&rof) == 'x'
429 && rof_scanf_lx (&rof, &end) >= 0))
430 break;
431 while (c = rof_getchar (&rof), c != -1 && c != '\n')
434 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
436 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
437 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
438 if (start < auxmap_start)
439 if (callback (locals, start, auxmap_start))
440 break;
441 if (auxmap_end - 1 < end - 1)
442 if (callback (locals, auxmap_end, end))
443 break;
445 else
447 if (callback (locals, start, end))
448 break;
451 rof_close (&rof);
452 return 0;
455 return -1;
458 # endif
460 # if (defined __FreeBSD_kernel__ || defined __FreeBSD__) && defined KERN_PROC_VMMAP /* FreeBSD >= 7.1 */
462 # include <sys/user.h> /* struct kinfo_vmentry */
463 # include <sys/sysctl.h> /* sysctl */
465 static int
466 vma_iterate_bsd (struct callback_locals *locals)
468 /* Documentation: https://www.freebsd.org/cgi/man.cgi?sysctl(3) */
469 int info_path[] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid () };
470 size_t len;
471 size_t pagesize;
472 size_t memneed;
473 void *auxmap;
474 unsigned long auxmap_start;
475 unsigned long auxmap_end;
476 char *mem;
477 char *p;
478 char *p_end;
480 len = 0;
481 if (sysctl (info_path, 4, NULL, &len, NULL, 0) < 0)
482 return -1;
483 /* Allow for small variations over time. In a multithreaded program
484 new VMAs can be allocated at any moment. */
485 len = 2 * len + 200;
486 /* Allocate memneed bytes of memory.
487 We cannot use alloca here, because not much stack space is guaranteed.
488 We also cannot use malloc here, because a malloc() call may call mmap()
489 and thus pre-allocate available memory.
490 So use mmap(), and ignore the resulting VMA. */
491 pagesize = getpagesize ();
492 memneed = len;
493 memneed = ((memneed - 1) / pagesize + 1) * pagesize;
494 auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
495 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
496 if (auxmap == (void *) -1)
497 return -1;
498 auxmap_start = (unsigned long) auxmap;
499 auxmap_end = auxmap_start + memneed;
500 mem = (char *) auxmap;
501 if (sysctl (info_path, 4, mem, &len, NULL, 0) < 0)
503 munmap (auxmap, memneed);
504 return -1;
506 p = mem;
507 p_end = mem + len;
508 while (p < p_end)
510 struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
511 unsigned long start = kve->kve_start;
512 unsigned long end = kve->kve_end;
513 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
515 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
516 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
517 if (start < auxmap_start)
518 if (callback (locals, start, auxmap_start))
519 break;
520 if (auxmap_end - 1 < end - 1)
521 if (callback (locals, auxmap_end, end))
522 break;
524 else
526 if (callback (locals, start, end))
527 break;
529 p += kve->kve_structsize;
531 munmap (auxmap, memneed);
532 return 0;
535 # else
537 # define vma_iterate_bsd(locals) (-1)
539 # endif
/* Iterate over the virtual memory areas of the current process.
   If such iteration is supported, the callback is called once for every
   virtual memory area, in ascending order, with the following arguments:
     - LOCALS is the same argument as passed to vma_iterate.
     - START is the address of the first byte in the area, page-aligned.
     - END is the address of the last byte in the area plus 1, page-aligned.
       Note that it may be 0 for the last area in the address space.
   If the callback returns 0, the iteration continues.  If it returns 1,
   the iteration terminates prematurely.
   This function may open file descriptors, but does not call malloc().
   Return 0 if all went well, or -1 in case of error.  */
static int
vma_iterate (struct callback_locals *locals)
{
# if defined __FreeBSD__
  /* On FreeBSD with procfs (but not GNU/kFreeBSD, which uses linprocfs), the
     function vma_iterate_proc does not return the virtual memory areas that
     were created by anonymous mmap.  See
     <https://svnweb.freebsd.org/base/head/sys/fs/procfs/procfs_map.c?view=markup>
     So use vma_iterate_proc only as a fallback.  */
  int retval = vma_iterate_bsd (locals);
  if (retval == 0)
    return 0;
  return vma_iterate_proc (locals);
# else
  /* On the other platforms, try the /proc approach first, and the sysctl()
     as a fallback.  */
  int retval = vma_iterate_proc (locals);
  if (retval == 0)
    return 0;
  return vma_iterate_bsd (locals);
# endif
}
578 #endif
580 /* =========================== stackvma-mincore.c =========================== */
582 /* mincore() is a system call that allows to inquire the status of a
583 range of pages of virtual memory. In particular, it allows to inquire
584 whether a page is mapped at all (except on Mac OS X, where mincore
585 returns 0 even for unmapped addresses).
586 As of 2006, mincore() is supported by: possible bits:
587 - Linux, since Linux 2.4 and glibc 2.2, 1
588 - Solaris, since Solaris 9, 1
589 - MacOS X, since MacOS X 10.3 (at least), 1
590 - FreeBSD, since FreeBSD 6.0, MINCORE_{INCORE,REFERENCED,MODIFIED}
591 - NetBSD, since NetBSD 3.0 (at least), 1
592 - OpenBSD, since OpenBSD 2.6 (at least), 1
593 - AIX, since AIX 5.3, 1
594 As of 2019, also on
595 - Hurd.
596 However, while the API allows to easily determine the bounds of mapped
597 virtual memory, it does not make it easy to find the bounds of _unmapped_
598 virtual memory ranges. We try to work around this, but it may still be
599 slow. */
601 #if defined __linux__ || defined __ANDROID__ \
602 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
603 || defined __NetBSD__ /* || defined __OpenBSD__ */ \
604 /* || (defined __APPLE__ && defined __MACH__) */ \
605 || defined _AIX || defined __sun
607 # include <unistd.h> /* getpagesize, mincore */
608 # include <sys/types.h>
609 # include <sys/mman.h> /* mincore */
611 /* The AIX declaration of mincore() uses 'caddr_t', whereas the other platforms
612 use 'void *'. */
613 # ifdef _AIX
614 typedef caddr_t MINCORE_ADDR_T;
615 # else
616 typedef void* MINCORE_ADDR_T;
617 # endif
619 /* The glibc and musl declaration of mincore() uses 'unsigned char *', whereas
620 the BSD declaration uses 'char *'. */
621 # if __GLIBC__ >= 2 || defined __linux__ || defined __ANDROID__
622 typedef unsigned char pageinfo_t;
623 # else
624 typedef char pageinfo_t;
625 # endif
627 /* Cache for getpagesize(). */
628 static uintptr_t pagesize;
630 /* Initialize pagesize. */
631 static void
632 init_pagesize (void)
634 pagesize = getpagesize ();
637 /* Test whether the page starting at ADDR is among the address range.
638 ADDR must be a multiple of pagesize. */
639 static int
640 is_mapped (uintptr_t addr)
642 pageinfo_t vec[1];
643 return mincore ((MINCORE_ADDR_T) addr, pagesize, vec) >= 0;
646 /* Assuming that the page starting at ADDR is among the address range,
647 return the start of its virtual memory range.
648 ADDR must be a multiple of pagesize. */
649 static uintptr_t
650 mapped_range_start (uintptr_t addr)
652 /* Use a moderately sized VEC here, small enough that it fits on the stack
653 (without requiring malloc). */
654 pageinfo_t vec[1024];
655 uintptr_t stepsize = sizeof (vec);
657 for (;;)
659 uintptr_t max_remaining;
661 if (addr == 0)
662 return addr;
664 max_remaining = addr / pagesize;
665 if (stepsize > max_remaining)
666 stepsize = max_remaining;
667 if (mincore ((MINCORE_ADDR_T) (addr - stepsize * pagesize),
668 stepsize * pagesize, vec) < 0)
669 /* Time to search in smaller steps. */
670 break;
671 /* The entire range exists. Continue searching in large steps. */
672 addr -= stepsize * pagesize;
674 for (;;)
676 uintptr_t halfstepsize1;
677 uintptr_t halfstepsize2;
679 if (stepsize == 1)
680 return addr;
682 /* Here we know that less than stepsize pages exist starting at addr. */
683 halfstepsize1 = (stepsize + 1) / 2;
684 halfstepsize2 = stepsize / 2;
685 /* halfstepsize1 + halfstepsize2 = stepsize. */
687 if (mincore ((MINCORE_ADDR_T) (addr - halfstepsize1 * pagesize),
688 halfstepsize1 * pagesize, vec) < 0)
689 stepsize = halfstepsize1;
690 else
692 addr -= halfstepsize1 * pagesize;
693 stepsize = halfstepsize2;
698 /* Assuming that the page starting at ADDR is among the address range,
699 return the end of its virtual memory range + 1.
700 ADDR must be a multiple of pagesize. */
701 static uintptr_t
702 mapped_range_end (uintptr_t addr)
704 /* Use a moderately sized VEC here, small enough that it fits on the stack
705 (without requiring malloc). */
706 pageinfo_t vec[1024];
707 uintptr_t stepsize = sizeof (vec);
709 addr += pagesize;
710 for (;;)
712 uintptr_t max_remaining;
714 if (addr == 0) /* wrapped around? */
715 return addr;
717 max_remaining = (- addr) / pagesize;
718 if (stepsize > max_remaining)
719 stepsize = max_remaining;
720 if (mincore ((MINCORE_ADDR_T) addr, stepsize * pagesize, vec) < 0)
721 /* Time to search in smaller steps. */
722 break;
723 /* The entire range exists. Continue searching in large steps. */
724 addr += stepsize * pagesize;
726 for (;;)
728 uintptr_t halfstepsize1;
729 uintptr_t halfstepsize2;
731 if (stepsize == 1)
732 return addr;
734 /* Here we know that less than stepsize pages exist starting at addr. */
735 halfstepsize1 = (stepsize + 1) / 2;
736 halfstepsize2 = stepsize / 2;
737 /* halfstepsize1 + halfstepsize2 = stepsize. */
739 if (mincore ((MINCORE_ADDR_T) addr, halfstepsize1 * pagesize, vec) < 0)
740 stepsize = halfstepsize1;
741 else
743 addr += halfstepsize1 * pagesize;
744 stepsize = halfstepsize2;
749 /* Determine whether an address range [ADDR1..ADDR2] is completely unmapped.
750 ADDR1 must be <= ADDR2. */
751 static int
752 is_unmapped (uintptr_t addr1, uintptr_t addr2)
754 uintptr_t count;
755 uintptr_t stepsize;
757 /* Round addr1 down. */
758 addr1 = (addr1 / pagesize) * pagesize;
759 /* Round addr2 up and turn it into an exclusive bound. */
760 addr2 = ((addr2 / pagesize) + 1) * pagesize;
762 /* This is slow: mincore() does not provide a way to determine the bounds
763 of the gaps directly. So we have to use mincore() on individual pages
764 over and over again. Only after we've verified that all pages are
765 unmapped, we know that the range is completely unmapped.
766 If we were to traverse the pages from bottom to top or from top to bottom,
767 it would be slow even in the average case. To speed up the search, we
768 exploit the fact that mapped memory ranges are larger than one page on
769 average, therefore we have good chances of hitting a mapped area if we
770 traverse only every second, or only fourth page, etc. This doesn't
771 decrease the worst-case runtime, only the average runtime. */
772 count = (addr2 - addr1) / pagesize;
773 /* We have to test is_mapped (addr1 + i * pagesize) for 0 <= i < count. */
774 for (stepsize = 1; stepsize < count; )
775 stepsize = 2 * stepsize;
776 for (;;)
778 uintptr_t addr_stepsize;
779 uintptr_t i;
780 uintptr_t addr;
782 stepsize = stepsize / 2;
783 if (stepsize == 0)
784 break;
785 addr_stepsize = stepsize * pagesize;
786 for (i = stepsize, addr = addr1 + addr_stepsize;
787 i < count;
788 i += 2 * stepsize, addr += 2 * addr_stepsize)
789 /* Here addr = addr1 + i * pagesize. */
790 if (is_mapped (addr))
791 return 0;
793 return 1;
# if STACK_DIRECTION < 0

/* Info about the gap between this VMA and the previous one.
   addr must be < vma->start.  */
static int
mincore_is_near_this (uintptr_t addr, struct vma_struct *vma)
{
  /* vma->start - addr <= (vma->start - vma->prev_end) / 2
     is mathematically equivalent to
     vma->prev_end <= 2 * addr - vma->start
     <==> is_unmapped (2 * addr - vma->start, vma->start - 1).
     But be careful about overflow: if 2 * addr - vma->start is negative,
     we consider a tiny "guard page" mapping [0, 0] to be present around
     NULL; it intersects the range (2 * addr - vma->start, vma->start - 1),
     therefore return false.  */
  uintptr_t testaddr = addr - (vma->start - addr);
  if (testaddr > addr) /* overflow? */
    return 0;
  /* Here testaddr <= addr < vma->start.  */
  return is_unmapped (testaddr, vma->start - 1);
}

# endif
# if STACK_DIRECTION > 0

/* Info about the gap between this VMA and the next one.
   addr must be > vma->end - 1.  */
static int
mincore_is_near_this (uintptr_t addr, struct vma_struct *vma)
{
  /* addr - vma->end < (vma->next_start - vma->end) / 2
     is mathematically equivalent to
     vma->next_start > 2 * addr - vma->end
     <==> is_unmapped (vma->end, 2 * addr - vma->end).
     But be careful about overflow: if 2 * addr - vma->end is > ~0UL,
     we consider a tiny "guard page" mapping [0, 0] to be present around
     NULL; it intersects the range (vma->end, 2 * addr - vma->end),
     therefore return false.  */
  uintptr_t testaddr = addr + (addr - vma->end);
  if (testaddr < addr) /* overflow? */
    return 0;
  /* Here vma->end - 1 < addr <= testaddr.  */
  return is_unmapped (vma->end, testaddr);
}

# endif
843 static int
844 mincore_get_vma (uintptr_t address, struct vma_struct *vma)
846 if (pagesize == 0)
847 init_pagesize ();
848 address = (address / pagesize) * pagesize;
849 vma->start = mapped_range_start (address);
850 vma->end = mapped_range_end (address);
851 vma->is_near_this = mincore_is_near_this;
852 return 0;
855 #endif
857 /* ========================================================================== */
859 /* ---------------------------- stackvma-linux.c ---------------------------- */
861 #if defined __linux__ || defined __ANDROID__ /* Linux */
863 struct callback_locals
865 uintptr_t address;
866 struct vma_struct *vma;
867 # if STACK_DIRECTION < 0
868 uintptr_t prev;
869 # else
870 int stop_at_next_vma;
871 # endif
872 int retval;
875 static int
876 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
878 # if STACK_DIRECTION < 0
879 if (locals->address >= start && locals->address <= end - 1)
881 locals->vma->start = start;
882 locals->vma->end = end;
883 locals->vma->prev_end = locals->prev;
884 locals->retval = 0;
885 return 1;
887 locals->prev = end;
888 # else
889 if (locals->stop_at_next_vma)
891 locals->vma->next_start = start;
892 locals->stop_at_next_vma = 0;
893 return 1;
895 if (locals->address >= start && locals->address <= end - 1)
897 locals->vma->start = start;
898 locals->vma->end = end;
899 locals->retval = 0;
900 locals->stop_at_next_vma = 1;
901 return 0;
903 # endif
904 return 0;
908 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
910 struct callback_locals locals;
911 locals.address = address;
912 locals.vma = vma;
913 # if STACK_DIRECTION < 0
914 locals.prev = 0;
915 # else
916 locals.stop_at_next_vma = 0;
917 # endif
918 locals.retval = -1;
920 vma_iterate (&locals);
921 if (locals.retval == 0)
923 # if !(STACK_DIRECTION < 0)
924 if (locals.stop_at_next_vma)
925 vma->next_start = 0;
926 # endif
927 vma->is_near_this = simple_is_near_this;
928 return 0;
931 return mincore_get_vma (address, vma);
934 /* --------------------------- stackvma-freebsd.c --------------------------- */
936 #elif defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ /* GNU/kFreeBSD, FreeBSD */
938 struct callback_locals
940 uintptr_t address;
941 struct vma_struct *vma;
942 /* The stack appears as multiple adjacent segments, therefore we
943 merge adjacent segments. */
944 uintptr_t curr_start, curr_end;
945 # if STACK_DIRECTION < 0
946 uintptr_t prev_end;
947 # else
948 int stop_at_next_vma;
949 # endif
950 int retval;
953 static int
954 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
956 if (start == locals->curr_end)
958 /* Merge adjacent segments. */
959 locals->curr_end = end;
960 return 0;
962 # if STACK_DIRECTION < 0
963 if (locals->curr_start < locals->curr_end
964 && locals->address >= locals->curr_start
965 && locals->address <= locals->curr_end - 1)
967 locals->vma->start = locals->curr_start;
968 locals->vma->end = locals->curr_end;
969 locals->vma->prev_end = locals->prev_end;
970 locals->retval = 0;
971 return 1;
973 locals->prev_end = locals->curr_end;
974 # else
975 if (locals->stop_at_next_vma)
977 locals->vma->next_start = locals->curr_start;
978 locals->stop_at_next_vma = 0;
979 return 1;
981 if (locals->curr_start < locals->curr_end
982 && locals->address >= locals->curr_start
983 && locals->address <= locals->curr_end - 1)
985 locals->vma->start = locals->curr_start;
986 locals->vma->end = locals->curr_end;
987 locals->retval = 0;
988 locals->stop_at_next_vma = 1;
989 return 0;
991 # endif
992 locals->curr_start = start; locals->curr_end = end;
993 return 0;
997 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
999 struct callback_locals locals;
1000 locals.address = address;
1001 locals.vma = vma;
1002 locals.curr_start = 0;
1003 locals.curr_end = 0;
1004 # if STACK_DIRECTION < 0
1005 locals.prev_end = 0;
1006 # else
1007 locals.stop_at_next_vma = 0;
1008 # endif
1009 locals.retval = -1;
1011 vma_iterate (&locals);
1012 if (locals.retval < 0)
1014 if (locals.curr_start < locals.curr_end
1015 && address >= locals.curr_start && address <= locals.curr_end - 1)
1017 vma->start = locals.curr_start;
1018 vma->end = locals.curr_end;
1019 # if STACK_DIRECTION < 0
1020 vma->prev_end = locals.prev_end;
1021 # else
1022 vma->next_start = 0;
1023 # endif
1024 locals.retval = 0;
1027 if (locals.retval == 0)
1029 # if !(STACK_DIRECTION < 0)
1030 if (locals.stop_at_next_vma)
1031 vma->next_start = 0;
1032 # endif
1033 vma->is_near_this = simple_is_near_this;
1034 return 0;
1037 /* FreeBSD 6.[01] doesn't allow to distinguish unmapped pages from
1038 mapped but swapped-out pages. See whether it's fixed. */
1039 if (!is_mapped (0))
1040 /* OK, mincore() appears to work as expected. */
1041 return mincore_get_vma (address, vma);
1042 return -1;
1045 /* --------------------------- stackvma-netbsd.c --------------------------- */
1047 #elif defined __NetBSD__ /* NetBSD */
1049 struct callback_locals
1051 uintptr_t address;
1052 struct vma_struct *vma;
1053 /* The stack appears as multiple adjacent segments, therefore we
1054 merge adjacent segments. */
1055 uintptr_t curr_start, curr_end;
1056 # if STACK_DIRECTION < 0
1057 uintptr_t prev_end;
1058 # else
1059 int stop_at_next_vma;
1060 # endif
1061 int retval;
1064 static int
1065 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1067 if (start == locals->curr_end)
1069 /* Merge adjacent segments. */
1070 locals->curr_end = end;
1071 return 0;
1073 # if STACK_DIRECTION < 0
1074 if (locals->curr_start < locals->curr_end
1075 && locals->address >= locals->curr_start
1076 && locals->address <= locals->curr_end - 1)
1078 locals->vma->start = locals->curr_start;
1079 locals->vma->end = locals->curr_end;
1080 locals->vma->prev_end = locals->prev_end;
1081 locals->retval = 0;
1082 return 1;
1084 locals->prev_end = locals->curr_end;
1085 # else
1086 if (locals->stop_at_next_vma)
1088 locals->vma->next_start = locals->curr_start;
1089 locals->stop_at_next_vma = 0;
1090 return 1;
1092 if (locals->curr_start < locals->curr_end
1093 && locals->address >= locals->curr_start
1094 && locals->address <= locals->curr_end - 1)
1096 locals->vma->start = locals->curr_start;
1097 locals->vma->end = locals->curr_end;
1098 locals->retval = 0;
1099 locals->stop_at_next_vma = 1;
1100 return 0;
1102 # endif
1103 locals->curr_start = start; locals->curr_end = end;
1104 return 0;
1108 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1110 struct callback_locals locals;
1111 locals.address = address;
1112 locals.vma = vma;
1113 locals.curr_start = 0;
1114 locals.curr_end = 0;
1115 # if STACK_DIRECTION < 0
1116 locals.prev_end = 0;
1117 # else
1118 locals.stop_at_next_vma = 0;
1119 # endif
1120 locals.retval = -1;
1122 vma_iterate (&locals);
1123 if (locals.retval < 0)
1125 if (locals.curr_start < locals.curr_end
1126 && address >= locals.curr_start && address <= locals.curr_end - 1)
1128 vma->start = locals.curr_start;
1129 vma->end = locals.curr_end;
1130 # if STACK_DIRECTION < 0
1131 vma->prev_end = locals.prev_end;
1132 # else
1133 vma->next_start = 0;
1134 # endif
1135 locals.retval = 0;
1138 if (locals.retval == 0)
1140 # if !(STACK_DIRECTION < 0)
1141 if (locals.stop_at_next_vma)
1142 vma->next_start = 0;
1143 # endif
1144 vma->is_near_this = simple_is_near_this;
1145 return 0;
1148 return mincore_get_vma (address, vma);
1151 /* --------------------------- stackvma-mquery.c --------------------------- */
1153 /* mquery() is a system call that makes it possible to inquire about the
1154 status of a range of pages of virtual memory. In particular, it can
1155 report whether a page is mapped at all, and where the next unmapped
1156 page after a given address is located.
1157 As of 2021, mquery() is supported by:
1158 - OpenBSD, since OpenBSD 3.4.
1159 Note that this file can give different results. For example, on
1160 OpenBSD 4.4 / i386 the stack segment (which starts around 0xcdbfe000)
1161 ends at 0xcfbfdfff according to mincore, but at 0xffffffff according to
1162 mquery. */
1164 #elif defined __OpenBSD__ /* OpenBSD */
1166 # include <unistd.h> /* getpagesize, mincore */
1167 # include <sys/types.h>
1168 # include <sys/mman.h> /* mincore */
/* Cached result of getpagesize(), so that it is fetched only once.  */
static uintptr_t pagesize;

/* Fill the pagesize cache.  */
static void
init_pagesize (void)
{
  pagesize = getpagesize ();
}
1180 /* Test whether the page starting at ADDR is among the address range.
1181 ADDR must be a multiple of pagesize. */
1182 static int
1183 is_mapped (uintptr_t addr)
1185 /* Avoid calling mquery with a NULL first argument, because this argument
1186 value has a specific meaning. We know the NULL page is unmapped. */
1187 if (addr == 0)
1188 return 0;
1189 return mquery ((void *) addr, pagesize, 0, MAP_FIXED, -1, 0) == (void *) -1;
1192 /* Assuming that the page starting at ADDR is among the address range,
1193 return the start of its virtual memory range.
1194 ADDR must be a multiple of pagesize. */
1195 static uintptr_t
1196 mapped_range_start (uintptr_t addr)
1198 uintptr_t stepsize;
1199 uintptr_t known_unmapped_page;
1201 /* Look at smaller addresses, in larger and larger steps, to minimize the
1202 number of mquery() calls. */
1203 stepsize = pagesize;
1204 for (;;)
1206 uintptr_t hole;
1208 if (addr == 0)
1209 abort ();
1211 if (addr <= stepsize)
1213 known_unmapped_page = 0;
1214 break;
1217 hole = (uintptr_t) mquery ((void *) (addr - stepsize), pagesize,
1218 0, 0, -1, 0);
1219 if (!(hole == (uintptr_t) (void *) -1 || hole >= addr))
1221 /* Some part of [addr - stepsize, addr - 1] is unmapped. */
1222 known_unmapped_page = hole;
1223 break;
1226 /* The entire range [addr - stepsize, addr - 1] is mapped. */
1227 addr -= stepsize;
1229 if (2 * stepsize > stepsize && 2 * stepsize < addr)
1230 stepsize = 2 * stepsize;
1233 /* Now reduce the step size again.
1234 We know that the page at known_unmapped_page is unmapped and that
1235 0 < addr - known_unmapped_page <= stepsize. */
1236 while (stepsize > pagesize && stepsize / 2 >= addr - known_unmapped_page)
1237 stepsize = stepsize / 2;
1238 /* Still 0 < addr - known_unmapped_page <= stepsize. */
1239 while (stepsize > pagesize)
1241 uintptr_t hole;
1243 stepsize = stepsize / 2;
1244 hole = (uintptr_t) mquery ((void *) (addr - stepsize), pagesize,
1245 0, 0, -1, 0);
1246 if (!(hole == (uintptr_t) (void *) -1 || hole >= addr))
1247 /* Some part of [addr - stepsize, addr - 1] is unmapped. */
1248 known_unmapped_page = hole;
1249 else
1250 /* The entire range [addr - stepsize, addr - 1] is mapped. */
1251 addr -= stepsize;
1252 /* Still 0 < addr - known_unmapped_page <= stepsize. */
1255 return addr;
1258 /* Assuming that the page starting at ADDR is among the address range,
1259 return the end of its virtual memory range + 1.
1260 ADDR must be a multiple of pagesize. */
1261 static uintptr_t
1262 mapped_range_end (uintptr_t addr)
1264 uintptr_t end;
1266 if (addr == 0)
1267 abort ();
1269 end = (uintptr_t) mquery ((void *) addr, pagesize, 0, 0, -1, 0);
1270 if (end == (uintptr_t) (void *) -1)
1271 end = 0; /* wrap around */
1272 return end;
1275 /* Determine whether an address range [ADDR1..ADDR2] is completely unmapped.
1276 ADDR1 must be <= ADDR2. */
1277 static int
1278 is_unmapped (uintptr_t addr1, uintptr_t addr2)
1280 /* Round addr1 down. */
1281 addr1 = (addr1 / pagesize) * pagesize;
1282 /* Round addr2 up and turn it into an exclusive bound. */
1283 addr2 = ((addr2 / pagesize) + 1) * pagesize;
1285 /* Avoid calling mquery with a NULL first argument, because this argument
1286 value has a specific meaning. We know the NULL page is unmapped. */
1287 if (addr1 == 0)
1288 addr1 = pagesize;
1290 if (addr1 < addr2)
1292 if (mquery ((void *) addr1, addr2 - addr1, 0, MAP_FIXED, -1, 0)
1293 == (void *) -1)
1294 /* Not all the interval [addr1 .. addr2 - 1] is unmapped. */
1295 return 0;
1296 else
1297 /* The interval [addr1 .. addr2 - 1] is unmapped. */
1298 return 1;
1300 return 1;
1303 # if STACK_DIRECTION < 0
1305 /* Info about the gap between this VMA and the previous one.
1306 addr must be < vma->start. */
1307 static int
1308 mquery_is_near_this (uintptr_t addr, struct vma_struct *vma)
1310 /* vma->start - addr <= (vma->start - vma->prev_end) / 2
1311 is mathematically equivalent to
1312 vma->prev_end <= 2 * addr - vma->start
1313 <==> is_unmapped (2 * addr - vma->start, vma->start - 1).
1314 But be careful about overflow: if 2 * addr - vma->start is negative,
1315 we consider a tiny "guard page" mapping [0, 0] to be present around
1316 NULL; it intersects the range (2 * addr - vma->start, vma->start - 1),
1317 therefore return false. */
1318 uintptr_t testaddr = addr - (vma->start - addr);
1319 if (testaddr > addr) /* overflow? */
1320 return 0;
1321 /* Here testaddr <= addr < vma->start. */
1322 return is_unmapped (testaddr, vma->start - 1);
1325 # endif
1326 # if STACK_DIRECTION > 0
1328 /* Info about the gap between this VMA and the next one.
1329 addr must be > vma->end - 1. */
1330 static int
1331 mquery_is_near_this (uintptr_t addr, struct vma_struct *vma)
1333 /* addr - vma->end < (vma->next_start - vma->end) / 2
1334 is mathematically equivalent to
1335 vma->next_start > 2 * addr - vma->end
1336 <==> is_unmapped (vma->end, 2 * addr - vma->end).
1337 But be careful about overflow: if 2 * addr - vma->end is > ~0UL,
1338 we consider a tiny "guard page" mapping [0, 0] to be present around
1339 NULL; it intersects the range (vma->end, 2 * addr - vma->end),
1340 therefore return false. */
1341 uintptr_t testaddr = addr + (addr - vma->end);
1342 if (testaddr < addr) /* overflow? */
1343 return 0;
1344 /* Here vma->end - 1 < addr <= testaddr. */
1345 return is_unmapped (vma->end, testaddr);
1348 # endif
1351 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1353 if (pagesize == 0)
1354 init_pagesize ();
1355 address = (address / pagesize) * pagesize;
1356 vma->start = mapped_range_start (address);
1357 vma->end = mapped_range_end (address);
1358 vma->is_near_this = mquery_is_near_this;
1359 return 0;
1362 /* ---------------------------- stackvma-mach.c ---------------------------- */
1364 #elif (defined __APPLE__ && defined __MACH__) /* macOS */
1366 #include <libc.h>
1367 #include <nlist.h>
1368 #include <mach/mach.h>
1369 #include <mach/machine/vm_param.h>
1372 sigsegv_get_vma (uintptr_t req_address, struct vma_struct *vma)
1374 uintptr_t prev_address = 0, prev_size = 0;
1375 uintptr_t join_address = 0, join_size = 0;
1376 int more = 1;
1377 vm_address_t address;
1378 vm_size_t size;
1379 task_t task = mach_task_self ();
1381 for (address = VM_MIN_ADDRESS; more; address += size)
1383 mach_port_t object_name;
1384 /* In MacOS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
1385 32 bits in 32-bit processes and 64 bits in 64-bit processes. Whereas
1386 mach_vm_address_t and mach_vm_size_t are always 64 bits large.
1387 MacOS X 10.5 has three vm_region like methods:
1388 - vm_region. It has arguments that depend on whether the current
1389 process is 32-bit or 64-bit. When linking dynamically, this
1390 function exists only in 32-bit processes. Therefore we use it only
1391 in 32-bit processes.
1392 - vm_region_64. It has arguments that depend on whether the current
1393 process is 32-bit or 64-bit. It interprets a flavor
1394 VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
1395 dangerous since 'struct vm_region_basic_info_64' is larger than
1396 'struct vm_region_basic_info'; therefore let's write
1397 VM_REGION_BASIC_INFO_64 explicitly.
1398 - mach_vm_region. It has arguments that are 64-bit always. This
1399 function is useful when you want to access the VM of a process
1400 other than the current process.
1401 In 64-bit processes, we could use vm_region_64 or mach_vm_region.
1402 I choose vm_region_64 because it uses the same types as vm_region,
1403 resulting in less conditional code. */
1404 # if defined __aarch64__ || defined __ppc64__ || defined __x86_64__
1405 struct vm_region_basic_info_64 info;
1406 mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
1408 more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
1409 (vm_region_info_t)&info, &info_count, &object_name)
1410 == KERN_SUCCESS);
1411 # else
1412 struct vm_region_basic_info info;
1413 mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
1415 more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
1416 (vm_region_info_t)&info, &info_count, &object_name)
1417 == KERN_SUCCESS);
1418 # endif
1419 if (!more)
1421 address = join_address + join_size;
1422 size = 0;
1425 if ((uintptr_t) address == join_address + join_size)
1426 join_size += size;
1427 else
1429 prev_address = join_address;
1430 prev_size = join_size;
1431 join_address = (uintptr_t) address;
1432 join_size = size;
1435 if (object_name != MACH_PORT_NULL)
1436 mach_port_deallocate (mach_task_self (), object_name);
1438 # if STACK_DIRECTION < 0
1439 if (join_address <= req_address && join_address + join_size > req_address)
1441 vma->start = join_address;
1442 vma->end = join_address + join_size;
1443 vma->prev_end = prev_address + prev_size;
1444 vma->is_near_this = simple_is_near_this;
1445 return 0;
1447 # else
1448 if (prev_address <= req_address && prev_address + prev_size > req_address)
1450 vma->start = prev_address;
1451 vma->end = prev_address + prev_size;
1452 vma->next_start = join_address;
1453 vma->is_near_this = simple_is_near_this;
1454 return 0;
1456 # endif
1459 # if STACK_DIRECTION > 0
1460 if (join_address <= req_address && join_address + size > req_address)
1462 vma->start = prev_address;
1463 vma->end = prev_address + prev_size;
1464 vma->next_start = ~0UL;
1465 vma->is_near_this = simple_is_near_this;
1466 return 0;
1468 # endif
1470 return -1;
1473 /* ----------------------------- stackvma-aix.c ----------------------------- */
1475 #elif defined _AIX /* AIX */
1477 # include <unistd.h> /* getpagesize, getpid, close, read */
1478 # include <errno.h> /* EINTR */
1479 # include <fcntl.h> /* open */
1480 # include <string.h> /* memcpy */
1481 # include <sys/types.h>
1482 # include <sys/mman.h> /* mmap, munmap */
1483 # include <sys/procfs.h> /* prmap_t */
1484 # include <sys/utsname.h> /* uname */
struct callback_locals
{
  uintptr_t address;            /* The address whose VMA we are looking for.  */
  struct vma_struct *vma;       /* Out: description of the VMA, filled on success.  */
# if STACK_DIRECTION < 0
  uintptr_t prev;               /* End of the previously seen VMA.  */
# else
  int stop_at_next_vma;         /* Set after a match: record next_start, then stop.  */
# endif
  int retval;                   /* 0 once a matching VMA has been found, else -1.  */
};
1498 static int
1499 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1501 # if STACK_DIRECTION < 0
1502 if (locals->address >= start && locals->address <= end - 1)
1504 locals->vma->start = start;
1505 locals->vma->end = end;
1506 locals->vma->prev_end = locals->prev;
1507 locals->retval = 0;
1508 return 1;
1510 locals->prev = end;
1511 # else
1512 if (locals->stop_at_next_vma)
1514 locals->vma->next_start = start;
1515 locals->stop_at_next_vma = 0;
1516 return 1;
1518 if (locals->address >= start && locals->address <= end - 1)
1520 locals->vma->start = start;
1521 locals->vma->end = end;
1522 locals->retval = 0;
1523 locals->stop_at_next_vma = 1;
1524 return 0;
1526 # endif
1527 return 0;
1530 /* Iterate over the virtual memory areas of the current process.
1531 If such iteration is supported, the callback is called once for every
1532 virtual memory area, in ascending order, with the following arguments:
1533 - LOCALS is the same argument as passed to vma_iterate.
1534 - START is the address of the first byte in the area, page-aligned.
1535 - END is the address of the last byte in the area plus 1, page-aligned.
1536 Note that it may be 0 for the last area in the address space.
1537 If the callback returns 0, the iteration continues. If it returns 1,
1538 the iteration terminates prematurely.
1539 This function may open file descriptors, but does not call malloc().
1540 Return 0 if all went well, or -1 in case of error. */
1541 /* This code is a simplified copy (no handling of protection flags) of the
1542 code in gnulib's lib/vma-iter.c. */
1543 static int
1544 vma_iterate (struct callback_locals *locals)
/* Reads /proc/<pid>/map and reports each VMA to callback(), per the
   contract described in the comment block above.  Returns 0 on success,
   -1 on failure.  */
1546 /* On AIX, there is a /proc/$pid/map file, that contains records of type
1547 prmap_t, defined in <sys/procfs.h>. In older versions of AIX, it lists
1548 only the virtual memory areas that are connected to a file, not the
1549 anonymous ones. But at least since AIX 7.1, it is well usable. */
1551 char fnamebuf[6+10+4+1];
1552 char *fname;
1553 int fd;
1554 size_t memneed;
1556 if (pagesize == 0)
1557 init_pagesize ();
1559 /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()). */
1560 fname = fnamebuf + sizeof (fnamebuf) - (4+1);
1561 memcpy (fname, "/map", 4+1);
1563 unsigned int value = getpid ();
1565 *--fname = (value % 10) + '0';
1566 while ((value = value / 10) > 0);
1568 fname -= 6;
1569 memcpy (fname, "/proc/", 6);
1571 fd = open (fname, O_RDONLY | O_CLOEXEC);
1572 if (fd < 0)
1573 return -1;
1575 /* The contents of /proc/<pid>/map contains a number of prmap_t entries,
1576 then an entirely null prmap_t entry, then a heap of NUL terminated
1577 strings.
1578 Documentation: https://www.ibm.com/docs/en/aix/7.1?topic=files-proc-file
1579 We read the entire contents, but look only at the prmap_t entries and
1580 ignore the tail part. */
/* Grow the buffer geometrically until the whole file fits into one read().  */
1582 for (memneed = 2 * pagesize; ; memneed = 2 * memneed)
1584 /* Allocate memneed bytes of memory.
1585 We cannot use alloca here, because not much stack space is guaranteed.
1586 We also cannot use malloc here, because a malloc() call may call mmap()
1587 and thus pre-allocate available memory.
1588 So use mmap(), and ignore the resulting VMA if it occurs among the
1589 resulting VMAs. (Normally it doesn't, because it was allocated after
1590 the open() call.) */
1591 void *auxmap;
1592 unsigned long auxmap_start;
1593 unsigned long auxmap_end;
1594 ssize_t nbytes;
1596 auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
1597 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
1598 if (auxmap == (void *) -1)
1600 close (fd);
1601 return -1;
1603 auxmap_start = (unsigned long) auxmap;
1604 auxmap_end = auxmap_start + memneed;
1606 /* Read the contents of /proc/<pid>/map in a single system call.
1607 This guarantees a consistent result (no duplicated or omitted
1608 entries). */
1609 retry:
1611 nbytes = read (fd, auxmap, memneed);
1612 while (nbytes < 0 && errno == EINTR);
1613 if (nbytes <= 0)
1615 munmap (auxmap, memneed);
1616 close (fd);
1617 return -1;
1619 if (nbytes == memneed)
1621 /* Need more memory. */
1622 munmap (auxmap, memneed);
1623 if (lseek (fd, 0, SEEK_SET) < 0)
1625 close (fd);
1626 return -1;
1629 else
/* Probe one more byte, to distinguish "file fully read" from a short read.  */
1631 if (read (fd, (char *) auxmap + nbytes, 1) > 0)
1633 /* Oops, we had a short read. Retry. */
1634 if (lseek (fd, 0, SEEK_SET) < 0)
1636 munmap (auxmap, memneed);
1637 close (fd);
1638 return -1;
1640 goto retry;
1643 /* We now have the entire contents of /proc/<pid>/map in memory. */
1644 prmap_t* maps = (prmap_t *) auxmap;
1646 /* The entries are not sorted by address. Therefore
1647 1. Extract the relevant information into an array.
1648 2. Sort the array in ascending order.
1649 3. Invoke the callback. */
1650 typedef struct
1652 uintptr_t start;
1653 uintptr_t end;
1655 vma_t;
1656 /* Since 2 * sizeof (vma_t) <= sizeof (prmap_t), we can reuse the
1657 same memory. */
1658 vma_t *vmas = (vma_t *) auxmap;
1660 vma_t *vp = vmas;
1662 prmap_t* mp;
1663 for (mp = maps;;)
1665 unsigned long start, end;
1667 start = (unsigned long) mp->pr_vaddr;
1668 end = start + mp->pr_size;
/* An all-null record terminates the prmap_t list.  */
1669 if (start == 0 && end == 0 && mp->pr_mflags == 0)
1670 break;
1671 /* Discard empty VMAs and kernel VMAs. */
1672 if (start < end && (mp->pr_mflags & MA_KERNTEXT) == 0)
1674 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
1676 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
1677 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
1678 if (start < auxmap_start)
1680 vp->start = start;
1681 vp->end = auxmap_start;
1682 vp++;
1684 if (auxmap_end - 1 < end - 1)
1686 vp->start = auxmap_end;
1687 vp->end = end;
1688 vp++;
1691 else
1693 vp->start = start;
1694 vp->end = end;
1695 vp++;
1698 mp++;
1702 size_t nvmas = vp - vmas;
1703 /* Sort the array in ascending order.
1704 Better not call qsort(), since it may call malloc().
1705 Insertion-sort is OK in this case, despite its worst-case running
1706 time of O(N²), since the number of VMAs will rarely be larger than
1707 1000. */
1709 size_t i;
1710 for (i = 1; i < nvmas; i++)
1712 /* Invariant: Here vmas[0..i-1] is sorted. */
1713 size_t j;
1714 for (j = i; j > 0 && vmas[j - 1].start > vmas[j].start; j--)
1716 vma_t tmp = vmas[j - 1];
1717 vmas[j - 1] = vmas[j];
1718 vmas[j] = tmp;
1720 /* Invariant: Here vmas[0..i] is sorted. */
1724 /* Invoke the callback. */
1726 size_t i;
1727 for (i = 0; i < nvmas; i++)
1729 vma_t *vpi = &vmas[i];
1730 if (callback (locals, vpi->start, vpi->end))
1731 break;
1735 munmap (auxmap, memneed);
1736 break;
1740 close (fd);
1741 return 0;
1745 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1747 struct utsname u;
1748 if (uname (&u) >= 0
1749 /* && strcmp (u.sysname, "AIX") == 0 */
1750 && !(u.version[0] >= '1' && u.version[0] <= '6' && u.version[1] == '\0'))
1752 /* AIX 7 or higher. */
1753 struct callback_locals locals;
1754 locals.address = address;
1755 locals.vma = vma;
1756 #if STACK_DIRECTION < 0
1757 locals.prev = 0;
1758 #else
1759 locals.stop_at_next_vma = 0;
1760 #endif
1761 locals.retval = -1;
1763 vma_iterate (&locals);
1764 if (locals.retval == 0)
1766 #if !(STACK_DIRECTION < 0)
1767 if (locals.stop_at_next_vma)
1768 vma->next_start = 0;
1769 #endif
1770 vma->is_near_this = simple_is_near_this;
1771 return 0;
1775 return mincore_get_vma (address, vma);
1778 /* --------------------------- stackvma-procfs.c --------------------------- */
1780 #elif defined __sgi || defined __sun /* IRIX, Solaris */
1782 # include <errno.h> /* errno, EINTR */
1783 # include <fcntl.h> /* open, O_RDONLY */
1784 # include <stddef.h> /* size_t */
1785 # include <unistd.h> /* getpagesize, getpid, read, close */
1786 # include <sys/types.h>
1787 # include <sys/mman.h> /* mmap, munmap */
1788 # include <sys/stat.h> /* fstat */
1789 # include <string.h> /* memcpy */
1791 /* Try to use the newer ("structured") /proc filesystem API, if supported. */
1792 # define _STRUCTURED_PROC 1
1793 # include <sys/procfs.h> /* prmap_t, optionally PIOC* */
1795 # if !defined __sun
/* Cached result of getpagesize(), so that it is fetched only once.  */
static uintptr_t pagesize;

/* Fill the pagesize cache.  */
static void
init_pagesize (void)
{
  pagesize = getpagesize ();
}
1807 # endif
struct callback_locals
{
  uintptr_t address;            /* The address whose VMA we are looking for.  */
  struct vma_struct *vma;       /* Out: description of the VMA, filled on success.  */
# if STACK_DIRECTION < 0
  uintptr_t prev;               /* End of the previously seen VMA.  */
# else
  int stop_at_next_vma;         /* Set after a match: record next_start, then stop.  */
# endif
  int retval;                   /* 0 once a matching VMA has been found, else -1.  */
};
1821 static int
1822 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1824 # if STACK_DIRECTION < 0
1825 if (locals->address >= start && locals->address <= end - 1)
1827 locals->vma->start = start;
1828 locals->vma->end = end;
1829 locals->vma->prev_end = locals->prev;
1830 locals->retval = 0;
1831 return 1;
1833 locals->prev = end;
1834 # else
1835 if (locals->stop_at_next_vma)
1837 locals->vma->next_start = start;
1838 locals->stop_at_next_vma = 0;
1839 return 1;
1841 if (locals->address >= start && locals->address <= end - 1)
1843 locals->vma->start = start;
1844 locals->vma->end = end;
1845 locals->retval = 0;
1846 locals->stop_at_next_vma = 1;
1847 return 0;
1849 # endif
1850 return 0;
1853 /* Iterate over the virtual memory areas of the current process.
1854 If such iteration is supported, the callback is called once for every
1855 virtual memory area, in ascending order, with the following arguments:
1856 - LOCALS is the same argument as passed to vma_iterate.
1857 - START is the address of the first byte in the area, page-aligned.
1858 - END is the address of the last byte in the area plus 1, page-aligned.
1859 Note that it may be 0 for the last area in the address space.
1860 If the callback returns 0, the iteration continues. If it returns 1,
1861 the iteration terminates prematurely.
1862 This function may open file descriptors, but does not call malloc().
1863 Return 0 if all went well, or -1 in case of error. */
1864 /* This code is a simplified copy (no handling of protection flags) of the
1865 code in gnulib's lib/vma-iter.c. */
1866 static int
1867 vma_iterate (struct callback_locals *locals)
/* Reports each VMA of the current process to callback(), per the contract
   described in the comment block above, using either the older ioctl-based
   /proc API or the newer structured /proc API, whichever the headers
   provide.  Returns 0 on success, -1 on failure.  */
1869 /* Note: Solaris <sys/procfs.h> defines a different type prmap_t with
1870 _STRUCTURED_PROC than without! Here's a table of sizeof(prmap_t):
1871 32-bit 64-bit
1872 _STRUCTURED_PROC = 0 32 56
1873 _STRUCTURED_PROC = 1 96 104
1874 Therefore, if the include files provide the newer API, prmap_t has
1875 the bigger size, and thus you MUST use the newer API. And if the
1876 include files provide the older API, prmap_t has the smaller size,
1877 and thus you MUST use the older API. */
1879 # if defined PIOCNMAP && defined PIOCMAP
1880 /* We must use the older /proc interface. */
1882 char fnamebuf[6+10+1];
1883 char *fname;
1884 int fd;
1885 int nmaps;
1886 size_t memneed;
1887 # if HAVE_MAP_ANONYMOUS
1888 # define zero_fd -1
1889 # define map_flags MAP_ANONYMOUS
1890 # else /* !HAVE_MAP_ANONYMOUS */
1891 int zero_fd;
1892 # define map_flags 0
1893 # endif
1894 void *auxmap;
1895 uintptr_t auxmap_start;
1896 uintptr_t auxmap_end;
1897 prmap_t* maps;
1898 prmap_t* mp;
1900 if (pagesize == 0)
1901 init_pagesize ();
1903 /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()). */
1904 fname = fnamebuf + sizeof (fnamebuf) - 1;
1905 *fname = '\0';
1907 unsigned int value = getpid ();
1909 *--fname = (value % 10) + '0';
1910 while ((value = value / 10) > 0);
1912 fname -= 6;
1913 memcpy (fname, "/proc/", 6);
1915 fd = open (fname, O_RDONLY);
1916 if (fd < 0)
1917 return -1;
1919 if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
1920 goto fail2;
1922 memneed = (nmaps + 10) * sizeof (prmap_t);
1923 /* Allocate memneed bytes of memory.
1924 We cannot use alloca here, because not much stack space is guaranteed.
1925 We also cannot use malloc here, because a malloc() call may call mmap()
1926 and thus pre-allocate available memory.
1927 So use mmap(), and ignore the resulting VMA. */
1928 memneed = ((memneed - 1) / pagesize + 1) * pagesize;
1929 # if !HAVE_MAP_ANONYMOUS
1930 zero_fd = open ("/dev/zero", O_RDONLY, 0644);
1931 if (zero_fd < 0)
1932 goto fail2;
1933 # endif
1934 auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
1935 map_flags | MAP_PRIVATE, zero_fd, 0);
1936 # if !HAVE_MAP_ANONYMOUS
1937 close (zero_fd);
1938 # endif
1939 if (auxmap == (void *) -1)
1940 goto fail2;
1941 auxmap_start = (uintptr_t) auxmap;
1942 auxmap_end = auxmap_start + memneed;
1943 maps = (prmap_t *) auxmap;
1945 if (ioctl (fd, PIOCMAP, maps) < 0)
1946 goto fail1;
1948 for (mp = maps;;)
1950 uintptr_t start, end;
1952 start = (uintptr_t) mp->pr_vaddr;
1953 end = start + mp->pr_size;
/* An all-null record terminates the prmap_t list.  */
1954 if (start == 0 && end == 0)
1955 break;
1956 mp++;
/* Exclude the auxiliary mapping we created above from the report.  */
1957 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
1959 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
1960 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
1961 if (start < auxmap_start)
1962 if (callback (locals, start, auxmap_start))
1963 break;
1964 if (auxmap_end - 1 < end - 1)
1965 if (callback (locals, auxmap_end, end))
1966 break;
1968 else
1970 if (callback (locals, start, end))
1971 break;
1974 munmap (auxmap, memneed);
1975 close (fd);
1976 return 0;
1978 fail1:
1979 munmap (auxmap, memneed);
1980 fail2:
1981 close (fd);
1982 return -1;
1984 # else
1985 /* We must use the newer /proc interface.
1986 Documentation:
1987 https://docs.oracle.com/cd/E23824_01/html/821-1473/proc-4.html
1988 The contents of /proc/<pid>/map consists of records of type
1989 prmap_t. These are different in 32-bit and 64-bit processes,
1990 but here we are fortunately accessing only the current process. */
1992 char fnamebuf[6+10+4+1];
1993 char *fname;
1994 int fd;
1995 int nmaps;
1996 size_t memneed;
1997 # if HAVE_MAP_ANONYMOUS
1998 # define zero_fd -1
1999 # define map_flags MAP_ANONYMOUS
2000 # else /* !HAVE_MAP_ANONYMOUS */
2001 int zero_fd;
2002 # define map_flags 0
2003 # endif
2004 void *auxmap;
2005 uintptr_t auxmap_start;
2006 uintptr_t auxmap_end;
2007 prmap_t* maps;
2008 prmap_t* maps_end;
2009 prmap_t* mp;
2011 if (pagesize == 0)
2012 init_pagesize ();
2014 /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()). */
2015 fname = fnamebuf + sizeof (fnamebuf) - 1 - 4;
2016 memcpy (fname, "/map", 4 + 1);
2018 unsigned int value = getpid ();
2020 *--fname = (value % 10) + '0';
2021 while ((value = value / 10) > 0);
2023 fname -= 6;
2024 memcpy (fname, "/proc/", 6);
2026 fd = open (fname, O_RDONLY);
2027 if (fd < 0)
2028 return -1;
2031 struct stat statbuf;
2032 if (fstat (fd, &statbuf) < 0)
2033 goto fail2;
/* NOTE(review): st_size is only used as a sizing hint; the read loop
   below recomputes nmaps from the bytes actually read.  */
2034 nmaps = statbuf.st_size / sizeof (prmap_t);
2037 memneed = (nmaps + 10) * sizeof (prmap_t);
2038 /* Allocate memneed bytes of memory.
2039 We cannot use alloca here, because not much stack space is guaranteed.
2040 We also cannot use malloc here, because a malloc() call may call mmap()
2041 and thus pre-allocate available memory.
2042 So use mmap(), and ignore the resulting VMA. */
2043 memneed = ((memneed - 1) / pagesize + 1) * pagesize;
2044 # if !HAVE_MAP_ANONYMOUS
2045 zero_fd = open ("/dev/zero", O_RDONLY, 0644);
2046 if (zero_fd < 0)
2047 goto fail2;
2048 # endif
2049 auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
2050 map_flags | MAP_PRIVATE, zero_fd, 0);
2051 # if !HAVE_MAP_ANONYMOUS
2052 close (zero_fd);
2053 # endif
2054 if (auxmap == (void *) -1)
2055 goto fail2;
2056 auxmap_start = (uintptr_t) auxmap;
2057 auxmap_end = auxmap_start + memneed;
2058 maps = (prmap_t *) auxmap;
2060 /* Read up to memneed bytes from fd into maps. */
2062 size_t remaining = memneed;
2063 size_t total_read = 0;
2064 char *ptr = (char *) maps;
2068 size_t nread = read (fd, ptr, remaining);
2069 if (nread == (size_t)-1)
2071 if (errno == EINTR)
2072 continue;
2073 goto fail1;
2075 if (nread == 0)
2076 /* EOF */
2077 break;
2078 total_read += nread;
2079 ptr += nread;
2080 remaining -= nread;
2082 while (remaining > 0);
2084 nmaps = (memneed - remaining) / sizeof (prmap_t);
2085 maps_end = maps + nmaps;
2088 for (mp = maps; mp < maps_end; mp++)
2090 uintptr_t start, end;
2092 start = (uintptr_t) mp->pr_vaddr;
2093 end = start + mp->pr_size;
/* Exclude the auxiliary mapping we created above from the report.  */
2094 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
2096 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
2097 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
2098 if (start < auxmap_start)
2099 if (callback (locals, start, auxmap_start))
2100 break;
2101 if (auxmap_end - 1 < end - 1)
2102 if (callback (locals, auxmap_end, end))
2103 break;
2105 else
2107 if (callback (locals, start, end))
2108 break;
2111 munmap (auxmap, memneed);
2112 close (fd);
2113 return 0;
2115 fail1:
2116 munmap (auxmap, memneed);
2117 fail2:
2118 close (fd);
2119 return -1;
2121 # endif
2125 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
2127 struct callback_locals locals;
2128 locals.address = address;
2129 locals.vma = vma;
2130 # if STACK_DIRECTION < 0
2131 locals.prev = 0;
2132 # else
2133 locals.stop_at_next_vma = 0;
2134 # endif
2135 locals.retval = -1;
2137 vma_iterate (&locals);
2138 if (locals.retval == 0)
2140 # if !(STACK_DIRECTION < 0)
2141 if (locals.stop_at_next_vma)
2142 vma->next_start = 0;
2143 # endif
2144 vma->is_near_this = simple_is_near_this;
2145 return 0;
2148 # if defined __sun
2149 return mincore_get_vma (address, vma);
2150 # else
2151 return -1;
2152 # endif
2155 /* -------------------------------------------------------------------------- */
2157 #elif defined __CYGWIN__ /* Cygwin */
struct callback_locals
{
  uintptr_t address;            /* The address whose VMA we are looking for.  */
  struct vma_struct *vma;       /* Out: description of the VMA, filled on success.  */
  /* The stack appears as three adjacent segments, therefore we
     merge adjacent segments.  */
  uintptr_t curr_start, curr_end;  /* Bounds of the segment run being merged.  */
# if STACK_DIRECTION < 0
  uintptr_t prev_end;           /* End of the previous (merged) segment.  */
# else
  int stop_at_next_vma;         /* Set after a match: record next_start, then stop.  */
# endif
  int retval;                   /* 0 once a matching VMA has been found, else -1.  */
};
2174 static int
2175 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
2177 if (start == locals->curr_end)
2179 /* Merge adjacent segments. */
2180 locals->curr_end = end;
2181 return 0;
2183 # if STACK_DIRECTION < 0
2184 if (locals->curr_start < locals->curr_end
2185 && locals->address >= locals->curr_start
2186 && locals->address <= locals->curr_end - 1)
2188 locals->vma->start = locals->curr_start;
2189 locals->vma->end = locals->curr_end;
2190 locals->vma->prev_end = locals->prev_end;
2191 locals->retval = 0;
2192 return 1;
2194 locals->prev_end = locals->curr_end;
2195 # else
2196 if (locals->stop_at_next_vma)
2198 locals->vma->next_start = locals->curr_start;
2199 locals->stop_at_next_vma = 0;
2200 return 1;
2202 if (locals->curr_start < locals->curr_end
2203 && locals->address >= locals->curr_start
2204 && locals->address <= locals->curr_end - 1)
2206 locals->vma->start = locals->curr_start;
2207 locals->vma->end = locals->curr_end;
2208 locals->retval = 0;
2209 locals->stop_at_next_vma = 1;
2210 return 0;
2212 # endif
2213 locals->curr_start = start; locals->curr_end = end;
2214 return 0;
2218 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
2220 struct callback_locals locals;
2221 locals.address = address;
2222 locals.vma = vma;
2223 locals.curr_start = 0;
2224 locals.curr_end = 0;
2225 # if STACK_DIRECTION < 0
2226 locals.prev_end = 0;
2227 # else
2228 locals.stop_at_next_vma = 0;
2229 # endif
2230 locals.retval = -1;
2232 vma_iterate (&locals);
2233 if (locals.retval < 0)
2235 if (locals.curr_start < locals.curr_end
2236 && address >= locals.curr_start && address <= locals.curr_end - 1)
2238 vma->start = locals.curr_start;
2239 vma->end = locals.curr_end;
2240 # if STACK_DIRECTION < 0
2241 vma->prev_end = locals.prev_end;
2242 # else
2243 vma->next_start = 0;
2244 # endif
2245 locals.retval = 0;
2248 if (locals.retval == 0)
2250 # if !(STACK_DIRECTION < 0)
2251 if (locals.stop_at_next_vma)
2252 vma->next_start = 0;
2253 # endif
2254 vma->is_near_this = simple_is_near_this;
2255 return 0;
2258 return -1;
2261 /* ---------------------------- stackvma-beos.h ---------------------------- */
2263 #elif defined __HAIKU__ /* Haiku */
2265 # include <OS.h> /* get_next_area_info */
struct callback_locals
{
  uintptr_t address;            /* The address whose VMA we are looking for.  */
  struct vma_struct *vma;       /* Out: description of the VMA, filled on success.  */
# if STACK_DIRECTION < 0
  uintptr_t prev;               /* End of the previously seen VMA.  */
# else
  int stop_at_next_vma;         /* Set after a match: record next_start, then stop.  */
# endif
  int retval;                   /* 0 once a matching VMA has been found, else -1.  */
};
2279 static int
2280 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
2282 # if STACK_DIRECTION < 0
2283 if (locals->address >= start && locals->address <= end - 1)
2285 locals->vma->start = start;
2286 locals->vma->end = end;
2287 locals->vma->prev_end = locals->prev;
2288 locals->retval = 0;
2289 return 1;
2291 locals->prev = end;
2292 # else
2293 if (locals->stop_at_next_vma)
2295 locals->vma->next_start = start;
2296 locals->stop_at_next_vma = 0;
2297 return 1;
2299 if (locals->address >= start && locals->address <= end - 1)
2301 locals->vma->start = start;
2302 locals->vma->end = end;
2303 locals->retval = 0;
2304 locals->stop_at_next_vma = 1;
2305 return 0;
2307 # endif
2308 return 0;
2311 /* Iterate over the virtual memory areas of the current process.
2312 If such iteration is supported, the callback is called once for every
2313 virtual memory area, in ascending order, with the following arguments:
2314 - LOCALS is the same argument as passed to vma_iterate.
2315 - START is the address of the first byte in the area, page-aligned.
2316 - END is the address of the last byte in the area plus 1, page-aligned.
2317 Note that it may be 0 for the last area in the address space.
2318 If the callback returns 0, the iteration continues. If it returns 1,
2319 the iteration terminates prematurely.
2320 This function may open file descriptors, but does not call malloc().
2321 Return 0 if all went well, or -1 in case of error. */
2322 /* This code is a simplified copy (no handling of protection flags) of the
2323 code in gnulib's lib/vma-iter.c. */
2324 static int
2325 vma_iterate (struct callback_locals *locals)
2327 area_info info;
2328 ssize_t cookie;
2330 cookie = 0;
2331 while (get_next_area_info (0, &cookie, &info) == B_OK)
2333 uintptr_t start, end;
2335 start = (uintptr_t) info.address;
2336 end = start + info.size;
2338 if (callback (locals, start, end))
2339 break;
2341 return 0;
2345 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
2347 struct callback_locals locals;
2348 locals.address = address;
2349 locals.vma = vma;
2350 # if STACK_DIRECTION < 0
2351 locals.prev = 0;
2352 # else
2353 locals.stop_at_next_vma = 0;
2354 # endif
2355 locals.retval = -1;
2357 vma_iterate (&locals);
2358 if (locals.retval == 0)
2360 # if !(STACK_DIRECTION < 0)
2361 if (locals.stop_at_next_vma)
2362 vma->next_start = 0;
2363 # endif
2364 vma->is_near_this = simple_is_near_this;
2365 return 0;
2367 return -1;
2370 /* -------------------------------------------------------------------------- */
2372 #else /* Hurd, Minix, ... */
/* Fallback for platforms without a way to enumerate the memory areas
   of a process (Hurd, Minix, ...).  */
int
sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
{
  /* No way. */
  return -1;
}
2381 #endif