1 /* Determine the virtual memory area of a given address.
2 Copyright (C) 2002-2024 Free Software Foundation, Inc.
3 Copyright (C) 2003-2006 Paolo Bonzini <bonzini@gnu.org>
5 This program is free software: you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <https://www.gnu.org/licenses/>. */
18 /* Written by Bruno Haible and Paolo Bonzini. */
22 /* On Solaris in 32-bit mode, when gnulib module 'largefile' is in use,
23 prevent a compilation error
24 "Cannot use procfs in the large file compilation environment"
25 while also preventing <sys/types.h> from not defining off_t.
26 On Android, when targeting Android 4.4 or older with a GCC toolchain,
27 prevent a compilation error
28 "error: call to 'mmap' declared with attribute error: mmap is not
29 available with _FILE_OFFSET_BITS=64 when using GCC until android-21.
30 Either raise your minSdkVersion, disable _FILE_OFFSET_BITS=64, or
   Use a sysroot that corresponds to minSdkVersion >= 21."
   The files that we access in this compilation unit are less than 2 GB
   large.  So it is ok to restrict to 32-bit file operations.  */
34 #if defined __sun && !defined _LP64 && _FILE_OFFSET_BITS == 64
35 # undef _FILE_OFFSET_BITS
36 # define _FILE_OFFSET_BITS 32
39 # undef _FILE_OFFSET_BITS
48 /* =========================== stackvma-simple.c =========================== */
50 #if defined __linux__ || defined __ANDROID__ \
51 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
52 || defined __NetBSD__ \
53 || (defined __APPLE__ && defined __MACH__) \
54 || defined _AIX || defined __sgi || defined __sun \
55 || defined __CYGWIN__ || defined __HAIKU__
57 /* This file contains the proximity test function for the simple cases, where
58 the OS has an API for enumerating the mapped ranges of virtual memory. */
60 # if STACK_DIRECTION < 0
62 /* Info about the gap between this VMA and the previous one.
63 addr must be < vma->start. */
65 simple_is_near_this (uintptr_t addr
, struct vma_struct
*vma
)
67 return (vma
->start
- addr
<= (vma
->start
- vma
->prev_end
) / 2);
71 # if STACK_DIRECTION > 0
73 /* Info about the gap between this VMA and the next one.
74 addr must be > vma->end - 1. */
76 simple_is_near_this (uintptr_t addr
, struct vma_struct
*vma
)
78 return (addr
- vma
->end
< (vma
->next_start
- vma
->end
) / 2);
85 /* =========================== stackvma-rofile.c =========================== */
86 /* Buffered read-only streams. */
88 #if defined __linux__ || defined __ANDROID__ \
89 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
90 || defined __NetBSD__ \
93 # include <errno.h> /* errno, EINTR */
94 # include <fcntl.h> /* open, O_RDONLY */
95 # include <stddef.h> /* size_t */
96 # include <unistd.h> /* getpagesize, lseek, read, close */
97 # include <sys/types.h>
98 # include <sys/mman.h> /* mmap, munmap */
100 # if defined __linux__ || defined __ANDROID__
101 # include <limits.h> /* PATH_MAX */
104 /* Buffered read-only streams.
105 We cannot use <stdio.h> here, because fopen() calls malloc(), and a malloc()
106 call may have been interrupted.
107 Also, we cannot use multiple read() calls, because if the buffer size is
108 smaller than the file's contents:
109 - On NetBSD, the second read() call would return 0, thus making the file
111 - On DragonFly BSD, the first read() call would fail with errno = EFBIG.
112 - On all platforms, if some other thread is doing memory allocations or
113 deallocations between two read() calls, there is a high risk that the
114 result of these two read() calls don't fit together, and as a
115 consequence we will parse garbage and either omit some VMAs or return
116 VMAs with nonsensical addresses.
117 So use mmap(), and ignore the resulting VMA.
118 The stack-allocated buffer cannot be too large, because this can be called
119 when we are in the context of an alternate stack of just SIGSTKSZ bytes. */
121 # if defined __linux__ || defined __ANDROID__
122 /* On Linux, if the file does not entirely fit into the buffer, the read()
123 function stops before the line that would come out truncated. The
124 maximum size of such a line is 73 + PATH_MAX bytes. To be sure that we
125 have read everything, we must verify that at least that many bytes are
126 left when read() returned. */
127 # define MIN_LEFTOVER (73 + PATH_MAX)
129 # define MIN_LEFTOVER 1
132 # if MIN_LEFTOVER < 1024
133 # define STACK_ALLOCATED_BUFFER_SIZE 1024
135 /* There is no point in using a stack-allocated buffer if it is too small
137 # define STACK_ALLOCATED_BUFFER_SIZE 1
145 /* These fields deal with allocation of the buffer. */
148 size_t auxmap_length
;
149 uintptr_t auxmap_start
;
150 uintptr_t auxmap_end
;
151 char stack_allocated_buffer
[STACK_ALLOCATED_BUFFER_SIZE
];
154 /* Open a read-only file stream. */
156 rof_open (struct rofile
*rof
, const char *filename
)
162 fd
= open (filename
, O_RDONLY
);
167 /* Try the static buffer first. */
169 rof
->buffer
= rof
->stack_allocated_buffer
;
170 size
= sizeof (rof
->stack_allocated_buffer
);
172 rof
->auxmap_start
= 0;
176 /* Attempt to read the contents in a single system call. */
177 if (size
> MIN_LEFTOVER
)
179 int n
= read (fd
, rof
->buffer
, size
);
180 if (n
< 0 && errno
== EINTR
)
182 # if defined __DragonFly__
183 if (!(n
< 0 && errno
== EFBIG
))
189 if (n
+ MIN_LEFTOVER
<= size
)
191 /* The buffer was sufficiently large. */
193 # if defined __linux__ || defined __ANDROID__
194 /* On Linux, the read() call may stop even if the buffer was
195 large enough. We need the equivalent of full_read(). */
198 n
= read (fd
, rof
->buffer
+ rof
->filled
, size
- rof
->filled
);
199 if (n
< 0 && errno
== EINTR
)
204 if (n
+ MIN_LEFTOVER
> size
- rof
->filled
)
205 /* Allocate a larger buffer. */
209 /* Reached the end of file. */
222 /* Allocate a larger buffer. */
225 pagesize
= getpagesize ();
227 while (size
<= MIN_LEFTOVER
)
236 if (rof
->auxmap
!= NULL
)
237 munmap (rof
->auxmap
, rof
->auxmap_length
);
239 rof
->auxmap
= (void *) mmap ((void *) 0, size
, PROT_READ
| PROT_WRITE
,
240 MAP_ANONYMOUS
| MAP_PRIVATE
, -1, 0);
241 if (rof
->auxmap
== (void *) -1)
246 rof
->auxmap_length
= size
;
247 rof
->auxmap_start
= (uintptr_t) rof
->auxmap
;
248 rof
->auxmap_end
= rof
->auxmap_start
+ size
;
249 rof
->buffer
= (char *) rof
->auxmap
;
252 if (lseek (fd
, 0, SEEK_SET
) < 0)
255 fd
= open (filename
, O_RDONLY
);
263 if (rof
->auxmap
!= NULL
)
264 munmap (rof
->auxmap
, rof
->auxmap_length
);
268 /* Return the next byte from a read-only file stream without consuming it,
271 rof_peekchar (struct rofile
*rof
)
273 if (rof
->position
== rof
->filled
)
278 return (unsigned char) rof
->buffer
[rof
->position
];
281 /* Return the next byte from a read-only file stream, or -1 at EOF. */
283 rof_getchar (struct rofile
*rof
)
285 int c
= rof_peekchar (rof
);
/* Parse an unsigned hexadecimal number from a read-only file stream.
   Stores the value in *VALUEP.
   Returns 0 upon success, -1 if no hex digit was present.  */
static int
rof_scanf_lx (struct rofile *rof, uintptr_t *valuep)
{
  uintptr_t value = 0;
  unsigned int numdigits = 0;

  for (;;)
    {
      int c = rof_peekchar (rof);
      if (c >= '0' && c <= '9')
        value = (value << 4) + (c - '0');
      else if (c >= 'A' && c <= 'F')
        value = (value << 4) + (c - 'A' + 10);
      else if (c >= 'a' && c <= 'f')
        value = (value << 4) + (c - 'a' + 10);
      else
        break;
      rof_getchar (rof);
      numdigits++;
    }
  if (numdigits == 0)
    return -1;
  *valuep = value;
  return 0;
}
317 /* Close a read-only file stream. */
319 rof_close (struct rofile
*rof
)
321 if (rof
->auxmap
!= NULL
)
322 munmap (rof
->auxmap
, rof
->auxmap_length
);
327 /* ========================== stackvma-vma-iter.c ========================== */
328 /* Iterate through the virtual memory areas of the current process,
329 by reading from the /proc file system. */
331 /* This code is a simplified copy (no handling of protection flags) of the
332 code in gnulib's lib/vma-iter.c. */
334 #if defined __linux__ || defined __ANDROID__ \
335 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
336 || defined __NetBSD__ \
337 || defined __CYGWIN__
339 /* Forward declarations. */
340 struct callback_locals
;
341 static int callback (struct callback_locals
*locals
, uintptr_t start
, uintptr_t end
);
343 # if defined __linux__ || defined __ANDROID__ || (defined __FreeBSD_kernel__ && !defined __FreeBSD__) || defined __CYGWIN__
344 /* GNU/kFreeBSD mounts /proc as linprocfs, which looks like a Linux /proc
348 vma_iterate_proc (struct callback_locals
*locals
)
352 /* Open the current process' maps file. It describes one VMA per line. */
353 if (rof_open (&rof
, "/proc/self/maps") >= 0)
355 uintptr_t auxmap_start
= rof
.auxmap_start
;
356 uintptr_t auxmap_end
= rof
.auxmap_end
;
360 uintptr_t start
, end
;
363 /* Parse one line. First start and end. */
364 if (!(rof_scanf_lx (&rof
, &start
) >= 0
365 && rof_getchar (&rof
) == '-'
366 && rof_scanf_lx (&rof
, &end
) >= 0))
368 while (c
= rof_getchar (&rof
), c
!= -1 && c
!= '\n')
371 if (start
<= auxmap_start
&& auxmap_end
- 1 <= end
- 1)
373 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
374 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
375 if (start
< auxmap_start
)
376 if (callback (locals
, start
, auxmap_start
))
378 if (auxmap_end
- 1 < end
- 1)
379 if (callback (locals
, auxmap_end
, end
))
384 if (callback (locals
, start
, end
))
395 # elif defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__
398 vma_iterate_proc (struct callback_locals
*locals
)
402 /* Open the current process' maps file. It describes one VMA per line.
404 Cf. <https://www.freebsd.org/cgi/cvsweb.cgi/src/sys/fs/procfs/procfs_map.c?annotate=HEAD>
405 On NetBSD, there are two such files:
406 - /proc/curproc/map in near-FreeBSD syntax,
407 - /proc/curproc/maps in Linux syntax.
408 Cf. <http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/miscfs/procfs/procfs_map.c?rev=HEAD> */
409 if (rof_open (&rof
, "/proc/curproc/map") >= 0)
411 uintptr_t auxmap_start
= rof
.auxmap_start
;
412 uintptr_t auxmap_end
= rof
.auxmap_end
;
416 uintptr_t start
, end
;
419 /* Parse one line. First start. */
420 if (!(rof_getchar (&rof
) == '0'
421 && rof_getchar (&rof
) == 'x'
422 && rof_scanf_lx (&rof
, &start
) >= 0))
424 while (c
= rof_peekchar (&rof
), c
== ' ' || c
== '\t')
427 if (!(rof_getchar (&rof
) == '0'
428 && rof_getchar (&rof
) == 'x'
429 && rof_scanf_lx (&rof
, &end
) >= 0))
431 while (c
= rof_getchar (&rof
), c
!= -1 && c
!= '\n')
434 if (start
<= auxmap_start
&& auxmap_end
- 1 <= end
- 1)
436 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
437 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
438 if (start
< auxmap_start
)
439 if (callback (locals
, start
, auxmap_start
))
441 if (auxmap_end
- 1 < end
- 1)
442 if (callback (locals
, auxmap_end
, end
))
447 if (callback (locals
, start
, end
))
460 # if (defined __FreeBSD_kernel__ || defined __FreeBSD__) && defined KERN_PROC_VMMAP /* FreeBSD >= 7.1 */
462 # include <sys/user.h> /* struct kinfo_vmentry */
463 # include <sys/sysctl.h> /* sysctl */
466 vma_iterate_bsd (struct callback_locals
*locals
)
468 /* Documentation: https://www.freebsd.org/cgi/man.cgi?sysctl(3) */
469 int info_path
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_VMMAP
, getpid () };
474 unsigned long auxmap_start
;
475 unsigned long auxmap_end
;
481 if (sysctl (info_path
, 4, NULL
, &len
, NULL
, 0) < 0)
483 /* Allow for small variations over time. In a multithreaded program
484 new VMAs can be allocated at any moment. */
486 /* Allocate memneed bytes of memory.
487 We cannot use alloca here, because not much stack space is guaranteed.
488 We also cannot use malloc here, because a malloc() call may call mmap()
489 and thus pre-allocate available memory.
490 So use mmap(), and ignore the resulting VMA. */
491 pagesize
= getpagesize ();
493 memneed
= ((memneed
- 1) / pagesize
+ 1) * pagesize
;
494 auxmap
= (void *) mmap ((void *) 0, memneed
, PROT_READ
| PROT_WRITE
,
495 MAP_ANONYMOUS
| MAP_PRIVATE
, -1, 0);
496 if (auxmap
== (void *) -1)
498 auxmap_start
= (unsigned long) auxmap
;
499 auxmap_end
= auxmap_start
+ memneed
;
500 mem
= (char *) auxmap
;
501 if (sysctl (info_path
, 4, mem
, &len
, NULL
, 0) < 0)
503 munmap (auxmap
, memneed
);
510 struct kinfo_vmentry
*kve
= (struct kinfo_vmentry
*) p
;
511 unsigned long start
= kve
->kve_start
;
512 unsigned long end
= kve
->kve_end
;
513 if (start
<= auxmap_start
&& auxmap_end
- 1 <= end
- 1)
515 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
516 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
517 if (start
< auxmap_start
)
518 if (callback (locals
, start
, auxmap_start
))
520 if (auxmap_end
- 1 < end
- 1)
521 if (callback (locals
, auxmap_end
, end
))
526 if (callback (locals
, start
, end
))
529 p
+= kve
->kve_structsize
;
531 munmap (auxmap
, memneed
);
537 # define vma_iterate_bsd(locals) (-1)
542 /* Iterate over the virtual memory areas of the current process.
543 If such iteration is supported, the callback is called once for every
544 virtual memory area, in ascending order, with the following arguments:
545 - LOCALS is the same argument as passed to vma_iterate.
546 - START is the address of the first byte in the area, page-aligned.
547 - END is the address of the last byte in the area plus 1, page-aligned.
548 Note that it may be 0 for the last area in the address space.
549 If the callback returns 0, the iteration continues. If it returns 1,
550 the iteration terminates prematurely.
551 This function may open file descriptors, but does not call malloc().
552 Return 0 if all went well, or -1 in case of error. */
/* Dispatch between the /proc based and the sysctl() based iteration,
   in the order appropriate for the platform.
   Returns 0 if all went well, or -1 in case of error.  */
static int
vma_iterate (struct callback_locals *locals)
{
# if defined __FreeBSD__
  /* On FreeBSD with procfs (but not GNU/kFreeBSD, which uses linprocfs), the
     function vma_iterate_proc does not return the virtual memory areas that
     were created by anonymous mmap.  See
     <https://svnweb.freebsd.org/base/head/sys/fs/procfs/procfs_map.c?view=markup>
     So use vma_iterate_proc only as a fallback.  */
  int retval = vma_iterate_bsd (locals);
  if (retval == 0)
    return 0;
  return vma_iterate_proc (locals);
# else
  /* On the other platforms, try the /proc approach first, and the sysctl()
     approach as a fallback.  */
  int retval = vma_iterate_proc (locals);
  if (retval == 0)
    return 0;
  return vma_iterate_bsd (locals);
# endif
}
580 /* =========================== stackvma-mincore.c =========================== */
582 /* mincore() is a system call that allows to inquire the status of a
583 range of pages of virtual memory. In particular, it allows to inquire
584 whether a page is mapped at all (except on Mac OS X, where mincore
585 returns 0 even for unmapped addresses).
586 As of 2006, mincore() is supported by: possible bits:
587 - Linux, since Linux 2.4 and glibc 2.2, 1
588 - Solaris, since Solaris 9, 1
589 - MacOS X, since MacOS X 10.3 (at least), 1
590 - FreeBSD, since FreeBSD 6.0, MINCORE_{INCORE,REFERENCED,MODIFIED}
591 - NetBSD, since NetBSD 3.0 (at least), 1
592 - OpenBSD, since OpenBSD 2.6 (at least), 1
593 - AIX, since AIX 5.3, 1
596 However, while the API allows to easily determine the bounds of mapped
597 virtual memory, it does not make it easy to find the bounds of _unmapped_
598 virtual memory ranges. We try to work around this, but it may still be
601 #if defined __linux__ || defined __ANDROID__ \
602 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
603 || defined __NetBSD__ /* || defined __OpenBSD__ */ \
604 /* || (defined __APPLE__ && defined __MACH__) */ \
605 || defined _AIX || defined __sun
607 # include <unistd.h> /* getpagesize, mincore */
608 # include <sys/types.h>
609 # include <sys/mman.h> /* mincore */
611 /* The AIX declaration of mincore() uses 'caddr_t', whereas the other platforms
614 typedef caddr_t MINCORE_ADDR_T
;
616 typedef void* MINCORE_ADDR_T
;
619 /* The glibc and musl declaration of mincore() uses 'unsigned char *', whereas
620 the BSD declaration uses 'char *'. */
621 # if __GLIBC__ >= 2 || defined __linux__ || defined __ANDROID__
622 typedef unsigned char pageinfo_t
;
624 typedef char pageinfo_t
;
627 /* Cache for getpagesize(). */
628 static uintptr_t pagesize
;
630 /* Initialize pagesize. */
634 pagesize
= getpagesize ();
637 /* Test whether the page starting at ADDR is among the address range.
638 ADDR must be a multiple of pagesize. */
640 is_mapped (uintptr_t addr
)
643 return mincore ((MINCORE_ADDR_T
) addr
, pagesize
, vec
) >= 0;
646 /* Assuming that the page starting at ADDR is among the address range,
647 return the start of its virtual memory range.
648 ADDR must be a multiple of pagesize. */
650 mapped_range_start (uintptr_t addr
)
652 /* Use a moderately sized VEC here, small enough that it fits on the stack
653 (without requiring malloc). */
654 pageinfo_t vec
[1024];
655 uintptr_t stepsize
= sizeof (vec
);
659 uintptr_t max_remaining
;
664 max_remaining
= addr
/ pagesize
;
665 if (stepsize
> max_remaining
)
666 stepsize
= max_remaining
;
667 if (mincore ((MINCORE_ADDR_T
) (addr
- stepsize
* pagesize
),
668 stepsize
* pagesize
, vec
) < 0)
669 /* Time to search in smaller steps. */
671 /* The entire range exists. Continue searching in large steps. */
672 addr
-= stepsize
* pagesize
;
676 uintptr_t halfstepsize1
;
677 uintptr_t halfstepsize2
;
682 /* Here we know that less than stepsize pages exist starting at addr. */
683 halfstepsize1
= (stepsize
+ 1) / 2;
684 halfstepsize2
= stepsize
/ 2;
685 /* halfstepsize1 + halfstepsize2 = stepsize. */
687 if (mincore ((MINCORE_ADDR_T
) (addr
- halfstepsize1
* pagesize
),
688 halfstepsize1
* pagesize
, vec
) < 0)
689 stepsize
= halfstepsize1
;
692 addr
-= halfstepsize1
* pagesize
;
693 stepsize
= halfstepsize2
;
698 /* Assuming that the page starting at ADDR is among the address range,
699 return the end of its virtual memory range + 1.
700 ADDR must be a multiple of pagesize. */
702 mapped_range_end (uintptr_t addr
)
704 /* Use a moderately sized VEC here, small enough that it fits on the stack
705 (without requiring malloc). */
706 pageinfo_t vec
[1024];
707 uintptr_t stepsize
= sizeof (vec
);
712 uintptr_t max_remaining
;
714 if (addr
== 0) /* wrapped around? */
717 max_remaining
= (- addr
) / pagesize
;
718 if (stepsize
> max_remaining
)
719 stepsize
= max_remaining
;
720 if (mincore ((MINCORE_ADDR_T
) addr
, stepsize
* pagesize
, vec
) < 0)
721 /* Time to search in smaller steps. */
723 /* The entire range exists. Continue searching in large steps. */
724 addr
+= stepsize
* pagesize
;
728 uintptr_t halfstepsize1
;
729 uintptr_t halfstepsize2
;
734 /* Here we know that less than stepsize pages exist starting at addr. */
735 halfstepsize1
= (stepsize
+ 1) / 2;
736 halfstepsize2
= stepsize
/ 2;
737 /* halfstepsize1 + halfstepsize2 = stepsize. */
739 if (mincore ((MINCORE_ADDR_T
) addr
, halfstepsize1
* pagesize
, vec
) < 0)
740 stepsize
= halfstepsize1
;
743 addr
+= halfstepsize1
* pagesize
;
744 stepsize
= halfstepsize2
;
749 /* Determine whether an address range [ADDR1..ADDR2] is completely unmapped.
750 ADDR1 must be <= ADDR2. */
752 is_unmapped (uintptr_t addr1
, uintptr_t addr2
)
757 /* Round addr1 down. */
758 addr1
= (addr1
/ pagesize
) * pagesize
;
759 /* Round addr2 up and turn it into an exclusive bound. */
760 addr2
= ((addr2
/ pagesize
) + 1) * pagesize
;
762 /* This is slow: mincore() does not provide a way to determine the bounds
763 of the gaps directly. So we have to use mincore() on individual pages
764 over and over again. Only after we've verified that all pages are
765 unmapped, we know that the range is completely unmapped.
766 If we were to traverse the pages from bottom to top or from top to bottom,
767 it would be slow even in the average case. To speed up the search, we
768 exploit the fact that mapped memory ranges are larger than one page on
769 average, therefore we have good chances of hitting a mapped area if we
770 traverse only every second, or only fourth page, etc. This doesn't
771 decrease the worst-case runtime, only the average runtime. */
772 count
= (addr2
- addr1
) / pagesize
;
773 /* We have to test is_mapped (addr1 + i * pagesize) for 0 <= i < count. */
774 for (stepsize
= 1; stepsize
< count
; )
775 stepsize
= 2 * stepsize
;
778 uintptr_t addr_stepsize
;
782 stepsize
= stepsize
/ 2;
785 addr_stepsize
= stepsize
* pagesize
;
786 for (i
= stepsize
, addr
= addr1
+ addr_stepsize
;
788 i
+= 2 * stepsize
, addr
+= 2 * addr_stepsize
)
789 /* Here addr = addr1 + i * pagesize. */
790 if (is_mapped (addr
))
796 # if STACK_DIRECTION < 0
798 /* Info about the gap between this VMA and the previous one.
799 addr must be < vma->start. */
801 mincore_is_near_this (uintptr_t addr
, struct vma_struct
*vma
)
803 /* vma->start - addr <= (vma->start - vma->prev_end) / 2
804 is mathematically equivalent to
805 vma->prev_end <= 2 * addr - vma->start
806 <==> is_unmapped (2 * addr - vma->start, vma->start - 1).
807 But be careful about overflow: if 2 * addr - vma->start is negative,
808 we consider a tiny "guard page" mapping [0, 0] to be present around
809 NULL; it intersects the range (2 * addr - vma->start, vma->start - 1),
810 therefore return false. */
811 uintptr_t testaddr
= addr
- (vma
->start
- addr
);
812 if (testaddr
> addr
) /* overflow? */
814 /* Here testaddr <= addr < vma->start. */
815 return is_unmapped (testaddr
, vma
->start
- 1);
819 # if STACK_DIRECTION > 0
821 /* Info about the gap between this VMA and the next one.
822 addr must be > vma->end - 1. */
824 mincore_is_near_this (uintptr_t addr
, struct vma_struct
*vma
)
826 /* addr - vma->end < (vma->next_start - vma->end) / 2
827 is mathematically equivalent to
828 vma->next_start > 2 * addr - vma->end
829 <==> is_unmapped (vma->end, 2 * addr - vma->end).
830 But be careful about overflow: if 2 * addr - vma->end is > ~0UL,
831 we consider a tiny "guard page" mapping [0, 0] to be present around
832 NULL; it intersects the range (vma->end, 2 * addr - vma->end),
833 therefore return false. */
834 uintptr_t testaddr
= addr
+ (addr
- vma
->end
);
835 if (testaddr
< addr
) /* overflow? */
837 /* Here vma->end - 1 < addr <= testaddr. */
838 return is_unmapped (vma
->end
, testaddr
);
844 mincore_get_vma (uintptr_t address
, struct vma_struct
*vma
)
848 address
= (address
/ pagesize
) * pagesize
;
849 vma
->start
= mapped_range_start (address
);
850 vma
->end
= mapped_range_end (address
);
851 vma
->is_near_this
= mincore_is_near_this
;
857 /* ========================================================================== */
859 /* ---------------------------- stackvma-linux.c ---------------------------- */
861 #if defined __linux__ || defined __ANDROID__ /* Linux */
/* State shared between sigsegv_get_vma and the vma_iterate callback.  */
struct callback_locals
{
  uintptr_t address;        /* the address whose VMA we are looking for */
  struct vma_struct *vma;   /* out: the VMA description being filled */
# if STACK_DIRECTION < 0
  uintptr_t prev;           /* end of the previously seen VMA */
# else
  int stop_at_next_vma;     /* set after the VMA was found; the next VMA
                               determines vma->next_start */
# endif
  int retval;               /* 0 once the VMA was found, -1 otherwise */
};
876 callback (struct callback_locals
*locals
, uintptr_t start
, uintptr_t end
)
878 # if STACK_DIRECTION < 0
879 if (locals
->address
>= start
&& locals
->address
<= end
- 1)
881 locals
->vma
->start
= start
;
882 locals
->vma
->end
= end
;
883 locals
->vma
->prev_end
= locals
->prev
;
889 if (locals
->stop_at_next_vma
)
891 locals
->vma
->next_start
= start
;
892 locals
->stop_at_next_vma
= 0;
895 if (locals
->address
>= start
&& locals
->address
<= end
- 1)
897 locals
->vma
->start
= start
;
898 locals
->vma
->end
= end
;
900 locals
->stop_at_next_vma
= 1;
908 sigsegv_get_vma (uintptr_t address
, struct vma_struct
*vma
)
910 struct callback_locals locals
;
911 locals
.address
= address
;
913 # if STACK_DIRECTION < 0
916 locals
.stop_at_next_vma
= 0;
920 vma_iterate (&locals
);
921 if (locals
.retval
== 0)
923 # if !(STACK_DIRECTION < 0)
924 if (locals
.stop_at_next_vma
)
927 vma
->is_near_this
= simple_is_near_this
;
931 return mincore_get_vma (address
, vma
);
934 /* --------------------------- stackvma-freebsd.c --------------------------- */
936 #elif defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ /* GNU/kFreeBSD, FreeBSD */
/* State shared between sigsegv_get_vma and the vma_iterate callback.  */
struct callback_locals
{
  uintptr_t address;        /* the address whose VMA we are looking for */
  struct vma_struct *vma;   /* out: the VMA description being filled */
  /* The stack appears as multiple adjacent segments, therefore we
     merge adjacent segments.  */
  uintptr_t curr_start, curr_end;  /* the current merged segment */
# if STACK_DIRECTION < 0
  uintptr_t prev_end;       /* end of the previous merged segment */
# else
  int stop_at_next_vma;     /* set after the VMA was found; the next segment
                               determines vma->next_start */
# endif
  int retval;               /* 0 once the VMA was found, -1 otherwise */
};
954 callback (struct callback_locals
*locals
, uintptr_t start
, uintptr_t end
)
956 if (start
== locals
->curr_end
)
958 /* Merge adjacent segments. */
959 locals
->curr_end
= end
;
962 # if STACK_DIRECTION < 0
963 if (locals
->curr_start
< locals
->curr_end
964 && locals
->address
>= locals
->curr_start
965 && locals
->address
<= locals
->curr_end
- 1)
967 locals
->vma
->start
= locals
->curr_start
;
968 locals
->vma
->end
= locals
->curr_end
;
969 locals
->vma
->prev_end
= locals
->prev_end
;
973 locals
->prev_end
= locals
->curr_end
;
975 if (locals
->stop_at_next_vma
)
977 locals
->vma
->next_start
= locals
->curr_start
;
978 locals
->stop_at_next_vma
= 0;
981 if (locals
->curr_start
< locals
->curr_end
982 && locals
->address
>= locals
->curr_start
983 && locals
->address
<= locals
->curr_end
- 1)
985 locals
->vma
->start
= locals
->curr_start
;
986 locals
->vma
->end
= locals
->curr_end
;
988 locals
->stop_at_next_vma
= 1;
992 locals
->curr_start
= start
; locals
->curr_end
= end
;
997 sigsegv_get_vma (uintptr_t address
, struct vma_struct
*vma
)
999 struct callback_locals locals
;
1000 locals
.address
= address
;
1002 locals
.curr_start
= 0;
1003 locals
.curr_end
= 0;
1004 # if STACK_DIRECTION < 0
1005 locals
.prev_end
= 0;
1007 locals
.stop_at_next_vma
= 0;
1011 vma_iterate (&locals
);
1012 if (locals
.retval
< 0)
1014 if (locals
.curr_start
< locals
.curr_end
1015 && address
>= locals
.curr_start
&& address
<= locals
.curr_end
- 1)
1017 vma
->start
= locals
.curr_start
;
1018 vma
->end
= locals
.curr_end
;
1019 # if STACK_DIRECTION < 0
1020 vma
->prev_end
= locals
.prev_end
;
1022 vma
->next_start
= 0;
1027 if (locals
.retval
== 0)
1029 # if !(STACK_DIRECTION < 0)
1030 if (locals
.stop_at_next_vma
)
1031 vma
->next_start
= 0;
1033 vma
->is_near_this
= simple_is_near_this
;
1037 /* FreeBSD 6.[01] doesn't allow to distinguish unmapped pages from
1038 mapped but swapped-out pages. See whether it's fixed. */
1040 /* OK, mincore() appears to work as expected. */
1041 return mincore_get_vma (address
, vma
);
1045 /* --------------------------- stackvma-netbsd.c --------------------------- */
1047 #elif defined __NetBSD__ /* NetBSD */
/* State shared between sigsegv_get_vma and the vma_iterate callback.  */
struct callback_locals
{
  uintptr_t address;        /* the address whose VMA we are looking for */
  struct vma_struct *vma;   /* out: the VMA description being filled */
  /* The stack appears as multiple adjacent segments, therefore we
     merge adjacent segments.  */
  uintptr_t curr_start, curr_end;  /* the current merged segment */
# if STACK_DIRECTION < 0
  uintptr_t prev_end;       /* end of the previous merged segment */
# else
  int stop_at_next_vma;     /* set after the VMA was found; the next segment
                               determines vma->next_start */
# endif
  int retval;               /* 0 once the VMA was found, -1 otherwise */
};
1065 callback (struct callback_locals
*locals
, uintptr_t start
, uintptr_t end
)
1067 if (start
== locals
->curr_end
)
1069 /* Merge adjacent segments. */
1070 locals
->curr_end
= end
;
1073 # if STACK_DIRECTION < 0
1074 if (locals
->curr_start
< locals
->curr_end
1075 && locals
->address
>= locals
->curr_start
1076 && locals
->address
<= locals
->curr_end
- 1)
1078 locals
->vma
->start
= locals
->curr_start
;
1079 locals
->vma
->end
= locals
->curr_end
;
1080 locals
->vma
->prev_end
= locals
->prev_end
;
1084 locals
->prev_end
= locals
->curr_end
;
1086 if (locals
->stop_at_next_vma
)
1088 locals
->vma
->next_start
= locals
->curr_start
;
1089 locals
->stop_at_next_vma
= 0;
1092 if (locals
->curr_start
< locals
->curr_end
1093 && locals
->address
>= locals
->curr_start
1094 && locals
->address
<= locals
->curr_end
- 1)
1096 locals
->vma
->start
= locals
->curr_start
;
1097 locals
->vma
->end
= locals
->curr_end
;
1099 locals
->stop_at_next_vma
= 1;
1103 locals
->curr_start
= start
; locals
->curr_end
= end
;
1108 sigsegv_get_vma (uintptr_t address
, struct vma_struct
*vma
)
1110 struct callback_locals locals
;
1111 locals
.address
= address
;
1113 locals
.curr_start
= 0;
1114 locals
.curr_end
= 0;
1115 # if STACK_DIRECTION < 0
1116 locals
.prev_end
= 0;
1118 locals
.stop_at_next_vma
= 0;
1122 vma_iterate (&locals
);
1123 if (locals
.retval
< 0)
1125 if (locals
.curr_start
< locals
.curr_end
1126 && address
>= locals
.curr_start
&& address
<= locals
.curr_end
- 1)
1128 vma
->start
= locals
.curr_start
;
1129 vma
->end
= locals
.curr_end
;
1130 # if STACK_DIRECTION < 0
1131 vma
->prev_end
= locals
.prev_end
;
1133 vma
->next_start
= 0;
1138 if (locals
.retval
== 0)
1140 # if !(STACK_DIRECTION < 0)
1141 if (locals
.stop_at_next_vma
)
1142 vma
->next_start
= 0;
1144 vma
->is_near_this
= simple_is_near_this
;
1148 return mincore_get_vma (address
, vma
);
1151 /* --------------------------- stackvma-mquery.c --------------------------- */
1153 /* mquery() is a system call that allows to inquire the status of a
1154 range of pages of virtual memory. In particular, it allows to inquire
1155 whether a page is mapped at all, and where is the next unmapped page
1156 after a given address.
1157 As of 2021, mquery() is supported by:
1158 - OpenBSD, since OpenBSD 3.4.
1159 Note that this file can give different results. For example, on
1160 OpenBSD 4.4 / i386 the stack segment (which starts around 0xcdbfe000)
1161 ends at 0xcfbfdfff according to mincore, but at 0xffffffff according to
1164 #elif defined __OpenBSD__ /* OpenBSD */
1166 # include <unistd.h> /* getpagesize, mincore */
1167 # include <sys/types.h>
1168 # include <sys/mman.h> /* mincore */
1170 /* Cache for getpagesize(). */
1171 static uintptr_t pagesize
;
1173 /* Initialize pagesize. */
1175 init_pagesize (void)
1177 pagesize
= getpagesize ();
1180 /* Test whether the page starting at ADDR is among the address range.
1181 ADDR must be a multiple of pagesize. */
1183 is_mapped (uintptr_t addr
)
1185 /* Avoid calling mquery with a NULL first argument, because this argument
1186 value has a specific meaning. We know the NULL page is unmapped. */
1189 return mquery ((void *) addr
, pagesize
, 0, MAP_FIXED
, -1, 0) == (void *) -1;
1192 /* Assuming that the page starting at ADDR is among the address range,
1193 return the start of its virtual memory range.
1194 ADDR must be a multiple of pagesize. */
1196 mapped_range_start (uintptr_t addr
)
1199 uintptr_t known_unmapped_page
;
1201 /* Look at smaller addresses, in larger and larger steps, to minimize the
1202 number of mquery() calls. */
1203 stepsize
= pagesize
;
1211 if (addr
<= stepsize
)
1213 known_unmapped_page
= 0;
1217 hole
= (uintptr_t) mquery ((void *) (addr
- stepsize
), pagesize
,
1219 if (!(hole
== (uintptr_t) (void *) -1 || hole
>= addr
))
1221 /* Some part of [addr - stepsize, addr - 1] is unmapped. */
1222 known_unmapped_page
= hole
;
1226 /* The entire range [addr - stepsize, addr - 1] is mapped. */
1229 if (2 * stepsize
> stepsize
&& 2 * stepsize
< addr
)
1230 stepsize
= 2 * stepsize
;
1233 /* Now reduce the step size again.
1234 We know that the page at known_unmapped_page is unmapped and that
1235 0 < addr - known_unmapped_page <= stepsize. */
1236 while (stepsize
> pagesize
&& stepsize
/ 2 >= addr
- known_unmapped_page
)
1237 stepsize
= stepsize
/ 2;
1238 /* Still 0 < addr - known_unmapped_page <= stepsize. */
1239 while (stepsize
> pagesize
)
1243 stepsize
= stepsize
/ 2;
1244 hole
= (uintptr_t) mquery ((void *) (addr
- stepsize
), pagesize
,
1246 if (!(hole
== (uintptr_t) (void *) -1 || hole
>= addr
))
1247 /* Some part of [addr - stepsize, addr - 1] is unmapped. */
1248 known_unmapped_page
= hole
;
1250 /* The entire range [addr - stepsize, addr - 1] is mapped. */
1252 /* Still 0 < addr - known_unmapped_page <= stepsize. */
1258 /* Assuming that the page starting at ADDR is among the address range,
1259 return the end of its virtual memory range + 1.
1260 ADDR must be a multiple of pagesize. */
1262 mapped_range_end (uintptr_t addr
)
1269 end
= (uintptr_t) mquery ((void *) addr
, pagesize
, 0, 0, -1, 0);
1270 if (end
== (uintptr_t) (void *) -1)
1271 end
= 0; /* wrap around */
1275 /* Determine whether an address range [ADDR1..ADDR2] is completely unmapped.
1276 ADDR1 must be <= ADDR2. */
1278 is_unmapped (uintptr_t addr1
, uintptr_t addr2
)
1280 /* Round addr1 down. */
1281 addr1
= (addr1
/ pagesize
) * pagesize
;
1282 /* Round addr2 up and turn it into an exclusive bound. */
1283 addr2
= ((addr2
/ pagesize
) + 1) * pagesize
;
1285 /* Avoid calling mquery with a NULL first argument, because this argument
1286 value has a specific meaning. We know the NULL page is unmapped. */
1292 if (mquery ((void *) addr1
, addr2
- addr1
, 0, MAP_FIXED
, -1, 0)
1294 /* Not all the interval [addr1 .. addr2 - 1] is unmapped. */
1297 /* The interval [addr1 .. addr2 - 1] is unmapped. */
# if STACK_DIRECTION < 0

/* Info about the gap between this VMA and the previous one.
   addr must be < vma->start.
   Returns true if the gap's midpoint is closer to vma->start than to
   the previous VMA's end, i.e. a fault at addr plausibly belongs to
   this VMA's (downward-growing) stack.  */
static int
mquery_is_near_this (uintptr_t addr, struct vma_struct *vma)
{
  /* vma->start - addr <= (vma->start - vma->prev_end) / 2
     is mathematically equivalent to
     vma->prev_end <= 2 * addr - vma->start
     <==> is_unmapped (2 * addr - vma->start, vma->start - 1).
     But be careful about overflow: if 2 * addr - vma->start is negative,
     we consider a tiny "guard page" mapping [0, 0] to be present around
     NULL; it intersects the range (2 * addr - vma->start, vma->start - 1),
     therefore return false.  */
  uintptr_t testaddr = addr - (vma->start - addr);
  if (testaddr > addr) /* overflow? */
    return 0;
  /* Here testaddr <= addr < vma->start.  */
  return is_unmapped (testaddr, vma->start - 1);
}

# endif
# if STACK_DIRECTION > 0

/* Info about the gap between this VMA and the next one.
   addr must be > vma->end - 1.
   Returns true if addr is closer to this VMA's end than to the next
   VMA's start, i.e. a fault at addr plausibly belongs to this VMA's
   (upward-growing) stack.  */
static int
mquery_is_near_this (uintptr_t addr, struct vma_struct *vma)
{
  /* addr - vma->end < (vma->next_start - vma->end) / 2
     is mathematically equivalent to
     vma->next_start > 2 * addr - vma->end
     <==> is_unmapped (vma->end, 2 * addr - vma->end).
     But be careful about overflow: if 2 * addr - vma->end is > ~0UL,
     we consider a tiny "guard page" mapping [0, 0] to be present around
     NULL; it intersects the range (vma->end, 2 * addr - vma->end),
     therefore return false.  */
  uintptr_t testaddr = addr + (addr - vma->end);
  if (testaddr < addr) /* overflow? */
    return 0;
  /* Here vma->end - 1 < addr <= testaddr.  */
  return is_unmapped (vma->end, testaddr);
}

# endif
1351 sigsegv_get_vma (uintptr_t address
, struct vma_struct
*vma
)
1355 address
= (address
/ pagesize
) * pagesize
;
1356 vma
->start
= mapped_range_start (address
);
1357 vma
->end
= mapped_range_end (address
);
1358 vma
->is_near_this
= mquery_is_near_this
;
1362 /* ---------------------------- stackvma-mach.c ---------------------------- */
1364 #elif (defined __APPLE__ && defined __MACH__) /* macOS */
1368 #include <mach/mach.h>
1369 #include <mach/machine/vm_param.h>
1372 sigsegv_get_vma (uintptr_t req_address
, struct vma_struct
*vma
)
1374 uintptr_t prev_address
= 0, prev_size
= 0;
1375 uintptr_t join_address
= 0, join_size
= 0;
1377 vm_address_t address
;
1379 task_t task
= mach_task_self ();
1381 for (address
= VM_MIN_ADDRESS
; more
; address
+= size
)
1383 mach_port_t object_name
;
1384 /* In MacOS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
1385 32 bits in 32-bit processes and 64 bits in 64-bit processes. Whereas
1386 mach_vm_address_t and mach_vm_size_t are always 64 bits large.
1387 MacOS X 10.5 has three vm_region like methods:
1388 - vm_region. It has arguments that depend on whether the current
1389 process is 32-bit or 64-bit. When linking dynamically, this
1390 function exists only in 32-bit processes. Therefore we use it only
1391 in 32-bit processes.
1392 - vm_region_64. It has arguments that depend on whether the current
1393 process is 32-bit or 64-bit. It interprets a flavor
1394 VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
1395 dangerous since 'struct vm_region_basic_info_64' is larger than
1396 'struct vm_region_basic_info'; therefore let's write
1397 VM_REGION_BASIC_INFO_64 explicitly.
1398 - mach_vm_region. It has arguments that are 64-bit always. This
1399 function is useful when you want to access the VM of a process
1400 other than the current process.
1401 In 64-bit processes, we could use vm_region_64 or mach_vm_region.
1402 I choose vm_region_64 because it uses the same types as vm_region,
1403 resulting in less conditional code. */
1404 # if defined __aarch64__ || defined __ppc64__ || defined __x86_64__
1405 struct vm_region_basic_info_64 info
;
1406 mach_msg_type_number_t info_count
= VM_REGION_BASIC_INFO_COUNT_64
;
1408 more
= (vm_region_64 (task
, &address
, &size
, VM_REGION_BASIC_INFO_64
,
1409 (vm_region_info_t
)&info
, &info_count
, &object_name
)
1412 struct vm_region_basic_info info
;
1413 mach_msg_type_number_t info_count
= VM_REGION_BASIC_INFO_COUNT
;
1415 more
= (vm_region (task
, &address
, &size
, VM_REGION_BASIC_INFO
,
1416 (vm_region_info_t
)&info
, &info_count
, &object_name
)
1421 address
= join_address
+ join_size
;
1425 if ((uintptr_t) address
== join_address
+ join_size
)
1429 prev_address
= join_address
;
1430 prev_size
= join_size
;
1431 join_address
= (uintptr_t) address
;
1435 if (object_name
!= MACH_PORT_NULL
)
1436 mach_port_deallocate (mach_task_self (), object_name
);
1438 # if STACK_DIRECTION < 0
1439 if (join_address
<= req_address
&& join_address
+ join_size
> req_address
)
1441 vma
->start
= join_address
;
1442 vma
->end
= join_address
+ join_size
;
1443 vma
->prev_end
= prev_address
+ prev_size
;
1444 vma
->is_near_this
= simple_is_near_this
;
1448 if (prev_address
<= req_address
&& prev_address
+ prev_size
> req_address
)
1450 vma
->start
= prev_address
;
1451 vma
->end
= prev_address
+ prev_size
;
1452 vma
->next_start
= join_address
;
1453 vma
->is_near_this
= simple_is_near_this
;
1459 # if STACK_DIRECTION > 0
1460 if (join_address
<= req_address
&& join_address
+ size
> req_address
)
1462 vma
->start
= prev_address
;
1463 vma
->end
= prev_address
+ prev_size
;
1464 vma
->next_start
= ~0UL;
1465 vma
->is_near_this
= simple_is_near_this
;
1473 /* ----------------------------- stackvma-aix.c ----------------------------- */
1475 #elif defined _AIX /* AIX */
1477 # include <unistd.h> /* getpagesize, getpid, close, read */
1478 # include <errno.h> /* EINTR */
1479 # include <fcntl.h> /* open */
1480 # include <string.h> /* memcpy */
1481 # include <sys/types.h>
1482 # include <sys/mman.h> /* mmap, munmap */
1483 # include <sys/procfs.h> /* prmap_t */
1484 # include <sys/utsname.h> /* uname */
/* State shared between sigsegv_get_vma and the vma_iterate callback.  */
struct callback_locals
{
  uintptr_t address;            /* in: the address to look up */
  struct vma_struct *vma;       /* out: filled with the containing VMA */
# if STACK_DIRECTION < 0
  uintptr_t prev;               /* end of the previously seen VMA */
# else
  int stop_at_next_vma;         /* 1 => record next VMA's start, then stop */
# endif
  int retval;                   /* 0 once the VMA has been found */
};
1499 callback (struct callback_locals
*locals
, uintptr_t start
, uintptr_t end
)
1501 # if STACK_DIRECTION < 0
1502 if (locals
->address
>= start
&& locals
->address
<= end
- 1)
1504 locals
->vma
->start
= start
;
1505 locals
->vma
->end
= end
;
1506 locals
->vma
->prev_end
= locals
->prev
;
1512 if (locals
->stop_at_next_vma
)
1514 locals
->vma
->next_start
= start
;
1515 locals
->stop_at_next_vma
= 0;
1518 if (locals
->address
>= start
&& locals
->address
<= end
- 1)
1520 locals
->vma
->start
= start
;
1521 locals
->vma
->end
= end
;
1523 locals
->stop_at_next_vma
= 1;
1530 /* Iterate over the virtual memory areas of the current process.
1531 If such iteration is supported, the callback is called once for every
1532 virtual memory area, in ascending order, with the following arguments:
1533 - LOCALS is the same argument as passed to vma_iterate.
1534 - START is the address of the first byte in the area, page-aligned.
1535 - END is the address of the last byte in the area plus 1, page-aligned.
1536 Note that it may be 0 for the last area in the address space.
1537 If the callback returns 0, the iteration continues. If it returns 1,
1538 the iteration terminates prematurely.
1539 This function may open file descriptors, but does not call malloc().
1540 Return 0 if all went well, or -1 in case of error. */
1541 /* This code is a simplified copy (no handling of protection flags) of the
1542 code in gnulib's lib/vma-iter.c. */
1544 vma_iterate (struct callback_locals
*locals
)
1546 /* On AIX, there is a /proc/$pic/map file, that contains records of type
1547 prmap_t, defined in <sys/procfs.h>. In older versions of AIX, it lists
1548 only the virtual memory areas that are connected to a file, not the
1549 anonymous ones. But at least since AIX 7.1, it is well usable. */
1551 char fnamebuf
[6+10+4+1];
1559 /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()). */
1560 fname
= fnamebuf
+ sizeof (fnamebuf
) - (4+1);
1561 memcpy (fname
, "/map", 4+1);
1563 unsigned int value
= getpid ();
1565 *--fname
= (value
% 10) + '0';
1566 while ((value
= value
/ 10) > 0);
1569 memcpy (fname
, "/proc/", 6);
1571 fd
= open (fname
, O_RDONLY
| O_CLOEXEC
);
1575 /* The contents of /proc/<pid>/map contains a number of prmap_t entries,
1576 then an entirely null prmap_t entry, then a heap of NUL terminated
1578 Documentation: https://www.ibm.com/docs/en/aix/7.1?topic=files-proc-file
1579 We read the entire contents, but look only at the prmap_t entries and
1580 ignore the tail part. */
1582 for (memneed
= 2 * pagesize
; ; memneed
= 2 * memneed
)
1584 /* Allocate memneed bytes of memory.
1585 We cannot use alloca here, because not much stack space is guaranteed.
1586 We also cannot use malloc here, because a malloc() call may call mmap()
1587 and thus pre-allocate available memory.
1588 So use mmap(), and ignore the resulting VMA if it occurs among the
1589 resulting VMAs. (Normally it doesn't, because it was allocated after
1590 the open() call.) */
1592 unsigned long auxmap_start
;
1593 unsigned long auxmap_end
;
1596 auxmap
= (void *) mmap ((void *) 0, memneed
, PROT_READ
| PROT_WRITE
,
1597 MAP_ANONYMOUS
| MAP_PRIVATE
, -1, 0);
1598 if (auxmap
== (void *) -1)
1603 auxmap_start
= (unsigned long) auxmap
;
1604 auxmap_end
= auxmap_start
+ memneed
;
1606 /* Read the contents of /proc/<pid>/map in a single system call.
1607 This guarantees a consistent result (no duplicated or omitted
1611 nbytes
= read (fd
, auxmap
, memneed
);
1612 while (nbytes
< 0 && errno
== EINTR
);
1615 munmap (auxmap
, memneed
);
1619 if (nbytes
== memneed
)
1621 /* Need more memory. */
1622 munmap (auxmap
, memneed
);
1623 if (lseek (fd
, 0, SEEK_SET
) < 0)
1631 if (read (fd
, (char *) auxmap
+ nbytes
, 1) > 0)
1633 /* Oops, we had a short read. Retry. */
1634 if (lseek (fd
, 0, SEEK_SET
) < 0)
1636 munmap (auxmap
, memneed
);
1643 /* We now have the entire contents of /proc/<pid>/map in memory. */
1644 prmap_t
* maps
= (prmap_t
*) auxmap
;
1646 /* The entries are not sorted by address. Therefore
1647 1. Extract the relevant information into an array.
1648 2. Sort the array in ascending order.
1649 3. Invoke the callback. */
1656 /* Since 2 * sizeof (vma_t) <= sizeof (prmap_t), we can reuse the
1658 vma_t
*vmas
= (vma_t
*) auxmap
;
1665 unsigned long start
, end
;
1667 start
= (unsigned long) mp
->pr_vaddr
;
1668 end
= start
+ mp
->pr_size
;
1669 if (start
== 0 && end
== 0 && mp
->pr_mflags
== 0)
1671 /* Discard empty VMAs and kernel VMAs. */
1672 if (start
< end
&& (mp
->pr_mflags
& MA_KERNTEXT
) == 0)
1674 if (start
<= auxmap_start
&& auxmap_end
- 1 <= end
- 1)
1676 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
1677 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
1678 if (start
< auxmap_start
)
1681 vp
->end
= auxmap_start
;
1684 if (auxmap_end
- 1 < end
- 1)
1686 vp
->start
= auxmap_end
;
1702 size_t nvmas
= vp
- vmas
;
1703 /* Sort the array in ascending order.
1704 Better not call qsort(), since it may call malloc().
1705 Insertion-sort is OK in this case, despite its worst-case running
1706 time of O(N²), since the number of VMAs will rarely be larger than
1710 for (i
= 1; i
< nvmas
; i
++)
1712 /* Invariant: Here vmas[0..i-1] is sorted. */
1714 for (j
= i
; j
> 0 && vmas
[j
- 1].start
> vmas
[j
].start
; j
--)
1716 vma_t tmp
= vmas
[j
- 1];
1717 vmas
[j
- 1] = vmas
[j
];
1720 /* Invariant: Here vmas[0..i] is sorted. */
1724 /* Invoke the callback. */
1727 for (i
= 0; i
< nvmas
; i
++)
1729 vma_t
*vpi
= &vmas
[i
];
1730 if (callback (locals
, vpi
->start
, vpi
->end
))
1735 munmap (auxmap
, memneed
);
1745 sigsegv_get_vma (uintptr_t address
, struct vma_struct
*vma
)
1749 /* && strcmp (u.sysname, "AIX") == 0 */
1750 && !(u
.version
[0] >= '1' && u
.version
[0] <= '6' && u
.version
[1] == '\0'))
1752 /* AIX 7 or higher. */
1753 struct callback_locals locals
;
1754 locals
.address
= address
;
1756 #if STACK_DIRECTION < 0
1759 locals
.stop_at_next_vma
= 0;
1763 vma_iterate (&locals
);
1764 if (locals
.retval
== 0)
1766 #if !(STACK_DIRECTION < 0)
1767 if (locals
.stop_at_next_vma
)
1768 vma
->next_start
= 0;
1770 vma
->is_near_this
= simple_is_near_this
;
1775 return mincore_get_vma (address
, vma
);
1778 /* --------------------------- stackvma-procfs.c --------------------------- */
1780 #elif defined __sgi || defined __sun /* IRIX, Solaris */
1782 # include <errno.h> /* errno, EINTR */
1783 # include <fcntl.h> /* open, O_RDONLY */
1784 # include <stddef.h> /* size_t */
1785 # include <unistd.h> /* getpagesize, getpid, read, close */
1786 # include <sys/types.h>
1787 # include <sys/mman.h> /* mmap, munmap */
1788 # include <sys/stat.h> /* fstat */
1789 # include <string.h> /* memcpy */
1791 /* Try to use the newer ("structured") /proc filesystem API, if supported. */
1792 # define _STRUCTURED_PROC 1
1793 # include <sys/procfs.h> /* prmap_t, optionally PIOC* */
/* Cache for getpagesize().  */
static uintptr_t pagesize;

/* Initialize pagesize.  */
static void
init_pagesize (void)
{
  pagesize = getpagesize ();
}
/* State shared between sigsegv_get_vma and the vma_iterate callback.  */
struct callback_locals
{
  uintptr_t address;            /* in: the address to look up */
  struct vma_struct *vma;       /* out: filled with the containing VMA */
# if STACK_DIRECTION < 0
  uintptr_t prev;               /* end of the previously seen VMA */
# else
  int stop_at_next_vma;         /* 1 => record next VMA's start, then stop */
# endif
  int retval;                   /* 0 once the VMA has been found */
};
1822 callback (struct callback_locals
*locals
, uintptr_t start
, uintptr_t end
)
1824 # if STACK_DIRECTION < 0
1825 if (locals
->address
>= start
&& locals
->address
<= end
- 1)
1827 locals
->vma
->start
= start
;
1828 locals
->vma
->end
= end
;
1829 locals
->vma
->prev_end
= locals
->prev
;
1835 if (locals
->stop_at_next_vma
)
1837 locals
->vma
->next_start
= start
;
1838 locals
->stop_at_next_vma
= 0;
1841 if (locals
->address
>= start
&& locals
->address
<= end
- 1)
1843 locals
->vma
->start
= start
;
1844 locals
->vma
->end
= end
;
1846 locals
->stop_at_next_vma
= 1;
1853 /* Iterate over the virtual memory areas of the current process.
1854 If such iteration is supported, the callback is called once for every
1855 virtual memory area, in ascending order, with the following arguments:
1856 - LOCALS is the same argument as passed to vma_iterate.
1857 - START is the address of the first byte in the area, page-aligned.
1858 - END is the address of the last byte in the area plus 1, page-aligned.
1859 Note that it may be 0 for the last area in the address space.
1860 If the callback returns 0, the iteration continues. If it returns 1,
1861 the iteration terminates prematurely.
1862 This function may open file descriptors, but does not call malloc().
1863 Return 0 if all went well, or -1 in case of error. */
1864 /* This code is a simplified copy (no handling of protection flags) of the
1865 code in gnulib's lib/vma-iter.c. */
1867 vma_iterate (struct callback_locals
*locals
)
1869 /* Note: Solaris <sys/procfs.h> defines a different type prmap_t with
1870 _STRUCTURED_PROC than without! Here's a table of sizeof(prmap_t):
1872 _STRUCTURED_PROC = 0 32 56
1873 _STRUCTURED_PROC = 1 96 104
1874 Therefore, if the include files provide the newer API, prmap_t has
1875 the bigger size, and thus you MUST use the newer API. And if the
1876 include files provide the older API, prmap_t has the smaller size,
1877 and thus you MUST use the older API. */
1879 # if defined PIOCNMAP && defined PIOCMAP
1880 /* We must use the older /proc interface. */
1882 char fnamebuf
[6+10+1];
1887 # if HAVE_MAP_ANONYMOUS
1889 # define map_flags MAP_ANONYMOUS
1890 # else /* !HAVE_MAP_ANONYMOUS */
1892 # define map_flags 0
1895 uintptr_t auxmap_start
;
1896 uintptr_t auxmap_end
;
1903 /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()). */
1904 fname
= fnamebuf
+ sizeof (fnamebuf
) - 1;
1907 unsigned int value
= getpid ();
1909 *--fname
= (value
% 10) + '0';
1910 while ((value
= value
/ 10) > 0);
1913 memcpy (fname
, "/proc/", 6);
1915 fd
= open (fname
, O_RDONLY
);
1919 if (ioctl (fd
, PIOCNMAP
, &nmaps
) < 0)
1922 memneed
= (nmaps
+ 10) * sizeof (prmap_t
);
1923 /* Allocate memneed bytes of memory.
1924 We cannot use alloca here, because not much stack space is guaranteed.
1925 We also cannot use malloc here, because a malloc() call may call mmap()
1926 and thus pre-allocate available memory.
1927 So use mmap(), and ignore the resulting VMA. */
1928 memneed
= ((memneed
- 1) / pagesize
+ 1) * pagesize
;
1929 # if !HAVE_MAP_ANONYMOUS
1930 zero_fd
= open ("/dev/zero", O_RDONLY
, 0644);
1934 auxmap
= (void *) mmap ((void *) 0, memneed
, PROT_READ
| PROT_WRITE
,
1935 map_flags
| MAP_PRIVATE
, zero_fd
, 0);
1936 # if !HAVE_MAP_ANONYMOUS
1939 if (auxmap
== (void *) -1)
1941 auxmap_start
= (uintptr_t) auxmap
;
1942 auxmap_end
= auxmap_start
+ memneed
;
1943 maps
= (prmap_t
*) auxmap
;
1945 if (ioctl (fd
, PIOCMAP
, maps
) < 0)
1950 uintptr_t start
, end
;
1952 start
= (uintptr_t) mp
->pr_vaddr
;
1953 end
= start
+ mp
->pr_size
;
1954 if (start
== 0 && end
== 0)
1957 if (start
<= auxmap_start
&& auxmap_end
- 1 <= end
- 1)
1959 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
1960 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
1961 if (start
< auxmap_start
)
1962 if (callback (locals
, start
, auxmap_start
))
1964 if (auxmap_end
- 1 < end
- 1)
1965 if (callback (locals
, auxmap_end
, end
))
1970 if (callback (locals
, start
, end
))
1974 munmap (auxmap
, memneed
);
1979 munmap (auxmap
, memneed
);
1985 /* We must use the newer /proc interface.
1987 https://docs.oracle.com/cd/E23824_01/html/821-1473/proc-4.html
1988 The contents of /proc/<pid>/map consists of records of type
1989 prmap_t. These are different in 32-bit and 64-bit processes,
1990 but here we are fortunately accessing only the current process. */
1992 char fnamebuf
[6+10+4+1];
1997 # if HAVE_MAP_ANONYMOUS
1999 # define map_flags MAP_ANONYMOUS
2000 # else /* !HAVE_MAP_ANONYMOUS */
2002 # define map_flags 0
2005 uintptr_t auxmap_start
;
2006 uintptr_t auxmap_end
;
2014 /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()). */
2015 fname
= fnamebuf
+ sizeof (fnamebuf
) - 1 - 4;
2016 memcpy (fname
, "/map", 4 + 1);
2018 unsigned int value
= getpid ();
2020 *--fname
= (value
% 10) + '0';
2021 while ((value
= value
/ 10) > 0);
2024 memcpy (fname
, "/proc/", 6);
2026 fd
= open (fname
, O_RDONLY
);
2031 struct stat statbuf
;
2032 if (fstat (fd
, &statbuf
) < 0)
2034 nmaps
= statbuf
.st_size
/ sizeof (prmap_t
);
2037 memneed
= (nmaps
+ 10) * sizeof (prmap_t
);
2038 /* Allocate memneed bytes of memory.
2039 We cannot use alloca here, because not much stack space is guaranteed.
2040 We also cannot use malloc here, because a malloc() call may call mmap()
2041 and thus pre-allocate available memory.
2042 So use mmap(), and ignore the resulting VMA. */
2043 memneed
= ((memneed
- 1) / pagesize
+ 1) * pagesize
;
2044 # if !HAVE_MAP_ANONYMOUS
2045 zero_fd
= open ("/dev/zero", O_RDONLY
, 0644);
2049 auxmap
= (void *) mmap ((void *) 0, memneed
, PROT_READ
| PROT_WRITE
,
2050 map_flags
| MAP_PRIVATE
, zero_fd
, 0);
2051 # if !HAVE_MAP_ANONYMOUS
2054 if (auxmap
== (void *) -1)
2056 auxmap_start
= (uintptr_t) auxmap
;
2057 auxmap_end
= auxmap_start
+ memneed
;
2058 maps
= (prmap_t
*) auxmap
;
2060 /* Read up to memneed bytes from fd into maps. */
2062 size_t remaining
= memneed
;
2063 size_t total_read
= 0;
2064 char *ptr
= (char *) maps
;
2068 size_t nread
= read (fd
, ptr
, remaining
);
2069 if (nread
== (size_t)-1)
2078 total_read
+= nread
;
2082 while (remaining
> 0);
2084 nmaps
= (memneed
- remaining
) / sizeof (prmap_t
);
2085 maps_end
= maps
+ nmaps
;
2088 for (mp
= maps
; mp
< maps_end
; mp
++)
2090 uintptr_t start
, end
;
2092 start
= (uintptr_t) mp
->pr_vaddr
;
2093 end
= start
+ mp
->pr_size
;
2094 if (start
<= auxmap_start
&& auxmap_end
- 1 <= end
- 1)
2096 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
2097 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
2098 if (start
< auxmap_start
)
2099 if (callback (locals
, start
, auxmap_start
))
2101 if (auxmap_end
- 1 < end
- 1)
2102 if (callback (locals
, auxmap_end
, end
))
2107 if (callback (locals
, start
, end
))
2111 munmap (auxmap
, memneed
);
2116 munmap (auxmap
, memneed
);
2125 sigsegv_get_vma (uintptr_t address
, struct vma_struct
*vma
)
2127 struct callback_locals locals
;
2128 locals
.address
= address
;
2130 # if STACK_DIRECTION < 0
2133 locals
.stop_at_next_vma
= 0;
2137 vma_iterate (&locals
);
2138 if (locals
.retval
== 0)
2140 # if !(STACK_DIRECTION < 0)
2141 if (locals
.stop_at_next_vma
)
2142 vma
->next_start
= 0;
2144 vma
->is_near_this
= simple_is_near_this
;
2149 return mincore_get_vma (address
, vma
);
2155 /* -------------------------------------------------------------------------- */
2157 #elif defined __CYGWIN__ /* Cygwin */
/* State shared between sigsegv_get_vma and the vma_iterate callback.  */
struct callback_locals
{
  uintptr_t address;            /* in: the address to look up */
  struct vma_struct *vma;       /* out: filled with the containing VMA */
  /* The stack appears as three adjacent segments, therefore we
     merge adjacent segments.  */
  uintptr_t curr_start, curr_end;   /* the segment being accumulated */
# if STACK_DIRECTION < 0
  uintptr_t prev_end;           /* end of the previous merged segment */
# else
  int stop_at_next_vma;         /* 1 => record next segment's start, stop */
# endif
  int retval;                   /* 0 once the VMA has been found */
};
2175 callback (struct callback_locals
*locals
, uintptr_t start
, uintptr_t end
)
2177 if (start
== locals
->curr_end
)
2179 /* Merge adjacent segments. */
2180 locals
->curr_end
= end
;
2183 # if STACK_DIRECTION < 0
2184 if (locals
->curr_start
< locals
->curr_end
2185 && locals
->address
>= locals
->curr_start
2186 && locals
->address
<= locals
->curr_end
- 1)
2188 locals
->vma
->start
= locals
->curr_start
;
2189 locals
->vma
->end
= locals
->curr_end
;
2190 locals
->vma
->prev_end
= locals
->prev_end
;
2194 locals
->prev_end
= locals
->curr_end
;
2196 if (locals
->stop_at_next_vma
)
2198 locals
->vma
->next_start
= locals
->curr_start
;
2199 locals
->stop_at_next_vma
= 0;
2202 if (locals
->curr_start
< locals
->curr_end
2203 && locals
->address
>= locals
->curr_start
2204 && locals
->address
<= locals
->curr_end
- 1)
2206 locals
->vma
->start
= locals
->curr_start
;
2207 locals
->vma
->end
= locals
->curr_end
;
2209 locals
->stop_at_next_vma
= 1;
2213 locals
->curr_start
= start
; locals
->curr_end
= end
;
2218 sigsegv_get_vma (uintptr_t address
, struct vma_struct
*vma
)
2220 struct callback_locals locals
;
2221 locals
.address
= address
;
2223 locals
.curr_start
= 0;
2224 locals
.curr_end
= 0;
2225 # if STACK_DIRECTION < 0
2226 locals
.prev_end
= 0;
2228 locals
.stop_at_next_vma
= 0;
2232 vma_iterate (&locals
);
2233 if (locals
.retval
< 0)
2235 if (locals
.curr_start
< locals
.curr_end
2236 && address
>= locals
.curr_start
&& address
<= locals
.curr_end
- 1)
2238 vma
->start
= locals
.curr_start
;
2239 vma
->end
= locals
.curr_end
;
2240 # if STACK_DIRECTION < 0
2241 vma
->prev_end
= locals
.prev_end
;
2243 vma
->next_start
= 0;
2248 if (locals
.retval
== 0)
2250 # if !(STACK_DIRECTION < 0)
2251 if (locals
.stop_at_next_vma
)
2252 vma
->next_start
= 0;
2254 vma
->is_near_this
= simple_is_near_this
;
2261 /* ---------------------------- stackvma-beos.h ---------------------------- */
2263 #elif defined __HAIKU__ /* Haiku */
2265 # include <OS.h> /* get_next_area_info */
/* State shared between sigsegv_get_vma and the vma_iterate callback.  */
struct callback_locals
{
  uintptr_t address;            /* in: the address to look up */
  struct vma_struct *vma;       /* out: filled with the containing VMA */
# if STACK_DIRECTION < 0
  uintptr_t prev;               /* end of the previously seen VMA */
# else
  int stop_at_next_vma;         /* 1 => record next VMA's start, then stop */
# endif
  int retval;                   /* 0 once the VMA has been found */
};
2280 callback (struct callback_locals
*locals
, uintptr_t start
, uintptr_t end
)
2282 # if STACK_DIRECTION < 0
2283 if (locals
->address
>= start
&& locals
->address
<= end
- 1)
2285 locals
->vma
->start
= start
;
2286 locals
->vma
->end
= end
;
2287 locals
->vma
->prev_end
= locals
->prev
;
2293 if (locals
->stop_at_next_vma
)
2295 locals
->vma
->next_start
= start
;
2296 locals
->stop_at_next_vma
= 0;
2299 if (locals
->address
>= start
&& locals
->address
<= end
- 1)
2301 locals
->vma
->start
= start
;
2302 locals
->vma
->end
= end
;
2304 locals
->stop_at_next_vma
= 1;
2311 /* Iterate over the virtual memory areas of the current process.
2312 If such iteration is supported, the callback is called once for every
2313 virtual memory area, in ascending order, with the following arguments:
2314 - LOCALS is the same argument as passed to vma_iterate.
2315 - START is the address of the first byte in the area, page-aligned.
2316 - END is the address of the last byte in the area plus 1, page-aligned.
2317 Note that it may be 0 for the last area in the address space.
2318 If the callback returns 0, the iteration continues. If it returns 1,
2319 the iteration terminates prematurely.
2320 This function may open file descriptors, but does not call malloc().
2321 Return 0 if all went well, or -1 in case of error. */
2322 /* This code is a simplified copy (no handling of protection flags) of the
2323 code in gnulib's lib/vma-iter.c. */
2325 vma_iterate (struct callback_locals
*locals
)
2331 while (get_next_area_info (0, &cookie
, &info
) == B_OK
)
2333 uintptr_t start
, end
;
2335 start
= (uintptr_t) info
.address
;
2336 end
= start
+ info
.size
;
2338 if (callback (locals
, start
, end
))
2345 sigsegv_get_vma (uintptr_t address
, struct vma_struct
*vma
)
2347 struct callback_locals locals
;
2348 locals
.address
= address
;
2350 # if STACK_DIRECTION < 0
2353 locals
.stop_at_next_vma
= 0;
2357 vma_iterate (&locals
);
2358 if (locals
.retval
== 0)
2360 # if !(STACK_DIRECTION < 0)
2361 if (locals
.stop_at_next_vma
)
2362 vma
->next_start
= 0;
2364 vma
->is_near_this
= simple_is_near_this
;
2370 /* -------------------------------------------------------------------------- */
2372 #else /* Hurd, Minix, ... */
2375 sigsegv_get_vma (uintptr_t address
, struct vma_struct
*vma
)