/* Iteration over virtual memory areas.
   Copyright (C) 2011-2024 Free Software Foundation, Inc.
   Written by Bruno Haible <bruno@clisp.org>, 2011-2017.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */
#include <config.h>

/* On Solaris in 32-bit mode, when gnulib module 'largefile' is in use,
   prevent a compilation error
     "Cannot use procfs in the large file compilation environment"
   while also preventing <sys/types.h> from not defining off_t.
   On Android, when targeting Android 4.4 or older with a GCC toolchain,
   prevent a compilation error
     "error: call to 'mmap' declared with attribute error: mmap is not
      available with _FILE_OFFSET_BITS=64 when using GCC until android-21.
      Either raise your minSdkVersion, disable _FILE_OFFSET_BITS=64, or
      switch to Clang."
   The files that we access in this compilation unit are less than 2 GB
   large.  */
#if defined __sun && !defined _LP64 && _FILE_OFFSET_BITS == 64
# undef _FILE_OFFSET_BITS
# define _FILE_OFFSET_BITS 32
#endif
#ifdef __ANDROID__
# undef _FILE_OFFSET_BITS
#endif

/* Specification.  */
#include "vma-iter.h"

#include <errno.h> /* errno */
#include <stdlib.h> /* size_t */
#include <fcntl.h> /* open, O_RDONLY */
#include <unistd.h> /* getpagesize, lseek, read, close, getpid */

#if defined __linux__ || defined __ANDROID__
# include <limits.h> /* PATH_MAX */
#endif

#if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */
# include <sys/types.h>
# include <sys/mman.h> /* mmap, munmap */
#endif
#if defined __minix
# include <string.h> /* memcpy */
#endif

#if defined __FreeBSD__ || defined __FreeBSD_kernel__ /* FreeBSD, GNU/kFreeBSD */
# include <sys/types.h>
# include <sys/mman.h> /* mmap, munmap */
# include <sys/param.h> /* prerequisite of <sys/user.h> */
# include <sys/user.h> /* struct kinfo_vmentry */
# include <sys/sysctl.h> /* sysctl */
#endif
#if defined __NetBSD__ || defined __OpenBSD__ /* NetBSD, OpenBSD */
# include <sys/types.h>
# include <sys/mman.h> /* mmap, munmap */
# include <sys/sysctl.h> /* sysctl, struct kinfo_vmentry */
#endif

#if defined _AIX /* AIX */
# include <string.h> /* memcpy */
# include <sys/types.h>
# include <sys/mman.h> /* mmap, munmap */
# include <sys/procfs.h> /* prmap_t */
#endif

#if defined __sgi || defined __osf__ /* IRIX, OSF/1 */
# include <string.h> /* memcpy */
# include <sys/types.h>
# include <sys/mman.h> /* mmap, munmap */
# include <sys/procfs.h> /* PIOC*, prmap_t */
#endif

#if defined __sun /* Solaris */
# include <string.h> /* memcpy */
# include <sys/types.h>
# include <sys/mman.h> /* mmap, munmap */
/* Try to use the newer ("structured") /proc filesystem API, if supported.  */
# define _STRUCTURED_PROC 1
# include <sys/procfs.h> /* prmap_t, optionally PIOC* */
#endif

#if HAVE_PSTAT_GETPROCVM /* HP-UX */
# include <sys/pstat.h> /* pstat_getprocvm */
#endif

#if defined __APPLE__ && defined __MACH__ /* Mac OS X */
# include <mach/mach.h>
#endif

#if defined __GNU__ /* GNU/Hurd */
# include <mach/mach.h>
#endif

#if defined _WIN32 || defined __CYGWIN__ /* Windows */
# include <windows.h>
#endif

#if defined __BEOS__ || defined __HAIKU__ /* BeOS, Haiku */
# include <OS.h>
#endif

#if HAVE_MQUERY /* OpenBSD */
# include <sys/types.h>
# include <sys/mman.h> /* mquery */
#endif


/* Support for reading text files in the /proc file system.  */

#if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */

/* Buffered read-only streams.
   We cannot use <stdio.h> here, because fopen() calls malloc(), and a malloc()
   call may call mmap() and thus pre-allocate available memory.
   Also, we cannot use multiple read() calls, because if the buffer size is
   smaller than the file's contents:
     - On NetBSD, the second read() call would return 0, thus making the file
       appear truncated.
     - On DragonFly BSD, the first read() call would fail with errno = EFBIG.
     - On all platforms, if some other thread is doing memory allocations or
       deallocations between two read() calls, there is a high risk that the
       result of these two read() calls don't fit together, and as a
       consequence we will parse garbage and either omit some VMAs or return
       VMAs with nonsensical addresses.
   So use mmap(), and ignore the resulting VMA.  */

# if defined __linux__ || defined __ANDROID__
/* On Linux, if the file does not entirely fit into the buffer, the read()
   function stops before the line that would come out truncated.  The
   maximum size of such a line is 73 + PATH_MAX bytes.  To be sure that we
   have read everything, we must verify that at least that many bytes are
   left when read() returned.  */
#  define MIN_LEFTOVER (73 + PATH_MAX)
# else
#  define MIN_LEFTOVER 1
# endif

# ifdef TEST
/* During testing, we want to run into the hairy cases.  */
#  define STACK_ALLOCATED_BUFFER_SIZE 32
# else
#  if MIN_LEFTOVER < 1024
#   define STACK_ALLOCATED_BUFFER_SIZE 1024
#  else
/* There is no point in using a stack-allocated buffer if it is too small anyway.  */
#   define STACK_ALLOCATED_BUFFER_SIZE 1
#  endif
# endif

struct rofile
  {
    size_t position;
    size_t filled;
    int eof_seen;
    /* These fields deal with allocation of the buffer.  */
    char *buffer;
    char *auxmap;
    size_t auxmap_length;
    unsigned long auxmap_start;
    unsigned long auxmap_end;
    char stack_allocated_buffer[STACK_ALLOCATED_BUFFER_SIZE];
  };

/* Open a read-only file stream.  */
static int
rof_open (struct rofile *rof, const char *filename)
{
  int fd;
  unsigned long pagesize;
  size_t size;

  fd = open (filename, O_RDONLY | O_CLOEXEC);
  if (fd < 0)
    return -1;
  rof->position = 0;
  rof->eof_seen = 0;
  /* Try the static buffer first.  */
  pagesize = 0;
  rof->buffer = rof->stack_allocated_buffer;
  size = sizeof (rof->stack_allocated_buffer);
  rof->auxmap = NULL;
  rof->auxmap_start = 0;
  rof->auxmap_end = 0;
  for (;;)
    {
      /* Attempt to read the contents in a single system call.  */
      if (size > MIN_LEFTOVER)
        {
          int n = read (fd, rof->buffer, size);
          if (n < 0 && errno == EINTR)
            goto retry;
# if defined __DragonFly__
          if (!(n < 0 && errno == EFBIG))
# endif
            {
              if (n <= 0)
                /* Empty file.  */
                goto fail1;
              if (n + MIN_LEFTOVER <= size)
                {
                  /* The buffer was sufficiently large.  */
                  rof->filled = n;
# if defined __linux__ || defined __ANDROID__
                  /* On Linux, the read() call may stop even if the buffer was
                     large enough.  We need the equivalent of full_read().  */
                  for (;;)
                    {
                      n = read (fd, rof->buffer + rof->filled, size - rof->filled);
                      if (n < 0 && errno == EINTR)
                        goto retry;
                      if (n < 0)
                        /* Some error.  */
                        goto fail1;
                      if (n + MIN_LEFTOVER > size - rof->filled)
                        /* Allocate a larger buffer.  */
                        break;
                      if (n == 0)
                        {
                          /* Reached the end of file.  */
                          close (fd);
                          return 0;
                        }
                      rof->filled += n;
                    }
# else
                  close (fd);
                  return 0;
# endif
                }
            }
        }
      /* Allocate a larger buffer.  */
      if (pagesize == 0)
        {
          pagesize = getpagesize ();
          size = pagesize;
          while (size <= MIN_LEFTOVER)
            size = 2 * size;
        }
      else
        {
          size = 2 * size;
          if (size == 0)
            /* Wraparound.  */
            goto fail1;
          if (rof->auxmap != NULL)
            munmap (rof->auxmap, rof->auxmap_length);
        }
      rof->auxmap = (void *) mmap ((void *) 0, size, PROT_READ | PROT_WRITE,
                                   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      if (rof->auxmap == (void *) -1)
        {
          close (fd);
          return -1;
        }
      rof->auxmap_length = size;
      rof->auxmap_start = (unsigned long) rof->auxmap;
      rof->auxmap_end = rof->auxmap_start + size;
      rof->buffer = (char *) rof->auxmap;
     retry:
      /* Restart.  */
      if (lseek (fd, 0, SEEK_SET) < 0)
        {
          close (fd);
          fd = open (filename, O_RDONLY | O_CLOEXEC);
          if (fd < 0)
            goto fail2;
        }
    }

 fail1:
  close (fd);
 fail2:
  if (rof->auxmap != NULL)
    munmap (rof->auxmap, rof->auxmap_length);
  return -1;
}

/* Return the next byte from a read-only file stream without consuming it,
   or -1 at EOF.  */
static int
rof_peekchar (struct rofile *rof)
{
  if (rof->position == rof->filled)
    {
      rof->eof_seen = 1;
      return -1;
    }
  return (unsigned char) rof->buffer[rof->position];
}

/* Return the next byte from a read-only file stream, or -1 at EOF.  */
static int
rof_getchar (struct rofile *rof)
{
  int c = rof_peekchar (rof);
  if (c >= 0)
    rof->position++;
  return c;
}

/* Parse an unsigned hexadecimal number from a read-only file stream.  */
static int
rof_scanf_lx (struct rofile *rof, unsigned long *valuep)
{
  unsigned long value = 0;
  unsigned int numdigits = 0;
  for (;;)
    {
      int c = rof_peekchar (rof);
      if (c >= '0' && c <= '9')
        value = (value << 4) + (c - '0');
      else if (c >= 'A' && c <= 'F')
        value = (value << 4) + (c - 'A' + 10);
      else if (c >= 'a' && c <= 'f')
        value = (value << 4) + (c - 'a' + 10);
      else
        break;
      rof_getchar (rof);
      numdigits++;
    }
  if (numdigits == 0)
    return -1;
  *valuep = value;
  return 0;
}

/* Close a read-only file stream.  */
static void
rof_close (struct rofile *rof)
{
  if (rof->auxmap != NULL)
    munmap (rof->auxmap, rof->auxmap_length);
}

#endif


/* Support for reading the info from a text file in the /proc file system.  */

#if defined __linux__ || defined __ANDROID__ || (defined __FreeBSD_kernel__ && !defined __FreeBSD__) /* || defined __CYGWIN__ */
/* GNU/kFreeBSD mounts /proc as linprocfs, which looks like a Linux /proc
   file system.  */

static int
vma_iterate_proc (vma_iterate_callback_fn callback, void *data)
{
  struct rofile rof;

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, "/proc/self/maps") >= 0)
    {
      unsigned long auxmap_start = rof.auxmap_start;
      unsigned long auxmap_end = rof.auxmap_end;

      for (;;)
        {
          unsigned long start, end;
          unsigned int flags;
          int c;

          /* Parse one line.  First start and end.  */
          if (!(rof_scanf_lx (&rof, &start) >= 0
                && rof_getchar (&rof) == '-'
                && rof_scanf_lx (&rof, &end) >= 0))
            break;
          /* Then the flags.  */
          do
            c = rof_getchar (&rof);
          while (c == ' ');
          flags = 0;
          if (c == 'r')
            flags |= VMA_PROT_READ;
          c = rof_getchar (&rof);
          if (c == 'w')
            flags |= VMA_PROT_WRITE;
          c = rof_getchar (&rof);
          if (c == 'x')
            flags |= VMA_PROT_EXECUTE;
          while (c = rof_getchar (&rof), c != -1 && c != '\n')
            ;

          if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
            {
              /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
                 = [start,auxmap_start-1] u [auxmap_end,end-1].  */
              if (start < auxmap_start)
                if (callback (data, start, auxmap_start, flags))
                  break;
              if (auxmap_end - 1 < end - 1)
                if (callback (data, auxmap_end, end, flags))
                  break;
            }
          else
            {
              if (callback (data, start, end, flags))
                break;
            }
        }
      rof_close (&rof);
      return 0;
    }

  return -1;
}

#elif defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__

static int
vma_iterate_proc (vma_iterate_callback_fn callback, void *data)
{
  struct rofile rof;

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, "/proc/curproc/map") >= 0)
    {
      unsigned long auxmap_start = rof.auxmap_start;
      unsigned long auxmap_end = rof.auxmap_end;

      for (;;)
        {
          unsigned long start, end;
          unsigned int flags;
          int c;

          /* Parse one line.  First start.  */
          if (!(rof_getchar (&rof) == '0'
                && rof_getchar (&rof) == 'x'
                && rof_scanf_lx (&rof, &start) >= 0))
            break;
          while (c = rof_peekchar (&rof), c == ' ' || c == '\t')
            rof_getchar (&rof);
          /* Then end.  */
          if (!(rof_getchar (&rof) == '0'
                && rof_getchar (&rof) == 'x'
                && rof_scanf_lx (&rof, &end) >= 0))
            break;
# if defined __FreeBSD__ || defined __DragonFly__
          /* Then the resident pages count.  */
          do
            c = rof_getchar (&rof);
          while (c == ' ');
          do
            c = rof_getchar (&rof);
          while (c != -1 && c != '\n' && c != ' ');
          /* Then the private resident pages count.  */
          do
            c = rof_getchar (&rof);
          while (c == ' ');
          do
            c = rof_getchar (&rof);
          while (c != -1 && c != '\n' && c != ' ');
          /* Then some kernel address.  */
          do
            c = rof_getchar (&rof);
          while (c == ' ');
          do
            c = rof_getchar (&rof);
          while (c != -1 && c != '\n' && c != ' ');
# endif
          /* Then the flags.  */
          do
            c = rof_getchar (&rof);
          while (c == ' ');
          flags = 0;
          if (c == 'r')
            flags |= VMA_PROT_READ;
          c = rof_getchar (&rof);
          if (c == 'w')
            flags |= VMA_PROT_WRITE;
          c = rof_getchar (&rof);
          if (c == 'x')
            flags |= VMA_PROT_EXECUTE;
          while (c = rof_getchar (&rof), c != -1 && c != '\n')
            ;

          if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
            {
              /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
                 = [start,auxmap_start-1] u [auxmap_end,end-1].  */
              if (start < auxmap_start)
                if (callback (data, start, auxmap_start, flags))
                  break;
              if (auxmap_end - 1 < end - 1)
                if (callback (data, auxmap_end, end, flags))
                  break;
            }
          else
            {
              if (callback (data, start, end, flags))
                break;
            }
        }
      rof_close (&rof);
      return 0;
    }

  return -1;
}

#elif defined __minix

static int
vma_iterate_proc (vma_iterate_callback_fn callback, void *data)
{
  char fnamebuf[6+10+4+1];
  char *fname;
  struct rofile rof;

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - (4 + 1);
  memcpy (fname, "/map", 4 + 1);
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, fname) >= 0)
    {
      unsigned long auxmap_start = rof.auxmap_start;
      unsigned long auxmap_end = rof.auxmap_end;

      for (;;)
        {
          unsigned long start, end;
          unsigned int flags;
          int c;

          /* Parse one line.  First start and end.  */
          if (!(rof_scanf_lx (&rof, &start) >= 0
                && rof_getchar (&rof) == '-'
                && rof_scanf_lx (&rof, &end) >= 0))
            break;
          /* Then the flags.  */
          do
            c = rof_getchar (&rof);
          while (c == ' ');
          flags = 0;
          if (c == 'r')
            flags |= VMA_PROT_READ;
          c = rof_getchar (&rof);
          if (c == 'w')
            flags |= VMA_PROT_WRITE;
          c = rof_getchar (&rof);
          if (c == 'x')
            flags |= VMA_PROT_EXECUTE;
          while (c = rof_getchar (&rof), c != -1 && c != '\n')
            ;

          if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
            {
              /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
                 = [start,auxmap_start-1] u [auxmap_end,end-1].  */
              if (start < auxmap_start)
                if (callback (data, start, auxmap_start, flags))
                  break;
              if (auxmap_end - 1 < end - 1)
                if (callback (data, auxmap_end, end, flags))
                  break;
            }
          else
            {
              if (callback (data, start, end, flags))
                break;
            }
        }
      rof_close (&rof);
      return 0;
    }

  return -1;
}

#else

static inline int
vma_iterate_proc (vma_iterate_callback_fn callback, void *data)
{
  return -1;
}

#endif


/* Support for reading the info from the BSD sysctl() system call.  */

#if (defined __FreeBSD__ || defined __FreeBSD_kernel__) && defined KERN_PROC_VMMAP /* FreeBSD >= 7.1 */

static int
vma_iterate_bsd (vma_iterate_callback_fn callback, void *data)
{
  /* Documentation: https://www.freebsd.org/cgi/man.cgi?sysctl(3)  */
  int info_path[] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid () };
  size_t len;
  size_t pagesize;
  size_t memneed;
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  char *mem;
  char *p;
  char *p_end;

  len = 0;
  if (sysctl (info_path, 4, NULL, &len, NULL, 0) < 0)
    return -1;
  /* Allow for small variations over time.  In a multithreaded program
     new VMAs can be allocated at any moment.  */
  len = 2 * len + 200;
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  pagesize = getpagesize ();
  memneed = len;
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (auxmap == (void *) -1)
    return -1;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  mem = (char *) auxmap;
  if (sysctl (info_path, 4, mem, &len, NULL, 0) < 0)
    {
      munmap (auxmap, memneed);
      return -1;
    }
  p = mem;
  p_end = mem + len;
  while (p < p_end)
    {
      struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
      unsigned long start = kve->kve_start;
      unsigned long end = kve->kve_end;
      unsigned int flags = 0;
      if (kve->kve_protection & KVME_PROT_READ)
        flags |= VMA_PROT_READ;
      if (kve->kve_protection & KVME_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (kve->kve_protection & KVME_PROT_EXEC)
        flags |= VMA_PROT_EXECUTE;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
      p += kve->kve_structsize;
    }
  munmap (auxmap, memneed);
  return 0;
}

#elif defined __NetBSD__ && defined VM_PROC_MAP /* NetBSD >= 8.0 */

static int
vma_iterate_bsd (vma_iterate_callback_fn callback, void *data)
{
  /* Documentation: https://man.netbsd.org/man/sysctl+7  */
  unsigned int entry_size =
    /* If we wanted to have the path of each entry, we would need
       sizeof (struct kinfo_vmentry).  But we need only the non-string
       parts of each entry.  */
    offsetof (struct kinfo_vmentry, kve_path);
  int info_path[] = { CTL_VM, VM_PROC, VM_PROC_MAP, getpid (), entry_size };
  size_t len;
  size_t pagesize;
  size_t memneed;
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  char *mem;
  char *p;
  char *p_end;

  len = 0;
  if (sysctl (info_path, 5, NULL, &len, NULL, 0) < 0)
    return -1;
  /* Allow for small variations over time.  In a multithreaded program
     new VMAs can be allocated at any moment.  */
  len = 2 * len + 10 * entry_size;
  /* But the system call rejects lengths > 1 MB.  */
  if (len > 0x100000)
    len = 0x100000;
  /* And the system call causes a kernel panic if the length is not a multiple
     of entry_size.  */
  len = (len / entry_size) * entry_size;
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  pagesize = getpagesize ();
  memneed = len;
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (auxmap == (void *) -1)
    return -1;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  mem = (char *) auxmap;
  if (sysctl (info_path, 5, mem, &len, NULL, 0) < 0
      || len > 0x100000 - entry_size)
    {
      /* sysctl failed, or the list of VMAs is possibly truncated.  */
      munmap (auxmap, memneed);
      return -1;
    }
  p = mem;
  p_end = mem + len;
  while (p < p_end)
    {
      struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
      unsigned long start = kve->kve_start;
      unsigned long end = kve->kve_end;
      unsigned int flags = 0;
      if (kve->kve_protection & KVME_PROT_READ)
        flags |= VMA_PROT_READ;
      if (kve->kve_protection & KVME_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (kve->kve_protection & KVME_PROT_EXEC)
        flags |= VMA_PROT_EXECUTE;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
      p += entry_size;
    }
  munmap (auxmap, memneed);
  return 0;
}

#elif defined __OpenBSD__ && defined KERN_PROC_VMMAP /* OpenBSD >= 5.7 */

static int
vma_iterate_bsd (vma_iterate_callback_fn callback, void *data)
{
  /* Documentation: https://man.openbsd.org/sysctl.2  */
  int info_path[] = { CTL_KERN, KERN_PROC_VMMAP, getpid () };
  size_t len;
  size_t pagesize;
  size_t memneed;
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  char *mem;
  char *p;
  char *p_end;

  len = 0;
  if (sysctl (info_path, 3, NULL, &len, NULL, 0) < 0)
    return -1;
  /* Allow for small variations over time.  In a multithreaded program
     new VMAs can be allocated at any moment.  */
  len = 2 * len + 10 * sizeof (struct kinfo_vmentry);
  /* But the system call rejects lengths > 64 KB.  */
  if (len > 0x10000)
    len = 0x10000;
  /* And the system call rejects lengths that are not a multiple of
     sizeof (struct kinfo_vmentry).  */
  len = (len / sizeof (struct kinfo_vmentry)) * sizeof (struct kinfo_vmentry);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  pagesize = getpagesize ();
  memneed = len;
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (auxmap == (void *) -1)
    return -1;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  mem = (char *) auxmap;
  if (sysctl (info_path, 3, mem, &len, NULL, 0) < 0
      || len > 0x10000 - sizeof (struct kinfo_vmentry))
    {
      /* sysctl failed, or the list of VMAs is possibly truncated.  */
      munmap (auxmap, memneed);
      return -1;
    }
  p = mem;
  p_end = mem + len;
  while (p < p_end)
    {
      struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
      unsigned long start = kve->kve_start;
      unsigned long end = kve->kve_end;
      unsigned int flags = 0;
      if (kve->kve_protection & KVE_PROT_READ)
        flags |= VMA_PROT_READ;
      if (kve->kve_protection & KVE_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (kve->kve_protection & KVE_PROT_EXEC)
        flags |= VMA_PROT_EXECUTE;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (start != end)
            if (callback (data, start, end, flags))
              break;
        }
      p += sizeof (struct kinfo_vmentry);
    }
  munmap (auxmap, memneed);
  return 0;
}

#else

static inline int
vma_iterate_bsd (vma_iterate_callback_fn callback, void *data)
{
  return -1;
}

#endif
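
/* Example use of vma_iterate (a minimal sketch; the 'count_vmas' callback is
   hypothetical and serves only as an illustration).  A callback returns
   nonzero to stop the iteration early.

     static int
     count_vmas (void *data, uintptr_t start, uintptr_t end, unsigned int flags)
     {
       (*(unsigned int *) data)++;
       return 0;
     }

     unsigned int n = 0;
     vma_iterate (count_vmas, &n);

   See also the TEST program at the end of this file.  */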


int
vma_iterate (vma_iterate_callback_fn callback, void *data)
{
#if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */

# if defined __FreeBSD__
  /* On FreeBSD with procfs (but not GNU/kFreeBSD, which uses linprocfs), the
     function vma_iterate_proc does not return the virtual memory areas that
     were created by anonymous mmap.  See
     <https://svnweb.freebsd.org/base/head/sys/fs/procfs/procfs_map.c?view=markup>
     So use vma_iterate_proc only as a fallback.  */
  int retval = vma_iterate_bsd (callback, data);
  if (retval == 0)
    return 0;

  return vma_iterate_proc (callback, data);
# else
  /* On the other platforms, try the /proc approach first, and the sysctl()
     as a fallback.  */
  int retval = vma_iterate_proc (callback, data);
  if (retval == 0)
    return 0;

  return vma_iterate_bsd (callback, data);
# endif

#elif defined _AIX /* AIX */

  /* On AIX, there is a /proc/$pid/map file, that contains records of type
     prmap_t, defined in <sys/procfs.h>.  In older versions of AIX, it lists
     only the virtual memory areas that are connected to a file, not the
     anonymous ones.  But at least since AIX 7.1, it is well usable.  */

  size_t pagesize;
  char fnamebuf[6+10+4+1];
  char *fname;
  int fd;
  size_t memneed;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - (4+1);
  memcpy (fname, "/map", 4+1);
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY | O_CLOEXEC);
  if (fd < 0)
    return -1;

  /* The contents of /proc/<pid>/map contains a number of prmap_t entries,
     then an entirely null prmap_t entry, then a heap of NUL terminated
     strings.
     Documentation: https://www.ibm.com/docs/en/aix/7.1?topic=files-proc-file
     We read the entire contents, but look only at the prmap_t entries and
     ignore the tail part.  */

  for (memneed = 2 * pagesize; ; memneed = 2 * memneed)
    {
      /* Allocate memneed bytes of memory.
         We cannot use alloca here, because not much stack space is guaranteed.
         We also cannot use malloc here, because a malloc() call may call mmap()
         and thus pre-allocate available memory.
         So use mmap(), and ignore the resulting VMA if it occurs among the
         resulting VMAs.  (Normally it doesn't, because it was allocated after
         the open() call.)  */
      void *auxmap;
      unsigned long auxmap_start;
      unsigned long auxmap_end;
      ssize_t nbytes;

      auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                              MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      if (auxmap == (void *) -1)
        {
          close (fd);
          return -1;
        }
      auxmap_start = (unsigned long) auxmap;
      auxmap_end = auxmap_start + memneed;

      /* Read the contents of /proc/<pid>/map in a single system call.
         This guarantees a consistent result (no duplicated or omitted
         entries).  */
     retry:
      do
        nbytes = read (fd, auxmap, memneed);
      while (nbytes < 0 && errno == EINTR);
      if (nbytes <= 0)
        {
          munmap (auxmap, memneed);
          close (fd);
          return -1;
        }
      if (nbytes == memneed)
        {
          /* Need more memory.  */
          munmap (auxmap, memneed);
          if (lseek (fd, 0, SEEK_SET) < 0)
            {
              close (fd);
              return -1;
            }
        }
      else
        {
          if (read (fd, (char *) auxmap + nbytes, 1) > 0)
            {
              /* Oops, we had a short read.  Retry.  */
              if (lseek (fd, 0, SEEK_SET) < 0)
                {
                  munmap (auxmap, memneed);
                  close (fd);
                  return -1;
                }
              goto retry;
            }

          /* We now have the entire contents of /proc/<pid>/map in memory.  */
          prmap_t* maps = (prmap_t *) auxmap;

          /* The entries are not sorted by address.  Therefore
             1. Extract the relevant information into an array.
             2. Sort the array in ascending order.
             3. Invoke the callback.  */
          typedef struct
            {
              uintptr_t start;
              uintptr_t end;
              unsigned int flags;
            }
          vma_t;
          /* Since 2 * sizeof (vma_t) <= sizeof (prmap_t), we can reuse the
             same memory.  */
          vma_t *vmas = (vma_t *) auxmap;

          vma_t *vp = vmas;
          {
            prmap_t* mp;
            for (mp = maps;;)
              {
                unsigned long start, end;

                start = (unsigned long) mp->pr_vaddr;
                end = start + mp->pr_size;
                if (start == 0 && end == 0 && mp->pr_mflags == 0)
                  break;
                /* Discard empty VMAs and kernel VMAs.  */
                if (start < end && (mp->pr_mflags & MA_KERNTEXT) == 0)
                  {
                    unsigned int flags;
                    flags = 0;
                    if (mp->pr_mflags & MA_READ)
                      flags |= VMA_PROT_READ;
                    if (mp->pr_mflags & MA_WRITE)
                      flags |= VMA_PROT_WRITE;
                    if (mp->pr_mflags & MA_EXEC)
                      flags |= VMA_PROT_EXECUTE;

                    if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
                      {
                        /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
                           = [start,auxmap_start-1] u [auxmap_end,end-1].  */
                        if (start < auxmap_start)
                          {
                            vp->start = start;
                            vp->end = auxmap_start;
                            vp->flags = flags;
                            vp++;
                          }
                        if (auxmap_end - 1 < end - 1)
                          {
                            vp->start = auxmap_end;
                            vp->end = end;
                            vp->flags = flags;
                            vp++;
                          }
                      }
                    else
                      {
                        vp->start = start;
                        vp->end = end;
                        vp->flags = flags;
                        vp++;
                      }
                  }
                mp++;
              }
          }

          size_t nvmas = vp - vmas;
          /* Sort the array in ascending order.
             Better not call qsort(), since it may call malloc().
             Insertion-sort is OK in this case, despite its worst-case running
             time of O(N²), since the number of VMAs will rarely be larger than
             1000.  */
          {
            size_t i;
            for (i = 1; i < nvmas; i++)
              {
                /* Invariant: Here vmas[0..i-1] is sorted.  */
                size_t j;
                for (j = i; j > 0 && vmas[j - 1].start > vmas[j].start; j--)
                  {
                    vma_t tmp = vmas[j - 1];
                    vmas[j - 1] = vmas[j];
                    vmas[j] = tmp;
                  }
                /* Invariant: Here vmas[0..i] is sorted.  */
              }
          }

          /* Invoke the callback.  */
          {
            size_t i;
            for (i = 0; i < nvmas; i++)
              {
                vma_t *vpi = &vmas[i];
                if (callback (data, vpi->start, vpi->end, vpi->flags))
                  break;
              }
          }

          munmap (auxmap, memneed);
          break;
        }
    }

  close (fd);
  return 0;

#elif defined __sgi || defined __osf__ /* IRIX, OSF/1 */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
# if HAVE_MAP_ANONYMOUS
#  define zero_fd -1
#  define map_flags MAP_ANONYMOUS
# else
  int zero_fd;
#  define map_flags 0
# endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY | O_CLOEXEC);
  if (fd < 0)
    return -1;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
# if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY | O_CLOEXEC, 0644);
  if (zero_fd < 0)
    goto fail2;
# endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
# if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
# endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

#elif defined __sun /* Solaris */

  /* Note: Solaris <sys/procfs.h> defines a different type prmap_t with
     _STRUCTURED_PROC than without! Here's a table of sizeof(prmap_t):
                                32-bit   64-bit
         _STRUCTURED_PROC = 0     32       56
         _STRUCTURED_PROC = 1     96      104
     Therefore, if the include files provide the newer API, prmap_t has
     the bigger size, and thus you MUST use the newer API.  And if the
     include files provide the older API, prmap_t has the smaller size,
     and thus you MUST use the older API.  */

# if defined PIOCNMAP && defined PIOCMAP
  /* We must use the older /proc interface.  */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
#  if HAVE_MAP_ANONYMOUS
#   define zero_fd -1
#   define map_flags MAP_ANONYMOUS
#  else /* Solaris <= 7 */
  int zero_fd;
#   define map_flags 0
#  endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY | O_CLOEXEC);
  if (fd < 0)
    return -1;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
#  if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY | O_CLOEXEC, 0644);
  if (zero_fd < 0)
    goto fail2;
#  endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
#  if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
#  endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

# else
  /* We must use the newer /proc interface.
     Documentation:
     https://docs.oracle.com/cd/E23824_01/html/821-1473/proc-4.html
     The contents of /proc/<pid>/map consists of records of type
     prmap_t.  These are different in 32-bit and 64-bit processes,
     but here we are fortunately accessing only the current process.  */

  size_t pagesize;
  char fnamebuf[6+10+4+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
#  if HAVE_MAP_ANONYMOUS
#   define zero_fd -1
#   define map_flags MAP_ANONYMOUS
#  else /* Solaris <= 7 */
  int zero_fd;
#   define map_flags 0
#  endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* maps_end;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1 - 4;
  memcpy (fname, "/map", 4 + 1);
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY | O_CLOEXEC);
  if (fd < 0)
    return -1;

  {
    struct stat statbuf;
    if (fstat (fd, &statbuf) < 0)
      goto fail2;
    nmaps = statbuf.st_size / sizeof (prmap_t);
  }

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
#  if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY | O_CLOEXEC, 0644);
  if (zero_fd < 0)
    goto fail2;
#  endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
#  if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
#  endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  /* Read up to memneed bytes from fd into maps.  */
  {
    size_t remaining = memneed;
    size_t total_read = 0;
    char *ptr = (char *) maps;

    do
      {
        size_t nread = read (fd, ptr, remaining);
        if (nread == (size_t)-1)
          {
            if (errno == EINTR)
              continue;
            goto fail1;
          }
        if (nread == 0)
          /* EOF */
          break;
        total_read += nread;
        ptr += nread;
        remaining -= nread;
      }
    while (remaining > 0);

    nmaps = (memneed - remaining) / sizeof (prmap_t);
    maps_end = maps + nmaps;
  }

  for (mp = maps; mp < maps_end; mp++)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

# endif

#elif HAVE_PSTAT_GETPROCVM /* HP-UX */

  unsigned long pagesize = getpagesize ();
  int i;

  for (i = 0; ; i++)
    {
      struct pst_vm_status info;
      int ret = pstat_getprocvm (&info, sizeof (info), 0, i);
      if (ret < 0)
        return -1;
      if (ret == 0)
        break;
      {
        unsigned long start = info.pst_vaddr;
        unsigned long end = start + info.pst_length * pagesize;
        unsigned int flags = 0;
        if (info.pst_permission & PS_PROT_READ)
          flags |= VMA_PROT_READ;
        if (info.pst_permission & PS_PROT_WRITE)
          flags |= VMA_PROT_WRITE;
        if (info.pst_permission & PS_PROT_EXECUTE)
          flags |= VMA_PROT_EXECUTE;

        if (callback (data, start, end, flags))
          break;
      }
    }
  return 0;

#elif defined __APPLE__ && defined __MACH__ /* Mac OS X */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = VM_MIN_ADDRESS;; address += size)
    {
      int more;
      mach_port_t object_name;
      unsigned int flags;
      /* In Mac OS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
         32 bits in 32-bit processes and 64 bits in 64-bit processes.  Whereas
         mach_vm_address_t and mach_vm_size_t are always 64 bits large.
         Mac OS X 10.5 has three vm_region like methods:
           - vm_region.  It has arguments that depend on whether the current
             process is 32-bit or 64-bit.  When linking dynamically, this
             function exists only in 32-bit processes.  Therefore we use it only
             in 32-bit processes.
           - vm_region_64.  It has arguments that depend on whether the current
             process is 32-bit or 64-bit.  It interprets a flavor
             VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
             dangerous since 'struct vm_region_basic_info_64' is larger than
             'struct vm_region_basic_info'; therefore let's write
             VM_REGION_BASIC_INFO_64 explicitly.
           - mach_vm_region.  It has arguments that are 64-bit always.  This
             function is useful when you want to access the VM of a process
             other than the current process.
         In 64-bit processes, we could use vm_region_64 or mach_vm_region.
         I choose vm_region_64 because it uses the same types as vm_region,
         resulting in less conditional code.  */
# if defined __aarch64__ || defined __ppc64__ || defined __x86_64__
      struct vm_region_basic_info_64 info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;

      more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
                            (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# else
      struct vm_region_basic_info info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;

      more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                         (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# endif
      if (object_name != MACH_PORT_NULL)
        mach_port_deallocate (mach_task_self (), object_name);
      if (!more)
        break;
      flags = 0;
      if (info.protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (info.protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (info.protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }
  return 0;

#elif defined __GNU__ /* GNU/Hurd */

  /* The Hurd has a /proc/self/maps that looks like the Linux one, but it
     lacks the VMAs created through anonymous mmap.  Therefore use the Mach
     API.
     Documentation:
     https://www.gnu.org/software/hurd/gnumach-doc/Memory-Attributes.html  */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = 0;; address += size)
    {
      vm_prot_t protection;
      vm_prot_t max_protection;
      vm_inherit_t inheritance;
      boolean_t shared;
      memory_object_name_t object_name;
      vm_offset_t offset;
      unsigned int flags;

      if (!(vm_region (task, &address, &size, &protection, &max_protection,
                       &inheritance, &shared, &object_name, &offset)
            == KERN_SUCCESS))
        break;
      mach_port_deallocate (task, object_name);
      flags = 0;
      if (protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }
  return 0;

#elif defined _WIN32 || defined __CYGWIN__
  /* Windows platform.  Use the native Windows API.  */

  MEMORY_BASIC_INFORMATION info;
  uintptr_t address = 0;

  while (VirtualQuery ((void*)address, &info, sizeof(info)) == sizeof(info))
    {
      if (info.State != MEM_FREE)
        /* Ignore areas where info.State has the value MEM_RESERVE or,
           equivalently, info.Protect has the undocumented value 0.
           This is needed, so that on Cygwin, areas used by malloc() are
           distinguished from areas reserved for future malloc().  */
        if (info.State != MEM_RESERVE)
          {
            uintptr_t start, end;
            unsigned int flags;

            start = (uintptr_t)info.BaseAddress;
            end = start + info.RegionSize;
            switch (info.Protect & ~(PAGE_GUARD|PAGE_NOCACHE))
              {
              case PAGE_READONLY:
                flags = VMA_PROT_READ;
                break;
              case PAGE_READWRITE:
              case PAGE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE;
                break;
              case PAGE_EXECUTE:
                flags = VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READ:
                flags = VMA_PROT_READ | VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READWRITE:
              case PAGE_EXECUTE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE | VMA_PROT_EXECUTE;
                break;
              case PAGE_NOACCESS:
              default:
                flags = 0;
                break;
              }

            if (callback (data, start, end, flags))
              break;
          }
      address = (uintptr_t)info.BaseAddress + info.RegionSize;
    }
  return 0;

#elif defined __BEOS__ || defined __HAIKU__
  /* Use the BeOS specific API.  */

  area_info info;
  ssize_t cookie;

  cookie = 0;
  while (get_next_area_info (0, &cookie, &info) == B_OK)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) info.address;
      end = start + info.size;
      flags = 0;
      if (info.protection & B_READ_AREA)
        flags |= VMA_PROT_READ | VMA_PROT_EXECUTE;
      if (info.protection & B_WRITE_AREA)
        flags |= VMA_PROT_WRITE;

      if (callback (data, start, end, flags))
        break;
    }
  return 0;

#elif HAVE_MQUERY /* OpenBSD */

# if defined __OpenBSD__
  /* Try sysctl() first.  It is more efficient than the mquery() loop below
     and also provides the flags.  */
  {
    int retval = vma_iterate_bsd (callback, data);
    if (retval == 0)
      return 0;
  }
# endif

  {
    uintptr_t pagesize;
    uintptr_t address;
    int /*bool*/ address_known_mapped;

    pagesize = getpagesize ();
    /* Avoid calling mquery with a NULL first argument, because this argument
       value has a specific meaning.  We know the NULL page is unmapped.  */
    address = pagesize;
    address_known_mapped = 0;
    for (;;)
      {
        /* Test whether the page at address is mapped.  */
        if (address_known_mapped
            || mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0)
               == (void *) -1)
          {
            /* The page at address is mapped.
               This is the start of an interval.  */
            uintptr_t start = address;
            uintptr_t end;

            /* Find the end of the interval.  */
            end = (uintptr_t) mquery ((void *) address, pagesize, 0, 0, -1, 0);
            if (end == (uintptr_t) (void *) -1)
              end = 0; /* wrap around */
            address = end;

            /* It's too complicated to find out about the flags.
               Just pass 0.  */
            if (callback (data, start, end, 0))
              break;

            if (address < pagesize) /* wrap around? */
              break;
          }
        /* Here we know that the page at address is unmapped.  */
        {
          uintptr_t query_size = pagesize;

          address += pagesize;

          /* Query larger and larger blocks, to get through the unmapped address
             range with few mquery() calls.  */
          for (;;)
            {
              if (2 * query_size > query_size)
                query_size = 2 * query_size;
              if (address + query_size - 1 < query_size) /* wrap around? */
                {
                  address_known_mapped = 0;
                  break;
                }
              if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                  == (void *) -1)
                {
                  /* Not all the interval [address .. address + query_size - 1]
                     is unmapped.  */
                  address_known_mapped = (query_size == pagesize);
                  break;
                }
              /* The interval [address .. address + query_size - 1] is
                 unmapped.  */
              address += query_size;
            }
          /* Reduce the query size again, to determine the precise size of the
             unmapped interval that starts at address.  */
          while (query_size > pagesize)
            {
              query_size = query_size / 2;
              if (address + query_size - 1 >= query_size)
                {
                  if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                      != (void *) -1)
                    {
                      /* The interval [address .. address + query_size - 1] is
                         unmapped.  */
                      address += query_size;
                      address_known_mapped = 0;
                    }
                  else
                    address_known_mapped = (query_size == pagesize);
                }
            }
          /* Here again query_size = pagesize, and
             either address + pagesize - 1 < pagesize, or
             mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0) fails.
             So, the unmapped area ends at address.  */
        }
        if (address + pagesize - 1 < pagesize) /* wrap around? */
          break;
      }
    return 0;
  }

#else

  /* Not implemented.  */
  return -1;

#endif
}


#ifdef TEST

#include <stdio.h>

/* Output the VMAs of the current process in a format similar to the Linux
   /proc/$pid/maps file.  */

static int
vma_iterate_callback (void *data, uintptr_t start, uintptr_t end,
                      unsigned int flags)
{
  printf ("%08lx-%08lx %c%c%c\n",
          (unsigned long) start, (unsigned long) end,
          flags & VMA_PROT_READ ? 'r' : '-',
          flags & VMA_PROT_WRITE ? 'w' : '-',
          flags & VMA_PROT_EXECUTE ? 'x' : '-');
  return 0;
}

int
main ()
{
  vma_iterate (vma_iterate_callback, NULL);

  /* Let the user interactively look at the /proc file system.  */
  sleep (10);

  return 0;
}

/*
 * Local Variables:
 * compile-command: "gcc -ggdb -DTEST -Wall -I.. vma-iter.c"
 * End:
 */

#endif /* TEST */