/* Iteration over virtual memory areas.
   Copyright (C) 2011-2021 Free Software Foundation, Inc.
   Written by Bruno Haible <bruno@clisp.org>, 2011-2017.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */
#include <config.h>

/* On Solaris in 32-bit mode, when gnulib module 'largefile' is in use,
   prevent a compilation error
     "Cannot use procfs in the large file compilation environment"
   On Android, when targeting Android 4.4 or older with a GCC toolchain,
   prevent a compilation error
     "error: call to 'mmap' declared with attribute error: mmap is not
      available with _FILE_OFFSET_BITS=64 when using GCC until android-21.
      Either raise your minSdkVersion, disable _FILE_OFFSET_BITS=64, or
      switch to Clang."
   The files that we access in this compilation unit are less than 2 GB
   large.  */
#if defined __sun || defined __ANDROID__
# undef _FILE_OFFSET_BITS
#endif

/* Specification.  */
#include "vma-iter.h"

#include <errno.h> /* errno */
#include <stdlib.h> /* size_t */
#include <fcntl.h> /* open, O_RDONLY */
#include <unistd.h> /* getpagesize, lseek, read, close, getpid */
#if defined __linux__ || defined __ANDROID__
# include <limits.h> /* PATH_MAX */
#endif

#if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */
# include <sys/types.h>
# include <sys/mman.h> /* mmap, munmap */
#endif
#if defined __minix
# include <string.h> /* memcpy */
#endif

#if defined __FreeBSD__ || defined __FreeBSD_kernel__ /* FreeBSD, GNU/kFreeBSD */
# include <sys/types.h>
# include <sys/mman.h> /* mmap, munmap */
# include <sys/user.h> /* struct kinfo_vmentry */
# include <sys/sysctl.h> /* sysctl */
#endif
#if defined __NetBSD__ || defined __OpenBSD__ /* NetBSD, OpenBSD */
# include <sys/types.h>
# include <sys/mman.h> /* mmap, munmap */
# include <sys/sysctl.h> /* sysctl, struct kinfo_vmentry */
#endif

#if defined __sgi || defined __osf__ /* IRIX, OSF/1 */
# include <string.h> /* memcpy */
# include <sys/types.h>
# include <sys/mman.h> /* mmap, munmap */
# include <sys/procfs.h> /* PIOC*, prmap_t */
#endif

#if defined __sun /* Solaris */
# include <string.h> /* memcpy */
# include <sys/types.h>
# include <sys/mman.h> /* mmap, munmap */
/* Try to use the newer ("structured") /proc filesystem API, if supported.  */
# define _STRUCTURED_PROC 1
# include <sys/procfs.h> /* prmap_t, optionally PIOC* */
#endif

#if HAVE_PSTAT_GETPROCVM /* HP-UX */
# include <sys/pstat.h> /* pstat_getprocvm */
#endif

#if defined __APPLE__ && defined __MACH__ /* Mac OS X */
# include <mach/mach.h>
#endif

#if defined __GNU__ /* GNU/Hurd */
# include <mach/mach.h>
#endif

#if defined _WIN32 || defined __CYGWIN__ /* Windows */
# include <windows.h>
#endif

#if defined __BEOS__ || defined __HAIKU__ /* BeOS, Haiku */
# include <OS.h>
#endif

#if HAVE_MQUERY /* OpenBSD */
# include <sys/types.h>
# include <sys/mman.h> /* mquery */
#endif
/* Note: On AIX, there is a /proc/$pid/map file, that contains records of type
   prmap_t, defined in <sys/procfs.h>.  But it lists only the virtual memory
   areas that are connected to a file, not the anonymous ones.  */

/* Support for reading text files in the /proc file system.  */

#if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */

/* Buffered read-only streams.
   We cannot use <stdio.h> here, because fopen() calls malloc(), and a malloc()
   call may call mmap() and thus pre-allocate available memory.
   Also, we cannot use multiple read() calls, because if the buffer size is
   smaller than the file's contents:
     - On NetBSD, the second read() call would return 0, thus making the file
       appear truncated.
     - On DragonFly BSD, the first read() call would fail with errno = EFBIG.
     - On all platforms, if some other thread is doing memory allocations or
       deallocations between two read() calls, there is a high risk that the
       result of these two read() calls don't fit together, and as a
       consequence we will parse garbage and either omit some VMAs or return
       VMAs with nonsensical addresses.
   So use mmap(), and ignore the resulting VMA.  */

# if defined __linux__ || defined __ANDROID__
  /* On Linux, if the file does not entirely fit into the buffer, the read()
     function stops before the line that would come out truncated.  The
     maximum size of such a line is 73 + PATH_MAX bytes.  To be sure that we
     have read everything, we must verify that at least that many bytes are
     left when read() returned.  */
#  define MIN_LEFTOVER (73 + PATH_MAX)
# else
#  define MIN_LEFTOVER 1
# endif

# ifdef TEST
    /* During testing, we want to run into the hairy cases.  */
#  define STACK_ALLOCATED_BUFFER_SIZE 32
# else
#  if MIN_LEFTOVER < 1024
#   define STACK_ALLOCATED_BUFFER_SIZE 1024
#  else
    /* There is no point in using a stack-allocated buffer if it is too small
       anyway.  */
#   define STACK_ALLOCATED_BUFFER_SIZE 1
#  endif
# endif
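/* Illustrative arithmetic (editorial example, not from the original sources):
   on a system where PATH_MAX is 4096, a typical Linux value, MIN_LEFTOVER is
   73 + 4096 = 4169.  Since that is not < 1024, STACK_ALLOCATED_BUFFER_SIZE
   becomes 1, so rof_open below never satisfies 'size > MIN_LEFTOVER' with the
   stack buffer and allocates its real buffer with mmap() right away.  */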

struct rofile
  {
    size_t position;
    size_t filled;
    int eof_seen;
    /* These fields deal with allocation of the buffer.  */
    char *buffer;
    char *auxmap;
    size_t auxmap_length;
    unsigned long auxmap_start;
    unsigned long auxmap_end;
    char stack_allocated_buffer[STACK_ALLOCATED_BUFFER_SIZE];
  };

/* Open a read-only file stream.  */
static int
rof_open (struct rofile *rof, const char *filename)
{
  int fd;
  unsigned long pagesize;
  size_t size;

  fd = open (filename, O_RDONLY | O_CLOEXEC);
  if (fd < 0)
    return -1;
  rof->position = 0;
  rof->eof_seen = 0;
  /* Try the static buffer first.  */
  pagesize = 0;
  rof->buffer = rof->stack_allocated_buffer;
  size = sizeof (rof->stack_allocated_buffer);
  rof->auxmap = NULL;
  rof->auxmap_start = 0;
  rof->auxmap_end = 0;
  for (;;)
    {
      /* Attempt to read the contents in a single system call.  */
      if (size > MIN_LEFTOVER)
        {
          int n = read (fd, rof->buffer, size);
          if (n < 0 && errno == EINTR)
            goto retry;
# if defined __DragonFly__
          if (!(n < 0 && errno == EFBIG))
# endif
            {
              if (n <= 0)
                /* Empty file.  */
                goto fail1;
              if (n + MIN_LEFTOVER <= size)
                {
                  /* The buffer was sufficiently large.  */
                  rof->filled = n;
# if defined __linux__ || defined __ANDROID__
                  /* On Linux, the read() call may stop even if the buffer was
                     large enough.  We need the equivalent of full_read().  */
                  for (;;)
                    {
                      n = read (fd, rof->buffer + rof->filled, size - rof->filled);
                      if (n < 0 && errno == EINTR)
                        goto retry;
                      if (n < 0)
                        /* Some error.  */
                        goto fail1;
                      if (n + MIN_LEFTOVER > size - rof->filled)
                        /* Allocate a larger buffer.  */
                        break;
                      if (n == 0)
                        {
                          /* Reached the end of file.  */
                          close (fd);
                          return 0;
                        }
                      rof->filled += n;
                    }
# else
                  close (fd);
                  return 0;
# endif
                }
            }
        }
      /* Allocate a larger buffer.  */
      if (pagesize == 0)
        {
          pagesize = getpagesize ();
          size = pagesize;
          while (size <= MIN_LEFTOVER)
            size = 2 * size;
        }
      else
        {
          size = 2 * size;
          if (size == 0)
            /* Wraparound.  */
            goto fail1;
          if (rof->auxmap != NULL)
            munmap (rof->auxmap, rof->auxmap_length);
        }
      rof->auxmap = (void *) mmap ((void *) 0, size, PROT_READ | PROT_WRITE,
                                   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      if (rof->auxmap == (void *) -1)
        {
          close (fd);
          return -1;
        }
      rof->auxmap_length = size;
      rof->auxmap_start = (unsigned long) rof->auxmap;
      rof->auxmap_end = rof->auxmap_start + size;
      rof->buffer = (char *) rof->auxmap;
     retry:
      /* Restart.  */
      if (lseek (fd, 0, SEEK_SET) < 0)
        {
          close (fd);
          fd = open (filename, O_RDONLY | O_CLOEXEC);
          if (fd < 0)
            goto fail2;
        }
    }

 fail1:
  close (fd);
 fail2:
  if (rof->auxmap != NULL)
    munmap (rof->auxmap, rof->auxmap_length);
  return -1;
}

/* Return the next byte from a read-only file stream without consuming it,
   or -1 at EOF.  */
static int
rof_peekchar (struct rofile *rof)
{
  if (rof->position == rof->filled)
    {
      rof->eof_seen = 1;
      return -1;
    }
  return (unsigned char) rof->buffer[rof->position];
}

/* Return the next byte from a read-only file stream, or -1 at EOF.  */
static int
rof_getchar (struct rofile *rof)
{
  int c = rof_peekchar (rof);
  if (c >= 0)
    rof->position++;
  return c;
}

/* Parse an unsigned hexadecimal number from a read-only file stream.  */
static int
rof_scanf_lx (struct rofile *rof, unsigned long *valuep)
{
  unsigned long value = 0;
  unsigned int numdigits = 0;
  for (;;)
    {
      int c = rof_peekchar (rof);
      if (c >= '0' && c <= '9')
        value = (value << 4) + (c - '0');
      else if (c >= 'A' && c <= 'F')
        value = (value << 4) + (c - 'A' + 10);
      else if (c >= 'a' && c <= 'f')
        value = (value << 4) + (c - 'a' + 10);
      else
        break;
      rof_getchar (rof);
      numdigits++;
    }
  if (numdigits == 0)
    return -1;
  *valuep = value;
  return 0;
}

/* Close a read-only file stream.  */
static void
rof_close (struct rofile *rof)
{
  if (rof->auxmap != NULL)
    munmap (rof->auxmap, rof->auxmap_length);
}

#endif
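
/* Illustrative sketch (editorial example with hypothetical values): a Linux
   /proc/self/maps line such as
     "08048000-0804c000 r-xp 00000000 08:01 1234   /bin/cat"
   is consumed with the helpers above roughly as
     rof_scanf_lx (&rof, &start);    -> start = 0x08048000
     rof_getchar (&rof);             -> '-'
     rof_scanf_lx (&rof, &end);      -> end = 0x0804c000
   after which the protection characters are inspected and the rest of the
   line is skipped up to the newline.  This is what vma_iterate_proc below
   does for every line.  */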

/* Support for reading the info from a text file in the /proc file system.  */

#if defined __linux__ || defined __ANDROID__ || (defined __FreeBSD_kernel__ && !defined __FreeBSD__) /* || defined __CYGWIN__ */
/* GNU/kFreeBSD mounts /proc as linprocfs, which looks like a Linux /proc
   file system.  */

static int
vma_iterate_proc (vma_iterate_callback_fn callback, void *data)
{
  struct rofile rof;

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, "/proc/self/maps") >= 0)
    {
      unsigned long auxmap_start = rof.auxmap_start;
      unsigned long auxmap_end = rof.auxmap_end;

      for (;;)
        {
          unsigned long start, end;
          unsigned int flags;
          int c;

          /* Parse one line.  First start and end.  */
          if (!(rof_scanf_lx (&rof, &start) >= 0
                && rof_getchar (&rof) == '-'
                && rof_scanf_lx (&rof, &end) >= 0))
            break;
          /* Then the flags.  */
          do
            c = rof_getchar (&rof);
          while (c == ' ');
          flags = 0;
          if (c == 'r')
            flags |= VMA_PROT_READ;
          c = rof_getchar (&rof);
          if (c == 'w')
            flags |= VMA_PROT_WRITE;
          c = rof_getchar (&rof);
          if (c == 'x')
            flags |= VMA_PROT_EXECUTE;
          while (c = rof_getchar (&rof), c != -1 && c != '\n')
            ;

          if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
            {
              /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
                 = [start,auxmap_start-1] u [auxmap_end,end-1].  */
              if (start < auxmap_start)
                if (callback (data, start, auxmap_start, flags))
                  break;
              if (auxmap_end - 1 < end - 1)
                if (callback (data, auxmap_end, end, flags))
                  break;
            }
          else
            {
              if (callback (data, start, end, flags))
                break;
            }
        }
      rof_close (&rof);
      return 0;
    }

  return -1;
}

#elif defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__

static int
vma_iterate_proc (vma_iterate_callback_fn callback, void *data)
{
  struct rofile rof;

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, "/proc/curproc/map") >= 0)
    {
      unsigned long auxmap_start = rof.auxmap_start;
      unsigned long auxmap_end = rof.auxmap_end;

      for (;;)
        {
          unsigned long start, end;
          unsigned int flags;
          int c;

          /* Parse one line.  First start.  */
          if (!(rof_getchar (&rof) == '0'
                && rof_getchar (&rof) == 'x'
                && rof_scanf_lx (&rof, &start) >= 0))
            break;
          while (c = rof_peekchar (&rof), c == ' ' || c == '\t')
            rof_getchar (&rof);
          /* Then end.  */
          if (!(rof_getchar (&rof) == '0'
                && rof_getchar (&rof) == 'x'
                && rof_scanf_lx (&rof, &end) >= 0))
            break;
# if defined __FreeBSD__ || defined __DragonFly__
          /* Then the resident pages count.  */
          do
            c = rof_getchar (&rof);
          while (c == ' ');
          do
            c = rof_getchar (&rof);
          while (c != -1 && c != '\n' && c != ' ');
          /* Then the private resident pages count.  */
          do
            c = rof_getchar (&rof);
          while (c == ' ');
          do
            c = rof_getchar (&rof);
          while (c != -1 && c != '\n' && c != ' ');
          /* Then some kernel address.  */
          do
            c = rof_getchar (&rof);
          while (c == ' ');
          do
            c = rof_getchar (&rof);
          while (c != -1 && c != '\n' && c != ' ');
# endif
          /* Then the flags.  */
          do
            c = rof_getchar (&rof);
          while (c == ' ');
          flags = 0;
          if (c == 'r')
            flags |= VMA_PROT_READ;
          c = rof_getchar (&rof);
          if (c == 'w')
            flags |= VMA_PROT_WRITE;
          c = rof_getchar (&rof);
          if (c == 'x')
            flags |= VMA_PROT_EXECUTE;
          while (c = rof_getchar (&rof), c != -1 && c != '\n')
            ;

          if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
            {
              /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
                 = [start,auxmap_start-1] u [auxmap_end,end-1].  */
              if (start < auxmap_start)
                if (callback (data, start, auxmap_start, flags))
                  break;
              if (auxmap_end - 1 < end - 1)
                if (callback (data, auxmap_end, end, flags))
                  break;
            }
          else
            {
              if (callback (data, start, end, flags))
                break;
            }
        }
      rof_close (&rof);
      return 0;
    }

  return -1;
}

#elif defined __minix

static int
vma_iterate_proc (vma_iterate_callback_fn callback, void *data)
{
  char fnamebuf[6+10+4+1];
  char *fname;
  struct rofile rof;

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - (4 + 1);
  memcpy (fname, "/map", 4 + 1);
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, fname) >= 0)
    {
      unsigned long auxmap_start = rof.auxmap_start;
      unsigned long auxmap_end = rof.auxmap_end;

      for (;;)
        {
          unsigned long start, end;
          unsigned int flags;
          int c;

          /* Parse one line.  First start and end.  */
          if (!(rof_scanf_lx (&rof, &start) >= 0
                && rof_getchar (&rof) == '-'
                && rof_scanf_lx (&rof, &end) >= 0))
            break;
          /* Then the flags.  */
          do
            c = rof_getchar (&rof);
          while (c == ' ');
          flags = 0;
          if (c == 'r')
            flags |= VMA_PROT_READ;
          c = rof_getchar (&rof);
          if (c == 'w')
            flags |= VMA_PROT_WRITE;
          c = rof_getchar (&rof);
          if (c == 'x')
            flags |= VMA_PROT_EXECUTE;
          while (c = rof_getchar (&rof), c != -1 && c != '\n')
            ;

          if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
            {
              /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
                 = [start,auxmap_start-1] u [auxmap_end,end-1].  */
              if (start < auxmap_start)
                if (callback (data, start, auxmap_start, flags))
                  break;
              if (auxmap_end - 1 < end - 1)
                if (callback (data, auxmap_end, end, flags))
                  break;
            }
          else
            {
              if (callback (data, start, end, flags))
                break;
            }
        }
      rof_close (&rof);
      return 0;
    }

  return -1;
}

#else

static inline int
vma_iterate_proc (vma_iterate_callback_fn callback, void *data)
{
  return -1;
}

#endif


/* Support for reading the info from the BSD sysctl() system call.  */

#if (defined __FreeBSD__ || defined __FreeBSD_kernel__) && defined KERN_PROC_VMMAP /* FreeBSD >= 7.1 */

static int
vma_iterate_bsd (vma_iterate_callback_fn callback, void *data)
{
  /* Documentation: https://www.freebsd.org/cgi/man.cgi?sysctl(3)  */
  int info_path[] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid () };
  size_t len;
  size_t pagesize;
  size_t memneed;
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  char *mem;
  char *p;
  char *p_end;

  len = 0;
  if (sysctl (info_path, 4, NULL, &len, NULL, 0) < 0)
    return -1;
  /* Allow for small variations over time.  In a multithreaded program
     new VMAs can be allocated at any moment.  */
  len = 2 * len + 200;
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  pagesize = getpagesize ();
  memneed = len;
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (auxmap == (void *) -1)
    return -1;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  mem = (char *) auxmap;
  if (sysctl (info_path, 4, mem, &len, NULL, 0) < 0)
    {
      munmap (auxmap, memneed);
      return -1;
    }
  p = mem;
  p_end = mem + len;
  while (p < p_end)
    {
      struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
      unsigned long start = kve->kve_start;
      unsigned long end = kve->kve_end;
      unsigned int flags = 0;
      if (kve->kve_protection & KVME_PROT_READ)
        flags |= VMA_PROT_READ;
      if (kve->kve_protection & KVME_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (kve->kve_protection & KVME_PROT_EXEC)
        flags |= VMA_PROT_EXECUTE;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
      p += kve->kve_structsize;
    }
  munmap (auxmap, memneed);
  return 0;
}

#elif defined __NetBSD__ && defined VM_PROC_MAP /* NetBSD >= 8.0 */

static int
vma_iterate_bsd (vma_iterate_callback_fn callback, void *data)
{
  /* Documentation: https://man.netbsd.org/man/sysctl+7  */
  unsigned int entry_size =
    /* If we wanted to have the path of each entry, we would need
       sizeof (struct kinfo_vmentry).  But we need only the non-string
       parts of each entry.  */
    offsetof (struct kinfo_vmentry, kve_path);
  int info_path[] = { CTL_VM, VM_PROC, VM_PROC_MAP, getpid (), entry_size };
  size_t len;
  size_t pagesize;
  size_t memneed;
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  char *mem;
  char *p;
  char *p_end;

  len = 0;
  if (sysctl (info_path, 5, NULL, &len, NULL, 0) < 0)
    return -1;
  /* Allow for small variations over time.  In a multithreaded program
     new VMAs can be allocated at any moment.  */
  len = 2 * len + 10 * entry_size;
  /* But the system call rejects lengths > 1 MB.  */
  if (len > 0x100000)
    len = 0x100000;
  /* And the system call causes a kernel panic if the length is not a multiple
     of entry_size.  */
  len = (len / entry_size) * entry_size;
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  pagesize = getpagesize ();
  memneed = len;
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (auxmap == (void *) -1)
    return -1;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  mem = (char *) auxmap;
  if (sysctl (info_path, 5, mem, &len, NULL, 0) < 0
      || len > 0x100000 - entry_size)
    {
      /* sysctl failed, or the list of VMAs is possibly truncated.  */
      munmap (auxmap, memneed);
      return -1;
    }
  p = mem;
  p_end = mem + len;
  while (p < p_end)
    {
      struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
      unsigned long start = kve->kve_start;
      unsigned long end = kve->kve_end;
      unsigned int flags = 0;
      if (kve->kve_protection & KVME_PROT_READ)
        flags |= VMA_PROT_READ;
      if (kve->kve_protection & KVME_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (kve->kve_protection & KVME_PROT_EXEC)
        flags |= VMA_PROT_EXECUTE;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
      p += entry_size;
    }
  munmap (auxmap, memneed);
  return 0;
}

#elif defined __OpenBSD__ && defined KERN_PROC_VMMAP /* OpenBSD >= 5.7 */

static int
vma_iterate_bsd (vma_iterate_callback_fn callback, void *data)
{
  /* Documentation: https://man.openbsd.org/sysctl.2  */
  int info_path[] = { CTL_KERN, KERN_PROC_VMMAP, getpid () };
  size_t len;
  size_t pagesize;
  size_t memneed;
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  char *mem;
  char *p;
  char *p_end;

  len = 0;
  if (sysctl (info_path, 3, NULL, &len, NULL, 0) < 0)
    return -1;
  /* Allow for small variations over time.  In a multithreaded program
     new VMAs can be allocated at any moment.  */
  len = 2 * len + 10 * sizeof (struct kinfo_vmentry);
  /* But the system call rejects lengths > 64 KB.  */
  if (len > 0x10000)
    len = 0x10000;
  /* And the system call rejects lengths that are not a multiple of
     sizeof (struct kinfo_vmentry).  */
  len = (len / sizeof (struct kinfo_vmentry)) * sizeof (struct kinfo_vmentry);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  pagesize = getpagesize ();
  memneed = len;
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (auxmap == (void *) -1)
    return -1;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  mem = (char *) auxmap;
  if (sysctl (info_path, 3, mem, &len, NULL, 0) < 0
      || len > 0x10000 - sizeof (struct kinfo_vmentry))
    {
      /* sysctl failed, or the list of VMAs is possibly truncated.  */
      munmap (auxmap, memneed);
      return -1;
    }
  p = mem;
  p_end = mem + len;
  while (p < p_end)
    {
      struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
      unsigned long start = kve->kve_start;
      unsigned long end = kve->kve_end;
      unsigned int flags = 0;
      if (kve->kve_protection & KVE_PROT_READ)
        flags |= VMA_PROT_READ;
      if (kve->kve_protection & KVE_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (kve->kve_protection & KVE_PROT_EXEC)
        flags |= VMA_PROT_EXECUTE;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (start != end)
            if (callback (data, start, end, flags))
              break;
        }
      p += sizeof (struct kinfo_vmentry);
    }
  munmap (auxmap, memneed);
  return 0;
}

#else

static inline int
vma_iterate_bsd (vma_iterate_callback_fn callback, void *data)
{
  return -1;
}

#endif


int
vma_iterate (vma_iterate_callback_fn callback, void *data)
{
#if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */

# if defined __FreeBSD__
  /* On FreeBSD with procfs (but not GNU/kFreeBSD, which uses linprocfs), the
     function vma_iterate_proc does not return the virtual memory areas that
     were created by anonymous mmap.  See
     <https://svnweb.freebsd.org/base/head/sys/fs/procfs/procfs_map.c?view=markup>
     So use vma_iterate_proc only as a fallback.  */
  int retval = vma_iterate_bsd (callback, data);
  if (retval == 0)
    return 0;

  return vma_iterate_proc (callback, data);
# else
  /* On the other platforms, try the /proc approach first, and the sysctl()
     as a fallback.  */
  int retval = vma_iterate_proc (callback, data);
  if (retval == 0)
    return 0;

  return vma_iterate_bsd (callback, data);
# endif

#elif defined __sgi || defined __osf__ /* IRIX, OSF/1 */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
# if HAVE_MAP_ANONYMOUS
#  define zero_fd -1
#  define map_flags MAP_ANONYMOUS
# else
  int zero_fd;
#  define map_flags 0
# endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY | O_CLOEXEC);
  if (fd < 0)
    return -1;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
# if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY | O_CLOEXEC, 0644);
  if (zero_fd < 0)
    goto fail2;
# endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
# if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
# endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

#elif defined __sun /* Solaris */

  /* Note: Solaris <sys/procfs.h> defines a different type prmap_t with
     _STRUCTURED_PROC than without!  Here's a table of sizeof (prmap_t):
                                 32-bit   64-bit
         _STRUCTURED_PROC = 0      32       56
         _STRUCTURED_PROC = 1      96      104
     Therefore, if the include files provide the newer API, prmap_t has
     the bigger size, and thus you MUST use the newer API.  And if the
     include files provide the older API, prmap_t has the smaller size,
     and thus you MUST use the older API.  */

# if defined PIOCNMAP && defined PIOCMAP
  /* We must use the older /proc interface.  */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
#  if HAVE_MAP_ANONYMOUS
#   define zero_fd -1
#   define map_flags MAP_ANONYMOUS
#  else /* Solaris <= 7 */
  int zero_fd;
#   define map_flags 0
#  endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY | O_CLOEXEC);
  if (fd < 0)
    return -1;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
#  if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY | O_CLOEXEC, 0644);
  if (zero_fd < 0)
    goto fail2;
#  endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
#  if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
#  endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

# else
  /* We must use the newer /proc interface.
     Documentation:
     https://docs.oracle.com/cd/E23824_01/html/821-1473/proc-4.html
     The contents of /proc/<pid>/map consists of records of type
     prmap_t.  These are different in 32-bit and 64-bit processes,
     but here we are fortunately accessing only the current process.  */

  size_t pagesize;
  char fnamebuf[6+10+4+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
#  if HAVE_MAP_ANONYMOUS
#   define zero_fd -1
#   define map_flags MAP_ANONYMOUS
#  else /* Solaris <= 7 */
  int zero_fd;
#   define map_flags 0
#  endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* maps_end;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1 - 4;
  memcpy (fname, "/map", 4 + 1);
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY | O_CLOEXEC);
  if (fd < 0)
    return -1;

  {
    struct stat statbuf;
    if (fstat (fd, &statbuf) < 0)
      goto fail2;
    nmaps = statbuf.st_size / sizeof (prmap_t);
  }

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
#  if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY | O_CLOEXEC, 0644);
  if (zero_fd < 0)
    goto fail2;
#  endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
#  if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
#  endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  /* Read up to memneed bytes from fd into maps.  */
  {
    size_t remaining = memneed;
    size_t total_read = 0;
    char *ptr = (char *) maps;

    do
      {
        size_t nread = read (fd, ptr, remaining);
        if (nread == (size_t)-1)
          {
            if (errno == EINTR)
              continue;
            goto fail1;
          }
        if (nread == 0)
          /* EOF */
          break;
        total_read += nread;
        ptr += nread;
        remaining -= nread;
      }
    while (remaining > 0);

    nmaps = (memneed - remaining) / sizeof (prmap_t);
    maps_end = maps + nmaps;
  }

  for (mp = maps; mp < maps_end; mp++)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

# endif

#elif HAVE_PSTAT_GETPROCVM /* HP-UX */

  unsigned long pagesize = getpagesize ();
  int i;

  for (i = 0; ; i++)
    {
      struct pst_vm_status info;
      int ret = pstat_getprocvm (&info, sizeof (info), 0, i);
      if (ret < 0)
        return -1;
      if (ret == 0)
        break;
      {
        unsigned long start = info.pst_vaddr;
        unsigned long end = start + info.pst_length * pagesize;
        unsigned int flags = 0;
        if (info.pst_permission & PS_PROT_READ)
          flags |= VMA_PROT_READ;
        if (info.pst_permission & PS_PROT_WRITE)
          flags |= VMA_PROT_WRITE;
        if (info.pst_permission & PS_PROT_EXECUTE)
          flags |= VMA_PROT_EXECUTE;

        if (callback (data, start, end, flags))
          break;
      }
    }
  return 0;

#elif defined __APPLE__ && defined __MACH__ /* Mac OS X */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = VM_MIN_ADDRESS;; address += size)
    {
      int more;
      mach_port_t object_name;
      unsigned int flags;
      /* In Mac OS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
         32 bits in 32-bit processes and 64 bits in 64-bit processes.  Whereas
         mach_vm_address_t and mach_vm_size_t are always 64 bits large.
         Mac OS X 10.5 has three vm_region like methods:
           - vm_region.  It has arguments that depend on whether the current
             process is 32-bit or 64-bit.  When linking dynamically, this
             function exists only in 32-bit processes.  Therefore we use it
             only in 32-bit processes.
           - vm_region_64.  It has arguments that depend on whether the current
             process is 32-bit or 64-bit.  It interprets a flavor
             VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
             dangerous since 'struct vm_region_basic_info_64' is larger than
             'struct vm_region_basic_info'; therefore let's write
             VM_REGION_BASIC_INFO_64 explicitly.
           - mach_vm_region.  It has arguments that are 64-bit always.  This
             function is useful when you want to access the VM of a process
             other than the current process.
         In 64-bit processes, we could use vm_region_64 or mach_vm_region.
         I choose vm_region_64 because it uses the same types as vm_region,
         resulting in less conditional code.  */
# if defined __aarch64__ || defined __ppc64__ || defined __x86_64__
      struct vm_region_basic_info_64 info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;

      more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
                            (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# else
      struct vm_region_basic_info info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;

      more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                         (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# endif
      if (object_name != MACH_PORT_NULL)
        mach_port_deallocate (mach_task_self (), object_name);
      if (!more)
        break;
      flags = 0;
      if (info.protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (info.protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (info.protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }
  return 0;

#elif defined __GNU__ /* GNU/Hurd */

  /* The Hurd has a /proc/self/maps that looks like the Linux one, but it
     lacks the VMAs created through anonymous mmap.  Therefore use the Mach
     API.
     Documentation:
     https://www.gnu.org/software/hurd/gnumach-doc/Memory-Attributes.html  */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = 0;; address += size)
    {
      vm_prot_t protection;
      vm_prot_t max_protection;
      vm_inherit_t inheritance;
      boolean_t shared;
      memory_object_name_t object_name;
      vm_offset_t offset;
      unsigned int flags;

      if (!(vm_region (task, &address, &size, &protection, &max_protection,
                       &inheritance, &shared, &object_name, &offset)
            == KERN_SUCCESS))
        break;
      mach_port_deallocate (task, object_name);
      flags = 0;
      if (protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }
  return 0;

#elif defined _WIN32 || defined __CYGWIN__
  /* Windows platform.  Use the native Windows API.  */

  MEMORY_BASIC_INFORMATION info;
  uintptr_t address = 0;

  while (VirtualQuery ((void*)address, &info, sizeof(info)) == sizeof(info))
    {
      if (info.State != MEM_FREE)
        /* Ignore areas where info.State has the value MEM_RESERVE or,
           equivalently, info.Protect has the undocumented value 0.
           This is needed, so that on Cygwin, areas used by malloc() are
           distinguished from areas reserved for future malloc().  */
        if (info.State != MEM_RESERVE)
          {
            uintptr_t start, end;
            unsigned int flags;

            start = (uintptr_t)info.BaseAddress;
            end = start + info.RegionSize;
            switch (info.Protect & ~(PAGE_GUARD|PAGE_NOCACHE))
              {
              case PAGE_READONLY:
                flags = VMA_PROT_READ;
                break;
              case PAGE_READWRITE:
              case PAGE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE;
                break;
              case PAGE_EXECUTE:
                flags = VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READ:
                flags = VMA_PROT_READ | VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READWRITE:
              case PAGE_EXECUTE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE | VMA_PROT_EXECUTE;
                break;
              case PAGE_NOACCESS:
              default:
                flags = 0;
                break;
              }

            if (callback (data, start, end, flags))
              break;
          }
      address = (uintptr_t)info.BaseAddress + info.RegionSize;
    }
  return 0;

#elif defined __BEOS__ || defined __HAIKU__
  /* Use the BeOS specific API.  */

  area_info info;
  int32 cookie;

  cookie = 0;
  while (get_next_area_info (0, &cookie, &info) == B_OK)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) info.address;
      end = start + info.size;
      flags = 0;
      if (info.protection & B_READ_AREA)
        flags |= VMA_PROT_READ | VMA_PROT_EXECUTE;
      if (info.protection & B_WRITE_AREA)
        flags |= VMA_PROT_WRITE;

      if (callback (data, start, end, flags))
        break;
    }
  return 0;

#elif HAVE_MQUERY /* OpenBSD */

# if defined __OpenBSD__
  /* Try sysctl() first.  It is more efficient than the mquery() loop below
     and also provides the flags.  */
  {
    int retval = vma_iterate_bsd (callback, data);
    if (retval == 0)
      return 0;
  }
# endif

  {
    uintptr_t pagesize;
    uintptr_t address;
    int /*bool*/ address_known_mapped;

    pagesize = getpagesize ();
    /* Avoid calling mquery with a NULL first argument, because this argument
       value has a specific meaning.  We know the NULL page is unmapped.  */
    address = pagesize;
    address_known_mapped = 0;
    for (;;)
      {
        /* Test whether the page at address is mapped.  */
        if (address_known_mapped
            || mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0)
               == (void *) -1)
          {
            /* The page at address is mapped.
               This is the start of an interval.  */
            uintptr_t start = address;
            uintptr_t end;

            /* Find the end of the interval.  */
            end = (uintptr_t) mquery ((void *) address, pagesize, 0, 0, -1, 0);
            if (end == (uintptr_t) (void *) -1)
              end = 0; /* wrap around */
            address = end;

            /* It's too complicated to find out about the flags.
               Just pass 0.  */
            if (callback (data, start, end, 0))
              break;

            if (address < pagesize) /* wrap around? */
              break;
          }
        /* Here we know that the page at address is unmapped.  */
        {
          uintptr_t query_size = pagesize;

          address += pagesize;

          /* Query larger and larger blocks, to get through the unmapped
             address range with few mquery() calls.  */
          for (;;)
            {
              if (2 * query_size > query_size)
                query_size = 2 * query_size;
              if (address + query_size - 1 < query_size) /* wrap around? */
                {
                  address_known_mapped = 0;
                  break;
                }
              if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                  == (void *) -1)
                {
                  /* Not all the interval [address .. address + query_size - 1]
                     is unmapped.  */
                  address_known_mapped = (query_size == pagesize);
                  break;
                }
              /* The interval [address .. address + query_size - 1] is
                 unmapped.  */
              address += query_size;
            }
          /* Reduce the query size again, to determine the precise size of the
             unmapped interval that starts at address.  */
          while (query_size > pagesize)
            {
              query_size = query_size / 2;
              if (address + query_size - 1 >= query_size)
                {
                  if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                      != (void *) -1)
                    {
                      /* The interval [address .. address + query_size - 1] is
                         unmapped.  */
                      address += query_size;
                      address_known_mapped = 0;
                    }
                  else
                    address_known_mapped = (query_size == pagesize);
                }
            }
          /* Here again query_size = pagesize, and
             either address + pagesize - 1 < pagesize, or
             mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0) fails.
             So, the unmapped area ends at address.  */
        }
        if (address + pagesize - 1 < pagesize) /* wrap around? */
          break;
      }
    return 0;
  }

#else

  /* Not implemented.  */
  return -1;

#endif
}


#ifdef TEST

#include <stdio.h>

/* Output the VMAs of the current process in a format similar to the Linux
   /proc/$pid/maps file.  */
static int
vma_iterate_callback (void *data, uintptr_t start, uintptr_t end,
                      unsigned int flags)
{
  printf ("%08lx-%08lx %c%c%c\n",
          (unsigned long) start, (unsigned long) end,
          flags & VMA_PROT_READ ? 'r' : '-',
          flags & VMA_PROT_WRITE ? 'w' : '-',
          flags & VMA_PROT_EXECUTE ? 'x' : '-');
  return 0;
}

int
main ()
{
  vma_iterate (vma_iterate_callback, NULL);

  /* Let the user interactively look at the /proc file system.  */
  sleep (10);

  return 0;
}

/*
 * Local Variables:
 * compile-command: "gcc -ggdb -DTEST -Wall -I.. vma-iter.c"
 * End:
 */

#endif /* TEST */