1 /* -*- mode: C; c-basic-offset: 3; -*- */
3 /*--------------------------------------------------------------------*/
4 /*--- The address space manager: segment initialisation and ---*/
5 /*--- tracking, stack operations ---*/
7 /*--- Implementation for Linux, Darwin, Solaris and FreeBSD ---*/
8 /*--------------------------------------------------------------------*/
11 This file is part of Valgrind, a dynamic binary instrumentation
14 Copyright (C) 2000-2017 Julian Seward
17 This program is free software; you can redistribute it and/or
18 modify it under the terms of the GNU General Public License as
19 published by the Free Software Foundation; either version 2 of the
20 License, or (at your option) any later version.
22 This program is distributed in the hope that it will be useful, but
23 WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
25 General Public License for more details.
27 You should have received a copy of the GNU General Public License
28 along with this program; if not, see <http://www.gnu.org/licenses/>.
30 The GNU General Public License is contained in the file COPYING.
33 #if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_freebsd)
35 /* *************************************************************
36 DO NOT INCLUDE ANY OTHER FILES HERE.
37 ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
38 AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
39 ************************************************************* */
41 #include "priv_aspacemgr.h"
45 /* Note: many of the exported functions implemented below are
46 described more fully in comments in pub_core_aspacemgr.h.
50 /*-----------------------------------------------------------------*/
54 /*-----------------------------------------------------------------*/
58 The purpose of the address space manager (aspacem) is:
60 (1) to record the disposition of all parts of the process' address
63 (2) to the extent that it can, influence layout in ways favourable
66 It is important to appreciate that whilst it can and does attempt
67 to influence layout, and usually succeeds, it isn't possible to
68 impose absolute control: in the end, the kernel is the final
69 arbiter, and can always bounce our requests.
73 The strategy is therefore as follows:
75 * Track ownership of mappings. Each one can belong either to
76 Valgrind or to the client.
78 * Try to place the client's fixed and hinted mappings at the
79 requested addresses. Fixed mappings are allowed anywhere except
80 in areas reserved by Valgrind; the client can trash its own
81 mappings if it wants. Hinted mappings are allowed providing they
82 fall entirely in free areas; if not, they will be placed by
83 aspacem in a free area.
85 * Anonymous mappings are allocated so as to keep Valgrind and
86 client areas widely separated when possible. If address space
87 runs low, then they may become intermingled: aspacem will attempt
88 to use all possible space. But under most circumstances lack of
89 address space is not a problem and so the areas will remain far
92 Searches for client space start at aspacem_cStart and will wrap
93 around the end of the available space if needed. Searches for
94 Valgrind space start at aspacem_vStart and will also wrap around.
95 Because aspacem_cStart is approximately at the start of the
96 available space and aspacem_vStart is approximately in the
97 middle, for the most part the client anonymous mappings will be
98 clustered towards the start of available space, and Valgrind ones
101 On Solaris, searches for client space start at (aspacem_vStart - 1)
102 and for Valgrind space start at (aspacem_maxAddr - 1) and go backwards.
103 This simulates what kernel does - brk limit grows from bottom and mmap'ed
104 objects from top. This is contrary to Linux, where the data segment
105 and mmap'ed objects grow from bottom (leading to early data segment
106 exhaustion for tools which do not use m_replacemalloc). While Linux glibc
107 can cope with this problem by employing mmap, Solaris libc treats inability
108 to grow brk limit as a hard failure.
110 The available space is delimited by aspacem_minAddr and
111 aspacem_maxAddr. aspacem is flexible and can operate with these
112 at any (sane) setting. For 32-bit Linux, aspacem_minAddr is set
113 to some low-ish value at startup (64M) and aspacem_maxAddr is
114 derived from the stack pointer at system startup. This seems a
115 reliable way to establish the initial boundaries.
116 A command line option allows to change the value of aspacem_minAddr,
117 so as to allow memory hungry applications to use the lowest
120 64-bit Linux is similar except for the important detail that the
121 upper boundary is set to 64G. The reason is so that all
122 anonymous mappings (basically all client data areas) are kept
123 below 64G, since that is the maximum range that memcheck can
124 track shadow memory using a fast 2-level sparse array. It can go
125 beyond that but runs much more slowly. The 64G limit is
126 arbitrary and is trivially changed. So, with the current
127 settings, programs on 64-bit Linux will appear to run out of
128 address space and presumably fail at the 64G limit. Given the
129 considerable space overhead of Memcheck, that means you should be
130 able to memcheckify programs that use up to about 32G natively.
132 Note that the aspacem_minAddr/aspacem_maxAddr limits apply only to
133 anonymous mappings. The client can still do fixed and hinted maps
134 at any addresses provided they do not overlap Valgrind's segments.
135 This makes Valgrind able to load prelinked .so's at their requested
136 addresses on 64-bit platforms, even if they are very high (eg,
139 At startup, aspacem establishes the usable limits, and advises
140 m_main to place the client stack at the top of the range, which on
141 a 32-bit machine will be just below the real initial stack. One
142 effect of this is that self-hosting sort-of works, because an inner
143 valgrind will then place its client's stack just below its own
146 The segment array and segment kinds
147 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
148 The central data structure is the segment array (segments[0
149 .. nsegments_used-1]). This covers the entire address space in
150 order, giving account of every byte of it. Free spaces are
151 represented explicitly as this makes many operations simpler.
152 Mergeable adjacent segments are aggressively merged so as to create
153 a "normalised" representation (preen_nsegments).
155 There are 7 (mutually-exclusive) segment kinds, the meaning of
158 SkFree: a free space, which may be allocated either to Valgrind (V)
161 SkAnonC: an anonymous mapping belonging to C. For these, aspacem
162 tracks a boolean indicating whether or not it is part of the
163 client's heap area (can't remember why).
165 SkFileC: a file mapping belonging to C.
167 SkShmC: a shared memory segment belonging to C.
169 SkAnonV: an anonymous mapping belonging to V. These cover all V's
170 dynamic memory needs, including non-client malloc/free areas,
171 shadow memory, and the translation cache.
173 SkFileV: a file mapping belonging to V. As far as I know these are
174 only created transiently for the purposes of reading debug info.
176 SkResvn: a reservation segment.
178 These are mostly straightforward. Reservation segments have some
181 A reservation segment is unmapped from the kernel's point of view,
182 but is an area in which aspacem will not create anonymous maps
183 (either Vs or Cs). The idea is that we will try to keep it clear
184 when the choice to do so is ours. Reservation segments are
185 'invisible' from the client's point of view: it may choose to park
186 a fixed mapping in the middle of one, and that's just tough -- we
187 can't do anything about that. From the client's perspective
188 reservations are semantically equivalent to (although
189 distinguishable from, if it makes enquiries) free areas.
191 Reservations are a primitive mechanism provided for whatever
192 purposes the rest of the system wants. Currently they are used to
193 reserve the expansion space into which a growdown stack is
194 expanded, and into which the data segment is extended. Note,
195 though, those uses are entirely external to this module, which only
196 supplies the primitives.
198 Reservations may be shrunk in order that an adjoining anonymous
199 mapping may be extended. This makes dataseg/stack expansion work.
200 A reservation may not be shrunk below one page.
202 The advise/notify concept
203 ~~~~~~~~~~~~~~~~~~~~~~~~~
204 All mmap-related calls must be routed via aspacem. Calling
205 sys_mmap directly from the rest of the system is very dangerous
206 because aspacem's data structures will become out of date.
208 The fundamental mode of operation of aspacem is to support client
209 mmaps. Here's what happens (in ML_(generic_PRE_sys_mmap)):
211 * m_syswrap intercepts the mmap call. It examines the parameters
212 and identifies the requested placement constraints. There are
213 three possibilities: no constraint (MAny), hinted (MHint, "I
214 prefer X but will accept anything"), and fixed (MFixed, "X or
217 * This request is passed to VG_(am_get_advisory). This decides on
218 a placement as described in detail in Strategy above. It may
219 also indicate that the map should fail, because it would trash
220 one of Valgrind's areas, which would probably kill the system.
222 * Control returns to the wrapper. If VG_(am_get_advisory) has
223 declared that the map should fail, then it must be made to do so.
224 Usually, though, the request is considered acceptable, in which
225 case an "advised" address is supplied. The advised address
226 replaces the original address supplied by the client, and
229 Note at this point that although aspacem has been asked for
230 advice on where to place the mapping, no commitment has yet been
231 made by either it or the kernel.
233 * The adjusted request is handed off to the kernel.
235 * The kernel's result is examined. If the map succeeded, aspacem
236 is told of the outcome (VG_(am_notify_client_mmap)), so it can
237 update its records accordingly.
239 This then is the central advise-notify idiom for handling client
240 mmap/munmap/mprotect/shmat:
242 * ask aspacem for an advised placement (or a veto)
244 * if not vetoed, hand request to kernel, using the advised placement
246 * examine result, and if successful, notify aspacem of the result.
248 There are also many convenience functions, eg
249 VG_(am_mmap_anon_fixed_client), which do both phases entirely within
252 To debug all this, a sync-checker is provided. It reads
253 /proc/self/maps, compares what it sees with aspacem's records, and
254 complains if there is a difference. --sanity-level=3 runs it before
255 and after each syscall, which is a powerful, if slow way of finding
256 buggy syscall wrappers.
260 Up to and including Valgrind 2.4.1, x86 segmentation was used to
261 enforce separation of V and C, so that wild writes by C could not
262 trash V. This got called "pointercheck". Unfortunately, the new
263 more flexible memory layout, plus the need to be portable across
264 different architectures, means doing this in hardware is no longer
265 viable, and doing it in software is expensive. So at the moment we
270 /*-----------------------------------------------------------------*/
272 /*--- The Address Space Manager's state. ---*/
274 /*-----------------------------------------------------------------*/
276 /* ------ start of STATE for the address-space manager ------ */
278 /* Max number of segments we can track. On Android, virtual address
279 space is limited, so keep a low limit -- 5000 x sizef(NSegment) is
281 #if defined(VGPV_arm_linux_android) \
282 || defined(VGPV_x86_linux_android) \
283 || defined(VGPV_mips32_linux_android) \
284 || defined(VGPV_arm64_linux_android)
285 # define VG_N_SEGMENTS 5000
287 # define VG_N_SEGMENTS 30000
290 /* Array [0 .. nsegments_used-1] of all mappings. */
291 /* Sorted by .addr field. */
292 /* I: len may not be zero. */
293 /* I: overlapping segments are not allowed. */
294 /* I: the segments cover the entire address space precisely. */
295 /* Each segment can optionally hold an index into the filename table. */
297 static NSegment nsegments
[VG_N_SEGMENTS
];
298 static Int nsegments_used
= 0;
300 #define Addr_MIN ((Addr)0)
301 #define Addr_MAX ((Addr)(-1ULL))
306 Addr
VG_(clo_aspacem_minAddr
)
307 #if defined(VGO_linux)
308 = (Addr
) 0x04000000; // 64M
309 #elif defined(VGO_darwin)
310 # if VG_WORDSIZE == 4
313 = (Addr
) 0x100000000; // 4GB page zero
315 #elif defined(VGO_solaris)
316 = (Addr
) 0x00100000; // 1MB
317 #elif defined(VGO_freebsd)
318 = (Addr
) 0x04000000; // 64M
323 // The smallest address that aspacem will try to allocate
324 static Addr aspacem_minAddr
= 0;
326 // The largest address that aspacem will try to allocate
327 static Addr aspacem_maxAddr
= 0;
329 // Where aspacem will start looking for client space
330 static Addr aspacem_cStart
= 0;
332 // Where aspacem will start looking for Valgrind space
333 static Addr aspacem_vStart
= 0;
336 #define AM_SANITY_CHECK \
338 if (VG_(clo_sanity_level) >= 3) \
339 aspacem_assert(VG_(am_do_sync_check) \
340 (__PRETTY_FUNCTION__,__FILE__,__LINE__)); \
343 /* ------ end of STATE for the address-space manager ------ */
345 /* ------ Forwards decls ------ */
347 static Int
find_nsegment_idx ( Addr a
);
349 static void parse_procselfmaps (
350 void (*record_mapping
)( Addr addr
, SizeT len
, UInt prot
,
351 ULong dev
, ULong ino
, Off64T offset
,
352 const HChar
* filename
),
353 void (*record_gap
)( Addr addr
, SizeT len
)
356 /* ----- Hacks to do with the "commpage" on arm-linux ----- */
357 /* Not that I have anything against the commpage per se. It's just
358 that it's not listed in /proc/self/maps, which is a royal PITA --
359 we have to fake it up, in parse_procselfmaps.
361 But note also bug 254556 comment #2: this is now fixed in newer
362 kernels -- it is listed as a "[vectors]" entry. Presumably the
363 fake entry made here duplicates the [vectors] entry, and so, if at
364 some point in the future, we can stop supporting buggy kernels,
365 then this kludge can be removed entirely, since the procmap parser
366 below will read that entry in the normal way. */
367 #if defined(VGP_arm_linux)
368 # define ARM_LINUX_FAKE_COMMPAGE_START 0xFFFF0000
369 # define ARM_LINUX_FAKE_COMMPAGE_END1 0xFFFF1000
372 #if !defined(VKI_MAP_STACK)
373 /* this is only defined for FreeBSD
374 * for readability, define it to 0
375 * for other platforms */
376 #define VKI_MAP_STACK 0
379 /*-----------------------------------------------------------------*/
381 /*--- Displaying the segment array. ---*/
383 /*-----------------------------------------------------------------*/
385 static const HChar
* show_SegKind ( SegKind sk
)
388 case SkFree
: return " ";
389 case SkAnonC
: return "anon";
390 case SkAnonV
: return "ANON";
391 case SkFileC
: return "file";
392 case SkFileV
: return "FILE";
393 case SkShmC
: return "shm ";
394 case SkResvn
: return "RSVN";
395 default: return "????";
399 static const HChar
* show_ShrinkMode ( ShrinkMode sm
)
402 case SmLower
: return "SmLower";
403 case SmUpper
: return "SmUpper";
404 case SmFixed
: return "SmFixed";
405 default: return "Sm?????";
409 static void show_len_concisely ( /*OUT*/HChar
* buf
, Addr start
, Addr end
)
412 ULong len
= ((ULong
)end
) - ((ULong
)start
) + 1;
414 if (len
< 10*1000*1000ULL) {
417 else if (len
< 999999ULL * (1ULL<<20)) {
421 else if (len
< 999999ULL * (1ULL<<30)) {
425 else if (len
< 999999ULL * (1ULL<<40)) {
433 ML_(am_sprintf
)(buf
, fmt
, len
);
436 /* Show full details of an NSegment */
438 static void show_nsegment_full ( Int logLevel
, Int segNo
, const NSegment
* seg
)
441 const HChar
* name
= ML_(am_get_segname
)( seg
->fnIdx
);
446 show_len_concisely(len_buf
, seg
->start
, seg
->end
);
450 "%3d: %s %010lx-%010lx %s %c%c%c%c%c %s "
451 "d=0x%03llx i=%-7llu o=%-7lld (%d,%d) %s\n",
452 segNo
, show_SegKind(seg
->kind
),
453 seg
->start
, seg
->end
, len_buf
,
454 seg
->hasR
? 'r' : '-', seg
->hasW
? 'w' : '-',
455 seg
->hasX
? 'x' : '-', seg
->hasT
? 'T' : '-',
456 seg
->isCH
? 'H' : '-',
457 show_ShrinkMode(seg
->smode
),
458 seg
->dev
, seg
->ino
, seg
->offset
,
459 ML_(am_segname_get_seqnr
)(seg
->fnIdx
), seg
->fnIdx
,
465 /* Show an NSegment in a user-friendly-ish way. */
467 static void show_nsegment ( Int logLevel
, Int segNo
, const NSegment
* seg
)
470 show_len_concisely(len_buf
, seg
->start
, seg
->end
);
477 "%3d: %s %010lx-%010lx %s\n",
478 segNo
, show_SegKind(seg
->kind
),
479 seg
->start
, seg
->end
, len_buf
483 case SkAnonC
: case SkAnonV
: case SkShmC
:
486 "%3d: %s %010lx-%010lx %s %c%c%c%c%c\n",
487 segNo
, show_SegKind(seg
->kind
),
488 seg
->start
, seg
->end
, len_buf
,
489 seg
->hasR
? 'r' : '-', seg
->hasW
? 'w' : '-',
490 seg
->hasX
? 'x' : '-', seg
->hasT
? 'T' : '-',
491 seg
->isCH
? 'H' : '-'
495 case SkFileC
: case SkFileV
:
498 "%3d: %s %010lx-%010lx %s %c%c%c%c%c d=0x%03llx "
499 "i=%-7llu o=%-7lld (%d,%d)\n",
500 segNo
, show_SegKind(seg
->kind
),
501 seg
->start
, seg
->end
, len_buf
,
502 seg
->hasR
? 'r' : '-', seg
->hasW
? 'w' : '-',
503 seg
->hasX
? 'x' : '-', seg
->hasT
? 'T' : '-',
504 seg
->isCH
? 'H' : '-',
505 seg
->dev
, seg
->ino
, seg
->offset
,
506 ML_(am_segname_get_seqnr
)(seg
->fnIdx
), seg
->fnIdx
513 "%3d: %s %010lx-%010lx %s %c%c%c%c%c %s\n",
514 segNo
, show_SegKind(seg
->kind
),
515 seg
->start
, seg
->end
, len_buf
,
516 seg
->hasR
? 'r' : '-', seg
->hasW
? 'w' : '-',
517 seg
->hasX
? 'x' : '-', seg
->hasT
? 'T' : '-',
518 seg
->isCH
? 'H' : '-',
519 show_ShrinkMode(seg
->smode
)
526 "%3d: ???? UNKNOWN SEGMENT KIND\n",
533 /* Print out the segment array (debugging only!). */
534 void VG_(am_show_nsegments
) ( Int logLevel
, const HChar
* who
)
537 VG_(debugLog
)(logLevel
, "aspacem",
538 "<<< SHOW_SEGMENTS: %s (%d segments)\n",
539 who
, nsegments_used
);
540 ML_(am_show_segnames
)( logLevel
, who
);
541 for (i
= 0; i
< nsegments_used
; i
++)
542 show_nsegment( logLevel
, i
, &nsegments
[i
] );
543 VG_(debugLog
)(logLevel
, "aspacem",
548 /* Get the filename corresponding to this segment, if known and if it
550 const HChar
* VG_(am_get_filename
)( NSegment
const * seg
)
553 return ML_(am_get_segname
)( seg
->fnIdx
);
556 /* Collect up the start addresses of segments whose kind matches one of
557 the kinds specified in kind_mask.
558 The interface is a bit strange in order to avoid potential
559 segment-creation races caused by dynamic allocation of the result
562 The function first computes how many entries in the result
563 buffer *starts will be needed. If this number <= nStarts,
564 they are placed in starts[0..], and the number is returned.
565 If nStarts is not large enough, nothing is written to
566 starts[0..], and the negation of the size is returned.
568 Correct use of this function may mean calling it multiple times in
569 order to establish a suitably-sized buffer. */
571 Int
VG_(am_get_segment_starts
)( UInt kind_mask
, Addr
* starts
, Int nStarts
)
575 /* don't pass dumbass arguments */
576 aspacem_assert(nStarts
> 0);
579 for (i
= 0; i
< nsegments_used
; i
++) {
580 if ((nsegments
[i
].kind
& kind_mask
) != 0)
584 if (nSegs
> nStarts
) {
585 /* The buffer isn't big enough. Tell the caller how big it needs
590 /* There's enough space. So write into the result buffer. */
591 aspacem_assert(nSegs
<= nStarts
);
594 for (i
= 0; i
< nsegments_used
; i
++) {
595 if ((nsegments
[i
].kind
& kind_mask
) != 0)
596 starts
[j
++] = nsegments
[i
].start
;
599 aspacem_assert(j
== nSegs
); /* this should not fail */
604 /*-----------------------------------------------------------------*/
606 /*--- Sanity checking and preening of the segment array. ---*/
608 /*-----------------------------------------------------------------*/
610 /* Check representational invariants for NSegments. */
612 static Bool
sane_NSegment ( const NSegment
* s
)
614 if (s
== NULL
) return False
;
616 /* No zero sized segments and no wraparounds. */
617 if (s
->start
> s
->end
) return False
;
619 /* require page alignment */
620 if (!VG_IS_PAGE_ALIGNED(s
->start
)) return False
;
621 if (!VG_IS_PAGE_ALIGNED(s
->end
+1)) return False
;
628 && s
->dev
== 0 && s
->ino
== 0 && s
->offset
== 0 && s
->fnIdx
== -1
629 && !s
->hasR
&& !s
->hasW
&& !s
->hasX
&& !s
->hasT
632 case SkAnonC
: case SkAnonV
: case SkShmC
:
635 && s
->dev
== 0 && s
->ino
== 0 && s
->offset
== 0 && s
->fnIdx
== -1
636 && (s
->kind
==SkAnonC
? True
: !s
->isCH
);
638 case SkFileC
: case SkFileV
:
641 && ML_(am_sane_segname
)(s
->fnIdx
)
646 s
->dev
== 0 && s
->ino
== 0 && s
->offset
== 0 && s
->fnIdx
== -1
647 && !s
->hasR
&& !s
->hasW
&& !s
->hasX
&& !s
->hasT
656 /* Try merging s2 into s1, if possible. If successful, s1 is
657 modified, and True is returned. Otherwise s1 is unchanged and
658 False is returned. */
660 static Bool
maybe_merge_nsegments ( NSegment
* s1
, const NSegment
* s2
)
662 if (s1
->kind
!= s2
->kind
)
665 if (s1
->end
+1 != s2
->start
)
668 /* reject cases which would cause wraparound */
669 if (s1
->start
> s2
->end
)
678 case SkAnonC
: case SkAnonV
:
679 if (s1
->hasR
== s2
->hasR
&& s1
->hasW
== s2
->hasW
680 && s1
->hasX
== s2
->hasX
&& s1
->isCH
== s2
->isCH
) {
682 s1
->hasT
|= s2
->hasT
;
687 case SkFileC
: case SkFileV
:
688 if (s1
->hasR
== s2
->hasR
689 && s1
->hasW
== s2
->hasW
&& s1
->hasX
== s2
->hasX
690 && s1
->dev
== s2
->dev
&& s1
->ino
== s2
->ino
691 && s2
->offset
== s1
->offset
692 + ((ULong
)s2
->start
) - ((ULong
)s1
->start
) ) {
694 s1
->hasT
|= s2
->hasT
;
695 ML_(am_dec_refcount
)(s1
->fnIdx
);
704 if (s1
->smode
== SmFixed
&& s2
->smode
== SmFixed
) {
718 /* Sanity-check and canonicalise the segment array (merge mergable
719 segments). Returns True if any segments were merged. */
721 static Bool
preen_nsegments ( void )
723 Int i
, r
, w
, nsegments_used_old
= nsegments_used
;
725 /* Pass 1: check the segment array covers the entire address space
726 exactly once, and also that each segment is sane. */
727 aspacem_assert(nsegments_used
> 0);
728 aspacem_assert(nsegments
[0].start
== Addr_MIN
);
729 aspacem_assert(nsegments
[nsegments_used
-1].end
== Addr_MAX
);
731 aspacem_assert(sane_NSegment(&nsegments
[0]));
732 for (i
= 1; i
< nsegments_used
; i
++) {
733 aspacem_assert(sane_NSegment(&nsegments
[i
]));
734 aspacem_assert(nsegments
[i
-1].end
+1 == nsegments
[i
].start
);
737 /* Pass 2: merge as much as possible, using
738 maybe_merge_segments. */
740 for (r
= 1; r
< nsegments_used
; r
++) {
741 if (maybe_merge_nsegments(&nsegments
[w
], &nsegments
[r
])) {
746 nsegments
[w
] = nsegments
[r
];
750 aspacem_assert(w
> 0 && w
<= nsegments_used
);
753 return nsegments_used
!= nsegments_used_old
;
757 /* Check the segment array corresponds with the kernel's view of
758 memory layout. sync_check_ok returns True if no anomalies were
759 found, else False. In the latter case the mismatching segments are
762 The general idea is: we get the kernel to show us all its segments
763 and also the gaps in between. For each such interval, try and find
764 a sequence of appropriate intervals in our segment array which
765 cover or more than cover the kernel's interval, and which all have
766 suitable kinds/permissions etc.
768 Although any specific kernel interval is not matched exactly to a
769 valgrind interval or sequence thereof, eventually any disagreement
770 on mapping boundaries will be detected. This is because, if for
771 example valgrind's intervals cover a greater range than the current
772 kernel interval, it must be the case that a neighbouring free-space
773 interval belonging to valgrind cannot cover the neighbouring
774 free-space interval belonging to the kernel. So the disagreement
777 In other words, we examine each kernel interval in turn, and check
778 we do not disagree over the range of that interval. Because all of
779 the address space is examined, any disagreements must eventually be
783 static Bool sync_check_ok
= False
;
785 static void sync_check_mapping_callback ( Addr addr
, SizeT len
, UInt prot
,
786 ULong dev
, ULong ino
, Off64T offset
,
787 const HChar
* filename
)
790 Bool sloppyXcheck
, sloppyRcheck
;
792 /* If a problem has already been detected, don't continue comparing
793 segments, so as to avoid flooding the output with error
795 #if !defined(VGO_darwin)
803 /* The kernel should not give us wraparounds. */
804 aspacem_assert(addr
<= addr
+ len
- 1);
806 iLo
= find_nsegment_idx( addr
);
807 iHi
= find_nsegment_idx( addr
+ len
- 1 );
809 /* These 5 should be guaranteed by find_nsegment_idx. */
810 aspacem_assert(0 <= iLo
&& iLo
< nsegments_used
);
811 aspacem_assert(0 <= iHi
&& iHi
< nsegments_used
);
812 aspacem_assert(iLo
<= iHi
);
813 aspacem_assert(nsegments
[iLo
].start
<= addr
);
814 aspacem_assert(nsegments
[iHi
].end
>= addr
+ len
- 1 );
816 /* x86 doesn't differentiate 'x' and 'r' (at least, all except the
817 most recent NX-bit enabled CPUs) and so recent kernels attempt
818 to provide execute protection by placing all executable mappings
819 low down in the address space and then reducing the size of the
820 code segment to prevent code at higher addresses being executed.
822 These kernels report which mappings are really executable in
823 the /proc/self/maps output rather than mirroring what was asked
824 for when each mapping was created. In order to cope with this we
825 have a sloppyXcheck mode which we enable on x86 and s390 - in this
826 mode we allow the kernel to report execute permission when we weren't
827 expecting it but not vice versa. */
828 # if defined(VGA_x86) || defined (VGA_s390x) || \
829 defined(VGA_mips32) || defined(VGA_mips64)
832 sloppyXcheck
= False
;
835 /* Some kernels on s390 provide 'r' permission even when it was not
836 explicitly requested. It seems that 'x' permission implies 'r'.
837 This behaviour also occurs on OS X. */
838 # if defined(VGA_s390x) || defined(VGO_darwin)
841 sloppyRcheck
= False
;
844 /* NSegments iLo .. iHi inclusive should agree with the presented
846 for (i
= iLo
; i
<= iHi
; i
++) {
848 Bool same
, cmp_offsets
, cmp_devino
;
851 /* compare the kernel's offering against ours. */
852 same
= nsegments
[i
].kind
== SkAnonC
853 || nsegments
[i
].kind
== SkAnonV
854 || nsegments
[i
].kind
== SkFileC
855 || nsegments
[i
].kind
== SkFileV
856 || nsegments
[i
].kind
== SkShmC
;
859 if (nsegments
[i
].hasR
) seg_prot
|= VKI_PROT_READ
;
860 if (nsegments
[i
].hasW
) seg_prot
|= VKI_PROT_WRITE
;
861 if (nsegments
[i
].hasX
) seg_prot
|= VKI_PROT_EXEC
;
864 = nsegments
[i
].kind
== SkFileC
|| nsegments
[i
].kind
== SkFileV
;
867 = nsegments
[i
].dev
!= 0 || nsegments
[i
].ino
!= 0;
869 /* Consider other reasons to not compare dev/inode */
870 #if defined(VGO_linux)
871 /* bproc does some godawful hack on /dev/zero at process
872 migration, which changes the name of it, and its dev & ino */
873 if (filename
&& 0==VG_(strcmp
)(filename
, "/dev/zero (deleted)"))
876 /* hack apparently needed on MontaVista Linux */
877 if (filename
&& VG_(strstr
)(filename
, "/.lib-ro/"))
881 #if defined(VGO_darwin) || defined(VGO_freebsd)
882 // GrP fixme kernel info doesn't have dev/inode
885 // GrP fixme V and kernel don't agree on offsets
889 /* If we are doing sloppy execute permission checks then we
890 allow segment to have X permission when we weren't expecting
891 it (but not vice versa) so if the kernel reported execute
892 permission then pretend that this segment has it regardless
893 of what we were expecting. */
894 if (sloppyXcheck
&& (prot
& VKI_PROT_EXEC
) != 0) {
895 seg_prot
|= VKI_PROT_EXEC
;
898 if (sloppyRcheck
&& (prot
& (VKI_PROT_EXEC
| VKI_PROT_READ
)) ==
899 (VKI_PROT_EXEC
| VKI_PROT_READ
)) {
900 seg_prot
|= VKI_PROT_READ
;
906 ? (nsegments
[i
].dev
== dev
&& nsegments
[i
].ino
== ino
)
909 ? nsegments
[i
].start
-nsegments
[i
].offset
== addr
-offset
913 Addr end
= start
+ len
- 1;
915 show_len_concisely(len_buf
, start
, end
);
917 sync_check_ok
= False
;
921 "segment mismatch: V's seg 1st, kernel's 2nd:\n");
922 show_nsegment_full( 0, i
, &nsegments
[i
] );
923 VG_(debugLog
)(0,"aspacem",
924 "...: .... %010lx-%010lx %s %c%c%c.. ....... "
925 "d=0x%03llx i=%-7llu o=%-7lld (.) m=. %s\n",
927 prot
& VKI_PROT_READ
? 'r' : '-',
928 prot
& VKI_PROT_WRITE
? 'w' : '-',
929 prot
& VKI_PROT_EXEC
? 'x' : '-',
930 dev
, ino
, offset
, filename
? filename
: "(none)" );
936 /* Looks harmless. Keep going. */
940 static void sync_check_gap_callback ( Addr addr
, SizeT len
)
944 /* If a problem has already been detected, don't continue comparing
945 segments, so as to avoid flooding the output with error
947 #if !defined(VGO_darwin)
955 /* The kernel should not give us wraparounds. */
956 aspacem_assert(addr
<= addr
+ len
- 1);
958 iLo
= find_nsegment_idx( addr
);
959 iHi
= find_nsegment_idx( addr
+ len
- 1 );
961 /* These 5 should be guaranteed by find_nsegment_idx. */
962 aspacem_assert(0 <= iLo
&& iLo
< nsegments_used
);
963 aspacem_assert(0 <= iHi
&& iHi
< nsegments_used
);
964 aspacem_assert(iLo
<= iHi
);
965 aspacem_assert(nsegments
[iLo
].start
<= addr
);
966 aspacem_assert(nsegments
[iHi
].end
>= addr
+ len
- 1 );
968 /* NSegments iLo .. iHi inclusive should agree with the presented
970 for (i
= iLo
; i
<= iHi
; i
++) {
974 /* compare the kernel's offering against ours. */
975 same
= nsegments
[i
].kind
== SkFree
976 || nsegments
[i
].kind
== SkResvn
;
980 Addr end
= start
+ len
- 1;
982 show_len_concisely(len_buf
, start
, end
);
984 sync_check_ok
= False
;
988 "segment mismatch: V's gap 1st, kernel's 2nd:\n");
989 show_nsegment_full( 0, i
, &nsegments
[i
] );
990 VG_(debugLog
)(0,"aspacem",
991 " : .... %010lx-%010lx %s\n",
992 start
, end
, len_buf
);
997 /* Looks harmless. Keep going. */
1002 /* Sanity check: check that Valgrind and the kernel agree on the
1003 address space layout. Prints offending segments and call point if
1004 a discrepancy is detected, but does not abort the system. Returned
1005 Bool is False if a discrepancy was found. */
1007 Bool
VG_(am_do_sync_check
) ( const HChar
* fn
,
1008 const HChar
* file
, Int line
)
1010 sync_check_ok
= True
;
1012 VG_(debugLog
)(0,"aspacem", "do_sync_check %s:%d\n", file
,line
);
1013 parse_procselfmaps( sync_check_mapping_callback
,
1014 sync_check_gap_callback
);
1015 if (!sync_check_ok
) {
1016 VG_(debugLog
)(0,"aspacem",
1017 "sync check at %s:%d (%s): FAILED\n",
1019 VG_(debugLog
)(0,"aspacem", "\n");
1023 HChar buf
[100]; // large enough
1024 VG_(am_show_nsegments
)(0,"post syncheck failure");
1025 VG_(sprintf
)(buf
, "/bin/cat /proc/%d/maps", VG_(getpid
)());
1031 return sync_check_ok
;
1034 /* Hook to allow sanity checks to be done from aspacemgr-common.c. */
1035 void ML_(am_do_sanity_check
)( void )
1041 /*-----------------------------------------------------------------*/
1043 /*--- Low level access / modification of the segment array. ---*/
1045 /*-----------------------------------------------------------------*/
1047 /* Binary search the interval array for a given address. Since the
1048 array covers the entire address space the search cannot fail. The
1049 _WRK function does the real work. Its caller (just below) caches
1050 the results thereof, to save time. With N_CACHE of 63 we get a hit
1051 rate exceeding 90% when running OpenOffice.
1053 Re ">> 12", it doesn't matter that the page size of some targets
1054 might be different from 12. Really "(a >> 12) % N_CACHE" is merely
1055 a hash function, and the actual cache entry is always validated
1056 correctly against the selected cache entry before use.
1058 /* Don't call find_nsegment_idx_WRK; use find_nsegment_idx instead. */
1059 __attribute__((noinline
))
1060 static Int
find_nsegment_idx_WRK ( Addr a
)
1062 Addr a_mid_lo
, a_mid_hi
;
1065 hi
= nsegments_used
-1;
1067 /* current unsearched space is from lo to hi, inclusive. */
1069 /* Not found. This can't happen. */
1070 ML_(am_barf
)("find_nsegment_idx: not found");
1072 mid
= (lo
+ hi
) / 2;
1073 a_mid_lo
= nsegments
[mid
].start
;
1074 a_mid_hi
= nsegments
[mid
].end
;
1076 if (a
< a_mid_lo
) { hi
= mid
-1; continue; }
1077 if (a
> a_mid_hi
) { lo
= mid
+1; continue; }
1078 aspacem_assert(a
>= a_mid_lo
&& a
<= a_mid_hi
);
1079 aspacem_assert(0 <= mid
&& mid
< nsegments_used
);
1084 inline static Int
find_nsegment_idx ( Addr a
)
1086 # define N_CACHE 131 /*prime*/
1087 static Addr cache_pageno
[N_CACHE
];
1088 static Int cache_segidx
[N_CACHE
];
1089 static Bool cache_inited
= False
;
1092 static UWord n_q
= 0;
1093 static UWord n_m
= 0;
1095 if (0 == (n_q
& 0xFFFF))
1096 VG_(debugLog
)(0,"xxx","find_nsegment_idx: %lu %lu\n", n_q
, n_m
);
1101 if (LIKELY(cache_inited
)) {
1104 for (ix
= 0; ix
< N_CACHE
; ix
++) {
1105 cache_pageno
[ix
] = 0;
1106 cache_segidx
[ix
] = -1;
1108 cache_inited
= True
;
1111 ix
= (a
>> 12) % N_CACHE
;
1113 if ((a
>> 12) == cache_pageno
[ix
]
1114 && cache_segidx
[ix
] >= 0
1115 && cache_segidx
[ix
] < nsegments_used
1116 && nsegments
[cache_segidx
[ix
]].start
<= a
1117 && a
<= nsegments
[cache_segidx
[ix
]].end
) {
1119 /* aspacem_assert( cache_segidx[ix] == find_nsegment_idx_WRK(a) ); */
1120 return cache_segidx
[ix
];
1126 cache_segidx
[ix
] = find_nsegment_idx_WRK(a
);
1127 cache_pageno
[ix
] = a
>> 12;
1128 return cache_segidx
[ix
];
1133 /* Finds the segment containing 'a'. Only returns non-SkFree segments. */
1134 NSegment
const * VG_(am_find_nsegment
) ( Addr a
)
1136 Int i
= find_nsegment_idx(a
);
1137 aspacem_assert(i
>= 0 && i
< nsegments_used
);
1138 aspacem_assert(nsegments
[i
].start
<= a
);
1139 aspacem_assert(a
<= nsegments
[i
].end
);
1140 if (nsegments
[i
].kind
== SkFree
)
1143 return &nsegments
[i
];
1146 /* Finds an anonymous segment containing 'a'. Returned pointer is read only. */
1147 NSegment
const *VG_(am_find_anon_segment
) ( Addr a
)
1149 Int i
= find_nsegment_idx(a
);
1150 aspacem_assert(i
>= 0 && i
< nsegments_used
);
1151 aspacem_assert(nsegments
[i
].start
<= a
);
1152 aspacem_assert(a
<= nsegments
[i
].end
);
1153 if (nsegments
[i
].kind
== SkAnonC
|| nsegments
[i
].kind
== SkAnonV
)
1154 return &nsegments
[i
];
1159 /* Map segment pointer to segment index. */
1160 static Int
segAddr_to_index ( const NSegment
* seg
)
1162 aspacem_assert(seg
>= &nsegments
[0] && seg
< &nsegments
[nsegments_used
]);
1164 return seg
- &nsegments
[0];
/* Find the next segment along from 'here', if it is a non-SkFree segment. */
NSegment const * VG_(am_next_nsegment) ( const NSegment* here, Bool fwds )
{
   Int i = segAddr_to_index(here);

   /* Step one segment forwards or backwards; NULL if we fall off
      either end of the array. */
   if (fwds) {
      i++;
      if (i >= nsegments_used)
         return NULL;
   } else {
      i--;
      if (i < 0)
         return NULL;
   }
   /* Free segments are not interesting to callers of this query. */
   if (nsegments[i].kind == SkFree)
      return NULL;
   else
      return &nsegments[i];
}
1189 /* Trivial fn: return the total amount of space in anonymous mappings,
1190 both for V and the client. Is used for printing stats in
1191 out-of-memory messages. */
1192 ULong
VG_(am_get_anonsize_total
)( void )
1196 for (i
= 0; i
< nsegments_used
; i
++) {
1197 if (nsegments
[i
].kind
== SkAnonC
|| nsegments
[i
].kind
== SkAnonV
) {
1198 total
+= (ULong
)nsegments
[i
].end
1199 - (ULong
)nsegments
[i
].start
+ 1ULL;
/* Test if a piece of memory is addressable by client or by valgrind with at
   least the "prot" protection permissions by examining the underlying
   segments.  The KINDS argument specifies the allowed segments ADDR may
   belong to in order to be considered "valid". */
static
Bool is_valid_for( UInt kinds, Addr start, SizeT len, UInt prot )
{
   Int  i, iLo, iHi;
   Bool needR, needW, needX;

   if (len == 0)
      return True; /* somewhat dubious case */
   if (start + len < start)
      return False; /* reject wraparounds */

   /* Decompose PROT into the individual permission requirements. */
   needR = toBool(prot & VKI_PROT_READ);
   needW = toBool(prot & VKI_PROT_WRITE);
   needX = toBool(prot & VKI_PROT_EXEC);

   iLo = find_nsegment_idx(start);
   aspacem_assert(start >= nsegments[iLo].start);

   if (start+len-1 <= nsegments[iLo].end) {
      /* This is a speedup hack which avoids calling find_nsegment_idx
         a second time when possible.  It is always correct to just
         use the "else" clause below, but is_valid_for_client is
         called a lot by the leak checker, so avoiding pointless calls
         to find_nsegment_idx, which can be expensive, is helpful. */
      iHi = iLo;
   } else {
      iHi = find_nsegment_idx(start + len - 1);
   }

   /* Every segment overlapping [start, start+len-1] must be of an
      allowed kind and must carry all required permissions. */
   for (i = iLo; i <= iHi; i++) {
      if ( (nsegments[i].kind & kinds) != 0
           && (needR ? nsegments[i].hasR : True)
           && (needW ? nsegments[i].hasW : True)
           && (needX ? nsegments[i].hasX : True) ) {
         /* ok */
      } else {
         return False;
      }
   }

   return True;
}
1254 /* Test if a piece of memory is addressable by the client with at
1255 least the "prot" protection permissions by examining the underlying
1257 Bool
VG_(am_is_valid_for_client
)( Addr start
, SizeT len
,
1260 const UInt kinds
= SkFileC
| SkAnonC
| SkShmC
;
1262 return is_valid_for(kinds
, start
, len
, prot
);
1265 /* Variant of VG_(am_is_valid_for_client) which allows free areas to
1266 be consider part of the client's addressable space. It also
1267 considers reservations to be allowable, since from the client's
1268 point of view they don't exist. */
1269 Bool
VG_(am_is_valid_for_client_or_free_or_resvn
)
1270 ( Addr start
, SizeT len
, UInt prot
)
1272 const UInt kinds
= SkFileC
| SkAnonC
| SkShmC
| SkFree
| SkResvn
;
1274 return is_valid_for(kinds
, start
, len
, prot
);
1277 /* Checks if a piece of memory consists of either free or reservation
1279 Bool
VG_(am_is_free_or_resvn
)( Addr start
, SizeT len
)
1281 const UInt kinds
= SkFree
| SkResvn
;
1283 return is_valid_for(kinds
, start
, len
, 0);
1287 Bool
VG_(am_is_valid_for_valgrind
) ( Addr start
, SizeT len
, UInt prot
)
1289 const UInt kinds
= SkFileV
| SkAnonV
;
1291 return is_valid_for(kinds
, start
, len
, prot
);
/* Returns True if any part of the address range is marked as having
   translations made from it.  This is used to determine when to
   discard code, so if in doubt return True. */
static Bool any_Ts_in_range ( Addr start, SizeT len )
{
   Int iLo, iHi, i;
   aspacem_assert(len > 0);
   aspacem_assert(start + len > start);  /* no wraparound */
   iLo = find_nsegment_idx(start);
   iHi = find_nsegment_idx(start + len - 1);
   /* Scan every segment overlapping the range for the hasT flag. */
   for (i = iLo; i <= iHi; i++) {
      if (nsegments[i].hasT)
         return True;
   }
   return False;
}
/* Check whether ADDR looks like an address or address-to-be located in an
   extensible client stack segment. Return true if
   (1) ADDR is located in an already mapped stack segment, OR
   (2) ADDR is located in a reservation segment into which an abutting SkAnonC
       segment can be extended. */
Bool VG_(am_addr_is_in_extensible_client_stack)( Addr addr )
{
   const NSegment *seg = nsegments + find_nsegment_idx(addr);

   switch (seg->kind) {
   case SkFree:
   case SkAnonV:
   case SkFileV:
   case SkFileC:
   case SkShmC:
      /* None of these can be (or grow into) a client stack. */
      return False;

   case SkResvn: {
      /* Only a reservation that grows towards higher addresses can
         back an extensible stack. */
      if (seg->smode != SmUpper) return False;
      /* If the abutting segment towards higher addresses is an SkAnonC
         segment, then ADDR is a future stack pointer. */
      const NSegment *next = VG_(am_next_nsegment)(seg, /*forward*/ True);
      if (next == NULL || next->kind != SkAnonC) return False;

      /* OK; looks like a stack segment */
      return True;
   }

   case SkAnonC: {
      /* If the abutting segment towards lower addresses is an SkResvn
         segment, then ADDR is a stack pointer into mapped memory. */
      const NSegment *next = VG_(am_next_nsegment)(seg, /*forward*/ False);
      if (next == NULL || next->kind != SkResvn || next->smode != SmUpper)
         return False;

      /* OK; looks like a stack segment */
      return True;
   }

   default:
      aspacem_assert(0);   // should never happen
   }
}
1358 /*-----------------------------------------------------------------*/
1360 /*--- Modifying the segment array, and constructing segments. ---*/
1362 /*-----------------------------------------------------------------*/
/* Split the segment containing 'a' into two, so that 'a' is
   guaranteed to be the start of a new segment.  If 'a' is already the
   start of a segment, do nothing. */
static void split_nsegment_at ( Addr a )
{
   Int i, j;

   aspacem_assert(a > 0);
   aspacem_assert(VG_IS_PAGE_ALIGNED(a));

   i = find_nsegment_idx(a);
   aspacem_assert(i >= 0 && i < nsegments_used);

   if (nsegments[i].start == a)
      /* 'a' is already the start point of a segment, so nothing to be
         done. */
      return;

   /* else we have to slide the segments upwards to make a hole */
   if (nsegments_used >= VG_N_SEGMENTS)
      ML_(am_barf_toolow)("VG_N_SEGMENTS");
   for (j = nsegments_used-1; j > i; j--)
      nsegments[j+1] = nsegments[j];
   nsegments_used++;

   /* Duplicate segment i into the hole and adjust the boundaries so
      that segment i covers [old start, a-1] and segment i+1 covers
      [a, old end]. */
   nsegments[i+1]       = nsegments[i];
   nsegments[i+1].start = a;
   nsegments[i].end     = a-1;

   /* For file mappings the second half starts further into the file;
      advance its file offset by the size of the first half. */
   if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkFileC)
      nsegments[i+1].offset
         += ((ULong)nsegments[i+1].start) - ((ULong)nsegments[i].start);

   /* Both halves now refer to the same segment name. */
   ML_(am_inc_refcount)(nsegments[i].fnIdx);

   aspacem_assert(sane_NSegment(&nsegments[i]));
   aspacem_assert(sane_NSegment(&nsegments[i+1]));
}
/* Do the minimum amount of segment splitting necessary to ensure that
   sLo is the first address denoted by some segment and sHi is the
   highest address denoted by some other segment.  Returns the indices
   of the lowest and highest segments in the range. */
static
void split_nsegments_lo_and_hi ( Addr sLo, Addr sHi,
                                 /*OUT*/Int* iLo,
                                 /*OUT*/Int* iHi )
{
   aspacem_assert(sLo < sHi);
   aspacem_assert(VG_IS_PAGE_ALIGNED(sLo));
   aspacem_assert(VG_IS_PAGE_ALIGNED(sHi+1));

   /* Guard against splitting at address zero / past the top of the
      address space. */
   if (sLo > 0)
      split_nsegment_at(sLo);
   if (sHi < sHi+1)
      split_nsegment_at(sHi+1);

   *iLo = find_nsegment_idx(sLo);
   *iHi = find_nsegment_idx(sHi);
   aspacem_assert(0 <= *iLo && *iLo < nsegments_used);
   aspacem_assert(0 <= *iHi && *iHi < nsegments_used);
   aspacem_assert(*iLo <= *iHi);
   aspacem_assert(nsegments[*iLo].start == sLo);
   aspacem_assert(nsegments[*iHi].end == sHi);
   /* Not that I'm overly paranoid or anything, definitely not :-) */
}
/* Add SEG to the collection, deleting/truncating any it overlaps.
   This deals with all the tricky cases of splitting up segments as
   needed. */
static void add_segment ( const NSegment* seg )
{
   Int  i, iLo, iHi, delta;
   Bool segment_is_sane;

   Addr sStart = seg->start;
   Addr sEnd   = seg->end;

   aspacem_assert(sStart <= sEnd);
   aspacem_assert(VG_IS_PAGE_ALIGNED(sStart));
   aspacem_assert(VG_IS_PAGE_ALIGNED(sEnd+1));

   segment_is_sane = sane_NSegment(seg);
   if (!segment_is_sane) show_nsegment_full(0,-1,seg);
   aspacem_assert(segment_is_sane);

   split_nsegments_lo_and_hi( sStart, sEnd, &iLo, &iHi );

   /* Increase the reference count of SEG's name.  We need to do this
      *before* decreasing the reference count of the names of the replaced
      segments.  Consider the case where the segment name of SEG and one of
      the replaced segments are the same.  If the refcount of that name is 1,
      then decrementing first would put the slot for that name on the free
      list.  Attempting to increment the refcount later would then fail
      because the slot is no longer allocated. */
   ML_(am_inc_refcount)(seg->fnIdx);

   /* Now iLo .. iHi inclusive is the range of segment indices which
      seg will replace.  If we're replacing more than one segment,
      slide those above the range down to fill the hole.  Before doing
      that decrement the reference counters for the segments names of
      the replaced segments. */
   for (i = iLo; i <= iHi; ++i)
      ML_(am_dec_refcount)(nsegments[i].fnIdx);
   delta = iHi - iLo;
   aspacem_assert(delta >= 0);
   if (delta > 0) {
      for (i = iLo; i < nsegments_used-delta; i++)
         nsegments[i] = nsegments[i+delta];
      nsegments_used -= delta;
   }

   nsegments[iLo] = *seg;

   (void)preen_nsegments();
   if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
}
/* Clear out an NSegment record.  The result is a free segment of zero
   size with no name and no permissions. */
static void init_nsegment ( /*OUT*/NSegment* seg )
{
   seg->kind     = SkFree;
   seg->start    = 0;
   seg->end      = 0;
   seg->smode    = SmFixed;
   seg->dev      = 0;
   seg->ino      = 0;
   seg->mode     = 0;
   seg->offset   = 0;
   seg->fnIdx    = -1;  /* -1 == no segment name allocated */

   seg->hasR = seg->hasW = seg->hasX = seg->hasT
      = seg->isCH = False;
#if defined(VGO_freebsd)
   /* NOTE(review): FreeBSD-only fields reconstructed from context --
      confirm against priv_aspacemgr.h. */
   seg->isFF = False;
   seg->ignore_offset = False;
#endif
}
/* Make an NSegment which holds a reservation. */
static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
{
   aspacem_assert(start < end);
   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
   init_nsegment(seg);
   seg->kind  = SkResvn;
   seg->start = start;
   seg->end   = end;
}
1524 /*-----------------------------------------------------------------*/
1526 /*--- Startup, including reading /proc/self/maps. ---*/
1528 /*-----------------------------------------------------------------*/
/* Callback invoked by parse_procselfmaps for each mapping found in
   /proc/self/maps at startup: build an NSegment describing it and add
   it to the segment array. */
static void read_maps_callback ( Addr addr, SizeT len, UInt prot,
                                 ULong dev, ULong ino, Off64T offset,
                                 const HChar* filename )
{
   NSegment seg;
   init_nsegment( &seg );
   seg.start  = addr;
   seg.end    = addr+len-1;
   seg.dev    = dev;
   seg.ino    = ino;
   seg.offset = offset;
   seg.hasR   = toBool(prot & VKI_PROT_READ);
   seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   seg.hasT   = False;

   /* A segment in the initial /proc/self/maps is considered a FileV
      segment if either it has a file name associated with it or both its
      device and inode numbers are != 0. See bug #124528. */
   seg.kind = SkAnonV;
   if (filename || (dev != 0 && ino != 0))
      seg.kind = SkFileV;

#  if defined(VGO_darwin)
   // GrP fixme no dev/ino on darwin
   if (offset != 0)
      seg.kind = SkFileV;
#  endif // defined(VGO_darwin)

#  if defined(VGP_arm_linux)
   /* The standard handling of entries read from /proc/self/maps will
      cause the faked up commpage segment to have type SkAnonV, which
      is a problem because it contains code we want the client to
      execute, and so later m_translate will segfault the client when
      it tries to go in there.  Hence change the ownership of it here
      to the client (SkAnonC).  The least-worst kludge I could think
      of. */
   if (addr == ARM_LINUX_FAKE_COMMPAGE_START
       && addr + len == ARM_LINUX_FAKE_COMMPAGE_END1
       && seg.kind == SkAnonV)
      seg.kind = SkAnonC;
#  endif // defined(VGP_arm_linux)

   if (filename)
      seg.fnIdx = ML_(am_allocate_segname)( filename );

   if (0) show_nsegment( 2,0, &seg );
   add_segment( &seg );
}
/* Check whether ADDR is an acceptable value for --aspacem-minaddr:
   page aligned and within [VKI_PAGE_SIZE, max].  If ERRMSG is non-NULL,
   *errmsg is set to a human-readable explanation on failure ("" on
   success). */
Bool
VG_(am_is_valid_for_aspacem_minAddr)( Addr addr, const HChar **errmsg )
{
   const Addr min = VKI_PAGE_SIZE;
#if VG_WORDSIZE == 4
   const Addr max = 0x40000000;  // 1Gb
#else
   const Addr max = 0x200000000; // 8Gb
#endif
   Bool ok = VG_IS_PAGE_ALIGNED(addr) && addr >= min && addr <= max;

   if (errmsg) {
      *errmsg = "";
      if (!ok) {
         const HChar fmt[] = "Must be a page aligned address between "
                             "0x%lx and 0x%lx";
         /* The formatted message lives in a static buffer, sized for
            the format string plus two 16-digit hex numbers. */
         static HChar buf[sizeof fmt + 2 * 16]; // large enough
         ML_(am_sprintf)(buf, fmt, min, max);
         *errmsg = buf;
      }
   }
   return ok;
}
/* See description in pub_core_aspacemgr.h */
Addr VG_(am_startup) ( Addr sp_at_startup )
{
   NSegment seg;
   Addr     suggested_clstack_end;

   aspacem_assert(sizeof(Word)   == sizeof(void*));
   aspacem_assert(sizeof(Addr)   == sizeof(void*));
   aspacem_assert(sizeof(SizeT)  == sizeof(void*));
   aspacem_assert(sizeof(SSizeT) == sizeof(void*));

   /* Initialise the string table for segment names. */
   ML_(am_segnames_init)();

   /* Check that we can store the largest imaginable dev, ino and
      offset numbers in an NSegment. */
   aspacem_assert(sizeof(seg.dev)    == 8);
   aspacem_assert(sizeof(seg.ino)    == 8);
   aspacem_assert(sizeof(seg.offset) == 8);
   aspacem_assert(sizeof(seg.mode)   == 4);

   /* Add a single interval covering the entire address space. */
   init_nsegment(&seg);
   seg.kind       = SkFree;
   seg.start      = Addr_MIN;
   seg.end        = Addr_MAX;
   nsegments[0]   = seg;
   nsegments_used = 1;

   aspacem_minAddr = VG_(clo_aspacem_minAddr);

   // --- Darwin -------------------------------------------
#if defined(VGO_darwin)

# if VG_WORDSIZE == 4
   aspacem_maxAddr = (Addr) 0xffffffff;

   aspacem_cStart = aspacem_minAddr;
   aspacem_vStart = 0xf0000000; // 0xc0000000..0xf0000000 available
# else
   aspacem_maxAddr = (Addr) 0x7fffffffffff;

   aspacem_cStart = aspacem_minAddr;
   aspacem_vStart = 0x700000000000; // 0x7000:00000000..0x7fff:5c000000 avail
   // 0x7fff:5c000000..0x7fff:ffe00000? is stack, dyld, shared cache
# endif

   suggested_clstack_end = -1; // ignored; Mach-O specifies its stack

   // --- Freebsd ------------------------------------------
#elif defined(VGO_freebsd)

   VG_(debugLog)(2, "aspacem",
                    "        sp_at_startup = 0x%010lx (supplied)\n",
                    sp_at_startup );

# if VG_WORDSIZE == 4
   aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
# else
   aspacem_maxAddr = (Addr)0x2000000000ULL - 1; // 128G
#  ifdef ENABLE_INNER
   { Addr cse = VG_PGROUNDDN( sp_at_startup ) - 1;
     if (aspacem_maxAddr > cse)
        aspacem_maxAddr = cse;
   }
#  endif // ENABLE_INNER
# endif

   aspacem_cStart = aspacem_minAddr;
   aspacem_vStart = VG_PGROUNDUP((aspacem_minAddr + aspacem_maxAddr + 1) / 2);

# ifdef ENABLE_INNER
   aspacem_vStart -= 0x10000000UL; // 512M
# endif // ENABLE_INNER

   // starting with FreeBSD 10.4, the stack is created with a zone
   // that is marked MAP_GUARD. This zone is reserved but unmapped,
   // and fills the space up to the end of the segment
   // Version number from
   // https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions-10.html

   // On x86 this is 0x3FE0000
   // And on amd64 it is 0x1FFE0000 (536739840)
   // There is less of an issue on amd64 as we just choose some arbitrary address rather then trying
   // to squeeze in just below the host stack

   // Some of this is in sys/vm/vm_map.c, for instance vm_map_stack and vm_map_stack_locked
   // These refer to the kernel global sgrowsiz, which seems to be the initial size
   // of the user stack, 128k on my system

   // This seems to be in the sysctl kern.sgrowsiz
   // Then there is kern.maxssiz which is the total stack size (grow size + guard area)
   // In other words guard area = maxssiz - sgrowsiz

#if (__FreeBSD_version >= 1003516)
#if 0
   // this block implements what is described above
   // this makes no changes to the regression tests
   // I'm keeping it for a rainy day.

   // #include "pub_core_libcproc.h"
   SizeT kern_maxssiz;
   SizeT kern_sgrowsiz;
   SizeT sysctl_size = sizeof(SizeT);
   VG_(sysctlbyname)("kern.maxssiz", &kern_maxssiz, &sysctl_size, NULL, 0);
   VG_(sysctlbyname)("kern.sgrowsiz", &kern_sgrowsiz, &sysctl_size, NULL, 0);

   suggested_clstack_end = aspacem_maxAddr - (kern_maxssiz - kern_sgrowsiz) + VKI_PAGE_SIZE;
#endif

   suggested_clstack_end = aspacem_maxAddr - 64*1024*1024UL
                                           + VKI_PAGE_SIZE;
#else
   suggested_clstack_end = aspacem_maxAddr - 16*1024*1024UL
                                           + VKI_PAGE_SIZE;
#endif

   // --- Solaris ------------------------------------------
#elif defined(VGO_solaris)
# if VG_WORDSIZE == 4
   /*
      Intended address space partitioning:

      ,--------------------------------, 0x00000000
      |                                |
      |--------------------------------|
      | initial stack given to V by OS |
      |--------------------------------| 0x08000000
      | client text                    |
      |--------------------------------|
      |                                |
      |                                |
      |--------------------------------|
      | client stack                   |
      |--------------------------------| 0x58000000
      | V's text                       |
      |--------------------------------|
      |                                |
      |                                |
      |--------------------------------|
      | dynamic shared objects         |
      '--------------------------------' 0xffffffff
   */

   /* Anonymous pages need to fit under user limit (USERLIMIT32)
      which is 4KB + 16MB below the top of the 32-bit range. */
#  ifdef ENABLE_INNER
   aspacem_maxAddr = (Addr)0x4fffffff; // 1.25GB
   aspacem_vStart  = (Addr)0x40000000; // 1GB
#  else
   aspacem_maxAddr = (Addr)0xfefff000 - 1; // 4GB - 16MB - 4KB
   aspacem_vStart  = (Addr)0x50000000; // 1.25GB
#  endif
# elif VG_WORDSIZE == 8
   /*
      Intended address space partitioning:

      ,--------------------------------, 0x00000000_00000000
      |                                |
      |--------------------------------| 0x00000000_00400000
      | client text                    |
      |--------------------------------|
      |                                |
      |                                |
      |--------------------------------|
      | client stack                   |
      |--------------------------------| 0x00000000_58000000
      | V's text                       |
      |--------------------------------|
      |                                |
      |--------------------------------|
      | dynamic shared objects         |
      |--------------------------------| 0x0000001f_ffffffff
      |                                |
      |                                |
      |--------------------------------|
      | initial stack given to V by OS |
      '--------------------------------' 0xffffffff_ffffffff
   */

   /* Kernel likes to place objects at the end of the address space.
      However accessing memory beyond 128GB makes memcheck slow
      (see memcheck/mc_main.c, internal representation). Therefore:
      - mmapobj() syscall is emulated so that libraries are subject to
        Valgrind's aspacemgr control
      - Kernel shared pages (such as schedctl and hrt) are left as they are
        because kernel cannot be told where they should be put */
#  ifdef ENABLE_INNER
   aspacem_maxAddr = (Addr) 0x0000000fffffffff; // 64GB
   aspacem_vStart  = (Addr) 0x0000000800000000; // 32GB
#  else
   aspacem_maxAddr = (Addr) 0x0000001fffffffff; // 128GB
   aspacem_vStart  = (Addr) 0x0000001000000000; // 64GB
#  endif
# else
#  error "Unknown word size"
# endif

   aspacem_cStart = aspacem_minAddr;
# ifdef ENABLE_INNER
   suggested_clstack_end = (Addr) 0x37ff0000 - 1; // 64kB below V's text
# else
   suggested_clstack_end = (Addr) 0x57ff0000 - 1; // 64kB below V's text
# endif

   // --- Linux --------------------------------------------
#else

   /* Establish address limits and block out unusable parts
      accordingly. */

   VG_(debugLog)(2, "aspacem",
                    "        sp_at_startup = 0x%010lx (supplied)\n",
                    sp_at_startup );

#  if VG_WORDSIZE == 8
     aspacem_maxAddr = (Addr)0x2000000000ULL - 1; // 128G
#    ifdef ENABLE_INNER
     { Addr cse = VG_PGROUNDDN( sp_at_startup ) - 1;
       if (aspacem_maxAddr > cse)
          aspacem_maxAddr = cse;
     }
#    endif
#  else
     aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
#  endif

   aspacem_cStart = aspacem_minAddr;
   aspacem_vStart = VG_PGROUNDUP(aspacem_minAddr
                                 + (aspacem_maxAddr - aspacem_minAddr + 1) / 2);
#  ifdef ENABLE_INNER
   aspacem_vStart -= 0x20000000; // 512M
#  endif

   suggested_clstack_end = aspacem_maxAddr - 16*1024*1024ULL
                                           + VKI_PAGE_SIZE;

#endif /* #else of 'defined(VGO_solaris)' */
   // --- (end) --------------------------------------------

   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
   aspacem_assert(VG_IS_PAGE_ALIGNED(suggested_clstack_end + 1));

   VG_(debugLog)(2, "aspacem",
                    "              minAddr = 0x%010lx (computed)\n",
                    aspacem_minAddr);
   VG_(debugLog)(2, "aspacem",
                    "              maxAddr = 0x%010lx (computed)\n",
                    aspacem_maxAddr);
   VG_(debugLog)(2, "aspacem",
                    "               cStart = 0x%010lx (computed)\n",
                    aspacem_cStart);
   VG_(debugLog)(2, "aspacem",
                    "               vStart = 0x%010lx (computed)\n",
                    aspacem_vStart);
   VG_(debugLog)(2, "aspacem",
                    "suggested_clstack_end = 0x%010lx (computed)\n",
                    suggested_clstack_end);

   /* Block out the parts of the address space which V will not use:
      below cStart and above maxAddr. */
   if (aspacem_cStart > Addr_MIN) {
      init_resvn(&seg, Addr_MIN, aspacem_cStart-1);
      add_segment(&seg);
   }
   if (aspacem_maxAddr < Addr_MAX) {
      init_resvn(&seg, aspacem_maxAddr+1, Addr_MAX);
      add_segment(&seg);
   }

   /* Create a 1-page reservation at the notional initial
      client/valgrind boundary.  This isn't strictly necessary, but
      because the advisor does first-fit and starts searches for
      valgrind allocations at the boundary, this is kind of necessary
      in order to get it to start allocating in the right place. */
   init_resvn(&seg, aspacem_vStart, aspacem_vStart + VKI_PAGE_SIZE - 1);
   add_segment(&seg);

   VG_(am_show_nsegments)(2, "Initial layout");

   VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
   parse_procselfmaps( read_maps_callback, NULL );
   /* NB: on arm-linux, parse_procselfmaps automagically kludges up
      (iow, hands to its callbacks) a description of the ARM Commpage,
      since that's not listed in /proc/self/maps (kernel bug IMO).  We
      have to fake up its existence in parse_procselfmaps and not
      merely add it here as an extra segment, because doing the latter
      causes sync checking to fail: we see we have an extra segment in
      the segments array, which isn't listed in /proc/self/maps.
      Hence we must make it appear that /proc/self/maps contained this
      segment all along.  Sigh. */

   VG_(am_show_nsegments)(2, "With contents of /proc/self/maps");

   AM_SANITY_CHECK;
   return suggested_clstack_end;
}
1913 /*-----------------------------------------------------------------*/
1915 /*--- The core query-notify mechanism. ---*/
1917 /*-----------------------------------------------------------------*/
/* Query aspacem to ask where a mapping should go. */
Addr VG_(am_get_advisory) ( const MapRequest*  req,
                            Bool  forClient,
                            /*OUT*/Bool* ok )
{
   /* This function implements allocation policy.

      The nature of the allocation request is determined by req, which
      specifies the start and length of the request and indicates
      whether the start address is mandatory, a hint, or irrelevant,
      and by forClient, which says whether this is for the client or
      for V.

      Return values: the request can be vetoed (*ok is set to False),
      in which case the caller should not attempt to proceed with
      making the mapping.  Otherwise, *ok is set to True, the caller
      may proceed, and the preferred address at which the mapping
      should happen is returned.

      Note that this is an advisory system only: the kernel can in
      fact do whatever it likes as far as placement goes, and we have
      no absolute control over it.

      Allocations will never be granted in a reserved area.

      The Default Policy is:

        Search the address space for two free intervals: one of them
        big enough to contain the request without regard to the
        specified address (viz, as if it was a floating request) and
        the other being able to contain the request at the specified
        address (viz, as if were a fixed request).  Then, depending on
        the outcome of the search and the kind of request made, decide
        whether the request is allowable and what address to advise.

      The Default Policy is overridden by Policy Exception #1:

        If the request is for a fixed client map, we are prepared to
        grant it providing all areas inside the request are either
        free, reservations, or mappings belonging to the client.  In
        other words we are prepared to let the client trash its own
        mappings if it wants to.

      The Default Policy is overridden by Policy Exception #2:

        If the request is for a hinted client map, we are prepared to
        grant it providing all areas inside the request are either
        free or reservations.  In other words we are prepared to let
        the client have a hinted mapping anywhere it likes provided
        it does not trash either any of its own mappings or any of
        valgrind's mappings.
   */
   Int  i, j;
   Addr holeStart, holeEnd, holeLen;
   Bool fixed_not_required;

   /* Solaris allocates downwards from the top of the usable range;
      the other OSes allocate upwards from the bottom. */
#if defined(VGO_solaris)
   Addr startPoint = forClient ? aspacem_vStart - 1 : aspacem_maxAddr - 1;
#else
   Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
#endif /* VGO_solaris */

   Addr reqStart = req->rkind==MFixed || req->rkind==MHint ? req->start : 0;
   Addr reqEnd   = reqStart + req->len - 1;
   Addr reqLen   = req->len;

   /* These hold indices for segments found during search, or -1 if not
      found. */
   Int floatIdx = -1;
   Int fixedIdx = -1;

   aspacem_assert(nsegments_used > 0);

   if (0) {
      VG_(am_show_nsegments)(0,"getAdvisory");
      VG_(debugLog)(0,"aspacem", "getAdvisory 0x%lx %lu\n",
                    req->start, req->len);
   }

   /* Reject zero-length requests */
   if (req->len == 0) {
      *ok = False;
      return 0;
   }

   /* Reject wraparounds */
   if (req->start + req->len < req->start) {
      *ok = False;
      return 0;
   }

   /* ------ Implement Policy Exception #1 ------ */

   if (forClient && req->rkind == MFixed) {
      Int  iLo   = find_nsegment_idx(reqStart);
      Int  iHi   = find_nsegment_idx(reqEnd);
      Bool allow = True;
      for (i = iLo; i <= iHi; i++) {
         if (nsegments[i].kind == SkFree
             || nsegments[i].kind == SkFileC
             || nsegments[i].kind == SkAnonC
             || nsegments[i].kind == SkShmC
             || nsegments[i].kind == SkResvn) {
            /* ok */
         } else {
            allow = False;
            break;
         }
      }
      if (allow) {
         /* Acceptable.  Granted. */
         *ok = True;
         return reqStart;
      }
      /* Not acceptable.  Fail. */
      *ok = False;
      return 0;
   }

   /* ------ Implement Policy Exception #2 ------ */

   if (forClient && req->rkind == MHint) {
      Int  iLo   = find_nsegment_idx(reqStart);
      Int  iHi   = find_nsegment_idx(reqEnd);
      Bool allow = True;
      for (i = iLo; i <= iHi; i++) {
         if (nsegments[i].kind == SkFree
             || nsegments[i].kind == SkResvn) {
            /* ok */
         } else {
            allow = False;
            break;
         }
      }
      if (allow) {
         /* Acceptable.  Granted. */
         *ok = True;
         return reqStart;
      }
      /* Not acceptable.  Fall through to the default policy. */
   }

   /* ------ Implement the Default Policy ------ */

   /* Don't waste time looking for a fixed match if not requested to. */
   fixed_not_required = req->rkind == MAny || req->rkind == MAlign;

   i = find_nsegment_idx(startPoint);

   /* UPDATE_INDEX advances the search cursor circularly: downwards on
      Solaris, upwards elsewhere.  ADVISE_ADDRESS picks the address to
      advise within a suitable hole, matching the search direction. */
#if defined(VGO_solaris)
#  define UPDATE_INDEX(index)                               \
      (index)--;                                            \
      if ((index) <= 0)                                     \
         (index) = nsegments_used - 1;
#  define ADVISE_ADDRESS(segment)                           \
      VG_PGROUNDDN((segment)->end + 1 - reqLen)
#  define ADVISE_ADDRESS_ALIGNED(segment)                   \
      VG_ROUNDDN((segment)->end + 1 - reqLen, req->start)

#else

#  define UPDATE_INDEX(index)                               \
      (index)++;                                            \
      if ((index) >= nsegments_used)                        \
         (index) = 0;
#  define ADVISE_ADDRESS(segment)                           \
      ((segment)->start)
#  define ADVISE_ADDRESS_ALIGNED(segment)                   \
      VG_ROUNDUP((segment)->start, req->start)
#endif /* VGO_solaris */

   /* Examine holes from index i back round to i-1.  Record the
      index first fixed hole and the first floating hole which would
      satisfy the request. */
   for (j = 0; j < nsegments_used; j++) {

      if (nsegments[i].kind != SkFree) {
         UPDATE_INDEX(i);
         continue;
      }

      holeStart = nsegments[i].start;
      holeEnd   = nsegments[i].end;

      /* Stay sane .. */
      aspacem_assert(holeStart <= holeEnd);
      aspacem_assert(aspacem_minAddr <= holeStart);
      aspacem_assert(holeEnd <= aspacem_maxAddr);

      if (req->rkind == MAlign) {
         holeStart = VG_ROUNDUP(holeStart, req->start);
         if (holeStart >= holeEnd) {
            /* This hole can't be used. */
            UPDATE_INDEX(i);
            continue;
         }
      }

      /* See if it's any use to us. */
      holeLen = holeEnd - holeStart + 1;

      if (fixedIdx == -1 && holeStart <= reqStart && reqEnd <= holeEnd)
         fixedIdx = i;

      if (floatIdx == -1 && holeLen >= reqLen)
         floatIdx = i;

      /* Don't waste time searching once we've found what we wanted. */
      if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
         break;

      UPDATE_INDEX(i);
   }

   aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
   if (fixedIdx >= 0)
      aspacem_assert(nsegments[fixedIdx].kind == SkFree);

   aspacem_assert(floatIdx >= -1 && floatIdx < nsegments_used);
   if (floatIdx >= 0)
      aspacem_assert(nsegments[floatIdx].kind == SkFree);

   AM_SANITY_CHECK;

   /* Now see if we found anything which can satisfy the request. */
   switch (req->rkind) {
      case MFixed:
         if (fixedIdx >= 0) {
            *ok = True;
            return req->start;
         } else {
            *ok = False;
            return 0;
         }
      case MHint:
         if (fixedIdx >= 0) {
            *ok = True;
            return req->start;
         }
         if (floatIdx >= 0) {
            *ok = True;
            return ADVISE_ADDRESS(&nsegments[floatIdx]);
         }
         *ok = False;
         return 0;
      case MAny:
         if (floatIdx >= 0) {
            *ok = True;
            return ADVISE_ADDRESS(&nsegments[floatIdx]);
         }
         *ok = False;
         return 0;
      case MAlign:
         if (floatIdx >= 0) {
            *ok = True;
            return ADVISE_ADDRESS_ALIGNED(&nsegments[floatIdx]);
         }
         *ok = False;
         return 0;
      default:
         break;
   }

   /*NOTREACHED*/
   ML_(am_barf)("getAdvisory: unknown request kind");
   *ok = False;
   return 0;

#undef UPDATE_INDEX
#undef ADVISE_ADDRESS
#undef ADVISE_ADDRESS_ALIGNED
}
/* Convenience wrapper for VG_(am_get_advisory) for client floating or
   fixed requests.  If start is zero, a floating request is issued; if
   nonzero, a fixed request at that address is issued.  Same comments
   about return values apply. */
Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
                                          /*OUT*/Bool* ok )
{
   MapRequest mreq;
   mreq.rkind = start==0 ? MAny : MFixed;
   mreq.start = start;
   mreq.len   = len;
   return VG_(am_get_advisory)( &mreq, True/*forClient*/, ok );
}
2209 /* Similar to VG_(am_find_nsegment) but only returns free segments. */
2210 static NSegment
const * VG_(am_find_free_nsegment
) ( Addr a
)
2212 Int i
= find_nsegment_idx(a
);
2213 aspacem_assert(i
>= 0 && i
< nsegments_used
);
2214 aspacem_assert(nsegments
[i
].start
<= a
);
2215 aspacem_assert(a
<= nsegments
[i
].end
);
2216 if (nsegments
[i
].kind
== SkFree
)
2217 return &nsegments
[i
];
2222 Bool
VG_(am_covered_by_single_free_segment
)
2223 ( Addr start
, SizeT len
)
2225 NSegment
const* segLo
= VG_(am_find_free_nsegment
)( start
);
2226 NSegment
const* segHi
= VG_(am_find_free_nsegment
)( start
+ len
- 1 );
2228 return segLo
!= NULL
&& segHi
!= NULL
&& segLo
== segHi
;
2232 /* Notifies aspacem that the client completed an mmap successfully.
2233 The segment array is updated accordingly. If the returned Bool is
2234 True, the caller should immediately discard translations from the
2235 specified address range. */
2238 VG_(am_notify_client_mmap
)( Addr a
, SizeT len
, UInt prot
, UInt flags
,
2239 Int fd
, Off64T offset
)
2241 HChar buf
[VKI_PATH_MAX
];
2247 aspacem_assert(len
> 0);
2248 aspacem_assert(VG_IS_PAGE_ALIGNED(a
));
2249 aspacem_assert(VG_IS_PAGE_ALIGNED(len
));
2250 aspacem_assert(VG_IS_PAGE_ALIGNED(offset
));
2252 /* Discard is needed if any of the just-trashed range had T. */
2253 needDiscard
= any_Ts_in_range( a
, len
);
2255 init_nsegment( &seg
);
2256 seg
.kind
= (flags
& (VKI_MAP_ANONYMOUS
| VKI_MAP_STACK
)) ? SkAnonC
: SkFileC
;
2258 seg
.end
= a
+ len
- 1;
2259 seg
.hasR
= toBool(prot
& VKI_PROT_READ
);
2260 seg
.hasW
= toBool(prot
& VKI_PROT_WRITE
);
2261 seg
.hasX
= toBool(prot
& VKI_PROT_EXEC
);
2262 if (!(flags
& (VKI_MAP_ANONYMOUS
| VKI_MAP_STACK
))) {
2263 // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
2264 seg
.offset
= offset
;
2265 if (ML_(am_get_fd_d_i_m
)(fd
, &dev
, &ino
, &mode
)) {
2270 if (ML_(am_resolve_filename
)(fd
, buf
, VKI_PATH_MAX
)) {
2271 seg
.fnIdx
= ML_(am_allocate_segname
)( buf
);
2273 #if defined(VGO_freebsd)
2274 seg
.isFF
= (flags
& VKI_MAP_FIXED
);
2277 add_segment( &seg
);
2282 /* Notifies aspacem that the client completed a shmat successfully.
2283 The segment array is updated accordingly. If the returned Bool is
2284 True, the caller should immediately discard translations from the
2285 specified address range. */
2288 VG_(am_notify_client_shmat
)( Addr a
, SizeT len
, UInt prot
)
2293 aspacem_assert(len
> 0);
2294 aspacem_assert(VG_IS_PAGE_ALIGNED(a
));
2295 aspacem_assert(VG_IS_PAGE_ALIGNED(len
));
2297 /* Discard is needed if any of the just-trashed range had T. */
2298 needDiscard
= any_Ts_in_range( a
, len
);
2300 init_nsegment( &seg
);
2303 seg
.end
= a
+ len
- 1;
2305 seg
.hasR
= toBool(prot
& VKI_PROT_READ
);
2306 seg
.hasW
= toBool(prot
& VKI_PROT_WRITE
);
2307 seg
.hasX
= toBool(prot
& VKI_PROT_EXEC
);
2308 add_segment( &seg
);
2313 /* Notifies aspacem that an mprotect was completed successfully. The
2314 segment array is updated accordingly. Note, as with
2315 VG_(am_notify_munmap), it is not the job of this function to reject
2316 stupid mprotects, for example the client doing mprotect of
2317 non-client areas. Such requests should be intercepted earlier, by
2318 the syscall wrapper for mprotect. This function merely records
2319 whatever it is told. If the returned Bool is True, the caller
2320 should immediately discard translations from the specified address
2323 Bool
VG_(am_notify_mprotect
)( Addr start
, SizeT len
, UInt prot
)
2326 Bool newR
, newW
, newX
, needDiscard
;
2328 aspacem_assert(VG_IS_PAGE_ALIGNED(start
));
2329 aspacem_assert(VG_IS_PAGE_ALIGNED(len
));
2334 newR
= toBool(prot
& VKI_PROT_READ
);
2335 newW
= toBool(prot
& VKI_PROT_WRITE
);
2336 newX
= toBool(prot
& VKI_PROT_EXEC
);
2338 /* Discard is needed if we're dumping X permission */
2339 needDiscard
= any_Ts_in_range( start
, len
) && !newX
;
2341 split_nsegments_lo_and_hi( start
, start
+len
-1, &iLo
, &iHi
);
2343 iLo
= find_nsegment_idx(start
);
2344 iHi
= find_nsegment_idx(start
+ len
- 1);
2346 for (i
= iLo
; i
<= iHi
; i
++) {
2347 /* Apply the permissions to all relevant segments. */
2348 switch (nsegments
[i
].kind
) {
2349 case SkAnonC
: case SkAnonV
: case SkFileC
: case SkFileV
: case SkShmC
:
2350 nsegments
[i
].hasR
= newR
;
2351 nsegments
[i
].hasW
= newW
;
2352 nsegments
[i
].hasX
= newX
;
2353 aspacem_assert(sane_NSegment(&nsegments
[i
]));
2360 /* Changing permissions could have made previously un-mergable
2361 segments mergeable. Therefore have to re-preen them. */
2362 (void)preen_nsegments();
2368 /* Notifies aspacem that an munmap completed successfully. The
2369 segment array is updated accordingly. As with
2370 VG_(am_notify_mprotect), we merely record the given info, and don't
2371 check it for sensibleness. If the returned Bool is True, the
2372 caller should immediately discard translations from the specified
2375 Bool
VG_(am_notify_munmap
)( Addr start
, SizeT len
)
2379 aspacem_assert(VG_IS_PAGE_ALIGNED(start
));
2380 aspacem_assert(VG_IS_PAGE_ALIGNED(len
));
2385 needDiscard
= any_Ts_in_range( start
, len
);
2387 init_nsegment( &seg
);
2389 seg
.end
= start
+ len
- 1;
2391 /* The segment becomes unused (free). Segments from above
2392 aspacem_maxAddr were originally SkResvn and so we make them so
2393 again. Note, this isn't really right when the segment straddles
2394 the aspacem_maxAddr boundary - then really it should be split in
2395 two, the lower part marked as SkFree and the upper part as
2396 SkResvn. Ah well. */
2397 if (start
> aspacem_maxAddr
2398 && /* check previous comparison is meaningful */
2399 aspacem_maxAddr
< Addr_MAX
)
2402 /* Ditto for segments from below aspacem_minAddr. */
2403 if (seg
.end
< aspacem_minAddr
&& aspacem_minAddr
> 0)
2408 add_segment( &seg
);
2410 /* Unmapping could create two adjacent free segments, so a preen is
2411 needed. add_segment() will do that, so no need to here. */
2417 /*-----------------------------------------------------------------*/
2419 /*--- Handling mappings which do not arise directly from the ---*/
2420 /*--- simulation of the client. ---*/
2422 /*-----------------------------------------------------------------*/
2424 /* --- --- --- map, unmap, protect --- --- --- */
2426 /* Map a file at a fixed address for the client, and update the
2427 segment array accordingly. */
2429 SysRes
VG_(am_mmap_file_fixed_client
)
2430 ( Addr start
, SizeT length
, UInt prot
, Int fd
, Off64T offset
)
2432 UInt flags
= VKI_MAP_FIXED
| VKI_MAP_PRIVATE
;
2433 return VG_(am_mmap_named_file_fixed_client_flags
)(start
, length
, prot
, flags
,
2437 SysRes
VG_(am_mmap_file_fixed_client_flags
)
2438 ( Addr start
, SizeT length
, UInt prot
, UInt flags
, Int fd
, Off64T offset
)
2440 return VG_(am_mmap_named_file_fixed_client_flags
)(start
, length
, prot
, flags
,
2444 SysRes
VG_(am_mmap_named_file_fixed_client
)
2445 ( Addr start
, SizeT length
, UInt prot
, Int fd
, Off64T offset
, const HChar
*name
)
2447 UInt flags
= VKI_MAP_FIXED
| VKI_MAP_PRIVATE
;
2448 return VG_(am_mmap_named_file_fixed_client_flags
)(start
, length
, prot
, flags
,
2452 SysRes
VG_(am_mmap_named_file_fixed_client_flags
)
2453 ( Addr start
, SizeT length
, UInt prot
, UInt flags
,
2454 Int fd
, Off64T offset
, const HChar
*name
)
2463 HChar buf
[VKI_PATH_MAX
];
2465 /* Not allowable. */
2467 || !VG_IS_PAGE_ALIGNED(start
)
2468 || !VG_IS_PAGE_ALIGNED(offset
))
2469 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2471 /* Ask for an advisory. If it's negative, fail immediately. */
2475 advised
= VG_(am_get_advisory
)( &req
, True
/*forClient*/, &ok
);
2476 if (!ok
|| advised
!= start
)
2477 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2479 /* We have been advised that the mapping is allowable at the
2480 specified address. So hand it off to the kernel, and propagate
2481 any resulting failure immediately. */
2482 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2483 sres
= VG_(am_do_mmap_NO_NOTIFY
)(
2484 start
, length
, prot
, flags
,
2487 if (sr_isError(sres
))
2490 if (sr_Res(sres
) != start
) {
2491 /* I don't think this can happen. It means the kernel made a
2492 fixed map succeed but not at the requested location. Try to
2493 repair the damage, then return saying the mapping failed. */
2494 (void)ML_(am_do_munmap_NO_NOTIFY
)( sr_Res(sres
), length
);
2495 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2498 /* Ok, the mapping succeeded. Now notify the interval map. */
2499 init_nsegment( &seg
);
2502 seg
.end
= seg
.start
+ VG_PGROUNDUP(length
) - 1;
2503 seg
.offset
= offset
;
2504 seg
.hasR
= toBool(prot
& VKI_PROT_READ
);
2505 seg
.hasW
= toBool(prot
& VKI_PROT_WRITE
);
2506 seg
.hasX
= toBool(prot
& VKI_PROT_EXEC
);
2507 if (ML_(am_get_fd_d_i_m
)(fd
, &dev
, &ino
, &mode
)) {
2513 seg
.fnIdx
= ML_(am_allocate_segname
)( name
);
2514 } else if (ML_(am_resolve_filename
)(fd
, buf
, VKI_PATH_MAX
)) {
2515 seg
.fnIdx
= ML_(am_allocate_segname
)( buf
);
2517 #if defined(VGO_freebsd)
2518 seg
.isFF
= (flags
& VKI_MAP_FIXED
);
2520 add_segment( &seg
);
2527 /* Map anonymously at a fixed address for the client, and update
2528 the segment array accordingly. */
2530 SysRes
VG_(am_mmap_anon_fixed_client
) ( Addr start
, SizeT length
, UInt prot
)
2538 /* Not allowable. */
2539 if (length
== 0 || !VG_IS_PAGE_ALIGNED(start
))
2540 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2542 /* Ask for an advisory. If it's negative, fail immediately. */
2546 advised
= VG_(am_get_advisory
)( &req
, True
/*forClient*/, &ok
);
2547 if (!ok
|| advised
!= start
)
2548 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2550 /* We have been advised that the mapping is allowable at the
2551 specified address. So hand it off to the kernel, and propagate
2552 any resulting failure immediately. */
2553 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2554 sres
= VG_(am_do_mmap_NO_NOTIFY
)(
2555 start
, length
, prot
,
2556 VKI_MAP_FIXED
|VKI_MAP_PRIVATE
|VKI_MAP_ANONYMOUS
,
2559 if (sr_isError(sres
))
2562 if (sr_Res(sres
) != start
) {
2563 /* I don't think this can happen. It means the kernel made a
2564 fixed map succeed but not at the requested location. Try to
2565 repair the damage, then return saying the mapping failed. */
2566 (void)ML_(am_do_munmap_NO_NOTIFY
)( sr_Res(sres
), length
);
2567 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2570 /* Ok, the mapping succeeded. Now notify the interval map. */
2571 init_nsegment( &seg
);
2574 seg
.end
= seg
.start
+ VG_PGROUNDUP(length
) - 1;
2575 seg
.hasR
= toBool(prot
& VKI_PROT_READ
);
2576 seg
.hasW
= toBool(prot
& VKI_PROT_WRITE
);
2577 seg
.hasX
= toBool(prot
& VKI_PROT_EXEC
);
2578 add_segment( &seg
);
2585 /* Map anonymously at an unconstrained address for the client, and
2586 update the segment array accordingly. */
2588 static SysRes
am_mmap_anon_float_client ( SizeT length
, Int prot
, Bool isCH
)
2596 /* Not allowable. */
2598 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2600 /* Ask for an advisory. If it's negative, fail immediately. */
2604 advised
= VG_(am_get_advisory
)( &req
, True
/*forClient*/, &ok
);
2606 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2608 /* We have been advised that the mapping is allowable at the
2609 advised address. So hand it off to the kernel, and propagate
2610 any resulting failure immediately. */
2611 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2612 sres
= VG_(am_do_mmap_NO_NOTIFY
)(
2613 advised
, length
, prot
,
2614 VKI_MAP_FIXED
|VKI_MAP_PRIVATE
|VKI_MAP_ANONYMOUS
,
2617 if (sr_isError(sres
))
2620 if (sr_Res(sres
) != advised
) {
2621 /* I don't think this can happen. It means the kernel made a
2622 fixed map succeed but not at the requested location. Try to
2623 repair the damage, then return saying the mapping failed. */
2624 (void)ML_(am_do_munmap_NO_NOTIFY
)( sr_Res(sres
), length
);
2625 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2628 /* Ok, the mapping succeeded. Now notify the interval map. */
2629 init_nsegment( &seg
);
2631 seg
.start
= advised
;
2632 seg
.end
= seg
.start
+ VG_PGROUNDUP(length
) - 1;
2633 seg
.hasR
= toBool(prot
& VKI_PROT_READ
);
2634 seg
.hasW
= toBool(prot
& VKI_PROT_WRITE
);
2635 seg
.hasX
= toBool(prot
& VKI_PROT_EXEC
);
2637 add_segment( &seg
);
2643 SysRes
VG_(am_mmap_anon_float_client
) ( SizeT length
, Int prot
)
2645 return am_mmap_anon_float_client (length
, prot
, False
/* isCH */);
2648 /* Map anonymously at an unconstrained address for V, and update the
2649 segment array accordingly. This is fundamentally how V allocates
2650 itself more address space when needed. */
2652 SysRes
VG_(am_mmap_anon_float_valgrind
)( SizeT length
)
2660 /* Not allowable. */
2662 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2664 /* Ask for an advisory. If it's negative, fail immediately. */
2668 advised
= VG_(am_get_advisory
)( &req
, False
/*forClient*/, &ok
);
2670 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2672 // On Darwin, for anonymous maps you can pass in a tag which is used by
2673 // programs like vmmap for statistical purposes.
2674 #ifndef VM_TAG_VALGRIND
2675 # define VM_TAG_VALGRIND 0
2678 /* We have been advised that the mapping is allowable at the
2679 specified address. So hand it off to the kernel, and propagate
2680 any resulting failure immediately. */
2681 /* GrP fixme darwin: use advisory as a hint only, otherwise syscall in
2682 another thread can pre-empt our spot. [At one point on the DARWIN
2683 branch the VKI_MAP_FIXED was commented out; unclear if this is
2684 necessary or not given the second Darwin-only call that immediately
2685 follows if this one fails. --njn]
2686 Also, an inner valgrind cannot observe the mmap syscalls done by
2687 the outer valgrind. The outer Valgrind might make the mmap
2688 fail here, as the inner valgrind believes that a segment is free,
2689 while it is in fact used by the outer valgrind.
2690 So, for an inner valgrind, similarly to DARWIN, if the fixed mmap
2691 fails, retry the mmap without map fixed.
2692 This is a kludge which on linux is only activated for the inner.
2693 The state of the inner aspacemgr is not made correct by this kludge
2694 and so a.o. VG_(am_do_sync_check) could fail.
2695 A proper solution implies a better collaboration between the
2696 inner and the outer (e.g. inner VG_(am_get_advisory) should do
2697 a client request to call the outer VG_(am_get_advisory). */
2698 sres
= VG_(am_do_mmap_NO_NOTIFY
)(
2700 VKI_PROT_READ
|VKI_PROT_WRITE
|VKI_PROT_EXEC
,
2701 VKI_MAP_FIXED
|VKI_MAP_PRIVATE
|VKI_MAP_ANONYMOUS
,
2704 #if defined(VGO_darwin) || defined(ENABLE_INNER)
2705 /* Kludge on Darwin and inner linux if the fixed mmap failed. */
2706 if (sr_isError(sres
)) {
2707 /* try again, ignoring the advisory */
2708 sres
= VG_(am_do_mmap_NO_NOTIFY
)(
2710 VKI_PROT_READ
|VKI_PROT_WRITE
|VKI_PROT_EXEC
,
2711 /*VKI_MAP_FIXED|*/VKI_MAP_PRIVATE
|VKI_MAP_ANONYMOUS
,
2716 if (sr_isError(sres
))
2719 #if defined(VGO_linux) && !defined(ENABLE_INNER)
2720 /* Doing the check only in linux not inner, as the below
2721 check can fail when the kludge above has been used. */
2722 if (sr_Res(sres
) != advised
) {
2723 /* I don't think this can happen. It means the kernel made a
2724 fixed map succeed but not at the requested location. Try to
2725 repair the damage, then return saying the mapping failed. */
2726 (void)ML_(am_do_munmap_NO_NOTIFY
)( sr_Res(sres
), length
);
2727 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2731 /* Ok, the mapping succeeded. Now notify the interval map. */
2732 init_nsegment( &seg
);
2734 seg
.start
= sr_Res(sres
);
2735 seg
.end
= seg
.start
+ VG_PGROUNDUP(length
) - 1;
2739 add_segment( &seg
);
2745 /* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */
2747 SysRes
VG_(am_shadow_alloc
)(SizeT size
)
2749 return VG_(am_mmap_anon_float_valgrind
)( size
);
2752 /* Map a file at an unconstrained address for V, and update the
2753 segment array accordingly. Use the provided flags */
2755 static SysRes
VG_(am_mmap_file_float_valgrind_flags
) ( SizeT length
, UInt prot
,
2757 Int fd
, Off64T offset
)
2766 HChar buf
[VKI_PATH_MAX
];
2768 /* Not allowable. */
2769 if (length
== 0 || !VG_IS_PAGE_ALIGNED(offset
))
2770 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2772 /* Ask for an advisory. If it's negative, fail immediately. */
2775 #if defined(VGA_arm) || defined(VGA_arm64) \
2776 || defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
2777 aspacem_assert(VKI_SHMLBA
>= VKI_PAGE_SIZE
);
2779 aspacem_assert(VKI_SHMLBA
== VKI_PAGE_SIZE
);
2781 if ((VKI_SHMLBA
> VKI_PAGE_SIZE
) && (VKI_MAP_SHARED
& flags
)) {
2782 /* arm-linux only. See ML_(generic_PRE_sys_shmat) and bug 290974 */
2783 req
.len
= length
+ VKI_SHMLBA
- VKI_PAGE_SIZE
;
2787 advised
= VG_(am_get_advisory
)( &req
, False
/*forClient*/, &ok
);
2789 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2790 if ((VKI_SHMLBA
> VKI_PAGE_SIZE
) && (VKI_MAP_SHARED
& flags
))
2791 advised
= VG_ROUNDUP(advised
, VKI_SHMLBA
);
2793 /* We have been advised that the mapping is allowable at the
2794 specified address. So hand it off to the kernel, and propagate
2795 any resulting failure immediately. */
2796 sres
= VG_(am_do_mmap_NO_NOTIFY
)(
2797 advised
, length
, prot
,
2801 if (sr_isError(sres
))
2804 if (sr_Res(sres
) != advised
) {
2805 /* I don't think this can happen. It means the kernel made a
2806 fixed map succeed but not at the requested location. Try to
2807 repair the damage, then return saying the mapping failed. */
2808 (void)ML_(am_do_munmap_NO_NOTIFY
)( sr_Res(sres
), length
);
2809 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2812 /* Ok, the mapping succeeded. Now notify the interval map. */
2813 init_nsegment( &seg
);
2815 seg
.start
= sr_Res(sres
);
2816 seg
.end
= seg
.start
+ VG_PGROUNDUP(length
) - 1;
2817 seg
.offset
= offset
;
2818 seg
.hasR
= toBool(prot
& VKI_PROT_READ
);
2819 seg
.hasW
= toBool(prot
& VKI_PROT_WRITE
);
2820 seg
.hasX
= toBool(prot
& VKI_PROT_EXEC
);
2821 if (ML_(am_get_fd_d_i_m
)(fd
, &dev
, &ino
, &mode
)) {
2826 if (ML_(am_resolve_filename
)(fd
, buf
, VKI_PATH_MAX
)) {
2827 seg
.fnIdx
= ML_(am_allocate_segname
)( buf
);
2829 #if defined(VGO_freebsd)
2830 seg
.isFF
= (flags
& VKI_MAP_FIXED
);
2832 add_segment( &seg
);
2837 /* Map privately a file at an unconstrained address for V, and update the
2838 segment array accordingly. This is used by V for transiently
2839 mapping in object files to read their debug info. */
2841 SysRes
VG_(am_mmap_file_float_valgrind
) ( SizeT length
, UInt prot
,
2842 Int fd
, Off64T offset
)
2844 return VG_(am_mmap_file_float_valgrind_flags
) (length
, prot
,
2845 VKI_MAP_FIXED
|VKI_MAP_PRIVATE
,
2849 SysRes
VG_(am_shared_mmap_file_float_valgrind
)
2850 ( SizeT length
, UInt prot
, Int fd
, Off64T offset
)
2852 return VG_(am_mmap_file_float_valgrind_flags
) (length
, prot
,
2853 VKI_MAP_FIXED
|VKI_MAP_SHARED
,
2857 /* Similar to VG_(am_mmap_anon_float_client) but also
2858 marks the segment as containing the client heap. This is for the benefit
2859 of the leak checker which needs to be able to identify such segments
2860 so as not to use them as sources of roots during leak checks. */
2861 SysRes
VG_(am_mmap_client_heap
) ( SizeT length
, Int prot
)
2863 return am_mmap_anon_float_client (length
, prot
, True
/* isCH */);
2866 /* --- --- munmap helper --- --- */
2869 SysRes
am_munmap_both_wrk ( /*OUT*/Bool
* need_discard
,
2870 Addr start
, SizeT len
, Bool forClient
)
2875 /* Be safe with this regardless of return path. */
2876 *need_discard
= False
;
2878 if (!VG_IS_PAGE_ALIGNED(start
))
2882 *need_discard
= False
;
2883 return VG_(mk_SysRes_Success
)( 0 );
2886 if (start
+ len
< len
)
2889 len
= VG_PGROUNDUP(len
);
2890 aspacem_assert(VG_IS_PAGE_ALIGNED(start
));
2891 aspacem_assert(VG_IS_PAGE_ALIGNED(len
));
2894 if (!VG_(am_is_valid_for_client_or_free_or_resvn
)
2895 ( start
, len
, VKI_PROT_NONE
))
2898 if (!VG_(am_is_valid_for_valgrind
)
2899 ( start
, len
, VKI_PROT_NONE
))
2903 d
= any_Ts_in_range( start
, len
);
2905 sres
= ML_(am_do_munmap_NO_NOTIFY
)( start
, len
);
2906 if (sr_isError(sres
))
2909 VG_(am_notify_munmap
)( start
, len
);
2915 return VG_(mk_SysRes_Error
)( VKI_EINVAL
);
2918 /* Unmap the given address range and update the segment array
2919 accordingly. This fails if the range isn't valid for the client.
2920 If *need_discard is True after a successful return, the caller
2921 should immediately discard translations from the specified address
2924 SysRes
VG_(am_munmap_client
)( /*OUT*/Bool
* need_discard
,
2925 Addr start
, SizeT len
)
2927 return am_munmap_both_wrk( need_discard
, start
, len
, True
/*client*/ );
2930 /* Unmap the given address range and update the segment array
2931 accordingly. This fails if the range isn't valid for valgrind. */
2933 SysRes
VG_(am_munmap_valgrind
)( Addr start
, SizeT len
)
2936 SysRes r
= am_munmap_both_wrk( &need_discard
,
2937 start
, len
, False
/*valgrind*/ );
2938 /* If this assertion fails, it means we allowed translations to be
2939 made from a V-owned section. Which shouldn't happen. */
2941 aspacem_assert(!need_discard
);
2945 /* Let (start,len) denote an area within a single Valgrind-owned
2946 segment (anon or file). Change the ownership of [start, start+len)
2947 to the client instead. Fails if (start,len) does not denote a
2948 suitable segment. */
2950 Bool
VG_(am_change_ownership_v_to_c
)( Addr start
, SizeT len
)
2956 if (start
+ len
< start
)
2958 if (!VG_IS_PAGE_ALIGNED(start
) || !VG_IS_PAGE_ALIGNED(len
))
2961 i
= find_nsegment_idx(start
);
2962 if (nsegments
[i
].kind
!= SkFileV
&& nsegments
[i
].kind
!= SkAnonV
)
2964 if (start
+len
-1 > nsegments
[i
].end
)
2967 aspacem_assert(start
>= nsegments
[i
].start
);
2968 aspacem_assert(start
+len
-1 <= nsegments
[i
].end
);
2970 /* This scheme is like how mprotect works: split the to-be-changed
2971 range into its own segment(s), then mess with them (it). There
2972 should be only one. */
2973 split_nsegments_lo_and_hi( start
, start
+len
-1, &iLo
, &iHi
);
2974 aspacem_assert(iLo
== iHi
);
2975 switch (nsegments
[iLo
].kind
) {
2976 case SkFileV
: nsegments
[iLo
].kind
= SkFileC
; break;
2977 case SkAnonV
: nsegments
[iLo
].kind
= SkAnonC
; break;
2978 default: aspacem_assert(0); /* can't happen - guarded above */
2985 /* Set the 'hasT' bit on the segment containing ADDR indicating that
2986 translations have or may have been taken from this segment. ADDR is
2987 expected to belong to a client segment. */
2988 void VG_(am_set_segment_hasT
)( Addr addr
)
2990 Int i
= find_nsegment_idx(addr
);
2991 SegKind kind
= nsegments
[i
].kind
;
2992 aspacem_assert(kind
== SkAnonC
|| kind
== SkFileC
|| kind
== SkShmC
);
2993 nsegments
[i
].hasT
= True
;
2997 /* --- --- --- reservations --- --- --- */
2999 /* Create a reservation from START .. START+LENGTH-1, with the given
3000 ShrinkMode. When checking whether the reservation can be created,
3001 also ensure that at least abs(EXTRA) extra free bytes will remain
3002 above (> 0) or below (< 0) the reservation.
3004 The reservation will only be created if it, plus the extra-zone,
3005 falls entirely within a single free segment. The returned Bool
3006 indicates whether the creation succeeded. */
3008 Bool
VG_(am_create_reservation
) ( Addr start
, SizeT length
,
3009 ShrinkMode smode
, SSizeT extra
)
3014 /* start and end, not taking into account the extra space. */
3015 Addr start1
= start
;
3016 Addr end1
= start
+ length
- 1;
3018 /* start and end, taking into account the extra space. */
3019 Addr start2
= start1
;
3022 if (extra
< 0) start2
+= extra
; // this moves it down :-)
3023 if (extra
> 0) end2
+= extra
;
3025 aspacem_assert(VG_IS_PAGE_ALIGNED(start
));
3026 aspacem_assert(VG_IS_PAGE_ALIGNED(start
+length
));
3027 aspacem_assert(VG_IS_PAGE_ALIGNED(start2
));
3028 aspacem_assert(VG_IS_PAGE_ALIGNED(end2
+1));
3030 startI
= find_nsegment_idx( start2
);
3031 endI
= find_nsegment_idx( end2
);
3033 /* If the start and end points don't fall within the same (free)
3034 segment, we're hosed. This does rely on the assumption that all
3035 mergeable adjacent segments can be merged, but add_segment()
3036 should ensure that. */
3040 if (nsegments
[startI
].kind
!= SkFree
)
3043 /* Looks good - make the reservation. */
3044 aspacem_assert(nsegments
[startI
].start
<= start2
);
3045 aspacem_assert(end2
<= nsegments
[startI
].end
);
3047 init_nsegment( &seg
);
3049 seg
.start
= start1
; /* NB: extra space is not included in the
3053 add_segment( &seg
);
3060 /* ADDR is the start address of an anonymous client mapping. This fn extends
3061 the mapping by DELTA bytes, taking the space from a reservation section
3062 which must be adjacent. If DELTA is positive, the segment is
3063 extended forwards in the address space, and the reservation must be
3064 the next one along. If DELTA is negative, the segment is extended
3065 backwards in the address space and the reservation must be the
3066 previous one. DELTA must be page aligned. abs(DELTA) must not
3067 exceed the size of the reservation segment minus one page, that is,
3068 the reservation segment after the operation must be at least one
3069 page long. The function returns a pointer to the resized segment. */
3071 const NSegment
*VG_(am_extend_into_adjacent_reservation_client
)( Addr addr
,
3081 segA
= find_nsegment_idx(addr
);
3082 aspacem_assert(nsegments
[segA
].kind
== SkAnonC
);
3085 return nsegments
+ segA
;
3087 prot
= (nsegments
[segA
].hasR
? VKI_PROT_READ
: 0)
3088 | (nsegments
[segA
].hasW
? VKI_PROT_WRITE
: 0)
3089 | (nsegments
[segA
].hasX
? VKI_PROT_EXEC
: 0);
3091 aspacem_assert(VG_IS_PAGE_ALIGNED(delta
<0 ? -delta
: delta
));
3095 /* Extending the segment forwards. */
3097 if (segR
>= nsegments_used
3098 || nsegments
[segR
].kind
!= SkResvn
3099 || nsegments
[segR
].smode
!= SmLower
)
3102 if (delta
+ VKI_PAGE_SIZE
3103 > (nsegments
[segR
].end
- nsegments
[segR
].start
+ 1)) {
3108 /* Extend the kernel's mapping. */
3109 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
3110 sres
= VG_(am_do_mmap_NO_NOTIFY
)(
3111 nsegments
[segR
].start
, delta
,
3113 VKI_MAP_FIXED
|VKI_MAP_PRIVATE
|VKI_MAP_ANONYMOUS
,
3116 if (sr_isError(sres
))
3117 return NULL
; /* kernel bug if this happens? */
3118 if (sr_Res(sres
) != nsegments
[segR
].start
) {
3119 /* kernel bug if this happens? */
3120 (void)ML_(am_do_munmap_NO_NOTIFY
)( sr_Res(sres
), delta
);
3124 /* Ok, success with the kernel. Update our structures. */
3125 nsegments
[segR
].start
+= delta
;
3126 nsegments
[segA
].end
+= delta
;
3127 aspacem_assert(nsegments
[segR
].start
<= nsegments
[segR
].end
);
3131 /* Extending the segment backwards. */
3133 aspacem_assert(delta
> 0);
3137 || nsegments
[segR
].kind
!= SkResvn
3138 || nsegments
[segR
].smode
!= SmUpper
)
3141 if (delta
+ VKI_PAGE_SIZE
3142 > (nsegments
[segR
].end
- nsegments
[segR
].start
+ 1)) {
3147 /* Extend the kernel's mapping. */
3148 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
3149 sres
= VG_(am_do_mmap_NO_NOTIFY
)(
3150 nsegments
[segA
].start
-delta
, delta
,
3152 VKI_MAP_FIXED
|VKI_MAP_PRIVATE
|VKI_MAP_ANONYMOUS
,
3155 if (sr_isError(sres
))
3156 return NULL
; /* kernel bug if this happens? */
3157 if (sr_Res(sres
) != nsegments
[segA
].start
-delta
) {
3158 /* kernel bug if this happens? */
3159 (void)ML_(am_do_munmap_NO_NOTIFY
)( sr_Res(sres
), delta
);
3163 /* Ok, success with the kernel. Update our structures. */
3164 nsegments
[segR
].end
-= delta
;
3165 nsegments
[segA
].start
-= delta
;
3166 aspacem_assert(nsegments
[segR
].start
<= nsegments
[segR
].end
);
3170 return nsegments
+ segA
;
3174 /* --- --- --- resizing/move a mapping --- --- --- */
3178 /* This function grows a client mapping in place into an adjacent free segment.
3179 ADDR is the client mapping's start address and DELTA, which must be page
3180 aligned, is the growth amount. The function returns a pointer to the
3181 resized segment. The function is used in support of mremap. */
3182 const NSegment
*VG_(am_extend_map_client
)( Addr addr
, SizeT delta
)
3188 VG_(am_show_nsegments
)(0, "VG_(am_extend_map_client) BEFORE");
3190 /* Get the client segment */
3191 Int ix
= find_nsegment_idx(addr
);
3192 aspacem_assert(ix
>= 0 && ix
< nsegments_used
);
3194 NSegment
*seg
= nsegments
+ ix
;
3196 aspacem_assert(seg
->kind
== SkFileC
|| seg
->kind
== SkAnonC
||
3197 seg
->kind
== SkShmC
);
3198 aspacem_assert(delta
> 0 && VG_IS_PAGE_ALIGNED(delta
)) ;
3200 xStart
= seg
->end
+1;
3201 aspacem_assert(xStart
+ delta
>= delta
); // no wrap-around
3203 /* The segment following the client segment must be a free segment and
3204 it must be large enough to cover the additional memory. */
3205 NSegment
*segf
= seg
+ 1;
3206 aspacem_assert(segf
->kind
== SkFree
);
3207 aspacem_assert(segf
->start
== xStart
);
3208 aspacem_assert(xStart
+ delta
- 1 <= segf
->end
);
3210 SizeT seg_old_len
= seg
->end
+ 1 - seg
->start
;
3213 sres
= ML_(am_do_extend_mapping_NO_NOTIFY
)( seg
->start
,
3215 seg_old_len
+ delta
);
3216 if (sr_isError(sres
)) {
3220 /* the area must not have moved */
3221 aspacem_assert(sr_Res(sres
) == seg
->start
);
3224 NSegment seg_copy
= *seg
;
3225 seg_copy
.end
+= delta
;
3226 add_segment( &seg_copy
);
3229 VG_(am_show_nsegments
)(0, "VG_(am_extend_map_client) AFTER");
3232 return nsegments
+ find_nsegment_idx(addr
);
3236 /* Remap the old address range to the new address range. Fails if any
3237 parameter is not page aligned, if the either size is zero, if any
3238 wraparound is implied, if the old address range does not fall
3239 entirely within a single segment, if the new address range overlaps
3240 with the old one, or if the old address range is not a valid client
3241 mapping. If *need_discard is True after a successful return, the
3242 caller should immediately discard translations from both specified
3245 Bool
VG_(am_relocate_nooverlap_client
)( /*OUT*/Bool
* need_discard
,
3246 Addr old_addr
, SizeT old_len
,
3247 Addr new_addr
, SizeT new_len
)
3253 if (old_len
== 0 || new_len
== 0)
3256 if (!VG_IS_PAGE_ALIGNED(old_addr
) || !VG_IS_PAGE_ALIGNED(old_len
)
3257 || !VG_IS_PAGE_ALIGNED(new_addr
) || !VG_IS_PAGE_ALIGNED(new_len
))
3260 if (old_addr
+ old_len
< old_addr
3261 || new_addr
+ new_len
< new_addr
)
3264 if (old_addr
+ old_len
- 1 < new_addr
3265 || new_addr
+ new_len
- 1 < old_addr
) {
3270 iLo
= find_nsegment_idx( old_addr
);
3271 iHi
= find_nsegment_idx( old_addr
+ old_len
- 1 );
3275 if (nsegments
[iLo
].kind
!= SkFileC
&& nsegments
[iLo
].kind
!= SkAnonC
&&
3276 nsegments
[iLo
].kind
!= SkShmC
)
3279 sres
= ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY
)
3280 ( old_addr
, old_len
, new_addr
, new_len
);
3281 if (sr_isError(sres
)) {
3285 aspacem_assert(sr_Res(sres
) == new_addr
);
3288 *need_discard
= any_Ts_in_range( old_addr
, old_len
)
3289 || any_Ts_in_range( new_addr
, new_len
);
3291 seg
= nsegments
[iLo
];
3293 /* Mark the new area based on the old seg. */
3294 if (seg
.kind
== SkFileC
) {
3295 seg
.offset
+= ((ULong
)old_addr
) - ((ULong
)seg
.start
);
3297 seg
.start
= new_addr
;
3298 seg
.end
= new_addr
+ new_len
- 1;
3299 add_segment( &seg
);
3301 /* Create a free hole in the old location. */
3302 init_nsegment( &seg
);
3303 seg
.start
= old_addr
;
3304 seg
.end
= old_addr
+ old_len
- 1;
3305 /* See comments in VG_(am_notify_munmap) about this SkResvn vs
3307 if (old_addr
> aspacem_maxAddr
3308 && /* check previous comparison is meaningful */
3309 aspacem_maxAddr
< Addr_MAX
)
3314 add_segment( &seg
);
3320 #endif // HAVE_MREMAP
3323 #if defined(VGO_linux)
3325 /*-----------------------------------------------------------------*/
3327 /*--- A simple parser for /proc/self/maps on Linux 2.4.X/2.6.X. ---*/
3328 /*--- Almost completely independent of the stuff above. The ---*/
3329 /*--- only function it 'exports' to the code above this comment ---*/
3330 /*--- is parse_procselfmaps. ---*/
3332 /*-----------------------------------------------------------------*/
3334 /*------BEGIN-procmaps-parser-for-Linux--------------------------*/
/* Size of a smallish table used to read /proc/self/map entries. */
#define M_PROCMAP_BUF 100000

/* static ... to keep it out of the stack frame. */
static HChar procmap_buf[M_PROCMAP_BUF];

/* Records length of /proc/self/maps read into procmap_buf. */
static Int buf_n_tot;
3347 static Int
hexdigit ( HChar c
)
3349 if (c
>= '0' && c
<= '9') return (Int
)(c
- '0');
3350 if (c
>= 'a' && c
<= 'f') return 10 + (Int
)(c
- 'a');
3351 if (c
>= 'A' && c
<= 'F') return 10 + (Int
)(c
- 'A');
3355 static Int
decdigit ( HChar c
)
3357 if (c
>= '0' && c
<= '9') return (Int
)(c
- '0');
3361 static Int
readchar ( const HChar
* buf
, HChar
* ch
)
3363 if (*buf
== 0) return 0;
3368 static Int
readhex ( const HChar
* buf
, UWord
* val
)
3370 /* Read a word-sized hex number. */
3373 while (hexdigit(*buf
) >= 0) {
3374 *val
= (*val
<< 4) + hexdigit(*buf
);
3380 static Int
readhex64 ( const HChar
* buf
, ULong
* val
)
3382 /* Read a potentially 64-bit hex number. */
3385 while (hexdigit(*buf
) >= 0) {
3386 *val
= (*val
<< 4) + hexdigit(*buf
);
3392 static Int
readdec64 ( const HChar
* buf
, ULong
* val
)
3396 while (decdigit(*buf
) >= 0) {
3397 *val
= (*val
* 10) + decdigit(*buf
);
3404 /* Get the contents of /proc/self/maps into a static buffer. If
3405 there's a syntax error, it won't fit, or other failure, just
3408 static void read_procselfmaps_into_buf ( void )
3413 /* Read the initial memory mapping from the /proc filesystem. */
3414 fd
= ML_(am_open
)( "/proc/self/maps", VKI_O_RDONLY
, 0 );
3416 ML_(am_barf
)("can't open /proc/self/maps");
3420 n_chunk
= ML_(am_read
)( sr_Res(fd
), &procmap_buf
[buf_n_tot
],
3421 M_PROCMAP_BUF
- buf_n_tot
);
3423 buf_n_tot
+= n_chunk
;
3424 } while ( n_chunk
> 0 && buf_n_tot
< M_PROCMAP_BUF
);
3426 ML_(am_close
)(sr_Res(fd
));
3428 if (buf_n_tot
>= M_PROCMAP_BUF
-5)
3429 ML_(am_barf_toolow
)("M_PROCMAP_BUF");
3431 ML_(am_barf
)("I/O error on /proc/self/maps");
3433 procmap_buf
[buf_n_tot
] = 0;
3436 /* Parse /proc/self/maps. For each map entry, call
3437 record_mapping, passing it, in this order:
3439 start address in memory
3441 page protections (using the VKI_PROT_* flags)
3442 mapped file device and inode
3443 offset in file, or zero if no file
3444 filename, zero terminated, or NULL if no file
3446 So the sig of the called fn might be
3448 void (*record_mapping)( Addr start, SizeT size, UInt prot,
3449 UInt dev, UInt info,
3450 ULong foffset, UChar* filename )
3452 Note that the supplied filename is transiently stored; record_mapping
3453 should make a copy if it wants to keep it.
3455 Nb: it is important that this function does not alter the contents of
3458 static void parse_procselfmaps (
3459 void (*record_mapping
)( Addr addr
, SizeT len
, UInt prot
,
3460 ULong dev
, ULong ino
, Off64T offset
,
3461 const HChar
* filename
),
3462 void (*record_gap
)( Addr addr
, SizeT len
)
3466 Addr start
, endPlusOne
, gapStart
;
3468 HChar rr
, ww
, xx
, pp
, ch
, tmp
;
3471 ULong foffset
, dev
, ino
;
3473 foffset
= ino
= 0; /* keep gcc-4.1.0 happy */
3475 read_procselfmaps_into_buf();
3477 aspacem_assert('\0' != procmap_buf
[0] && 0 != buf_n_tot
);
3480 VG_(debugLog
)(0, "procselfmaps", "raw:\n%s\n", procmap_buf
);
3482 /* Ok, it's safely aboard. Parse the entries. */
3484 gapStart
= Addr_MIN
;
3486 if (i
>= buf_n_tot
) break;
3488 /* Read (without fscanf :) the pattern %16x-%16x %c%c%c%c %16x %2x:%2x %d */
3489 j
= readhex(&procmap_buf
[i
], &start
);
3490 if (j
> 0) i
+= j
; else goto syntaxerror
;
3491 j
= readchar(&procmap_buf
[i
], &ch
);
3492 if (j
== 1 && ch
== '-') i
+= j
; else goto syntaxerror
;
3493 j
= readhex(&procmap_buf
[i
], &endPlusOne
);
3494 if (j
> 0) i
+= j
; else goto syntaxerror
;
3496 j
= readchar(&procmap_buf
[i
], &ch
);
3497 if (j
== 1 && ch
== ' ') i
+= j
; else goto syntaxerror
;
3499 j
= readchar(&procmap_buf
[i
], &rr
);
3500 if (j
== 1 && (rr
== 'r' || rr
== '-')) i
+= j
; else goto syntaxerror
;
3501 j
= readchar(&procmap_buf
[i
], &ww
);
3502 if (j
== 1 && (ww
== 'w' || ww
== '-')) i
+= j
; else goto syntaxerror
;
3503 j
= readchar(&procmap_buf
[i
], &xx
);
3504 if (j
== 1 && (xx
== 'x' || xx
== '-')) i
+= j
; else goto syntaxerror
;
3505 /* This field is the shared/private flag */
3506 j
= readchar(&procmap_buf
[i
], &pp
);
3507 if (j
== 1 && (pp
== 'p' || pp
== '-' || pp
== 's'))
3508 i
+= j
; else goto syntaxerror
;
3510 j
= readchar(&procmap_buf
[i
], &ch
);
3511 if (j
== 1 && ch
== ' ') i
+= j
; else goto syntaxerror
;
3513 j
= readhex64(&procmap_buf
[i
], &foffset
);
3514 if (j
> 0) i
+= j
; else goto syntaxerror
;
3516 j
= readchar(&procmap_buf
[i
], &ch
);
3517 if (j
== 1 && ch
== ' ') i
+= j
; else goto syntaxerror
;
3519 j
= readhex(&procmap_buf
[i
], &maj
);
3520 if (j
> 0) i
+= j
; else goto syntaxerror
;
3521 j
= readchar(&procmap_buf
[i
], &ch
);
3522 if (j
== 1 && ch
== ':') i
+= j
; else goto syntaxerror
;
3523 j
= readhex(&procmap_buf
[i
], &min
);
3524 if (j
> 0) i
+= j
; else goto syntaxerror
;
3526 j
= readchar(&procmap_buf
[i
], &ch
);
3527 if (j
== 1 && ch
== ' ') i
+= j
; else goto syntaxerror
;
3529 j
= readdec64(&procmap_buf
[i
], &ino
);
3530 if (j
> 0) i
+= j
; else goto syntaxerror
;
3535 VG_(debugLog
)(0, "Valgrind:",
3536 "FATAL: syntax error reading /proc/self/maps\n");
3543 for (; k
<= i
; k
++) {
3544 buf50
[m
] = procmap_buf
[k
];
3548 VG_(debugLog
)(0, "procselfmaps", "Last 50 chars: '%s'\n", buf50
);
3554 aspacem_assert(i
< buf_n_tot
);
3556 /* Try and find the name of the file mapped to this segment, if
3557 it exists. Note that file names can contain spaces. */
3559 // Move i to the next non-space char, which should be either a '/',
3560 // a '[', or a newline.
3561 while (procmap_buf
[i
] == ' ') i
++;
3563 // Move i_eol to the end of the line.
3565 while (procmap_buf
[i_eol
] != '\n') i_eol
++;
3567 // If there's a filename...
3568 if (procmap_buf
[i
] == '/') {
3569 /* Minor hack: put a '\0' at the filename end for the call to
3570 'record_mapping', then restore the old char with 'tmp'. */
3571 filename
= &procmap_buf
[i
];
3572 tmp
= filename
[i_eol
- i
];
3573 filename
[i_eol
- i
] = '\0';
3581 if (rr
== 'r') prot
|= VKI_PROT_READ
;
3582 if (ww
== 'w') prot
|= VKI_PROT_WRITE
;
3583 if (xx
== 'x') prot
|= VKI_PROT_EXEC
;
3585 /* Linux has two ways to encode a device number when it
3586 is exposed to user space (via fstat etc). The old way
3587 is the traditional unix scheme that produces a 16 bit
3588 device number with the top 8 being the major number and
3589 the bottom 8 the minor number.
3591 The new scheme allows for a 12 bit major number and
3592 a 20 bit minor number by using a 32 bit device number
3593 and putting the top 12 bits of the minor number into
3594 the top 12 bits of the device number thus leaving an
3595 extra 4 bits for the major number.
3597 If the minor and major number are both single byte
3598 values then both schemes give the same result so we
3599 use the new scheme here in case either number is
3600 outside the 0-255 range and then use fstat64 when
3601 available (or fstat on 64 bit systems) so that we
3602 should always have a new style device number and
3603 everything should match. */
3604 dev
= (min
& 0xff) | (maj
<< 8) | ((min
& ~0xff) << 12);
3606 if (record_gap
&& gapStart
< start
)
3607 (*record_gap
) ( gapStart
, start
-gapStart
);
3609 if (record_mapping
&& start
< endPlusOne
)
3610 (*record_mapping
) ( start
, endPlusOne
-start
,
3612 foffset
, filename
);
3615 filename
[i_eol
- i
] = tmp
;
3619 gapStart
= endPlusOne
;
3622 # if defined(VGP_arm_linux)
3623 /* ARM puts code at the end of memory that contains processor
3624 specific stuff (cmpxchg, getting the thread local storage, etc.)
3625 This isn't specified in /proc/self/maps, so do it here. This
3626 kludgery causes the view of memory, as presented to
3627 record_gap/record_mapping, to actually reflect reality. IMO
3628 (JRS, 2010-Jan-03) the fact that /proc/.../maps does not list
3629 the commpage should be regarded as a bug in the kernel. */
3630 { const Addr commpage_start
= ARM_LINUX_FAKE_COMMPAGE_START
;
3631 const Addr commpage_end1
= ARM_LINUX_FAKE_COMMPAGE_END1
;
3632 if (gapStart
< commpage_start
) {
3634 (*record_gap
)( gapStart
, commpage_start
- gapStart
);
3636 (*record_mapping
)( commpage_start
, commpage_end1
- commpage_start
,
3637 VKI_PROT_READ
|VKI_PROT_EXEC
,
3638 0/*dev*/, 0/*ino*/, 0/*foffset*/,
3640 gapStart
= commpage_end1
;
3645 if (record_gap
&& gapStart
< Addr_MAX
)
3646 (*record_gap
) ( gapStart
, Addr_MAX
- gapStart
+ 1 );
3649 /*------END-procmaps-parser-for-Linux----------------------------*/
3651 /*------BEGIN-procmaps-parser-for-Darwin-------------------------*/
3653 #elif defined(VGO_darwin)
3654 #include <mach/mach.h>
3655 #include <mach/mach_vm.h>
3657 static unsigned int mach2vki(unsigned int vm_prot
)
3660 ((vm_prot
& VM_PROT_READ
) ? VKI_PROT_READ
: 0) |
3661 ((vm_prot
& VM_PROT_WRITE
) ? VKI_PROT_WRITE
: 0) |
3662 ((vm_prot
& VM_PROT_EXECUTE
) ? VKI_PROT_EXEC
: 0) ;
/* Count of Mach VM region queries made, reported alongside sync stats. */
static UInt stats_machcalls = 0;
3667 static void parse_procselfmaps (
3668 void (*record_mapping
)( Addr addr
, SizeT len
, UInt prot
,
3669 ULong dev
, ULong ino
, Off64T offset
,
3670 const HChar
* filename
),
3671 void (*record_gap
)( Addr addr
, SizeT len
)
3682 mach_vm_address_t addr
= iter
;
3683 mach_vm_size_t size
;
3684 vm_region_submap_short_info_data_64_t info
;
3688 mach_msg_type_number_t info_count
3689 = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64
;
3691 kr
= mach_vm_region_recurse(mach_task_self(), &addr
, &size
, &depth
,
3692 (vm_region_info_t
)&info
, &info_count
);
3695 if (info
.is_submap
) {
3703 if (addr
> last
&& record_gap
) {
3704 (*record_gap
)(last
, addr
- last
);
3706 if (record_mapping
) {
3707 (*record_mapping
)(addr
, size
, mach2vki(info
.protection
),
3708 0, 0, info
.offset
, NULL
);
3713 if ((Addr
)-1 > last
&& record_gap
)
3714 (*record_gap
)(last
, (Addr
)-1 - last
);
// Urr.  So much for thread safety.
// Shared state between VG_(get_changed_segments) and its two
// parse_procselfmaps callbacks below.
static Bool        css_overflowed;   // True if css_local[] was too small
static ChangedSeg* css_local;        // caller-supplied output array
static Int         css_size_local;   // capacity of css_local[]
static Int         css_used_local;   // entries filled so far
3723 static Addr
Addr__max ( Addr a
, Addr b
) { return a
> b
? a
: b
; }
3724 static Addr
Addr__min ( Addr a
, Addr b
) { return a
< b
? a
: b
; }
3726 static void add_mapping_callback(Addr addr
, SizeT len
, UInt prot
,
3727 ULong dev
, ULong ino
, Off64T offset
,
3728 const HChar
*filename
)
3730 // derived from sync_check_mapping_callback()
3732 /* JRS 2012-Mar-07: this all seems very dubious to me. It would be
3733 safer to see if we can find, in V's segment collection, one
3734 single segment that completely covers the range [addr, +len)
3735 (and possibly more), and that has the exact same other
3736 properties (prot, dev, ino, offset, etc) as the data presented
3737 here. If found, we just skip. Otherwise add the data presented
3738 here into css_local[]. */
3742 if (len
== 0) return;
3744 /* The kernel should not give us wraparounds. */
3745 aspacem_assert(addr
<= addr
+ len
- 1);
3747 iLo
= find_nsegment_idx( addr
);
3748 iHi
= find_nsegment_idx( addr
+ len
- 1 );
3750 /* NSegments iLo .. iHi inclusive should agree with the presented
3752 for (i
= iLo
; i
<= iHi
; i
++) {
3756 if (nsegments
[i
].kind
== SkAnonV
|| nsegments
[i
].kind
== SkFileV
) {
3757 /* Ignore V regions */
3760 else if (nsegments
[i
].kind
== SkFree
|| nsegments
[i
].kind
== SkResvn
) {
3761 /* Add mapping for SkResvn regions */
3762 ChangedSeg
* cs
= &css_local
[css_used_local
];
3763 if (css_used_local
< css_size_local
) {
3764 cs
->is_added
= True
;
3766 cs
->end
= addr
+ len
- 1;
3768 cs
->offset
= offset
;
3771 css_overflowed
= True
;
3776 else if (nsegments
[i
].kind
== SkAnonC
||
3777 nsegments
[i
].kind
== SkFileC
||
3778 nsegments
[i
].kind
== SkShmC
)
3780 /* Check permissions on client regions */
3783 if (nsegments
[i
].hasR
) seg_prot
|= VKI_PROT_READ
;
3784 if (nsegments
[i
].hasW
) seg_prot
|= VKI_PROT_WRITE
;
3785 # if defined(VGA_x86)
3786 // GrP fixme sloppyXcheck
3787 // darwin: kernel X ignored and spuriously changes? (vm_copy)
3788 seg_prot
|= (prot
& VKI_PROT_EXEC
);
3790 if (nsegments
[i
].hasX
) seg_prot
|= VKI_PROT_EXEC
;
3792 if (seg_prot
!= prot
) {
3793 if (VG_(clo_trace_syscalls
))
3794 VG_(debugLog
)(0,"aspacem","region %p..%p permission "
3795 "mismatch (kernel %x, V %x)\n",
3796 (void*)nsegments
[i
].start
,
3797 (void*)(nsegments
[i
].end
+1), prot
, seg_prot
);
3798 /* Add mapping for regions with protection changes */
3799 ChangedSeg
* cs
= &css_local
[css_used_local
];
3800 if (css_used_local
< css_size_local
) {
3801 cs
->is_added
= True
;
3803 cs
->end
= addr
+ len
- 1;
3805 cs
->offset
= offset
;
3808 css_overflowed
= True
;
3820 static void remove_mapping_callback(Addr addr
, SizeT len
)
3822 // derived from sync_check_gap_callback()
3829 /* The kernel should not give us wraparounds. */
3830 aspacem_assert(addr
<= addr
+ len
- 1);
3832 iLo
= find_nsegment_idx( addr
);
3833 iHi
= find_nsegment_idx( addr
+ len
- 1 );
3835 /* NSegments iLo .. iHi inclusive should agree with the presented data. */
3836 for (i
= iLo
; i
<= iHi
; i
++) {
3837 if (nsegments
[i
].kind
!= SkFree
&& nsegments
[i
].kind
!= SkResvn
) {
3838 /* V has a mapping, kernel doesn't. Add to css_local[],
3839 directives to chop off the part of the V mapping that
3840 falls within the gap that the kernel tells us is
3842 ChangedSeg
* cs
= &css_local
[css_used_local
];
3843 if (css_used_local
< css_size_local
) {
3844 cs
->is_added
= False
;
3845 cs
->start
= Addr__max(nsegments
[i
].start
, addr
);
3846 cs
->end
= Addr__min(nsegments
[i
].end
, addr
+ len
- 1);
3847 aspacem_assert(VG_IS_PAGE_ALIGNED(cs
->start
));
3848 aspacem_assert(VG_IS_PAGE_ALIGNED(cs
->end
+1));
3849 /* I don't think the following should fail. But if it
3850 does, just omit the css_used_local++ in the cases where
3852 aspacem_assert(cs
->start
< cs
->end
);
3857 css_overflowed
= True
;
3864 // Returns False if 'css' wasn't big enough.
3865 Bool
VG_(get_changed_segments
)(
3866 const HChar
* when
, const HChar
* where
, /*OUT*/ChangedSeg
* css
,
3867 Int css_size
, /*OUT*/Int
* css_used
)
3869 static UInt stats_synccalls
= 1;
3870 aspacem_assert(when
&& where
);
3873 VG_(debugLog
)(0,"aspacem",
3874 "[%u,%u] VG_(get_changed_segments)(%s, %s)\n",
3875 stats_synccalls
++, stats_machcalls
, when
, where
3878 css_overflowed
= False
;
3880 css_size_local
= css_size
;
3883 // Get the list of segs that need to be added/removed.
3884 parse_procselfmaps(&add_mapping_callback
, &remove_mapping_callback
);
3886 *css_used
= css_used_local
;
3888 if (css_overflowed
) {
3889 aspacem_assert(css_used_local
== css_size_local
);
3892 return !css_overflowed
;
3896 /*------END-procmaps-parser-for-Darwin---------------------------*/
3898 /*------BEGIN-procmaps-parser-for-Freebsd------------------------*/
3899 #elif defined(VGO_freebsd)
/* Size of a smallish table used to read /proc/self/map entries. */
#define M_PROCMAP_BUF 10485760 /* 10M */

/* static ... to keep it out of the stack frame. */
static char procmap_buf[M_PROCMAP_BUF];
3907 static void parse_procselfmaps (
3908 void (*record_mapping
)( Addr addr
, SizeT len
, UInt prot
,
3909 ULong dev
, ULong ino
, Off64T offset
,
3910 const HChar
* filename
),
3911 void (*record_gap
)( Addr addr
, SizeT len
)
3914 Addr start
, endPlusOne
, gapStart
;
3918 ULong foffset
, dev
, ino
;
3919 struct vki_kinfo_vmentry
*kve
;
3924 foffset
= ino
= 0; /* keep gcc-4.1.0 happy */
3926 oid
[0] = VKI_CTL_KERN
;
3927 oid
[1] = VKI_KERN_PROC
;
3928 oid
[2] = VKI_KERN_PROC_VMMAP
;
3929 oid
[3] = sr_Res(VG_(do_syscall0
)(__NR_getpid
));
3930 len
= sizeof(procmap_buf
);
3932 sres
= VG_(do_syscall6
)(__NR___sysctl
, (UWord
)oid
, 4, (UWord
)procmap_buf
,
3934 if (sr_isError(sres
)) {
3935 VG_(debugLog
)(0, "procselfmaps", "sysctl %lu\n", sr_Err(sres
));
3938 gapStart
= Addr_MIN
;
3940 while (p
< (char *)procmap_buf
+ len
) {
3941 kve
= (struct vki_kinfo_vmentry
*)p
;
3942 start
= (UWord
)kve
->kve_start
;
3943 endPlusOne
= (UWord
)kve
->kve_end
;
3944 foffset
= kve
->kve_offset
;
3945 filename
= kve
->kve_path
;
3946 dev
= kve
->kve_vn_fsid_freebsd11
;
3947 ino
= kve
->kve_fileid
;
3948 if (filename
[0] != '/') {
3954 if (kve
->kve_protection
& VKI_KVME_PROT_READ
) prot
|= VKI_PROT_READ
;
3955 if (kve
->kve_protection
& VKI_KVME_PROT_WRITE
) prot
|= VKI_PROT_WRITE
;
3956 if (kve
->kve_protection
& VKI_KVME_PROT_EXEC
) prot
|= VKI_PROT_EXEC
;
3958 if (record_gap
&& gapStart
< start
)
3959 (*record_gap
) ( gapStart
, start
-gapStart
);
3961 if (record_mapping
&& start
< endPlusOne
)
3962 (*record_mapping
) ( start
, endPlusOne
-start
,
3964 foffset
, filename
);
3965 gapStart
= endPlusOne
;
3966 p
+= kve
->kve_structsize
;
3969 if (record_gap
&& gapStart
< Addr_MAX
)
3970 (*record_gap
) ( gapStart
, Addr_MAX
- gapStart
+ 1 );
3973 /*------END-procmaps-parser-for-Freebsd--------------------------*/
3975 /*------BEGIN-procmaps-parser-for-Solaris------------------------*/
3977 #elif defined(VGO_solaris)
3979 /* Note: /proc/self/xmap contains extended information about already
3980 materialized mappings whereas /proc/self/rmap contains information about
3981 all mappings including reserved but yet-to-materialize mappings (mmap'ed
3982 with MAP_NORESERVE flag, such as thread stacks). But /proc/self/rmap does
3983 not contain extended information found in /proc/self/xmap. Therefore
3984 information from both sources need to be combined.
3995 HChar filename
[VKI_PATH_MAX
];
3998 static SizeT
read_proc_file(const HChar
*filename
, HChar
*buf
,
3999 SizeT buf_size
, const HChar
*buf_size_name
,
4002 SysRes res
= ML_(am_open
)(filename
, VKI_O_RDONLY
, 0);
4003 if (sr_isError(res
)) {
4005 ML_(am_sprintf
)(message
, "Cannot open %s.", filename
);
4006 ML_(am_barf
)(message
);
4008 Int fd
= sr_Res(res
);
4010 Int r
= ML_(am_read
)(fd
, buf
, buf_size
);
4014 ML_(am_sprintf
)(message
, "I/O error on %s.", filename
);
4015 ML_(am_barf
)(message
);
4019 ML_(am_barf_toolow
)(buf_size_name
);
4021 if (r
% entry_size
!= 0) {
4023 ML_(am_sprintf
)(message
, "Bogus values read from %s.", filename
);
4024 ML_(am_barf
)(message
);
4027 return r
/ entry_size
;
4030 static Mapping
*next_xmap(const HChar
*buffer
, SizeT entries
, SizeT
*idx
,
4033 aspacem_assert(idx
);
4034 aspacem_assert(mapping
);
4036 if (*idx
>= entries
)
4037 return NULL
; /* No more entries */
4039 const vki_prxmap_t
*map
= (const vki_prxmap_t
*)buffer
+ *idx
;
4041 mapping
->addr
= map
->pr_vaddr
;
4042 mapping
->size
= map
->pr_size
;
4045 if (map
->pr_mflags
& VKI_MA_READ
)
4046 mapping
->prot
|= VKI_PROT_READ
;
4047 if (map
->pr_mflags
& VKI_MA_WRITE
)
4048 mapping
->prot
|= VKI_PROT_WRITE
;
4049 if (map
->pr_mflags
& VKI_MA_EXEC
)
4050 mapping
->prot
|= VKI_PROT_EXEC
;
4052 if (map
->pr_dev
!= VKI_PRNODEV
) {
4053 mapping
->dev
= map
->pr_dev
;
4054 mapping
->ino
= map
->pr_ino
;
4055 mapping
->foffset
= map
->pr_offset
;
4060 mapping
->foffset
= 0;
4063 /* Try to get the filename. */
4064 mapping
->filename
[0] = '\0';
4065 if (map
->pr_mapname
[0] != '\0') {
4066 ML_(am_sprintf
)(mapping
->filename
, "/proc/self/path/%s",
4068 Int r
= ML_(am_readlink
)(mapping
->filename
, mapping
->filename
,
4069 sizeof(mapping
->filename
) - 1);
4071 /* If Valgrind is executed in a non-global zone and the link in
4072 /proc/self/path/ represents a file that is available through lofs
4073 from a global zone then the kernel may not be able to resolve the
4076 In such a case, return a corresponding /proc/self/object/ file to
4077 allow Valgrind to read the file if it is necessary.
4079 This can create some discrepancy for the sanity check. For
4080 instance, if a client program mmaps some file then the address
4081 space manager will have a correct zone-local name of that file,
4082 but the sanity check will receive a different file name from this
4083 code. This currently does not represent a problem because the
4084 sanity check ignores the file names (it uses device and inode
4085 numbers for the comparison).
4087 ML_(am_sprintf
)(mapping
->filename
, "/proc/self/object/%s",
4091 aspacem_assert(r
>= 0);
4092 mapping
->filename
[r
] = '\0';
4100 static Mapping
*next_rmap(const HChar
*buffer
, SizeT entries
, SizeT
*idx
,
4103 aspacem_assert(idx
);
4104 aspacem_assert(mapping
);
4106 if (*idx
>= entries
)
4107 return NULL
; /* No more entries */
4109 const vki_prmap_t
*map
= (const vki_prmap_t
*)buffer
+ *idx
;
4111 mapping
->addr
= map
->pr_vaddr
;
4112 mapping
->size
= map
->pr_size
;
4115 if (map
->pr_mflags
& VKI_MA_READ
)
4116 mapping
->prot
|= VKI_PROT_READ
;
4117 if (map
->pr_mflags
& VKI_MA_WRITE
)
4118 mapping
->prot
|= VKI_PROT_WRITE
;
4119 if (map
->pr_mflags
& VKI_MA_EXEC
)
4120 mapping
->prot
|= VKI_PROT_EXEC
;
4124 mapping
->foffset
= 0;
4125 mapping
->filename
[0] = '\0';
4131 /* Used for two purposes:
4132 1. Establish initial mappings upon the process startup
4133 2. Check mappings during aspacemgr sanity check
4135 static void parse_procselfmaps (
4136 void (*record_mapping
)( Addr addr
, SizeT len
, UInt prot
,
4137 ULong dev
, ULong ino
, Off64T offset
,
4138 const HChar
*filename
),
4139 void (*record_gap
)( Addr addr
, SizeT len
)
4142 Addr start
= Addr_MIN
;
4143 Addr gap_start
= Addr_MIN
;
4145 #define M_XMAP_BUF (VG_N_SEGMENTS * sizeof(vki_prxmap_t))
4146 /* Static to keep it out of stack frame... */
4147 static HChar xmap_buf
[M_XMAP_BUF
];
4148 const Mapping
*xmap
= NULL
;
4149 SizeT xmap_index
= 0; /* Current entry */
4151 Mapping xmap_mapping
;
4154 #define M_RMAP_BUF (VG_N_SEGMENTS * sizeof(vki_prmap_t))
4155 static HChar rmap_buf
[M_RMAP_BUF
];
4156 const Mapping
*rmap
= NULL
;
4157 SizeT rmap_index
= 0; /* Current entry */
4159 Mapping rmap_mapping
;
4162 /* Read fully /proc/self/xmap and /proc/self/rmap. */
4163 xmap_entries
= read_proc_file("/proc/self/xmap", xmap_buf
, M_XMAP_BUF
,
4164 "M_XMAP_BUF", sizeof(vki_prxmap_t
));
4166 rmap_entries
= read_proc_file("/proc/self/rmap", rmap_buf
, M_RMAP_BUF
,
4167 "M_RMAP_BUF", sizeof(vki_prmap_t
));
4169 /* Get the first xmap and rmap. */
4170 advance_xmap
= True
;
4171 advance_rmap
= True
;
4174 /* Get next xmap or rmap if necessary. */
4176 xmap
= next_xmap(xmap_buf
, xmap_entries
, &xmap_index
, &xmap_mapping
);
4177 advance_xmap
= False
;
4180 rmap
= next_rmap(rmap_buf
, rmap_entries
, &rmap_index
, &rmap_mapping
);
4181 advance_rmap
= False
;
4184 /* Check if the end has been reached. */
4190 aspacem_assert(start
<= xmap
->addr
);
4191 aspacem_assert(rmap
->addr
<= xmap
->addr
);
4194 if (xmap
!= NULL
&& start
== xmap
->addr
) {
4195 /* xmap mapping reached. */
4196 aspacem_assert(xmap
->addr
>= rmap
->addr
&&
4197 xmap
->addr
+ xmap
->size
<= rmap
->addr
+ rmap
->size
);
4198 aspacem_assert(xmap
->prot
== rmap
->prot
);
4200 if (record_mapping
!= NULL
)
4201 (*record_mapping
)(xmap
->addr
, xmap
->size
, xmap
->prot
, xmap
->dev
,
4202 xmap
->ino
, xmap
->foffset
,
4203 (xmap
->filename
[0] != '\0') ?
4204 xmap
->filename
: NULL
);
4206 start
= xmap
->addr
+ xmap
->size
;
4207 advance_xmap
= True
;
4209 else if (start
>= rmap
->addr
) {
4210 /* Reserved-only part. */
4211 /* First calculate size until the end of this reserved mapping... */
4212 SizeT size
= rmap
->addr
+ rmap
->size
- start
;
4213 /* ... but shrink it if some xmap is in a way. */
4214 if (xmap
!= NULL
&& size
> xmap
->addr
- start
)
4215 size
= xmap
->addr
- start
;
4217 if (record_mapping
!= NULL
)
4218 (*record_mapping
)(start
, size
, rmap
->prot
, 0, 0, 0, NULL
);
4223 if (record_gap
!= NULL
&& gap_start
< start
)
4224 (*record_gap
)(gap_start
, start
- gap_start
);
4228 if (rmap
->addr
+ rmap
->size
<= start
)
4229 advance_rmap
= True
;
4234 if (record_gap
!= NULL
&& gap_start
< Addr_MAX
)
4235 (*record_gap
)(gap_start
, Addr_MAX
- gap_start
+ 1);
/* parse_procselfmaps() callbacks do not allow for easy thread safety. */
/* Out-parameters of new_segment_found_callback(), read by
   VG_(am_search_for_new_segment). */
static Addr  found_addr;
static SizeT found_size;
static UInt  found_prot;
4243 /* Reports a new mapping into variables above. */
4244 static void new_segment_found_callback(Addr addr
, SizeT len
, UInt prot
,
4245 ULong dev
, ULong ino
, Off64T offset
, const HChar
*filename
)
4247 aspacem_assert(addr
<= addr
+ len
- 1);
4249 Int iLo
= find_nsegment_idx(addr
);
4250 Int iHi
= find_nsegment_idx(addr
+ len
- 1);
4251 aspacem_assert(iLo
<= iHi
);
4252 aspacem_assert(nsegments
[iLo
].start
<= addr
);
4253 aspacem_assert(nsegments
[iHi
].end
>= addr
+ len
- 1);
4255 /* Do not perform any sanity checks. That is done in other places.
4256 Just find if a reported mapping is found in aspacemgr's book keeping. */
4257 for (Int i
= iLo
; i
<= iHi
; i
++) {
4258 if ((nsegments
[i
].kind
== SkFree
) || (nsegments
[i
].kind
== SkResvn
)) {
4267 /* Returns True if a new segment was found. */
4268 Bool
VG_(am_search_for_new_segment
)(Addr
*addr
, SizeT
*size
, UInt
*prot
)
4271 parse_procselfmaps(new_segment_found_callback
, NULL
);
4273 if (found_addr
!= 0) {
4283 #endif // defined(VGO_solaris)
4285 /*------END-procmaps-parser-for-Solaris--------------------------*/
4287 #endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_freebsd)
4289 /*--------------------------------------------------------------------*/
4291 /*--------------------------------------------------------------------*/