/*
   ----------------------------------------------------------------

   Notice that the following BSD-style license applies to this one
   file (memcheck.h) only.  The rest of Valgrind is licensed under the
   terms of the GNU General Public License, version 2, unless
   otherwise indicated.  See the COPYING file in the source
   distribution for details.

   ----------------------------------------------------------------

   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward.  All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

   2. The origin of this software must not be misrepresented; you must
      not claim that you wrote the original software.  If you use this
      software in a product, an acknowledgment in the product
      documentation would be appreciated but is not required.

   3. Altered source versions must be plainly marked as such, and must
      not be misrepresented as being the original software.

   4. The name of the author may not be used to endorse or promote
      products derived from this software without specific prior written
      permission.

   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   ----------------------------------------------------------------

   Notice that the above BSD-style license applies to this one file
   (memcheck.h) only.  The entire rest of Valgrind is licensed under
   the terms of the GNU General Public License, version 2.  See the
   COPYING file in the source distribution for details.

   ----------------------------------------------------------------
*/
#ifndef __MEMCHECK_H
#define __MEMCHECK_H


/* This file is for inclusion into client (your!) code.

   You can use these macros to manipulate and query memory permissions
   inside your own programs.

   See comment near the top of valgrind.h on how to use them.
*/

#include "valgrind.h"
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
   This enum comprises an ABI exported by Valgrind to programs
   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
   enum {
      VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
      VG_USERREQ__MAKE_MEM_UNDEFINED,
      VG_USERREQ__MAKE_MEM_DEFINED,
      VG_USERREQ__DISCARD,
      VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
      VG_USERREQ__CHECK_MEM_IS_DEFINED,
      VG_USERREQ__DO_LEAK_CHECK,
      VG_USERREQ__COUNT_LEAKS,

      VG_USERREQ__GET_VBITS,
      VG_USERREQ__SET_VBITS,

      VG_USERREQ__CREATE_BLOCK,

      VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,

      /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
      VG_USERREQ__COUNT_LEAK_BLOCKS,

      VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE,
      VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE,

      /* This is just for memcheck's internal use - don't use it */
      _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
         = VG_USERREQ_TOOL_BASE('M','C') + 256
   } Vg_MemCheckClientRequest;
/* Client-code macros to manipulate the state of memory. */

/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len)           \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
                            VG_USERREQ__MAKE_MEM_NOACCESS,       \
                            (_qzz_addr), (_qzz_len), 0, 0, 0)

/* Similarly, mark memory at _qzz_addr as addressable but undefined
   for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len)          \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
                            VG_USERREQ__MAKE_MEM_UNDEFINED,      \
                            (_qzz_addr), (_qzz_len), 0, 0, 0)

/* Similarly, mark memory at _qzz_addr as addressable and defined
   for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len)            \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
                            VG_USERREQ__MAKE_MEM_DEFINED,        \
                            (_qzz_addr), (_qzz_len), 0, 0, 0)
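
/* Usage sketch (illustrative only; the pool allocator below, including
   'pool', 'pool_init' and 'pool_alloc', is hypothetical and not part of
   this API).  A custom allocator can use the three macros above to keep
   Memcheck's view of its pool accurate:

      static char   pool[4096];
      static size_t pool_used = 0;

      void pool_init(void)
      {
         // Until handed out, no byte of the pool may be touched.
         VALGRIND_MAKE_MEM_NOACCESS(pool, sizeof pool);
      }

      void* pool_alloc(size_t n)
      {
         char* p = pool + pool_used;
         pool_used += n;
         // The new block may be written, but reading it is an error
         // until the caller initialises it.
         VALGRIND_MAKE_MEM_UNDEFINED(p, n);
         return p;
      }

   VALGRIND_MAKE_MEM_DEFINED is the analogous call for memory whose
   contents are known to be initialised, e.g. a buffer filled in by
   code running outside Valgrind's view. */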
/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
   not altered: bytes which are addressable are marked as defined,
   but those which are not addressable are left unchanged. */
#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len)     \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,              \
                            VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
                            (_qzz_addr), (_qzz_len), 0, 0, 0)
/* Create a block-description handle.  The description is an ASCII
   string which is included in any messages pertaining to addresses
   within the specified memory range.  Has no other effect on the
   properties of the memory range. */
#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc)     \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
                            VG_USERREQ__CREATE_BLOCK,            \
                            (_qzz_addr), (_qzz_len), (_qzz_desc), \
                            0, 0)

/* Discard a block-description-handle.  Returns 1 for an
   invalid handle, 0 for a valid handle. */
#define VALGRIND_DISCARD(_qzz_blkindex)                          \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
                            VG_USERREQ__DISCARD,                 \
                            0, (_qzz_blkindex), 0, 0, 0)
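
/* Usage sketch (illustrative; 'ring' is a hypothetical buffer).  A
   description makes Memcheck's reports for this range easier to read;
   keep the handle so the description can be removed later:

      static char ring[1024];
      int handle = VALGRIND_CREATE_BLOCK(ring, sizeof ring,
                                         "producer/consumer ring buffer");
      // ... errors involving 'ring' now mention the description ...
      VALGRIND_DISCARD(handle);   // returns 0 for a valid handle
*/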
/* Client-code macros to check the state of memory. */

/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
   If suitable addressability is not established, Valgrind prints an
   error message and returns the address of the first offending byte.
   Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len)     \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                            \
                            VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \
                            (_qzz_addr), (_qzz_len), 0, 0, 0)

/* Check that memory at _qzz_addr is addressable and defined for
   _qzz_len bytes.  If suitable addressability and definedness are not
   established, Valgrind prints an error message and returns the
   address of the first offending byte.  Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len)        \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                           \
                            VG_USERREQ__CHECK_MEM_IS_DEFINED,    \
                            (_qzz_addr), (_qzz_len), 0, 0, 0)

/* Use this macro to force the definedness and addressability of an
   lvalue to be checked.  If suitable addressability and definedness
   are not established, Valgrind prints an error message and returns
   the address of the first offending byte.  Otherwise it returns
   zero. */
#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue)                \
    VALGRIND_CHECK_MEM_IS_DEFINED(                               \
       (volatile unsigned char *)&(__lvalue),                    \
       (unsigned long)(sizeof (__lvalue)))
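
/* Usage sketch (illustrative; 'hdr', 'fill_header' and 'fd' are
   hypothetical).  These checks are useful just before handing memory to
   something Valgrind cannot see into, such as a system call or a device:

      struct header hdr;
      fill_header(&hdr);
      // Complain now, with a stack trace pointing here, rather than
      // later inside write() where the origin is harder to spot.
      VALGRIND_CHECK_VALUE_IS_DEFINED(hdr);
      write(fd, &hdr, sizeof hdr);
*/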
/* Do a full memory leak check (like --leak-check=full) mid-execution. */
#define VALGRIND_DO_LEAK_CHECK                                   \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,   \
                                    0, 0, 0, 0, 0)

/* Same as VALGRIND_DO_LEAK_CHECK but only showing the entries for
   which there was an increase in leaked bytes or in the number of
   leaked blocks since the previous leak search. */
#define VALGRIND_DO_ADDED_LEAK_CHECK                             \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,   \
                                    0, 1, 0, 0, 0)

/* Same as VALGRIND_DO_ADDED_LEAK_CHECK but showing entries with
   increased or decreased leaked bytes/blocks since the previous leak
   search. */
#define VALGRIND_DO_CHANGED_LEAK_CHECK                           \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,   \
                                    0, 2, 0, 0, 0)

/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
#define VALGRIND_DO_QUICK_LEAK_CHECK                             \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,   \
                                    1, 0, 0, 0, 0)
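
/* Usage sketch: running a leak check at a known-quiet point, e.g. between
   iterations of a server's main loop, attributes leaks to a particular
   phase ('running' and 'handle_request' are hypothetical):

      while (running) {
         handle_request();
         // Report only entries whose leaked bytes/blocks grew since
         // the previous search, so per-iteration leaks stand out.
         VALGRIND_DO_ADDED_LEAK_CHECK;
      }
*/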
/* Return number of leaked, dubious, reachable and suppressed bytes found by
   all previous leak checks.  They must be lvalues. */
#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed)     \
   /* For safety on 64-bit platforms we assign the results to private
      unsigned long variables, then assign these to the lvalues the user
      specified, which works no matter what type 'leaked', 'dubious', etc
      are.  We also initialise '_qzz_leaked', etc because
      VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
      defined. */                                                        \
   {                                                                     \
    unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;               \
    unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;               \
    VALGRIND_DO_CLIENT_REQUEST_STMT(                                     \
       VG_USERREQ__COUNT_LEAKS,                                          \
       &_qzz_leaked, &_qzz_dubious,                                      \
       &_qzz_reachable, &_qzz_suppressed, 0);                            \
    leaked     = _qzz_leaked;                                            \
    dubious    = _qzz_dubious;                                           \
    reachable  = _qzz_reachable;                                         \
    suppressed = _qzz_suppressed;                                        \
   }
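
/* Usage sketch: a test harness can turn leaks into test failures
   ('fail' is a hypothetical reporting function).  The counts reflect
   previous leak searches, so run one first:

      unsigned long leaked, dubious, reachable, suppressed;
      VALGRIND_DO_LEAK_CHECK;
      VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
      if (leaked > 0)
         fail("definitely lost: %lu bytes", leaked);
*/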
/* Return number of leaked, dubious, reachable and suppressed blocks found
   by all previous leak checks.  They must be lvalues. */
#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
   /* For safety on 64-bit platforms we assign the results to private
      unsigned long variables, then assign these to the lvalues the user
      specified, which works no matter what type 'leaked', 'dubious', etc
      are.  We also initialise '_qzz_leaked', etc because
      VG_USERREQ__COUNT_LEAK_BLOCKS doesn't mark the values returned as
      defined. */                                                        \
   {                                                                     \
    unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;               \
    unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;               \
    VALGRIND_DO_CLIENT_REQUEST_STMT(                                     \
       VG_USERREQ__COUNT_LEAK_BLOCKS,                                    \
       &_qzz_leaked, &_qzz_dubious,                                      \
       &_qzz_reachable, &_qzz_suppressed, 0);                            \
    leaked     = _qzz_leaked;                                            \
    dubious    = _qzz_dubious;                                           \
    reachable  = _qzz_reachable;                                         \
    suppressed = _qzz_suppressed;                                        \
   }
/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
   into the provided zzvbits array.  Return values:
      0   if not running on valgrind
      1   success
      2   [previously indicated unaligned arrays;  these are now allowed]
      3   if any parts of zza/zzvbits are not addressable.
   The metadata is not copied in cases 0, 2 or 3 so it should be
   impossible to segfault your system by using this call.
*/
#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes)                 \
    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                 \
                                    VG_USERREQ__GET_VBITS,       \
                                    (const char*)(zza),          \
                                    (char*)(zzvbits),            \
                                    (zznbytes), 0, 0)
/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
   from the provided zzvbits array.  Return values:
      0   if not running on valgrind
      1   success
      2   [previously indicated unaligned arrays;  these are now allowed]
      3   if any parts of zza/zzvbits are not addressable.
   The metadata is not copied in cases 0, 2 or 3 so it should be
   impossible to segfault your system by using this call.
*/
#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes)                 \
    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                 \
                                    VG_USERREQ__SET_VBITS,       \
                                    (const char*)(zza),          \
                                    (const char*)(zzvbits),      \
                                    (zznbytes), 0, 0 )
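
/* Usage sketch: saving and restoring definedness metadata across an
   operation that moves bytes behind Memcheck's back ('raw_copy', 'src'
   and 'dst' are hypothetical).  Each byte of the zzvbits array shadows
   one byte of the address range, so it must be at least zznbytes long:

      unsigned char vbits[256];
      unsigned rv;
      rv = VALGRIND_GET_VBITS(src, vbits, 256);   // expect 1 under Valgrind
      raw_copy(dst, src, 256);
      rv = VALGRIND_SET_VBITS(dst, vbits, 256);   // dst now carries src's
                                                  // definedness, bit for bit
*/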
/* Disable and re-enable reporting of addressing errors in the
   specified address range. */
#define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,               \
       VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE,                 \
       (_qzz_addr), (_qzz_len), 0, 0, 0)

#define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,               \
       VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE,                  \
       (_qzz_addr), (_qzz_len), 0, 0, 0)
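
/* Usage sketch: silencing deliberate out-of-range reads, e.g. a vectorised
   scan that may read up to 16 bytes past the end of a buffer ('scan16',
   'buf' and 'len' are hypothetical).  Re-enable reporting as soon as
   possible so genuine errors in the same range are not lost:

      VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(buf + len, 16);
      scan16(buf, len);
      VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(buf + len, 16);
*/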
#endif