/* gitweb extraction residue preserved from the original page:
   [libpwmd.git] / valgrind.h
   blob e4eb5bd759a4d6695c3d4f49ed2d15d36b3d4e3f
   commit note: "Removed EPWMD_KEY. It's no longer used on the server
   because empty ..." */
/* -*- c -*-
2 ----------------------------------------------------------------
4 Notice that the following BSD-style license applies to this one
5 file (valgrind.h) only. The rest of Valgrind is licensed under the
6 terms of the GNU General Public License, version 2, unless
7 otherwise indicated. See the COPYING file in the source
8 distribution for details.
10 ----------------------------------------------------------------
12 This file is part of Valgrind, a dynamic binary instrumentation
13 framework.
15 Copyright (C) 2000-2007 Julian Seward. All rights reserved.
17 Redistribution and use in source and binary forms, with or without
18 modification, are permitted provided that the following conditions
19 are met:
21 1. Redistributions of source code must retain the above copyright
22 notice, this list of conditions and the following disclaimer.
24 2. The origin of this software must not be misrepresented; you must
25 not claim that you wrote the original software. If you use this
26 software in a product, an acknowledgment in the product
27 documentation would be appreciated but is not required.
29 3. Altered source versions must be plainly marked as such, and must
30 not be misrepresented as being the original software.
32 4. The name of the author may not be used to endorse or promote
33 products derived from this software without specific prior written
34 permission.
36 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
37 OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
38 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
39 ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
40 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
41 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
42 GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
43 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
44 WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
45 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
46 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 ----------------------------------------------------------------
50 Notice that the above BSD-style license applies to this one file
51 (valgrind.h) only. The entire rest of Valgrind is licensed under
52 the terms of the GNU General Public License, version 2. See the
53 COPYING file in the source distribution for details.
   ----------------------------------------------------------------
*/
/* This file is for inclusion into client (your!) code.
61 You can use these macros to manipulate and query Valgrind's
62 execution inside your own programs.
64 The resulting executables will still run without Valgrind, just a
65 little bit more slowly than they otherwise would, but otherwise
66 unchanged. When not running on valgrind, each client request
67 consumes very few (eg. 7) instructions, so the resulting performance
68 loss is negligible unless you plan to execute client requests
69 millions of times per second. Nevertheless, if that is still a
70 problem, you can compile with the NVALGRIND symbol defined (gcc
71 -DNVALGRIND) so that client requests are not even compiled in. */
73 #ifndef __VALGRIND_H
74 #define __VALGRIND_H
76 #include <stdarg.h>
78 /* Nb: this file might be included in a file compiled with -ansi. So
79 we can't use C++ style "//" comments nor the "asm" keyword (instead
80 use "__asm__"). */
82 /* Derive some tags indicating what the target architecture is. Note
83 that in this file we're using the compiler's CPP symbols for
84 identifying architectures, which are different to the ones we use
85 within the rest of Valgrind. Note, __powerpc__ is active for both
86 32 and 64-bit PPC, whereas __powerpc64__ is only active for the
87 latter. */
88 #undef ARCH_x86
89 #undef ARCH_amd64
90 #undef ARCH_ppc32
91 #undef ARCH_ppc64
93 #if defined(__i386__)
94 # define ARCH_x86 1
95 #elif defined(__x86_64__)
96 # define ARCH_amd64 1
97 #elif defined(__powerpc__) && !defined(__powerpc64__)
98 # define ARCH_ppc32 1
99 #elif defined(__powerpc__) && defined(__powerpc64__)
100 # define ARCH_ppc64 1
101 #endif
103 /* If we're not compiling for our target architecture, don't generate
104 any inline asms. */
105 #if !defined(ARCH_x86) && !defined(ARCH_amd64) \
106 && !defined(ARCH_ppc32) && !defined(ARCH_ppc64)
107 # if !defined(NVALGRIND)
108 # define NVALGRIND 1
109 # endif
110 #endif
113 /* ------------------------------------------------------------------ */
114 /* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
115 /* in here of use to end-users -- skip to the next section. */
116 /* ------------------------------------------------------------------ */
118 #if defined(NVALGRIND)
120 /* Define NVALGRIND to completely remove the Valgrind magic sequence
121 from the compiled code (analogous to NDEBUG's effects on
122 assert()) */
123 #define VALGRIND_DO_CLIENT_REQUEST( \
124 _zzq_rlval, _zzq_default, _zzq_request, \
125 _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
127 (_zzq_rlval) = (_zzq_default); \
130 #else /* ! NVALGRIND */
132 /* The following defines the magic code sequences which the JITter
133 spots and handles magically. Don't look too closely at them as
134 they will rot your brain.
136 The assembly code sequences for all architectures is in this one
137 file. This is because this file must be stand-alone, and we don't
138 want to have multiple files.
140 For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
141 value gets put in the return slot, so that everything works when
142 this is executed not under Valgrind. Args are passed in a memory
143 block, and so there's no intrinsic limit to the number that could
144 be passed, but it's currently five.
146 The macro args are:
147 _zzq_rlval result lvalue
148 _zzq_default default value (result returned when running on real CPU)
149 _zzq_request request code
150 _zzq_arg1..5 request params
152 The other two macros are used to support function wrapping, and are
153 a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
154 guest's NRADDR pseudo-register and whatever other information is
155 needed to safely run the call original from the wrapper: on
156 ppc64-linux, the R2 value at the divert point is also needed. This
157 information is abstracted into a user-visible type, OrigFn.
159 VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
160 guest, but guarantees that the branch instruction will not be
161 redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
162 branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
163 complete inline asm, since it needs to be combined with more magic
164 inline asm stuff to be useful.
/* ---------------------------- x86 ---------------------------- */

#if defined(ARCH_x86)

/* Context of the original function, captured by
   VALGRIND_GET_NR_CONTEXT.  On x86 only the entry address is
   needed. */
typedef
   struct {
      unsigned int nraddr; /* where's the code? */
   }
   OrigFn;

/* Magic preamble the JITter recognises; a no-op on a real CPU. */
#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
                     "roll $3, %%edi ; roll $13, %%edi\n\t"       \
                     "roll $29, %%edi ; roll $19, %%edi\n\t"

#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
  { volatile unsigned int _zzq_args[6];                           \
    volatile unsigned int _zzq_result;                            \
    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %EDX = client_request ( %EAX ) */         \
                     "xchgl %%ebx,%%ebx"                          \
                     : "=d" (_zzq_result)                         \
                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = _zzq_result;                                     \
  }

#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
    volatile unsigned int __addr;                                 \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %EAX = guest_NRADDR */                    \
                     "xchgl %%ecx,%%ecx"                          \
                     : "=a" (__addr)                              \
                     : /* no inputs */                            \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_orig->nraddr = __addr;                                   \
  }

#define VALGRIND_CALL_NOREDIR_EAX                                 \
                     __SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* call-noredir *%EAX */                     \
                     "xchgl %%edx,%%edx\n\t"
#endif /* ARCH_x86 */
/* --------------------------- amd64 --------------------------- */

#if defined(ARCH_amd64)

/* Context of the original function, captured by
   VALGRIND_GET_NR_CONTEXT.  On amd64 only the entry address is
   needed. */
typedef
   struct {
      unsigned long long int nraddr; /* where's the code? */
   }
   OrigFn;

/* Magic preamble the JITter recognises; a no-op on a real CPU. */
#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
                     "rolq $3, %%rdi ; rolq $13, %%rdi\n\t"       \
                     "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"

#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
  { volatile unsigned long long int _zzq_args[6];                 \
    volatile unsigned long long int _zzq_result;                  \
    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %RDX = client_request ( %RAX ) */         \
                     "xchgq %%rbx,%%rbx"                          \
                     : "=d" (_zzq_result)                         \
                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = _zzq_result;                                     \
  }

#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
    volatile unsigned long long int __addr;                       \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %RAX = guest_NRADDR */                    \
                     "xchgq %%rcx,%%rcx"                          \
                     : "=a" (__addr)                              \
                     : /* no inputs */                            \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_orig->nraddr = __addr;                                   \
  }

#define VALGRIND_CALL_NOREDIR_RAX                                 \
                     __SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* call-noredir *%RAX */                     \
                     "xchgq %%rdx,%%rdx\n\t"
#endif /* ARCH_amd64 */
/* --------------------------- ppc32 --------------------------- */

#if defined(ARCH_ppc32)

/* Context of the original function, captured by
   VALGRIND_GET_NR_CONTEXT.  On ppc32 only the entry address is
   needed. */
typedef
   struct {
      unsigned int nraddr; /* where's the code? */
   }
   OrigFn;

/* Magic preamble the JITter recognises; a no-op on a real CPU. */
#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
                     "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t"   \
                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"

#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
  { unsigned int _zzq_args[6];                                    \
    register unsigned int  _zzq_result __asm__("r3");             \
    register unsigned int* _zzq_ptr __asm__("r4");                \
    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
    _zzq_ptr = _zzq_args;                                         \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %R3 = client_request ( %R4 ) */           \
                     "or 1,1,1"                                   \
                     : "=r" (_zzq_result)                         \
                     : "0" (_zzq_default), "r" (_zzq_ptr)         \
                     : "cc", "memory");                           \
    _zzq_rlval = _zzq_result;                                     \
  }

#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
    register unsigned int __addr __asm__("r3");                   \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %R3 = guest_NRADDR */                     \
                     "or 2,2,2"                                   \
                     : "=r" (__addr)                              \
                     : /* no inputs */                            \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_orig->nraddr = __addr;                                   \
  }

#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
                     __SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* branch-and-link-to-noredir *%R11 */       \
                     "or 3,3,3\n\t"
#endif /* ARCH_ppc32 */
331 /* --------------------------- ppc64 --------------------------- */
333 #if defined(ARCH_ppc64)
335 typedef
336 struct {
337 unsigned long long int nraddr; /* where's the code? */
338 unsigned long long int r2; /* what tocptr do we need? */
340 OrigFn;
342 #define __SPECIAL_INSTRUCTION_PREAMBLE \
343 "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
344 "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
346 #define VALGRIND_DO_CLIENT_REQUEST( \
347 _zzq_rlval, _zzq_default, _zzq_request, \
348 _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
350 { unsigned long long int _zzq_args[6]; \
351 register unsigned long long int _zzq_result __asm__("r3"); \
352 register unsigned long long int* _zzq_ptr __asm__("r4"); \
353 _zzq_args[0] = (unsigned long long int)(_zzq_request); \
354 _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
355 _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
356 _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
357 _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
358 _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
359 _zzq_ptr = _zzq_args; \
360 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
361 /* %R3 = client_request ( %R4 ) */ \
362 "or 1,1,1" \
363 : "=r" (_zzq_result) \
364 : "0" (_zzq_default), "r" (_zzq_ptr) \
365 : "cc", "memory"); \
366 _zzq_rlval = _zzq_result; \
369 #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
370 { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
371 register unsigned long long int __addr __asm__("r3"); \
372 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
373 /* %R3 = guest_NRADDR */ \
374 "or 2,2,2" \
375 : "=r" (__addr) \
377 : "cc", "memory" \
378 ); \
379 _zzq_orig->nraddr = __addr; \
380 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
381 /* %R3 = guest_NRADDR_GPR2 */ \
382 "or 4,4,4" \
383 : "=r" (__addr) \
385 : "cc", "memory" \
386 ); \
387 _zzq_orig->r2 = __addr; \
390 #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
391 __SPECIAL_INSTRUCTION_PREAMBLE \
392 /* branch-and-link-to-noredir *%R11 */ \
393 "or 3,3,3\n\t"
395 #endif /* ARCH_ppc64 */
397 /* Insert assembly code for other architectures here... */
399 #endif /* NVALGRIND */
/* ------------------------------------------------------------------ */
/* ARCHITECTURE SPECIFICS for FUNCTION WRAPPING.  This is all very    */
/* ugly.  It's the least-worst tradeoff I can think of.               */
/* ------------------------------------------------------------------ */

/* This section defines magic (a.k.a appalling-hack) macros for doing
   guaranteed-no-redirection macros, so as to get from function
   wrappers to the functions they are wrapping.  The whole point is to
   construct standard call sequences, but to do the call itself with a
   special no-redirect call pseudo-instruction that the JIT
   understands and handles specially.  This section is long and
   repetitious, and I can't see a way to make it shorter.

   The naming scheme is as follows:

      CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}

   'W' stands for "word" and 'v' for "void".  Hence there are
   different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
   and for each, the possibility of returning a word-typed result, or
   no result.
*/

/* Use these to write the name of your wrapper.  NOTE: duplicates
   VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */

#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname)                    \
   _vgwZU_##soname##_##fnname

#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname)                    \
   _vgwZZ_##soname##_##fnname

/* Use this macro from within a wrapper function to collect the
   context (address and possibly other info) of the original function.
   Once you have that you can then use it in one of the CALL_FN_
   macros.  The type of the argument _lval is OrigFn. */
#define VALGRIND_GET_ORIG_FN(_lval)  VALGRIND_GET_NR_CONTEXT(_lval)

/* Derivatives of the main macros below, for calling functions
   returning void. */

#define CALL_FN_v_v(fnptr)                                        \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_v(_junk,fnptr); } while (0)

#define CALL_FN_v_W(fnptr, arg1)                                  \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_W(_junk,fnptr,arg1); } while (0)

#define CALL_FN_v_WW(fnptr, arg1,arg2)                            \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)

#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3)                      \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
459 /* ---------------------------- x86 ---------------------------- */
461 #if defined(ARCH_x86)
463 /* These regs are trashed by the hidden call. No need to mention eax
464 as gcc can already see that, plus causes gcc to bomb. */
465 #define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
467 /* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
468 long) == 4. */
470 #define CALL_FN_W_v(lval, orig) \
471 do { \
472 volatile OrigFn _orig = (orig); \
473 volatile unsigned long _argvec[1]; \
474 volatile unsigned long _res; \
475 _argvec[0] = (unsigned long)_orig.nraddr; \
476 __asm__ volatile( \
477 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
478 VALGRIND_CALL_NOREDIR_EAX \
479 : /*out*/ "=a" (_res) \
480 : /*in*/ "a" (&_argvec[0]) \
481 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
482 ); \
483 lval = (__typeof__(lval)) _res; \
484 } while (0)
486 #define CALL_FN_W_W(lval, orig, arg1) \
487 do { \
488 volatile OrigFn _orig = (orig); \
489 volatile unsigned long _argvec[2]; \
490 volatile unsigned long _res; \
491 _argvec[0] = (unsigned long)_orig.nraddr; \
492 _argvec[1] = (unsigned long)(arg1); \
493 __asm__ volatile( \
494 "pushl 4(%%eax)\n\t" \
495 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
496 VALGRIND_CALL_NOREDIR_EAX \
497 "addl $4, %%esp\n" \
498 : /*out*/ "=a" (_res) \
499 : /*in*/ "a" (&_argvec[0]) \
500 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
501 ); \
502 lval = (__typeof__(lval)) _res; \
503 } while (0)
505 #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
506 do { \
507 volatile OrigFn _orig = (orig); \
508 volatile unsigned long _argvec[3]; \
509 volatile unsigned long _res; \
510 _argvec[0] = (unsigned long)_orig.nraddr; \
511 _argvec[1] = (unsigned long)(arg1); \
512 _argvec[2] = (unsigned long)(arg2); \
513 __asm__ volatile( \
514 "pushl 8(%%eax)\n\t" \
515 "pushl 4(%%eax)\n\t" \
516 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
517 VALGRIND_CALL_NOREDIR_EAX \
518 "addl $8, %%esp\n" \
519 : /*out*/ "=a" (_res) \
520 : /*in*/ "a" (&_argvec[0]) \
521 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
522 ); \
523 lval = (__typeof__(lval)) _res; \
524 } while (0)
526 #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
527 do { \
528 volatile OrigFn _orig = (orig); \
529 volatile unsigned long _argvec[4]; \
530 volatile unsigned long _res; \
531 _argvec[0] = (unsigned long)_orig.nraddr; \
532 _argvec[1] = (unsigned long)(arg1); \
533 _argvec[2] = (unsigned long)(arg2); \
534 _argvec[3] = (unsigned long)(arg3); \
535 __asm__ volatile( \
536 "pushl 12(%%eax)\n\t" \
537 "pushl 8(%%eax)\n\t" \
538 "pushl 4(%%eax)\n\t" \
539 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
540 VALGRIND_CALL_NOREDIR_EAX \
541 "addl $12, %%esp\n" \
542 : /*out*/ "=a" (_res) \
543 : /*in*/ "a" (&_argvec[0]) \
544 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
545 ); \
546 lval = (__typeof__(lval)) _res; \
547 } while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[5];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      __asm__ volatile(                                           \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $16, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[6];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      __asm__ volatile(                                           \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $20, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[7];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      __asm__ volatile(                                           \
         "pushl 24(%%eax)\n\t"                                    \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $24, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7)                                        \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[8];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      __asm__ volatile(                                           \
         "pushl 28(%%eax)\n\t"                                    \
         "pushl 24(%%eax)\n\t"                                    \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $28, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7,arg8)                                   \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[9];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      __asm__ volatile(                                           \
         "pushl 32(%%eax)\n\t"                                    \
         "pushl 28(%%eax)\n\t"                                    \
         "pushl 24(%%eax)\n\t"                                    \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $32, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7,arg8,arg9)                              \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[10];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      _argvec[9] = (unsigned long)(arg9);                         \
      __asm__ volatile(                                           \
         "pushl 36(%%eax)\n\t"                                    \
         "pushl 32(%%eax)\n\t"                                    \
         "pushl 28(%%eax)\n\t"                                    \
         "pushl 24(%%eax)\n\t"                                    \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $36, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)
732 #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
733 arg7,arg8,arg9,arg10) \
734 do { \
735 volatile OrigFn _orig = (orig); \
736 volatile unsigned long _argvec[11]; \
737 volatile unsigned long _res; \
738 _argvec[0] = (unsigned long)_orig.nraddr; \
739 _argvec[1] = (unsigned long)(arg1); \
740 _argvec[2] = (unsigned long)(arg2); \
741 _argvec[3] = (unsigned long)(arg3); \
742 _argvec[4] = (unsigned long)(arg4); \
743 _argvec[5] = (unsigned long)(arg5); \
744 _argvec[6] = (unsigned long)(arg6); \
745 _argvec[7] = (unsigned long)(arg7); \
746 _argvec[8] = (unsigned long)(arg8); \
747 _argvec[9] = (unsigned long)(arg9); \
748 _argvec[10] = (unsigned long)(arg10); \
749 __asm__ volatile( \
750 "pushl 40(%%eax)\n\t" \
751 "pushl 36(%%eax)\n\t" \
752 "pushl 32(%%eax)\n\t" \
753 "pushl 28(%%eax)\n\t" \
754 "pushl 24(%%eax)\n\t" \
755 "pushl 20(%%eax)\n\t" \
756 "pushl 16(%%eax)\n\t" \
757 "pushl 12(%%eax)\n\t" \
758 "pushl 8(%%eax)\n\t" \
759 "pushl 4(%%eax)\n\t" \
760 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
761 VALGRIND_CALL_NOREDIR_EAX \
762 "addl $40, %%esp\n" \
763 : /*out*/ "=a" (_res) \
764 : /*in*/ "a" (&_argvec[0]) \
765 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
766 ); \
767 lval = (__typeof__(lval)) _res; \
768 } while (0)
770 #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
771 arg6,arg7,arg8,arg9,arg10, \
772 arg11) \
773 do { \
774 volatile OrigFn _orig = (orig); \
775 volatile unsigned long _argvec[12]; \
776 volatile unsigned long _res; \
777 _argvec[0] = (unsigned long)_orig.nraddr; \
778 _argvec[1] = (unsigned long)(arg1); \
779 _argvec[2] = (unsigned long)(arg2); \
780 _argvec[3] = (unsigned long)(arg3); \
781 _argvec[4] = (unsigned long)(arg4); \
782 _argvec[5] = (unsigned long)(arg5); \
783 _argvec[6] = (unsigned long)(arg6); \
784 _argvec[7] = (unsigned long)(arg7); \
785 _argvec[8] = (unsigned long)(arg8); \
786 _argvec[9] = (unsigned long)(arg9); \
787 _argvec[10] = (unsigned long)(arg10); \
788 _argvec[11] = (unsigned long)(arg11); \
789 __asm__ volatile( \
790 "pushl 44(%%eax)\n\t" \
791 "pushl 40(%%eax)\n\t" \
792 "pushl 36(%%eax)\n\t" \
793 "pushl 32(%%eax)\n\t" \
794 "pushl 28(%%eax)\n\t" \
795 "pushl 24(%%eax)\n\t" \
796 "pushl 20(%%eax)\n\t" \
797 "pushl 16(%%eax)\n\t" \
798 "pushl 12(%%eax)\n\t" \
799 "pushl 8(%%eax)\n\t" \
800 "pushl 4(%%eax)\n\t" \
801 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
802 VALGRIND_CALL_NOREDIR_EAX \
803 "addl $44, %%esp\n" \
804 : /*out*/ "=a" (_res) \
805 : /*in*/ "a" (&_argvec[0]) \
806 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
807 ); \
808 lval = (__typeof__(lval)) _res; \
809 } while (0)
811 #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
812 arg6,arg7,arg8,arg9,arg10, \
813 arg11,arg12) \
814 do { \
815 volatile OrigFn _orig = (orig); \
816 volatile unsigned long _argvec[13]; \
817 volatile unsigned long _res; \
818 _argvec[0] = (unsigned long)_orig.nraddr; \
819 _argvec[1] = (unsigned long)(arg1); \
820 _argvec[2] = (unsigned long)(arg2); \
821 _argvec[3] = (unsigned long)(arg3); \
822 _argvec[4] = (unsigned long)(arg4); \
823 _argvec[5] = (unsigned long)(arg5); \
824 _argvec[6] = (unsigned long)(arg6); \
825 _argvec[7] = (unsigned long)(arg7); \
826 _argvec[8] = (unsigned long)(arg8); \
827 _argvec[9] = (unsigned long)(arg9); \
828 _argvec[10] = (unsigned long)(arg10); \
829 _argvec[11] = (unsigned long)(arg11); \
830 _argvec[12] = (unsigned long)(arg12); \
831 __asm__ volatile( \
832 "pushl 48(%%eax)\n\t" \
833 "pushl 44(%%eax)\n\t" \
834 "pushl 40(%%eax)\n\t" \
835 "pushl 36(%%eax)\n\t" \
836 "pushl 32(%%eax)\n\t" \
837 "pushl 28(%%eax)\n\t" \
838 "pushl 24(%%eax)\n\t" \
839 "pushl 20(%%eax)\n\t" \
840 "pushl 16(%%eax)\n\t" \
841 "pushl 12(%%eax)\n\t" \
842 "pushl 8(%%eax)\n\t" \
843 "pushl 4(%%eax)\n\t" \
844 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
845 VALGRIND_CALL_NOREDIR_EAX \
846 "addl $48, %%esp\n" \
847 : /*out*/ "=a" (_res) \
848 : /*in*/ "a" (&_argvec[0]) \
849 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
850 ); \
851 lval = (__typeof__(lval)) _res; \
852 } while (0)
854 #endif /* ARCH_x86 */
856 /* --------------------------- amd64 --------------------------- */
858 #if defined(ARCH_amd64)
860 /* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
862 /* These regs are trashed by the hidden call. */
863 #define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
864 "rdi", "r8", "r9", "r10", "r11"
866 /* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
867 long) == 8. */
869 #define CALL_FN_W_v(lval, orig) \
870 do { \
871 volatile OrigFn _orig = (orig); \
872 volatile unsigned long _argvec[1]; \
873 volatile unsigned long _res; \
874 _argvec[0] = (unsigned long)_orig.nraddr; \
875 __asm__ volatile( \
876 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
877 VALGRIND_CALL_NOREDIR_RAX \
878 : /*out*/ "=a" (_res) \
879 : /*in*/ "a" (&_argvec[0]) \
880 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
881 ); \
882 lval = (__typeof__(lval)) _res; \
883 } while (0)
885 #define CALL_FN_W_W(lval, orig, arg1) \
886 do { \
887 volatile OrigFn _orig = (orig); \
888 volatile unsigned long _argvec[2]; \
889 volatile unsigned long _res; \
890 _argvec[0] = (unsigned long)_orig.nraddr; \
891 _argvec[1] = (unsigned long)(arg1); \
892 __asm__ volatile( \
893 "movq 8(%%rax), %%rdi\n\t" \
894 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
895 VALGRIND_CALL_NOREDIR_RAX \
896 : /*out*/ "=a" (_res) \
897 : /*in*/ "a" (&_argvec[0]) \
898 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
899 ); \
900 lval = (__typeof__(lval)) _res; \
901 } while (0)
903 #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
904 do { \
905 volatile OrigFn _orig = (orig); \
906 volatile unsigned long _argvec[3]; \
907 volatile unsigned long _res; \
908 _argvec[0] = (unsigned long)_orig.nraddr; \
909 _argvec[1] = (unsigned long)(arg1); \
910 _argvec[2] = (unsigned long)(arg2); \
911 __asm__ volatile( \
912 "movq 16(%%rax), %%rsi\n\t" \
913 "movq 8(%%rax), %%rdi\n\t" \
914 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
915 VALGRIND_CALL_NOREDIR_RAX \
916 : /*out*/ "=a" (_res) \
917 : /*in*/ "a" (&_argvec[0]) \
918 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
919 ); \
920 lval = (__typeof__(lval)) _res; \
921 } while (0)
/* Call an unredirected 3-arg function: args -> %rdi, %rsi, %rdx. */
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[4]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      __asm__ volatile( \
         "movq 24(%%rax), %%rdx\n\t" /* arg3->%rdx */ \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t" /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 4-arg function: args -> %rdi, %rsi, %rdx, %rcx. */
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[5]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      __asm__ volatile( \
         "movq 32(%%rax), %%rcx\n\t" /* arg4->%rcx */ \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t" /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 5-arg function: args -> %rdi,%rsi,%rdx,%rcx,%r8. */
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[6]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      __asm__ volatile( \
         "movq 40(%%rax), %%r8\n\t" /* arg5->%r8 */ \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t" /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 6-arg function: all six args fit in registers
   (%rdi,%rsi,%rdx,%rcx,%r8,%r9) — the last register-only case. */
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[7]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      _argvec[6] = (unsigned long)(arg6); \
      __asm__ volatile( \
         "movq 48(%%rax), %%r9\n\t" /* arg6->%r9 */ \
         "movq 40(%%rax), %%r8\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t" /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 7-arg function: args 1..6 in registers, arg7
   pushed on the stack and popped afterwards (addq $8).
   NOTE(review): a single pushq leaves %rsp only 8-byte aligned at the
   call site; the amd64 ABI asks for 16-byte alignment — later valgrind
   versions re-align explicitly.  Confirm callees tolerate this. */
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[8]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      _argvec[6] = (unsigned long)(arg6); \
      _argvec[7] = (unsigned long)(arg7); \
      __asm__ volatile( \
         "pushq 56(%%rax)\n\t" /* arg7 -> stack */ \
         "movq 48(%%rax), %%r9\n\t" \
         "movq 40(%%rax), %%r8\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t" /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         "addq $8, %%rsp\n" /* discard the pushed arg */ \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 8-arg function: args 1..6 in registers, args 7-8
   pushed on the stack (in reverse order) and popped afterwards. */
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7,arg8) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[9]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      _argvec[6] = (unsigned long)(arg6); \
      _argvec[7] = (unsigned long)(arg7); \
      _argvec[8] = (unsigned long)(arg8); \
      __asm__ volatile( \
         "pushq 64(%%rax)\n\t" /* arg8 -> stack */ \
         "pushq 56(%%rax)\n\t" /* arg7 -> stack */ \
         "movq 48(%%rax), %%r9\n\t" \
         "movq 40(%%rax), %%r8\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t" /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         "addq $16, %%rsp\n" /* discard the two pushed args */ \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 9-arg function: args 7..9 go on the stack.
   NOTE(review): three pushq's leave %rsp 8-byte aligned at the call —
   see the alignment note on CALL_FN_W_7W. */
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7,arg8,arg9) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[10]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      _argvec[6] = (unsigned long)(arg6); \
      _argvec[7] = (unsigned long)(arg7); \
      _argvec[8] = (unsigned long)(arg8); \
      _argvec[9] = (unsigned long)(arg9); \
      __asm__ volatile( \
         "pushq 72(%%rax)\n\t" /* arg9 */ \
         "pushq 64(%%rax)\n\t" /* arg8 */ \
         "pushq 56(%%rax)\n\t" /* arg7 */ \
         "movq 48(%%rax), %%r9\n\t" \
         "movq 40(%%rax), %%r8\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t" /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         "addq $24, %%rsp\n" /* discard the three pushed args */ \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 10-arg function: args 7..10 go on the stack. */
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                      arg7,arg8,arg9,arg10) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[11]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      _argvec[6] = (unsigned long)(arg6); \
      _argvec[7] = (unsigned long)(arg7); \
      _argvec[8] = (unsigned long)(arg8); \
      _argvec[9] = (unsigned long)(arg9); \
      _argvec[10] = (unsigned long)(arg10); \
      __asm__ volatile( \
         "pushq 80(%%rax)\n\t" /* arg10 */ \
         "pushq 72(%%rax)\n\t" /* arg9 */ \
         "pushq 64(%%rax)\n\t" /* arg8 */ \
         "pushq 56(%%rax)\n\t" /* arg7 */ \
         "movq 48(%%rax), %%r9\n\t" \
         "movq 40(%%rax), %%r8\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t" /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         "addq $32, %%rsp\n" /* discard the four pushed args */ \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 11-arg function: args 7..11 go on the stack.
   NOTE(review): five pushq's leave %rsp 8-byte aligned at the call —
   see the alignment note on CALL_FN_W_7W. */
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                      arg7,arg8,arg9,arg10,arg11) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[12]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      _argvec[6] = (unsigned long)(arg6); \
      _argvec[7] = (unsigned long)(arg7); \
      _argvec[8] = (unsigned long)(arg8); \
      _argvec[9] = (unsigned long)(arg9); \
      _argvec[10] = (unsigned long)(arg10); \
      _argvec[11] = (unsigned long)(arg11); \
      __asm__ volatile( \
         "pushq 88(%%rax)\n\t" /* arg11 */ \
         "pushq 80(%%rax)\n\t" /* arg10 */ \
         "pushq 72(%%rax)\n\t" /* arg9 */ \
         "pushq 64(%%rax)\n\t" /* arg8 */ \
         "pushq 56(%%rax)\n\t" /* arg7 */ \
         "movq 48(%%rax), %%r9\n\t" \
         "movq 40(%%rax), %%r8\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t" /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         "addq $40, %%rsp\n" /* discard the five pushed args */ \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 12-arg function: args 7..12 go on the stack. */
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                      arg7,arg8,arg9,arg10,arg11,arg12) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[13]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      _argvec[6] = (unsigned long)(arg6); \
      _argvec[7] = (unsigned long)(arg7); \
      _argvec[8] = (unsigned long)(arg8); \
      _argvec[9] = (unsigned long)(arg9); \
      _argvec[10] = (unsigned long)(arg10); \
      _argvec[11] = (unsigned long)(arg11); \
      _argvec[12] = (unsigned long)(arg12); \
      __asm__ volatile( \
         "pushq 96(%%rax)\n\t" /* arg12 */ \
         "pushq 88(%%rax)\n\t" /* arg11 */ \
         "pushq 80(%%rax)\n\t" /* arg10 */ \
         "pushq 72(%%rax)\n\t" /* arg9 */ \
         "pushq 64(%%rax)\n\t" /* arg8 */ \
         "pushq 56(%%rax)\n\t" /* arg7 */ \
         "movq 48(%%rax), %%r9\n\t" \
         "movq 40(%%rax), %%r8\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t" /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         "addq $48, %%rsp\n" /* discard the six pushed args */ \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
#endif /* ARCH_amd64 */

/* --------------------------- ppc32 --------------------------- */

#if defined(ARCH_ppc32)
/* This is useful for finding out about the on-stack stuff:

   extern int f9  ( int,int,int,int,int,int,int,int,int );
   extern int f10 ( int,int,int,int,int,int,int,int,int,int );
   extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
   extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );

   int g9 ( void ) {
      return f9(11,22,33,44,55,66,77,88,99);
   }
   int g10 ( void ) {
      return f10(11,22,33,44,55,66,77,88,99,110);
   }
   int g11 ( void ) {
      return f11(11,22,33,44,55,66,77,88,99,110,121);
   }
   int g12 ( void ) {
      return f12(11,22,33,44,55,66,77,88,99,110,121,132);
   }
*/

/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
/* These regs are trashed by the hidden call. */
/* Clobber list handed to every ppc32 hidden-call asm block below. */
#define __CALLER_SAVED_REGS \
   "lr", "ctr", "xer", \
   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
   "r11", "r12", "r13"
/* These CALL_FN_ macros assume that on ppc32-linux,
   sizeof(unsigned long) == 4. */
/* Call an unredirected 0-arg function: target address loaded into r11,
   word result returned in r3. */
#define CALL_FN_W_v(lval, orig) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[1]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      __asm__ volatile( \
         "mr 11,%1\n\t" /* &_argvec[0] -> r11 */ \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr %0,3" /* result <- r3 */ \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 1-arg function: arg1 -> r3, result from r3. */
#define CALL_FN_W_W(lval, orig, arg1) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[2]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t" /* arg1->r3 */ \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr %0,3" \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 2-arg function: args -> r3, r4. */
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[3]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t" /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" /* arg2->r4 */ \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr %0,3" \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 3-arg function: args -> r3, r4, r5. */
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[4]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t" /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr %0,3" \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 4-arg function: args -> r3..r6. */
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[5]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t" /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t" /* arg4->r6 */ \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr %0,3" \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 5-arg function: args -> r3..r7. */
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[6]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t" /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t" /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr %0,3" \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 6-arg function: args -> r3..r8. */
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[7]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t" /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t" /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr %0,3" \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 7-arg function: args -> r3..r9. */
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[8]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      _argvec[7] = (unsigned long)arg7; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t" /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t" /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 9,28(11)\n\t" \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr %0,3" \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 8-arg function: args fill all of r3..r10 —
   the last register-only ppc32 case. */
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7,arg8) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[9]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      _argvec[7] = (unsigned long)arg7; \
      _argvec[8] = (unsigned long)arg8; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t" /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t" /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 9,28(11)\n\t" \
         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr %0,3" \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 9-arg function: args 1..8 in r3..r10, arg9
   stored into a temporary 16-byte stack frame (addi 1,1,-16) at 8(r1),
   which is torn down again after the call. */
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7,arg8,arg9) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[10]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      _argvec[7] = (unsigned long)arg7; \
      _argvec[8] = (unsigned long)arg8; \
      _argvec[9] = (unsigned long)arg9; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "addi 1,1,-16\n\t" /* open stack frame for arg9 */ \
         /* arg9 */ \
         "lwz 3,36(11)\n\t" \
         "stw 3,8(1)\n\t" \
         /* args1-8 */ \
         "lwz 3,4(11)\n\t" /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t" /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 9,28(11)\n\t" \
         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "addi 1,1,16\n\t" /* close the frame */ \
         "mr %0,3" \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 10-arg function: args 9-10 spilled to a 16-byte
   temporary stack frame at 8(r1) and 12(r1). */
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                      arg7,arg8,arg9,arg10) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[11]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      _argvec[7] = (unsigned long)arg7; \
      _argvec[8] = (unsigned long)arg8; \
      _argvec[9] = (unsigned long)arg9; \
      _argvec[10] = (unsigned long)arg10; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "addi 1,1,-16\n\t" /* open stack frame for args 9-10 */ \
         /* arg10 */ \
         "lwz 3,40(11)\n\t" \
         "stw 3,12(1)\n\t" \
         /* arg9 */ \
         "lwz 3,36(11)\n\t" \
         "stw 3,8(1)\n\t" \
         /* args1-8 */ \
         "lwz 3,4(11)\n\t" /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t" /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 9,28(11)\n\t" \
         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "addi 1,1,16\n\t" /* close the frame */ \
         "mr %0,3" \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 11-arg function: args 9-11 spilled to a 32-byte
   temporary stack frame at 8(r1), 12(r1), 16(r1). */
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                      arg7,arg8,arg9,arg10,arg11) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[12]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      _argvec[7] = (unsigned long)arg7; \
      _argvec[8] = (unsigned long)arg8; \
      _argvec[9] = (unsigned long)arg9; \
      _argvec[10] = (unsigned long)arg10; \
      _argvec[11] = (unsigned long)arg11; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "addi 1,1,-32\n\t" /* open stack frame for args 9-11 */ \
         /* arg11 */ \
         "lwz 3,44(11)\n\t" \
         "stw 3,16(1)\n\t" \
         /* arg10 */ \
         "lwz 3,40(11)\n\t" \
         "stw 3,12(1)\n\t" \
         /* arg9 */ \
         "lwz 3,36(11)\n\t" \
         "stw 3,8(1)\n\t" \
         /* args1-8 */ \
         "lwz 3,4(11)\n\t" /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t" /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 9,28(11)\n\t" \
         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "addi 1,1,32\n\t" /* close the frame */ \
         "mr %0,3" \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 12-arg function: args 9-12 spilled to a 32-byte
   temporary stack frame at 8(r1)..20(r1). */
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                      arg7,arg8,arg9,arg10,arg11,arg12) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[13]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      _argvec[7] = (unsigned long)arg7; \
      _argvec[8] = (unsigned long)arg8; \
      _argvec[9] = (unsigned long)arg9; \
      _argvec[10] = (unsigned long)arg10; \
      _argvec[11] = (unsigned long)arg11; \
      _argvec[12] = (unsigned long)arg12; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "addi 1,1,-32\n\t" /* open stack frame for args 9-12 */ \
         /* arg12 */ \
         "lwz 3,48(11)\n\t" \
         "stw 3,20(1)\n\t" \
         /* arg11 */ \
         "lwz 3,44(11)\n\t" \
         "stw 3,16(1)\n\t" \
         /* arg10 */ \
         "lwz 3,40(11)\n\t" \
         "stw 3,12(1)\n\t" \
         /* arg9 */ \
         "lwz 3,36(11)\n\t" \
         "stw 3,8(1)\n\t" \
         /* args1-8 */ \
         "lwz 3,4(11)\n\t" /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t" /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 9,28(11)\n\t" \
         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
         "lwz 11,0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "addi 1,1,32\n\t" /* close the frame */ \
         "mr %0,3" \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
#endif /* ARCH_ppc32 */

/* --------------------------- ppc64 --------------------------- */

#if defined(ARCH_ppc64)

/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */

/* These regs are trashed by the hidden call. */
/* Clobber list handed to every ppc64 hidden-call asm block below. */
#define __CALLER_SAVED_REGS \
   "lr", "ctr", "xer", \
   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
   "r11", "r12", "r13"
/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
   long) == 8. */
/* Call an unredirected 0-arg function on ppc64.  The caller's TOC
   pointer (r2) is saved at -16(r11), the callee's TOC is installed
   from -8(r11) (= _argvec[1]), and the caller's TOC is restored after
   the call.  Result returned in r3. */
#define CALL_FN_W_v(lval, orig) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[3+0]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1] = (unsigned long)_orig.r2; \
      _argvec[2] = (unsigned long)_orig.nraddr; \
      __asm__ volatile( \
         "mr 11,%1\n\t" /* &_argvec[2] -> r11 */ \
         "std 2,-16(11)\n\t" /* save tocptr */ \
         "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
         "ld 11, 0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)" /* restore tocptr */ \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 1-arg function on ppc64: arg1 -> r3; TOC
   pointer saved/swapped/restored as in CALL_FN_W_v. */
#define CALL_FN_W_W(lval, orig, arg1) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[3+1]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1] = (unsigned long)_orig.r2; \
      _argvec[2] = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t" /* save tocptr */ \
         "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
         "ld 3, 8(11)\n\t" /* arg1->r3 */ \
         "ld 11, 0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)" /* restore tocptr */ \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 2-arg function on ppc64: args -> r3, r4. */
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[3+2]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1] = (unsigned long)_orig.r2; \
      _argvec[2] = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      _argvec[2+2] = (unsigned long)arg2; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t" /* save tocptr */ \
         "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
         "ld 3, 8(11)\n\t" /* arg1->r3 */ \
         "ld 4, 16(11)\n\t" /* arg2->r4 */ \
         "ld 11, 0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)" /* restore tocptr */ \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 3-arg function on ppc64: args -> r3, r4, r5. */
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[3+3]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1] = (unsigned long)_orig.r2; \
      _argvec[2] = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      _argvec[2+2] = (unsigned long)arg2; \
      _argvec[2+3] = (unsigned long)arg3; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t" /* save tocptr */ \
         "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
         "ld 3, 8(11)\n\t" /* arg1->r3 */ \
         "ld 4, 16(11)\n\t" /* arg2->r4 */ \
         "ld 5, 24(11)\n\t" /* arg3->r5 */ \
         "ld 11, 0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)" /* restore tocptr */ \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 4-arg function on ppc64: args -> r3..r6. */
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[3+4]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1] = (unsigned long)_orig.r2; \
      _argvec[2] = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      _argvec[2+2] = (unsigned long)arg2; \
      _argvec[2+3] = (unsigned long)arg3; \
      _argvec[2+4] = (unsigned long)arg4; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t" /* save tocptr */ \
         "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
         "ld 3, 8(11)\n\t" /* arg1->r3 */ \
         "ld 4, 16(11)\n\t" /* arg2->r4 */ \
         "ld 5, 24(11)\n\t" /* arg3->r5 */ \
         "ld 6, 32(11)\n\t" /* arg4->r6 */ \
         "ld 11, 0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)" /* restore tocptr */ \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* Call an unredirected 5-arg function on ppc64: args -> r3..r7. */
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[3+5]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1] = (unsigned long)_orig.r2; \
      _argvec[2] = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      _argvec[2+2] = (unsigned long)arg2; \
      _argvec[2+3] = (unsigned long)arg3; \
      _argvec[2+4] = (unsigned long)arg4; \
      _argvec[2+5] = (unsigned long)arg5; \
      __asm__ volatile( \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t" /* save tocptr */ \
         "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
         "ld 3, 8(11)\n\t" /* arg1->r3 */ \
         "ld 4, 16(11)\n\t" /* arg2->r4 */ \
         "ld 5, 24(11)\n\t" /* arg3->r5 */ \
         "ld 6, 32(11)\n\t" /* arg4->r6 */ \
         "ld 7, 40(11)\n\t" /* arg5->r7 */ \
         "ld 11, 0(11)\n\t" /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)" /* restore tocptr */ \
         : /*out*/ "=r" (_res) \
         : /*in*/ "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
/* ppc64: same scheme as CALL_FN_W_WWWW but with 6 word args, passed in
   r3-r8. */
1904 #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
1905 do { \
1906 volatile OrigFn _orig = (orig); \
1907 volatile unsigned long _argvec[3+6]; \
1908 volatile unsigned long _res; \
1909 /* _argvec[0] holds current r2 across the call */ \
1910 _argvec[1] = (unsigned long)_orig.r2; \
1911 _argvec[2] = (unsigned long)_orig.nraddr; \
1912 _argvec[2+1] = (unsigned long)arg1; \
1913 _argvec[2+2] = (unsigned long)arg2; \
1914 _argvec[2+3] = (unsigned long)arg3; \
1915 _argvec[2+4] = (unsigned long)arg4; \
1916 _argvec[2+5] = (unsigned long)arg5; \
1917 _argvec[2+6] = (unsigned long)arg6; \
1918 __asm__ volatile( \
1919 "mr 11,%1\n\t" \
1920 "std 2,-16(11)\n\t" /* save tocptr */ \
1921 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
1922 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
1923 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
1924 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
1925 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
1926 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
1927 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
1928 "ld 11, 0(11)\n\t" /* target->r11 */ \
1929 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1930 "mr 11,%1\n\t" \
1931 "mr %0,3\n\t" \
1932 "ld 2,-16(11)" /* restore tocptr */ \
1933 : /*out*/ "=r" (_res) \
1934 : /*in*/ "r" (&_argvec[2]) \
1935 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1936 ); \
1937 lval = (__typeof__(lval)) _res; \
1938 } while (0)
/* ppc64: same scheme as CALL_FN_W_WWWW but with 7 word args, passed in
   r3-r9. */
1940 #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1941 arg7) \
1942 do { \
1943 volatile OrigFn _orig = (orig); \
1944 volatile unsigned long _argvec[3+7]; \
1945 volatile unsigned long _res; \
1946 /* _argvec[0] holds current r2 across the call */ \
1947 _argvec[1] = (unsigned long)_orig.r2; \
1948 _argvec[2] = (unsigned long)_orig.nraddr; \
1949 _argvec[2+1] = (unsigned long)arg1; \
1950 _argvec[2+2] = (unsigned long)arg2; \
1951 _argvec[2+3] = (unsigned long)arg3; \
1952 _argvec[2+4] = (unsigned long)arg4; \
1953 _argvec[2+5] = (unsigned long)arg5; \
1954 _argvec[2+6] = (unsigned long)arg6; \
1955 _argvec[2+7] = (unsigned long)arg7; \
1956 __asm__ volatile( \
1957 "mr 11,%1\n\t" \
1958 "std 2,-16(11)\n\t" /* save tocptr */ \
1959 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
1960 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
1961 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
1962 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
1963 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
1964 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
1965 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
1966 "ld 9, 56(11)\n\t" /* arg7->r9 */ \
1967 "ld 11, 0(11)\n\t" /* target->r11 */ \
1968 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1969 "mr 11,%1\n\t" \
1970 "mr %0,3\n\t" \
1971 "ld 2,-16(11)" /* restore tocptr */ \
1972 : /*out*/ "=r" (_res) \
1973 : /*in*/ "r" (&_argvec[2]) \
1974 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1975 ); \
1976 lval = (__typeof__(lval)) _res; \
1977 } while (0)
/* ppc64: same scheme as CALL_FN_W_WWWW but with 8 word args, filling
   every argument register r3-r10.  Beyond 8 args the variants below
   must spill to the stack. */
1979 #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1980 arg7,arg8) \
1981 do { \
1982 volatile OrigFn _orig = (orig); \
1983 volatile unsigned long _argvec[3+8]; \
1984 volatile unsigned long _res; \
1985 /* _argvec[0] holds current r2 across the call */ \
1986 _argvec[1] = (unsigned long)_orig.r2; \
1987 _argvec[2] = (unsigned long)_orig.nraddr; \
1988 _argvec[2+1] = (unsigned long)arg1; \
1989 _argvec[2+2] = (unsigned long)arg2; \
1990 _argvec[2+3] = (unsigned long)arg3; \
1991 _argvec[2+4] = (unsigned long)arg4; \
1992 _argvec[2+5] = (unsigned long)arg5; \
1993 _argvec[2+6] = (unsigned long)arg6; \
1994 _argvec[2+7] = (unsigned long)arg7; \
1995 _argvec[2+8] = (unsigned long)arg8; \
1996 __asm__ volatile( \
1997 "mr 11,%1\n\t" \
1998 "std 2,-16(11)\n\t" /* save tocptr */ \
1999 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2000 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2001 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2002 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2003 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2004 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2005 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2006 "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2007 "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2008 "ld 11, 0(11)\n\t" /* target->r11 */ \
2009 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2010 "mr 11,%1\n\t" \
2011 "mr %0,3\n\t" \
2012 "ld 2,-16(11)" /* restore tocptr */ \
2013 : /*out*/ "=r" (_res) \
2014 : /*in*/ "r" (&_argvec[2]) \
2015 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2016 ); \
2017 lval = (__typeof__(lval)) _res; \
2018 } while (0)
/* ppc64: 9-arg variant.  Args 1-8 go in r3-r10; arg9 no longer fits in
   registers, so the stack frame is grown by 128 bytes and arg9 is
   stored at sp+112 (the first parameter-save slot past the 112-byte
   ABI frame header), then the frame is shrunk back after the call. */
2020 #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2021 arg7,arg8,arg9) \
2022 do { \
2023 volatile OrigFn _orig = (orig); \
2024 volatile unsigned long _argvec[3+9]; \
2025 volatile unsigned long _res; \
2026 /* _argvec[0] holds current r2 across the call */ \
2027 _argvec[1] = (unsigned long)_orig.r2; \
2028 _argvec[2] = (unsigned long)_orig.nraddr; \
2029 _argvec[2+1] = (unsigned long)arg1; \
2030 _argvec[2+2] = (unsigned long)arg2; \
2031 _argvec[2+3] = (unsigned long)arg3; \
2032 _argvec[2+4] = (unsigned long)arg4; \
2033 _argvec[2+5] = (unsigned long)arg5; \
2034 _argvec[2+6] = (unsigned long)arg6; \
2035 _argvec[2+7] = (unsigned long)arg7; \
2036 _argvec[2+8] = (unsigned long)arg8; \
2037 _argvec[2+9] = (unsigned long)arg9; \
2038 __asm__ volatile( \
2039 "mr 11,%1\n\t" \
2040 "std 2,-16(11)\n\t" /* save tocptr */ \
2041 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2042 "addi 1,1,-128\n\t" /* expand stack frame */ \
2043 /* arg9 */ \
2044 "ld 3,72(11)\n\t" \
2045 "std 3,112(1)\n\t" \
2046 /* args1-8 */ \
2047 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2048 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2049 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2050 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2051 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2052 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2053 "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2054 "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2055 "ld 11, 0(11)\n\t" /* target->r11 */ \
2056 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2057 "mr 11,%1\n\t" \
2058 "mr %0,3\n\t" \
2059 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2060 "addi 1,1,128" /* restore frame */ \
2061 : /*out*/ "=r" (_res) \
2062 : /*in*/ "r" (&_argvec[2]) \
2063 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2064 ); \
2065 lval = (__typeof__(lval)) _res; \
2066 } while (0)
/* ppc64: 10-arg variant.  Args 1-8 in r3-r10; arg9 and arg10 are
   spilled to the expanded stack frame at sp+112 and sp+120 (stored
   last-first so r3 can be reused as scratch before it takes arg1). */
2068 #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2069 arg7,arg8,arg9,arg10) \
2070 do { \
2071 volatile OrigFn _orig = (orig); \
2072 volatile unsigned long _argvec[3+10]; \
2073 volatile unsigned long _res; \
2074 /* _argvec[0] holds current r2 across the call */ \
2075 _argvec[1] = (unsigned long)_orig.r2; \
2076 _argvec[2] = (unsigned long)_orig.nraddr; \
2077 _argvec[2+1] = (unsigned long)arg1; \
2078 _argvec[2+2] = (unsigned long)arg2; \
2079 _argvec[2+3] = (unsigned long)arg3; \
2080 _argvec[2+4] = (unsigned long)arg4; \
2081 _argvec[2+5] = (unsigned long)arg5; \
2082 _argvec[2+6] = (unsigned long)arg6; \
2083 _argvec[2+7] = (unsigned long)arg7; \
2084 _argvec[2+8] = (unsigned long)arg8; \
2085 _argvec[2+9] = (unsigned long)arg9; \
2086 _argvec[2+10] = (unsigned long)arg10; \
2087 __asm__ volatile( \
2088 "mr 11,%1\n\t" \
2089 "std 2,-16(11)\n\t" /* save tocptr */ \
2090 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2091 "addi 1,1,-128\n\t" /* expand stack frame */ \
2092 /* arg10 */ \
2093 "ld 3,80(11)\n\t" \
2094 "std 3,120(1)\n\t" \
2095 /* arg9 */ \
2096 "ld 3,72(11)\n\t" \
2097 "std 3,112(1)\n\t" \
2098 /* args1-8 */ \
2099 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2100 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2101 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2102 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2103 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2104 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2105 "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2106 "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2107 "ld 11, 0(11)\n\t" /* target->r11 */ \
2108 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2109 "mr 11,%1\n\t" \
2110 "mr %0,3\n\t" \
2111 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2112 "addi 1,1,128" /* restore frame */ \
2113 : /*out*/ "=r" (_res) \
2114 : /*in*/ "r" (&_argvec[2]) \
2115 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2116 ); \
2117 lval = (__typeof__(lval)) _res; \
2118 } while (0)
/* ppc64: 11-arg variant.  Args 1-8 in r3-r10; args 9-11 spilled to the
   stack at sp+112/120/128.  The frame grows by 144 (not 128) bytes to
   keep room for three spill slots while preserving 16-byte stack
   alignment. */
2120 #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2121 arg7,arg8,arg9,arg10,arg11) \
2122 do { \
2123 volatile OrigFn _orig = (orig); \
2124 volatile unsigned long _argvec[3+11]; \
2125 volatile unsigned long _res; \
2126 /* _argvec[0] holds current r2 across the call */ \
2127 _argvec[1] = (unsigned long)_orig.r2; \
2128 _argvec[2] = (unsigned long)_orig.nraddr; \
2129 _argvec[2+1] = (unsigned long)arg1; \
2130 _argvec[2+2] = (unsigned long)arg2; \
2131 _argvec[2+3] = (unsigned long)arg3; \
2132 _argvec[2+4] = (unsigned long)arg4; \
2133 _argvec[2+5] = (unsigned long)arg5; \
2134 _argvec[2+6] = (unsigned long)arg6; \
2135 _argvec[2+7] = (unsigned long)arg7; \
2136 _argvec[2+8] = (unsigned long)arg8; \
2137 _argvec[2+9] = (unsigned long)arg9; \
2138 _argvec[2+10] = (unsigned long)arg10; \
2139 _argvec[2+11] = (unsigned long)arg11; \
2140 __asm__ volatile( \
2141 "mr 11,%1\n\t" \
2142 "std 2,-16(11)\n\t" /* save tocptr */ \
2143 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2144 "addi 1,1,-144\n\t" /* expand stack frame */ \
2145 /* arg11 */ \
2146 "ld 3,88(11)\n\t" \
2147 "std 3,128(1)\n\t" \
2148 /* arg10 */ \
2149 "ld 3,80(11)\n\t" \
2150 "std 3,120(1)\n\t" \
2151 /* arg9 */ \
2152 "ld 3,72(11)\n\t" \
2153 "std 3,112(1)\n\t" \
2154 /* args1-8 */ \
2155 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2156 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2157 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2158 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2159 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2160 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2161 "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2162 "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2163 "ld 11, 0(11)\n\t" /* target->r11 */ \
2164 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2165 "mr 11,%1\n\t" \
2166 "mr %0,3\n\t" \
2167 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2168 "addi 1,1,144" /* restore frame */ \
2169 : /*out*/ "=r" (_res) \
2170 : /*in*/ "r" (&_argvec[2]) \
2171 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2172 ); \
2173 lval = (__typeof__(lval)) _res; \
2174 } while (0)
/* ppc64: 12-arg variant.  Args 1-8 in r3-r10; args 9-12 spilled to the
   stack at sp+112/120/128/136 within a 144-byte frame extension. */
2176 #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2177 arg7,arg8,arg9,arg10,arg11,arg12) \
2178 do { \
2179 volatile OrigFn _orig = (orig); \
2180 volatile unsigned long _argvec[3+12]; \
2181 volatile unsigned long _res; \
2182 /* _argvec[0] holds current r2 across the call */ \
2183 _argvec[1] = (unsigned long)_orig.r2; \
2184 _argvec[2] = (unsigned long)_orig.nraddr; \
2185 _argvec[2+1] = (unsigned long)arg1; \
2186 _argvec[2+2] = (unsigned long)arg2; \
2187 _argvec[2+3] = (unsigned long)arg3; \
2188 _argvec[2+4] = (unsigned long)arg4; \
2189 _argvec[2+5] = (unsigned long)arg5; \
2190 _argvec[2+6] = (unsigned long)arg6; \
2191 _argvec[2+7] = (unsigned long)arg7; \
2192 _argvec[2+8] = (unsigned long)arg8; \
2193 _argvec[2+9] = (unsigned long)arg9; \
2194 _argvec[2+10] = (unsigned long)arg10; \
2195 _argvec[2+11] = (unsigned long)arg11; \
2196 _argvec[2+12] = (unsigned long)arg12; \
2197 __asm__ volatile( \
2198 "mr 11,%1\n\t" \
2199 "std 2,-16(11)\n\t" /* save tocptr */ \
2200 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2201 "addi 1,1,-144\n\t" /* expand stack frame */ \
2202 /* arg12 */ \
2203 "ld 3,96(11)\n\t" \
2204 "std 3,136(1)\n\t" \
2205 /* arg11 */ \
2206 "ld 3,88(11)\n\t" \
2207 "std 3,128(1)\n\t" \
2208 /* arg10 */ \
2209 "ld 3,80(11)\n\t" \
2210 "std 3,120(1)\n\t" \
2211 /* arg9 */ \
2212 "ld 3,72(11)\n\t" \
2213 "std 3,112(1)\n\t" \
2214 /* args1-8 */ \
2215 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2216 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2217 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2218 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2219 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2220 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2221 "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2222 "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2223 "ld 11, 0(11)\n\t" /* target->r11 */ \
2224 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2225 "mr 11,%1\n\t" \
2226 "mr %0,3\n\t" \
2227 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2228 "addi 1,1,144" /* restore frame */ \
2229 : /*out*/ "=r" (_res) \
2230 : /*in*/ "r" (&_argvec[2]) \
2231 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2232 ); \
2233 lval = (__typeof__(lval)) _res; \
2234 } while (0)
2236 #endif /* ARCH_ppc64 */
2239 /* ------------------------------------------------------------------ */
2240 /* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
2241 /* */
2242 /* ------------------------------------------------------------------ */
2244 /* Some request codes. There are many more of these, but most are not
2245 exposed to end-user view. These are the public ones, all of the
2246 form 0x1000 + small_number.
2248 Core ones are in the range 0x00000000--0x0000ffff. The non-public
2249 ones start at 0x2000.
2252 /* These macros are used by tools -- they must be public, but don't
2253 embed them into other programs. */
/* Build a tool's request-code base: the two identifying characters a,b
   are packed into the top 16 bits of the 32-bit request code. */
2254 #define VG_USERREQ_TOOL_BASE(a,b) \
2255 ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
/* True iff request code v belongs to the tool identified by (a,b),
   i.e. v's top 16 bits equal that tool's base. */
2256 #define VG_IS_TOOL_USERREQ(a, b, v) \
2257 (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
2259 /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
2260 This enum comprises an ABI exported by Valgrind to programs
2261 which use client requests. DO NOT CHANGE THE ORDER OF THESE
2262 ENTRIES, NOR DELETE ANY -- add new ones at the end. */
/* Request codes passed (as the first word of the argument block) to
   VALGRIND_DO_CLIENT_REQUEST.  This enum is an exported ABI: entries
   must never be reordered or deleted, only appended. */
2263 typedef
2264 enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
2265 VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
2267 /* These allow any function to be called from the simulated
2268 CPU but run on the real CPU. Nb: the first arg passed to
2269 the function is always the ThreadId of the running
2270 thread! So CLIENT_CALL0 actually requires a 1 arg
2271 function, etc. */
2272 VG_USERREQ__CLIENT_CALL0 = 0x1101,
2273 VG_USERREQ__CLIENT_CALL1 = 0x1102,
2274 VG_USERREQ__CLIENT_CALL2 = 0x1103,
2275 VG_USERREQ__CLIENT_CALL3 = 0x1104,
2277 /* Can be useful in regression testing suites -- eg. can
2278 send Valgrind's output to /dev/null and still count
2279 errors. */
2280 VG_USERREQ__COUNT_ERRORS = 0x1201,
2282 /* These are useful and can be interpreted by any tool that
2283 tracks malloc() et al, by using vg_replace_malloc.c. */
2284 VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
2285 VG_USERREQ__FREELIKE_BLOCK = 0x1302,
2286 /* Memory pool support. */
2287 VG_USERREQ__CREATE_MEMPOOL = 0x1303,
2288 VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
2289 VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
2290 VG_USERREQ__MEMPOOL_FREE = 0x1306,
2292 /* Allow printfs to valgrind log. */
2293 VG_USERREQ__PRINTF = 0x1401,
2294 VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
2296 /* Stack support. */
2297 VG_USERREQ__STACK_REGISTER = 0x1501,
2298 VG_USERREQ__STACK_DEREGISTER = 0x1502,
2299 VG_USERREQ__STACK_CHANGE = 0x1503
2300 } Vg_ClientRequest;
/* __extension__ is a GNU keyword (silences pedantic warnings on the
   statement-expression macros below); define it away elsewhere. */
2302 #if !defined(__GNUC__)
2303 # define __extension__ /* */
2304 #endif
2306 /* Returns the number of Valgrinds this code is running under. That
2307 is, 0 if running natively, 1 if running under Valgrind, 2 if
2308 running under Valgrind which is running under another Valgrind,
2309 etc. */
/* Yields the Valgrind nesting depth: 0 natively, 1 under Valgrind, etc.
   Implemented as a GNU statement expression; defaults to 0 when the
   client request is a no-op (i.e. not running under Valgrind). */
2310 #define RUNNING_ON_VALGRIND __extension__ \
2311 ({unsigned int _qzz_res; \
2312 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */, \
2313 VG_USERREQ__RUNNING_ON_VALGRIND, \
2314 0, 0, 0, 0, 0); \
2315 _qzz_res; \
2319 /* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
2320 _qzz_len - 1]. Useful if you are debugging a JITter or some such,
2321 since it provides a way to make sure valgrind will retranslate the
2322 invalidated area. Returns no value. */
/* Ask Valgrind to drop its cached translations for the given address
   range so the code there is retranslated on next execution.  No
   result value; _qzz_res is only a scratch for the request macro. */
2323 #define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
2324 {unsigned int _qzz_res; \
2325 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
2326 VG_USERREQ__DISCARD_TRANSLATIONS, \
2327 _qzz_addr, _qzz_len, 0, 0, 0); \
2331 /* These requests are for getting Valgrind itself to print something.
2332 Possibly with a backtrace. This is a really ugly hack. */
2334 #if defined(NVALGRIND)
/* With NVALGRIND the printf helpers compile to nothing at all. */
2336 # define VALGRIND_PRINTF(...)
2337 # define VALGRIND_PRINTF_BACKTRACE(...)
2339 #else /* NVALGRIND */
2341 /* Modern GCC will optimize the static routine out if unused,
2342 and unused attribute will shut down warnings about it. */
/* printf-style message into the Valgrind log.  Returns the number of
   characters printed (0 when not running under Valgrind).  NOTE: the
   va_list is passed cast to unsigned long -- assumes it round-trips
   through a word on the ABIs Valgrind supports. */
2343 static int VALGRIND_PRINTF(const char *format, ...)
2344 __attribute__((format(__printf__, 1, 2), __unused__));
2345 static int
2346 VALGRIND_PRINTF(const char *format, ...)
2348 unsigned long _qzz_res;
2349 va_list vargs;
2350 va_start(vargs, format);
2351 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
2352 (unsigned long)format, (unsigned long)vargs,
2353 0, 0, 0);
2354 va_end(vargs);
2355 return (int)_qzz_res;
/* As VALGRIND_PRINTF, but Valgrind also appends a backtrace of the
   calling thread to the log.  Same va_list-as-word caveat applies. */
2358 static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
2359 __attribute__((format(__printf__, 1, 2), __unused__));
2360 static int
2361 VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
2363 unsigned long _qzz_res;
2364 va_list vargs;
2365 va_start(vargs, format);
2366 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
2367 (unsigned long)format, (unsigned long)vargs,
2368 0, 0, 0);
2369 va_end(vargs);
2370 return (int)_qzz_res;
2373 #endif /* NVALGRIND */
2376 /* These requests allow control to move from the simulated CPU to the
2377 real CPU, calling an arbitary function */
/* Run _qyy_fn on the real CPU.  Per the CLIENT_CALL note above, the
   function receives the running ThreadId as an implicit first arg, so
   CALL0 actually needs a 1-argument function.  Yields its result. */
2378 #define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
2379 __extension__ \
2380 ({unsigned long _qyy_res; \
2381 VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
2382 VG_USERREQ__CLIENT_CALL0, \
2383 _qyy_fn, \
2384 0, 0, 0, 0); \
2385 _qyy_res; \
/* Run _qyy_fn(ThreadId, _qyy_arg1) on the real CPU; yields its
   result. */
2388 #define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
2389 __extension__ \
2390 ({unsigned long _qyy_res; \
2391 VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
2392 VG_USERREQ__CLIENT_CALL1, \
2393 _qyy_fn, \
2394 _qyy_arg1, 0, 0, 0); \
2395 _qyy_res; \
/* Run _qyy_fn(ThreadId, _qyy_arg1, _qyy_arg2) on the real CPU; yields
   its result. */
2398 #define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
2399 __extension__ \
2400 ({unsigned long _qyy_res; \
2401 VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
2402 VG_USERREQ__CLIENT_CALL2, \
2403 _qyy_fn, \
2404 _qyy_arg1, _qyy_arg2, 0, 0); \
2405 _qyy_res; \
/* Run _qyy_fn(ThreadId, arg1, arg2, arg3) on the real CPU; yields its
   result. */
2408 #define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
2409 __extension__ \
2410 ({unsigned long _qyy_res; \
2411 VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
2412 VG_USERREQ__CLIENT_CALL3, \
2413 _qyy_fn, \
2414 _qyy_arg1, _qyy_arg2, \
2415 _qyy_arg3, 0); \
2416 _qyy_res; \
2420 /* Counts the number of errors that have been recorded by a tool. Nb:
2421 the tool must record the errors with VG_(maybe_record_error)() or
2422 VG_(unique_error)() for them to be counted. */
/* Yields the number of errors the tool has recorded so far (0 when not
   under Valgrind, or if the tool doesn't record errors). */
2423 #define VALGRIND_COUNT_ERRORS \
2424 __extension__ \
2425 ({unsigned int _qyy_res; \
2426 VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
2427 VG_USERREQ__COUNT_ERRORS, \
2428 0, 0, 0, 0, 0); \
2429 _qyy_res; \
2432 /* Mark a block of memory as having been allocated by a malloc()-like
2433 function. `addr' is the start of the usable block (ie. after any
2434 redzone) `rzB' is redzone size if the allocator can apply redzones;
2435 use '0' if not. Adding redzones makes it more likely Valgrind will spot
2436 block overruns. `is_zeroed' indicates if the memory is zeroed, as it is
2437 for calloc(). Put it immediately after the point where a block is
2438 allocated.
2440 If you're allocating memory via superblocks, and then handing out small
2441 chunks of each superblock, if you don't have redzones on your small
2442 blocks, it's worth marking the superblock with VALGRIND_MAKE_MEM_NOACCESS
2443 when it's created, so that block overruns are detected. But if you can
2444 put redzones on, it's probably better to not do this, so that messages
2445 for small overruns are described in terms of the small block rather than
2446 the superblock (but if you have a big overrun that skips over a redzone,
2447 you could miss an error this way). See memcheck/tests/custom_alloc.c
2448 for an example.
2450 WARNING: if your allocator uses malloc() or 'new' to allocate
2451 superblocks, rather than mmap() or brk(), this will not work properly --
2452 you'll likely get assertion failures during leak detection. This is
2453 because Valgrind doesn't like seeing overlapping heap blocks. Sorry.
2455 Nb: block must be freed via a free()-like function specified
2456 with VALGRIND_FREELIKE_BLOCK or mismatch errors will occur. */
/* Tell malloc-tracking tools that [addr, addr+sizeB) is a live heap
   block with redzone size rzB; is_zeroed says whether the memory is
   defined (calloc-like).  See the long usage notes above. */
2457 #define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
2458 {unsigned int _qzz_res; \
2459 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
2460 VG_USERREQ__MALLOCLIKE_BLOCK, \
2461 addr, sizeB, rzB, is_zeroed, 0); \
2464 /* Mark a block of memory as having been freed by a free()-like function.
2465 `rzB' is redzone size; it must match that given to
2466 VALGRIND_MALLOCLIKE_BLOCK. Memory not freed will be detected by the leak
2467 checker. Put it immediately after the point where the block is freed. */
/* Tell malloc-tracking tools the block at addr was freed; rzB must
   match the value given to VALGRIND_MALLOCLIKE_BLOCK. */
2468 #define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
2469 {unsigned int _qzz_res; \
2470 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
2471 VG_USERREQ__FREELIKE_BLOCK, \
2472 addr, rzB, 0, 0, 0); \
2475 /* Create a memory pool. */
/* Register a memory pool anchored at `pool', with per-chunk redzone
   rzB; is_zeroed says chunks are handed out zero-filled. */
2476 #define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
2477 {unsigned int _qzz_res; \
2478 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
2479 VG_USERREQ__CREATE_MEMPOOL, \
2480 pool, rzB, is_zeroed, 0, 0); \
2483 /* Destroy a memory pool. */
/* Deregister the memory pool anchored at `pool'. */
2484 #define VALGRIND_DESTROY_MEMPOOL(pool) \
2485 {unsigned int _qzz_res; \
2486 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
2487 VG_USERREQ__DESTROY_MEMPOOL, \
2488 pool, 0, 0, 0, 0); \
2491 /* Associate a piece of memory with a memory pool. */
/* Record that [addr, addr+size) was handed out from pool `pool'. */
2492 #define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
2493 {unsigned int _qzz_res; \
2494 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
2495 VG_USERREQ__MEMPOOL_ALLOC, \
2496 pool, addr, size, 0, 0); \
2499 /* Disassociate a piece of memory from a memory pool. */
/* Record that the chunk at addr was returned to pool `pool'. */
2500 #define VALGRIND_MEMPOOL_FREE(pool, addr) \
2501 {unsigned int _qzz_res; \
2502 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
2503 VG_USERREQ__MEMPOOL_FREE, \
2504 pool, addr, 0, 0, 0); \
2507 /* Mark a piece of memory as being a stack. Returns a stack id. */
/* Register [start, end) as a stack; yields a stack id usable with
   VALGRIND_STACK_DEREGISTER / VALGRIND_STACK_CHANGE below. */
2508 #define VALGRIND_STACK_REGISTER(start, end) \
2509 ({unsigned int _qzz_res; \
2510 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
2511 VG_USERREQ__STACK_REGISTER, \
2512 start, end, 0, 0, 0); \
2513 _qzz_res; \
2516 /* Unmark the piece of memory associated with a stack id as being a
2517 stack. */
/* Forget the stack previously registered under `id'. */
2518 #define VALGRIND_STACK_DEREGISTER(id) \
2519 {unsigned int _qzz_res; \
2520 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
2521 VG_USERREQ__STACK_DEREGISTER, \
2522 id, 0, 0, 0, 0); \
2525 /* Change the start and end address of the stack id. */
/* Update the bounds of registered stack `id' to [start, end). */
2526 #define VALGRIND_STACK_CHANGE(id, start, end) \
2527 {unsigned int _qzz_res; \
2528 VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
2529 VG_USERREQ__STACK_CHANGE, \
2530 id, start, end, 0, 0); \
2534 #undef ARCH_x86
2535 #undef ARCH_amd64
2536 #undef ARCH_ppc32
2537 #undef ARCH_ppc64
2539 #endif /* __VALGRIND_H */