/*--------------------------------------------------------------------*/
/*--- Interface to LibVEX_Translate, and the SP-update pass       ---*/
/*---                                                m_translate.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_aspacemgr.h"

#include "pub_core_machine.h"    // VG_(fnptr_to_fnentry)
                                 // VG_(get_SP)
                                 // VG_(machine_get_VexArchInfo)
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_options.h"

#include "pub_core_debuginfo.h"  // VG_(get_fnname_w_offset)
#include "pub_core_redir.h"      // VG_(redir_do_lookup)

#include "pub_core_signals.h"    // VG_(synth_fault_{perms,mapping})
#include "pub_core_stacks.h"     // VG_(unknown_SP_update*)()
#include "pub_core_tooliface.h"  // VG_(tdict)

#include "pub_core_translate.h"
#include "pub_core_transtab.h"
#include "pub_core_dispatch.h"   // VG_(run_innerloop__dispatch_{un}profiled)
                                 // VG_(run_a_noredir_translation__return_point)

#include "pub_core_threadstate.h"  // VexGuestArchState
#include "pub_core_trampoline.h"   // VG_(ppctoc_magic_redirect_return_stub)

#include "pub_core_execontext.h"  // VG_(make_depth_1_ExeContext_from_Addr)

#include "pub_core_gdbserver.h"   // VG_(instrument_for_gdbserver_if_needed)

#include "libvex_emnote.h"        // For PPC, EmWarn_PPC64_redir_underflow
/*------------------------------------------------------------*/
/*--- Stats                                                 ---*/
/*------------------------------------------------------------*/

static ULong n_TRACE_total_constructed = 0;
static ULong n_TRACE_total_guest_insns = 0;
static ULong n_TRACE_total_uncond_branches_followed = 0;
static ULong n_TRACE_total_cond_branches_followed = 0;

static ULong n_SP_updates_new_fast            = 0;
static ULong n_SP_updates_new_generic_known   = 0;
static ULong n_SP_updates_die_fast            = 0;
static ULong n_SP_updates_die_generic_known   = 0;
static ULong n_SP_updates_generic_unknown     = 0;

static ULong n_PX_VexRegUpdSpAtMemAccess         = 0;
static ULong n_PX_VexRegUpdUnwindregsAtMemAccess = 0;
static ULong n_PX_VexRegUpdAllregsAtMemAccess    = 0;
static ULong n_PX_VexRegUpdAllregsAtEachInsn     = 0;
void VG_(print_translation_stats) ( void )
{
   VG_(message)
      (Vg_DebugMsg,
       "translate: %'llu guest insns, %'llu traces, "
       "%'llu uncond chased, %'llu cond chased\n",
       n_TRACE_total_guest_insns, n_TRACE_total_constructed,
       n_TRACE_total_uncond_branches_followed,
       n_TRACE_total_cond_branches_followed);

   /* Use a 64-bit accumulator: the individual counters are ULong and
      their sum could overflow a 32-bit type on a long run. */
   ULong n_SP_updates = n_SP_updates_new_fast + n_SP_updates_new_generic_known
                        + n_SP_updates_die_fast + n_SP_updates_die_generic_known
                        + n_SP_updates_generic_unknown;
   if (n_SP_updates == 0) {
      VG_(message)(Vg_DebugMsg, "translate: no SP updates identified\n");
   } else {
      VG_(message)(Vg_DebugMsg,
         "translate: fast new/die SP updates identified: "
         "%'llu (%3.1f%%)/%'llu (%3.1f%%)\n",
         n_SP_updates_new_fast, n_SP_updates_new_fast * 100.0 / n_SP_updates,
         n_SP_updates_die_fast, n_SP_updates_die_fast * 100.0 / n_SP_updates );

      VG_(message)(Vg_DebugMsg,
         "translate: generic_known new/die SP updates identified: "
         "%'llu (%3.1f%%)/%'llu (%3.1f%%)\n",
         n_SP_updates_new_generic_known,
         n_SP_updates_new_generic_known * 100.0 / n_SP_updates,
         n_SP_updates_die_generic_known,
         n_SP_updates_die_generic_known * 100.0 / n_SP_updates );

      VG_(message)(Vg_DebugMsg,
         "translate: generic_unknown SP updates identified: %'llu (%3.1f%%)\n",
         n_SP_updates_generic_unknown,
         n_SP_updates_generic_unknown * 100.0 / n_SP_updates );
   }

   VG_(message)
      (Vg_DebugMsg,
       "translate: PX: SPonly %'llu,  UnwRegs %'llu,"
       "  AllRegs %'llu,  AllRegsAllInsns %'llu\n",
       n_PX_VexRegUpdSpAtMemAccess, n_PX_VexRegUpdUnwindregsAtMemAccess,
       n_PX_VexRegUpdAllregsAtMemAccess, n_PX_VexRegUpdAllregsAtEachInsn);
}
/*------------------------------------------------------------*/
/*--- %SP-update pass                                       ---*/
/*------------------------------------------------------------*/

static Bool need_to_handle_SP_assignment(void)
{
   return VG_(tdict).any_new_mem_stack || VG_(tdict).any_die_mem_stack;
}
// - The SP aliases are held in an array which is used as a circular buffer.
//   This misses very few constant updates of SP (ie. < 0.1%) while using a
//   small, constant structure that will also never fill up and cause
//   execution to abort.
// - Unused slots have a .temp value of 'IRTemp_INVALID'.
// - 'next_SP_alias_slot' is the index where the next alias will be stored.
// - If the buffer fills, we circle around and start over-writing
//   non-IRTemp_INVALID values.  This is rare, and the overwriting of a
//   value that would have subsequently been used is even rarer.
// - Every slot below next_SP_alias_slot holds a non-IRTemp_INVALID value.
//   The rest either all won't (if we haven't yet circled around) or all
//   will (if we have circled around).
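//
// Illustrative example (not from the original source): after the pass sees
//    t3 = GET:I64(offset_SP)      -- recorded via add_SP_alias(t3, 0)
//    t5 = Add64(t3, 0x10:I64)     -- recorded via add_SP_alias(t5, 16)
// the buffer holds (t3,0) and (t5,16), so a later use of t5 can be
// recognised by get_SP_delta as "SP plus 16" without any re-analysis.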
typedef
   struct {
      IRTemp temp;
      Long   delta;
   }
   SP_Alias;

// With 32 slots the buffer fills very rarely -- eg. once in a run of GCC.
// And I've tested with smaller values and the wrap-around case works ok.
#define N_ALIASES 32
static SP_Alias SP_aliases[N_ALIASES];
static Int      next_SP_alias_slot = 0;
static void clear_SP_aliases(void)
{
   Int i;
   for (i = 0; i < N_ALIASES; i++) {
      SP_aliases[i].temp  = IRTemp_INVALID;
      SP_aliases[i].delta = 0;
   }
   next_SP_alias_slot = 0;
}

static void add_SP_alias(IRTemp temp, Long delta)
{
   vg_assert(temp != IRTemp_INVALID);
   SP_aliases[ next_SP_alias_slot ].temp  = temp;
   SP_aliases[ next_SP_alias_slot ].delta = delta;
   next_SP_alias_slot++;
   if (N_ALIASES == next_SP_alias_slot) next_SP_alias_slot = 0;
}

static Bool get_SP_delta(IRTemp temp, Long* delta)
{
   Int i;      // i must be signed!
   vg_assert(IRTemp_INVALID != temp);
   // Search backwards between current buffer position and the start.
   for (i = next_SP_alias_slot-1; i >= 0; i--) {
      if (temp == SP_aliases[i].temp) {
         *delta = SP_aliases[i].delta;
         return True;
      }
   }
   // Search backwards between the end and the current buffer position.
   for (i = N_ALIASES-1; i >= next_SP_alias_slot; i--) {
      if (temp == SP_aliases[i].temp) {
         *delta = SP_aliases[i].delta;
         return True;
      }
   }
   return False;
}

static void update_SP_aliases(Long delta)
{
   Int i;
   for (i = 0; i < N_ALIASES; i++) {
      if (SP_aliases[i].temp == IRTemp_INVALID) {
         return;
      }
      SP_aliases[i].delta += delta;
   }
}
/* Given a guest IP, get an origin tag for a 1-element stack trace,
   and wrap it up in an IR atom that can be passed as the origin-tag
   value for a stack-adjustment helper function. */
static IRExpr* mk_ecu_Expr ( Addr guest_IP )
{
   UInt ecu;
   ExeContext* ec
      = VG_(make_depth_1_ExeContext_from_Addr)( guest_IP );
   vg_assert(ec);
   ecu = VG_(get_ECU_from_ExeContext)( ec );
   vg_assert(VG_(is_plausible_ECU)(ecu));
   /* This is always safe to do, since ecu is only 32 bits, and
      HWord is 32 or 64. */
   return mkIRExpr_HWord( (HWord)ecu );
}
/* When gdbserver is activated, the translation of a block must
   first be done by the tool function, then followed by a pass
   which (if needed) instruments the code for gdbserver.
*/
static
IRSB* tool_instrument_then_gdbserver_if_needed ( VgCallbackClosure* closureV,
                                                 IRSB*              sb_in,
                                                 const VexGuestLayout*  layout,
                                                 const VexGuestExtents* vge,
                                                 const VexArchInfo* vai,
                                                 IRType             gWordTy,
                                                 IRType             hWordTy )
{
   return VG_(instrument_for_gdbserver_if_needed)
      (VG_(tdict).tool_instrument (closureV,
                                   sb_in,
                                   layout,
                                   vge,
                                   vai,
                                   gWordTy,
                                   hWordTy),
       layout,
       vge,
       gWordTy,
       hWordTy);
}
/* For tools that want to know about SP changes, this pass adds
   in the appropriate hooks.  We have to do it after the tool's
   instrumentation, so the tool doesn't have to worry about the C calls
   it adds in, and we must do it before register allocation because
   spilled temps make it much harder to work out the SP deltas.
   Thus it is done with Vex's "second instrumentation" pass.

   Basically, we look for GET(SP)/PUT(SP) pairs and track constant
   increments/decrements of SP between them.  (This requires tracking one or
   more "aliases", which are not exact aliases but instead are tempregs
   whose value is equal to the SP's plus or minus a known constant.)
   If all the changes to SP leading up to a PUT(SP) are by known, small
   constants, we can do a specific call to eg. new_mem_stack_4, otherwise
   we fall back to the case that handles an unknown SP change.

   There is some extra complexity to deal correctly with updates to
   only parts of SP.  Bizarre, but it has been known to happen.
*/
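/* Illustrative example (not from the original source): for a guest insn
   like "subq $16, %rsp" on amd64, the incoming IR is roughly

      t3 = GET:I64(offset_SP)
      t4 = Sub64(t3,0x10:I64)
      PUT(offset_SP) = t4

   The pass records t3 with delta 0 and t4 with delta -16, so at the PUT
   it can emit a dirty call to the tool's track_new_mem_stack_16 hook
   (the "fast" case below) rather than falling back to the generic
   unknown-SP-update helper. */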
static
IRSB* vg_SP_update_pass ( void*             closureV,
                          IRSB*             sb_in,
                          const VexGuestLayout*  layout,
                          const VexGuestExtents* vge,
                          const VexArchInfo* vai,
                          IRType            gWordTy,
                          IRType            hWordTy )
{
   Int         i, j, k, minoff_ST, maxoff_ST, sizeof_SP, offset_SP;
   Int         first_SP, last_SP, first_Put, last_Put;
   IRDirty     *dcall, *d;
   IRStmt*     st;
   IRExpr*     e;
   IRRegArray* descr;
   IRType      typeof_SP;
   Long        delta, con;

   /* Set up stuff for tracking the guest IP */
   Bool   curr_IP_known = False;
   Addr   curr_IP       = 0;

   /* Set up BB */
   IRSB* bb     = emptyIRSB();
   bb->tyenv    = deepCopyIRTypeEnv(sb_in->tyenv);
   bb->next     = deepCopyIRExpr(sb_in->next);
   bb->jumpkind = sb_in->jumpkind;
   bb->offsIP   = sb_in->offsIP;

   delta = 0;

   sizeof_SP = layout->sizeof_SP;
   offset_SP = layout->offset_SP;
   typeof_SP = sizeof_SP==4 ? Ity_I32 : Ity_I64;
   vg_assert(sizeof_SP == 4 || sizeof_SP == 8);
   /* --- Start of #defines --- */

#  define IS_ADD(op) (sizeof_SP==4 ? ((op)==Iop_Add32) : ((op)==Iop_Add64))
#  define IS_SUB(op) (sizeof_SP==4 ? ((op)==Iop_Sub32) : ((op)==Iop_Sub64))

#  define IS_ADD_OR_SUB(op) (IS_ADD(op) || IS_SUB(op))

#  define GET_CONST(con)                      \
       (sizeof_SP==4 ? (Long)(Int)(con->Ico.U32) \
                     : (Long)(con->Ico.U64))
#  define DO_NEW(syze, tmpp)                                               \
      do {                                                                 \
         Bool vanilla, w_ecu;                                              \
         vg_assert(curr_IP_known);                                         \
         vanilla = NULL != VG_(tdict).track_new_mem_stack_##syze;          \
         w_ecu   = NULL != VG_(tdict).track_new_mem_stack_##syze##_w_ECU;  \
         vg_assert(!(vanilla && w_ecu)); /* can't have both */             \
         if (VG_(tdict).any_new_mem_stack                                  \
             && !vanilla && !w_ecu) {                                      \
            n_SP_updates_new_generic_known++;                              \
            goto generic;                                                  \
         }                                                                 \
                                                                           \
         if (VG_(tdict).any_new_mem_stack) {                               \
            /* I don't know if it's really necessary to say that the */    \
            /* call reads the stack pointer.  But anyway, we do. */        \
            if (w_ecu) {                                                   \
               dcall = unsafeIRDirty_0_N(                                  \
                          2/*regparms*/,                                   \
                          "track_new_mem_stack_" #syze "_w_ECU",           \
                          VG_(fnptr_to_fnentry)(                           \
                             VG_(tdict).track_new_mem_stack_##syze##_w_ECU ), \
                          mkIRExprVec_2(IRExpr_RdTmp(tmpp),                \
                                        mk_ecu_Expr(curr_IP))              \
                       );                                                  \
            } else {                                                       \
               dcall = unsafeIRDirty_0_N(                                  \
                          1/*regparms*/,                                   \
                          "track_new_mem_stack_" #syze,                    \
                          VG_(fnptr_to_fnentry)(                           \
                             VG_(tdict).track_new_mem_stack_##syze ),      \
                          mkIRExprVec_1(IRExpr_RdTmp(tmpp))                \
                       );                                                  \
            }                                                              \
            dcall->nFxState = 1;                                           \
            dcall->fxState[0].fx     = Ifx_Read;                           \
            dcall->fxState[0].offset = layout->offset_SP;                  \
            dcall->fxState[0].size   = layout->sizeof_SP;                  \
            dcall->fxState[0].nRepeats  = 0;                               \
            dcall->fxState[0].repeatLen = 0;                               \
                                                                           \
            addStmtToIRSB( bb, IRStmt_Dirty(dcall) );                      \
         }                                                                 \
                                                                           \
         vg_assert(syze > 0);                                              \
         update_SP_aliases(syze);                                          \
                                                                           \
         n_SP_updates_new_fast++;                                          \
                                                                           \
      } while (0)
#  define DO_DIE(syze, tmpp)                                               \
      do {                                                                 \
         if (VG_(tdict).any_die_mem_stack                                  \
             && !VG_(tdict).track_die_mem_stack_##syze) {                  \
            n_SP_updates_die_generic_known++;                              \
            goto generic;                                                  \
         }                                                                 \
                                                                           \
         if (VG_(tdict).any_die_mem_stack) {                               \
            /* I don't know if it's really necessary to say that the */    \
            /* call reads the stack pointer.  But anyway, we do. */        \
            dcall = unsafeIRDirty_0_N(                                     \
                       1/*regparms*/,                                      \
                       "track_die_mem_stack_" #syze,                       \
                       VG_(fnptr_to_fnentry)(                              \
                          VG_(tdict).track_die_mem_stack_##syze ),         \
                       mkIRExprVec_1(IRExpr_RdTmp(tmpp))                   \
                    );                                                     \
            dcall->nFxState = 1;                                           \
            dcall->fxState[0].fx     = Ifx_Read;                           \
            dcall->fxState[0].offset = layout->offset_SP;                  \
            dcall->fxState[0].size   = layout->sizeof_SP;                  \
            dcall->fxState[0].nRepeats  = 0;                               \
            dcall->fxState[0].repeatLen = 0;                               \
                                                                           \
            addStmtToIRSB( bb, IRStmt_Dirty(dcall) );                      \
         }                                                                 \
                                                                           \
         vg_assert(syze > 0);                                              \
         update_SP_aliases(-(syze));                                       \
                                                                           \
         n_SP_updates_die_fast++;                                          \
                                                                           \
      } while (0)
   /* --- End of #defines --- */
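   /* Illustrative note (not from the original source): when the tool has
      registered a size-specific hook, DO_NEW(16, t) appends a dirty call
      to track_new_mem_stack_16(t) to |bb|, marks the call as reading SP
      via its fxState, and applies update_SP_aliases(16) so outstanding
      aliases stay correct.  If the tool registered only the generic
      new_mem_stack handler, control transfers to the |generic| label
      below instead. */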
   clear_SP_aliases();

   for (i = 0; i < sb_in->stmts_used; i++) {

      st = sb_in->stmts[i];

      if (st->tag == Ist_IMark) {
         curr_IP_known = True;
         curr_IP       = st->Ist.IMark.addr;
      }

      /* t = Get(sp):   curr = t, delta = 0 */
      if (st->tag != Ist_WrTmp) goto case2;
      e = st->Ist.WrTmp.data;
      if (e->tag != Iex_Get)              goto case2;
      if (e->Iex.Get.offset != offset_SP) goto case2;
      if (e->Iex.Get.ty != typeof_SP)     goto case2;
      vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
      add_SP_alias(st->Ist.WrTmp.tmp, 0);
      addStmtToIRSB( bb, st );
      continue;

     case2:
      /* t' = curr +/- const:   curr = t',  delta +=/-= const */
      if (st->tag != Ist_WrTmp) goto case3;
      e = st->Ist.WrTmp.data;
      if (e->tag != Iex_Binop) goto case3;
      if (e->Iex.Binop.arg1->tag != Iex_RdTmp) goto case3;
      if (!get_SP_delta(e->Iex.Binop.arg1->Iex.RdTmp.tmp, &delta)) goto case3;
      if (e->Iex.Binop.arg2->tag != Iex_Const) goto case3;
      if (!IS_ADD_OR_SUB(e->Iex.Binop.op)) goto case3;
      con = GET_CONST(e->Iex.Binop.arg2->Iex.Const.con);
      vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
      if (IS_ADD(e->Iex.Binop.op)) {
         add_SP_alias(st->Ist.WrTmp.tmp, delta + con);
      } else {
         add_SP_alias(st->Ist.WrTmp.tmp, delta - con);
      }
      addStmtToIRSB( bb, st );
      continue;

     case3:
      /* t' = curr:   curr = t' */
      if (st->tag != Ist_WrTmp) goto case4;
      e = st->Ist.WrTmp.data;
      if (e->tag != Iex_RdTmp) goto case4;
      if (!get_SP_delta(e->Iex.RdTmp.tmp, &delta)) goto case4;
      vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
      add_SP_alias(st->Ist.WrTmp.tmp, delta);
      addStmtToIRSB( bb, st );
      continue;
     case4:
      /* Put(sp) = curr */
      /* More generally, we must correctly handle a Put which writes
         any part of SP, not just the case where all of SP is
         written. */
      if (st->tag != Ist_Put) goto case5;
      first_SP  = offset_SP;
      last_SP   = first_SP + sizeof_SP - 1;
      first_Put = st->Ist.Put.offset;
      last_Put  = first_Put
                  + sizeofIRType( typeOfIRExpr( bb->tyenv, st->Ist.Put.data ))
                  - 1;
      vg_assert(first_SP <= last_SP);
      vg_assert(first_Put <= last_Put);

      if (last_Put < first_SP || last_SP < first_Put)
         goto case5; /* no overlap */

      if (st->Ist.Put.data->tag == Iex_RdTmp
          && get_SP_delta(st->Ist.Put.data->Iex.RdTmp.tmp, &delta)) {
         IRTemp tttmp = st->Ist.Put.data->Iex.RdTmp.tmp;
         /* Why should the following assertion hold?  Because any
            alias added by add_SP_alias must be of a temporary which
            has the same type as typeof_SP, and whose value is a Get
            at exactly offset_SP of size typeof_SP.  Each call to
            add_SP_alias is immediately preceded by an assertion that
            we are putting in a binding for a correctly-typed
            temporary. */
         vg_assert( typeOfIRTemp(bb->tyenv, tttmp) == typeof_SP );
         /* From the same type-and-offset-correctness argument, if
            we found a useable alias, it must be for an "exact" write of SP. */
         vg_assert(first_SP == first_Put);
         vg_assert(last_SP == last_Put);
         switch (delta) {
            case    0:                       addStmtToIRSB(bb,st); continue;
            case    4: DO_DIE(   4, tttmp);  addStmtToIRSB(bb,st); continue;
            case   -4: DO_NEW(   4, tttmp);  addStmtToIRSB(bb,st); continue;
            case    8: DO_DIE(   8, tttmp);  addStmtToIRSB(bb,st); continue;
            case   -8: DO_NEW(   8, tttmp);  addStmtToIRSB(bb,st); continue;
            case   12: DO_DIE(  12, tttmp);  addStmtToIRSB(bb,st); continue;
            case  -12: DO_NEW(  12, tttmp);  addStmtToIRSB(bb,st); continue;
            case   16: DO_DIE(  16, tttmp);  addStmtToIRSB(bb,st); continue;
            case  -16: DO_NEW(  16, tttmp);  addStmtToIRSB(bb,st); continue;
            case   32: DO_DIE(  32, tttmp);  addStmtToIRSB(bb,st); continue;
            case  -32: DO_NEW(  32, tttmp);  addStmtToIRSB(bb,st); continue;
            case  112: DO_DIE( 112, tttmp);  addStmtToIRSB(bb,st); continue;
            case -112: DO_NEW( 112, tttmp);  addStmtToIRSB(bb,st); continue;
            case  128: DO_DIE( 128, tttmp);  addStmtToIRSB(bb,st); continue;
            case -128: DO_NEW( 128, tttmp);  addStmtToIRSB(bb,st); continue;
            case  144: DO_DIE( 144, tttmp);  addStmtToIRSB(bb,st); continue;
            case -144: DO_NEW( 144, tttmp);  addStmtToIRSB(bb,st); continue;
            case  160: DO_DIE( 160, tttmp);  addStmtToIRSB(bb,st); continue;
            case -160: DO_NEW( 160, tttmp);  addStmtToIRSB(bb,st); continue;
            default:
               if (delta > 0) {
                  n_SP_updates_die_generic_known++;
                  if (VG_(tdict).any_die_mem_stack)
                     goto generic;
               } else {
                  n_SP_updates_new_generic_known++;
                  if (VG_(tdict).any_new_mem_stack)
                     goto generic;
               }
               /* No tracking for delta.  Just add the original statement. */
               addStmtToIRSB(bb,st); continue;
         }
      } else {
         /* Deal with an unknown update to SP.  We're here because
            either:
            (1) the Put does not exactly cover SP; it is a partial update.
                Highly unlikely, but has been known to happen for 16-bit
                Windows apps running on Wine, doing 16-bit adjustments to
                %sp.
            (2) the Put does exactly cover SP, but we are unable to
                determine how the value relates to the old SP.  In any
                case, we cannot assume that the Put.data value is a tmp;
                we must assume it can be anything allowed in flat IR (tmp
                or const).
         */
         IRTemp  old_SP;
         n_SP_updates_generic_unknown++;

         // Nb: if all is well, this generic case will typically be
         // called something like every 1000th SP update.  If it's more than
         // that, the above code may be missing some cases.
        generic:
         /* Pass both the old and new SP values to this helper.  Also,
            pass an origin tag, even if it isn't needed. */
         old_SP = newIRTemp(bb->tyenv, typeof_SP);
         addStmtToIRSB(
            bb,
            IRStmt_WrTmp( old_SP, IRExpr_Get(offset_SP, typeof_SP) )
         );
         /* Now we know what the old value of SP is.  But knowing the new
            value is a bit tricky if there is a partial write. */
         if (first_Put == first_SP && last_Put == last_SP) {
            /* The common case, an exact write to SP.  So st->Ist.Put.data
               does hold the new value; simple. */
            vg_assert(curr_IP_known);
            if (NULL != VG_(tdict).track_new_mem_stack_w_ECU)
               dcall = unsafeIRDirty_0_N(
                          3/*regparms*/,
                          "VG_(unknown_SP_update_w_ECU)",
                          VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update_w_ECU) ),
                          mkIRExprVec_3( IRExpr_RdTmp(old_SP), st->Ist.Put.data,
                                         mk_ecu_Expr(curr_IP) )
                       );
            else
               dcall = unsafeIRDirty_0_N(
                          2/*regparms*/,
                          "VG_(unknown_SP_update)",
                          VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update) ),
                          mkIRExprVec_2( IRExpr_RdTmp(old_SP), st->Ist.Put.data )
                       );

            addStmtToIRSB( bb, IRStmt_Dirty(dcall) );
            /* don't forget the original assignment */
            addStmtToIRSB( bb, st );
         } else {
            /* We have a partial update to SP.  We need to know what
               the new SP will be, and hand that to the helper call,
               but when the helper call happens, SP must hold the
               value it had before the update.  Tricky.
               Therefore use the following kludge:
               1. do the partial SP update (Put)
               2. Get the new SP value into a tmp, new_SP
               3. Put old_SP
               4. Call the helper
               5. Put new_SP
            */
            IRTemp new_SP;
            /* 1 */
            addStmtToIRSB( bb, st );
            /* 2 */
            new_SP = newIRTemp(bb->tyenv, typeof_SP);
            addStmtToIRSB(
               bb,
               IRStmt_WrTmp( new_SP, IRExpr_Get(offset_SP, typeof_SP) )
            );
            /* 3 */
            addStmtToIRSB( bb, IRStmt_Put(offset_SP, IRExpr_RdTmp(old_SP) ));
            /* 4 */
            vg_assert(curr_IP_known);
            if (NULL != VG_(tdict).track_new_mem_stack_w_ECU)
               dcall = unsafeIRDirty_0_N(
                          3/*regparms*/,
                          "VG_(unknown_SP_update_w_ECU)",
                          VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update_w_ECU) ),
                          mkIRExprVec_3( IRExpr_RdTmp(old_SP),
                                         IRExpr_RdTmp(new_SP),
                                         mk_ecu_Expr(curr_IP) )
                       );
            else
               dcall = unsafeIRDirty_0_N(
                          2/*regparms*/,
                          "VG_(unknown_SP_update)",
                          VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update) ),
                          mkIRExprVec_2( IRExpr_RdTmp(old_SP),
                                         IRExpr_RdTmp(new_SP) )
                       );
            addStmtToIRSB( bb, IRStmt_Dirty(dcall) );
            /* 5 */
            addStmtToIRSB( bb, IRStmt_Put(offset_SP, IRExpr_RdTmp(new_SP) ));
         }

         /* Forget what we already know. */
         clear_SP_aliases();

         /* If this is a Put of a tmp that exactly updates SP,
            start tracking aliases against this tmp. */

         if (first_Put == first_SP && last_Put == last_SP
             && st->Ist.Put.data->tag == Iex_RdTmp) {
            vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.Put.data->Iex.RdTmp.tmp)
                       == typeof_SP );
            add_SP_alias(st->Ist.Put.data->Iex.RdTmp.tmp, 0);
         }
         continue;

      }
     case5:
      /* PutI or Dirty call which overlaps SP: complain.  We can't
         deal with SP changing in weird ways (well, we can, but not at
         this time of night). */
      if (st->tag == Ist_PutI) {
         descr = st->Ist.PutI.details->descr;
         minoff_ST = descr->base;
         maxoff_ST = descr->base
                     + descr->nElems * sizeofIRType(descr->elemTy) - 1;
         if (!(offset_SP > maxoff_ST
               || (offset_SP + sizeof_SP - 1) < minoff_ST))
            goto complain;
      }
      if (st->tag == Ist_Dirty) {
         d = st->Ist.Dirty.details;
         for (j = 0; j < d->nFxState; j++) {
            if (d->fxState[j].fx == Ifx_Read || d->fxState[j].fx == Ifx_None)
               continue;
            /* Enumerate the described state segments */
            for (k = 0; k < 1 + d->fxState[j].nRepeats; k++) {
               minoff_ST = d->fxState[j].offset + k * d->fxState[j].repeatLen;
               maxoff_ST = minoff_ST + d->fxState[j].size - 1;
               if (!(offset_SP > maxoff_ST
                     || (offset_SP + sizeof_SP - 1) < minoff_ST))
                  goto complain;
            }
         }
      }

      /* well, not interesting.  Just copy and keep going. */
      addStmtToIRSB( bb, st );

   } /* for (i = 0; i < sb_in->stmts_used; i++) */

   return bb;

  complain:
   VG_(core_panic)("vg_SP_update_pass: PutI or Dirty which overlaps SP");
#  undef IS_ADD
#  undef IS_SUB
#  undef IS_ADD_OR_SUB
#  undef GET_CONST
#  undef DO_NEW
#  undef DO_DIE
}
/*------------------------------------------------------------*/
/*--- Main entry point for the JITter.                      ---*/
/*------------------------------------------------------------*/

/* Extra comments re self-checking translations and self-modifying
   code.  (JRS 14 Oct 05).

   There are 3 modes:
   (1) no checking: all code assumed to be not self-modifying
   (2) partial: known-problematic situations get a self-check
   (3) full checking: all translations get a self-check

   As currently implemented, the default is (2).  (3) is always safe,
   but very slow.  (1) works mostly, but fails for gcc nested-function
   code which uses trampolines on the stack; this situation is
   detected and handled by (2).

   ----------

   A more robust and transparent solution, which is not currently
   implemented, is a variant of (2): if a translation is made from an
   area which aspacem says does not have 'w' permission, then it can
   be non-self-checking.  Otherwise, it needs a self-check.

   This is complicated by Vex's basic-block chasing.  If a self-check
   is requested, then Vex will not chase over basic block boundaries
   (it's too complex).  However there is still a problem if it chases
   from a non-'w' area into a 'w' area.

   I think the right thing to do is:

   - if a translation request starts in a 'w' area, ask for a
     self-checking translation, and do not allow any chasing (make
     chase_into_ok return False).  Note that the latter is redundant
     in the sense that Vex won't chase anyway in this situation.

   - if a translation request starts in a non-'w' area, do not ask for
     a self-checking translation.  However, do not allow chasing (as
     determined by chase_into_ok) to go into a 'w' area.

   The result of this is that all code inside 'w' areas is self
   checking.

   To complete the trick, there is a caveat: we must watch the
   client's mprotect calls.  If pages are changed from non-'w' to 'w'
   then we should throw away all translations which intersect the
   affected area, so as to force them to be redone with self-checks.

   ----------

   The above outlines the conditions under which bb chasing is allowed
   from a self-modifying-code point of view.  There are other
   situations pertaining to function redirection in which it is
   necessary to disallow chasing, but those fall outside the scope of
   this comment.
*/
/* Vex dumps the final code in here.  Then we can copy it off
   wherever we like. */
/* 60000: should agree with assertion in VG_(add_to_transtab) in
   m_transtab.c. */
#define N_TMPBUF 60000
static UChar tmpbuf[N_TMPBUF];
/* Function pointers we must supply to LibVEX in order that it
   can bomb out and emit messages under Valgrind's control. */
__attribute__ ((noreturn))
static
void failure_exit ( void )
{
   LibVEX_ShowAllocStats();
   VG_(core_panic)("LibVEX called failure_exit().");
}
static
void log_bytes ( const HChar* bytes, SizeT nbytes )
{
   SizeT i = 0;
   /* Print four bytes at a time where possible, then any remainder. */
   if (nbytes >= 4)
      for (; i < nbytes-3; i += 4)
         VG_(printf)("%c%c%c%c", bytes[i], bytes[i+1], bytes[i+2], bytes[i+3]);
   for (; i < nbytes; i++)
      VG_(printf)("%c", bytes[i]);
}
/* --------- Various helper functions for translation --------- */

/* Look for reasons to disallow making translations from the given
   segment/addr. */

static Bool translations_allowable_from_seg ( NSegment const* seg, Addr addr )
{
#  if defined(VGA_x86) || defined(VGA_s390x) || defined(VGA_mips32) \
      || defined(VGA_mips64) || defined(VGA_nanomips)
   Bool allowR = True;
#  else
   Bool allowR = False;
#  endif
   return seg != NULL
          && (seg->kind == SkAnonC || seg->kind == SkFileC || seg->kind == SkShmC)
          && (seg->hasX
              || (seg->hasR && (allowR
                                || VG_(has_gdbserver_breakpoint) (addr))));
   /* If GDB/gdbsrv has inserted a breakpoint at addr, assume this is a valid
      location to translate if seg is not executable but is readable.
      This is needed for inferior function calls from GDB: GDB inserts a
      breakpoint on the stack, and expects to regain control before the
      breakpoint instruction at the breakpoint address is really
      executed.  For this, the breakpoint instruction must be translated
      so as to have the call to gdbserver executed. */
}
/* Produce a bitmask stating which of the supplied extents needs a
   self-check.  See documentation of
   VexTranslateArgs::needs_self_check for more details about the
   return convention. */
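/* Illustrative example (not from the original source): for a |vge| with
   n_used == 2 where only the second extent lies in code that must be
   checked, the returned bitset is 0x2 -- bit i set means extent i gets
   a self-check. */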
static UInt needs_self_check ( void* closureV,
                               /*MAYBE_MOD*/VexRegisterUpdates* pxControl,
                               const VexGuestExtents* vge )
{
   VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   UInt i, bitset;

   vg_assert(vge->n_used >= 1 && vge->n_used <= 3);
   bitset = 0;

   /* Will we need to do a second pass in order to compute a
      revised *pxControl value? */
   Bool pxStatusMightChange
      = /* "the user actually set it" */
        VG_(clo_px_file_backed) != VexRegUpd_INVALID
        /* "and they set it to something other than the default." */
        && *pxControl != VG_(clo_px_file_backed);

   /* First, compute |bitset|, which specifies which extent(s) need a
      self check.  Whilst we're at it, note any NSegments that we get,
      so as to reduce the number of calls required to
      VG_(am_find_nsegment) in a possible second pass. */
   const NSegment *segs[3] = { NULL, NULL, NULL };

   for (i = 0; i < vge->n_used; i++) {
      Bool  check = False;
      Addr  addr  = vge->base[i];
      SizeT len   = vge->len[i];
      NSegment const* segA = NULL;

#     if defined(VGO_darwin)
      // GrP fixme hack - dyld i386 IMPORT gets rewritten.
      // To really do this correctly, we'd need to flush the
      // translation cache whenever a segment became +WX.
      segA = VG_(am_find_nsegment)(addr);
      if (segA && segA->hasX && segA->hasW)
         check = True;
#     endif

      if (!check) {
         switch (VG_(clo_smc_check)) {
            case Vg_SmcNone:
               /* never check (except as per Darwin hack above) */
               break;
            case Vg_SmcAll:
               /* always check */
               check = True;
               break;
            case Vg_SmcStack: {
               /* check if the address is in the same segment as this
                  thread's stack pointer */
               Addr sp = VG_(get_SP)(closure->tid);
               if (!segA) {
                  segA = VG_(am_find_nsegment)(addr);
               }
               NSegment const* segSP = VG_(am_find_nsegment)(sp);
               if (segA && segSP && segA == segSP)
                  check = True;
               break;
            }
            case Vg_SmcAllNonFile: {
               /* check if any part of the extent is not in a
                  file-mapped segment */
               if (!segA) {
                  segA = VG_(am_find_nsegment)(addr);
               }
               if (segA && segA->kind == SkFileC && segA->start <= addr
                   && (len == 0 || addr + len <= segA->end + 1)) {
                  /* in a file-mapped segment; skip the check */
               } else {
                  check = True;
               }
               break;
            }
            default:
               vg_assert(0);
         }
      }

      if (check)
         bitset |= (1 << i);

      if (pxStatusMightChange && segA) {
         vg_assert(i < sizeof(segs)/sizeof(segs[0]));
         segs[i] = segA;
      }
   }

   /* Now, possibly do a second pass, to see if the PX status might
      change.  This can happen if the user specified value via
      --px-file-backed= which is different from the default PX value
      specified via --vex-iropt-register-updates (also known by the
      shorter alias --px-default). */
   if (pxStatusMightChange) {

      Bool allFileBacked = True;
      for (i = 0; i < vge->n_used; i++) {
         Addr  addr  = vge->base[i];
         SizeT len   = vge->len[i];
         NSegment const* segA = segs[i];
         if (!segA) {
            /* If we don't have a cached value for |segA|, compute it now. */
            segA = VG_(am_find_nsegment)(addr);
         }
         vg_assert(segA); /* Can this ever fail? */
         if (segA && segA->kind == SkFileC && segA->start <= addr
             && (len == 0 || addr + len <= segA->end + 1)) {
            /* in a file-mapped segment */
         } else {
            /* not in a file-mapped segment, or we can't figure out
               where it is */
            allFileBacked = False;
            break;
         }
      }

      /* So, finally, if all the extents are in file backed segments, perform
         the user-specified PX change. */
      if (allFileBacked) {
         *pxControl = VG_(clo_px_file_backed);
      }

   }

   /* Update running PX stats, as it is difficult without these to
      check that the system is behaving as expected. */
   switch (*pxControl) {
      case VexRegUpdSpAtMemAccess:
         n_PX_VexRegUpdSpAtMemAccess++; break;
      case VexRegUpdUnwindregsAtMemAccess:
         n_PX_VexRegUpdUnwindregsAtMemAccess++; break;
      case VexRegUpdAllregsAtMemAccess:
         n_PX_VexRegUpdAllregsAtMemAccess++; break;
      case VexRegUpdAllregsAtEachInsn:
         n_PX_VexRegUpdAllregsAtEachInsn++; break;
      default:
         vg_assert(0);
   }

   return bitset;
}
/* This is a callback passed to LibVEX_Translate.  It stops Vex from
   chasing into function entry points that we wish to redirect.
   Chasing across them obviously defeats the redirect mechanism, with
   bad effects for Memcheck, Helgrind, DRD, Massif, and possibly others.
*/
static Bool chase_into_ok ( void* closureV, Addr addr )
{
   NSegment const* seg = VG_(am_find_nsegment)(addr);

   /* Work through a list of possibilities why we might not want to
      allow a chase. */

   /* Destination not in a plausible segment? */
   if (!translations_allowable_from_seg(seg, addr))
      goto dontchase;

   /* Destination is redirected? */
   if (addr != VG_(redir_do_lookup)(addr, NULL))
      goto dontchase;

#  if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
   /* This needs to be at the start of its own block.  Don't chase. */
   if (addr == (Addr)&VG_(ppctoc_magic_redirect_return_stub))
      goto dontchase;
#  endif

   /* overly conservative, but .. don't chase into the distinguished
      address that m_transtab uses as an empty-slot marker for
      VG_(tt_fast). */
   if (addr == TRANSTAB_BOGUS_GUEST_ADDR)
      goto dontchase;

#  if defined(VGA_s390x)
   /* Never chase into an EX instruction.  Generating IR for EX causes
      a round-trip through the scheduler including VG_(discard_translations).
      And that's expensive as shown by perf/tinycc.c:
      Chasing into EX increases the number of EX translations from 21 to
      102666 causing a 7x runtime increase for "none" and a 3.2x runtime
      increase for memcheck. */
   if (((UChar *)addr)[0] == 0x44 ||   /* EX   */
       ((UChar *)addr)[0] == 0xC6)     /* EXRL */
      goto dontchase;
#  endif

   /* well, ok then.  go on and chase. */
   return True;

   vg_assert(0);
   /*NOTREACHED*/

  dontchase:
   if (0) VG_(printf)("not chasing into 0x%lx\n", addr);
   return False;
}
/* --------------- helpers for with-TOC platforms --------------- */

/* NOTE: with-TOC platforms are: ppc64-linux. */

static IRExpr* mkU64 ( ULong n ) {
   return IRExpr_Const(IRConst_U64(n));
}
static IRExpr* mkU32 ( UInt n ) {
   return IRExpr_Const(IRConst_U32(n));
}

#if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
static IRExpr* mkU8 ( UChar n ) {
   return IRExpr_Const(IRConst_U8(n));
}
static IRExpr* narrowTo32 ( IRTypeEnv* tyenv, IRExpr* e ) {
   if (typeOfIRExpr(tyenv, e) == Ity_I32) {
      return e;
   } else {
      vg_assert(typeOfIRExpr(tyenv, e) == Ity_I64);
      return IRExpr_Unop(Iop_64to32, e);
   }
}
/* Generate code to push word-typed expression 'e' onto this thread's
   redir stack, checking for stack overflow and generating code to
   bomb out if so. */

static void gen_PUSH ( IRSB* bb, IRExpr* e )
{
   IRRegArray* descr;
   IRTemp      t1;
   IRExpr*     one;

#  if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
   Int    stack_size       = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
   Int    offB_REDIR_SP    = offsetof(VexGuestPPC64State,guest_REDIR_SP);
   Int    offB_REDIR_STACK = offsetof(VexGuestPPC64State,guest_REDIR_STACK);
   Int    offB_EMNOTE      = offsetof(VexGuestPPC64State,guest_EMNOTE);
   Int    offB_CIA         = offsetof(VexGuestPPC64State,guest_CIA);
   Bool   is64             = True;
   IRType ty_Word          = Ity_I64;
   IROp   op_CmpNE         = Iop_CmpNE64;
   IROp   op_Sar           = Iop_Sar64;
   IROp   op_Sub           = Iop_Sub64;
   IROp   op_Add           = Iop_Add64;
   IRExpr*(*mkU)(ULong)    = mkU64;
   vg_assert(VG_WORDSIZE == 8);
#  else
   Int    stack_size       = VEX_GUEST_PPC32_REDIR_STACK_SIZE;
   Int    offB_REDIR_SP    = offsetof(VexGuestPPC32State,guest_REDIR_SP);
   Int    offB_REDIR_STACK = offsetof(VexGuestPPC32State,guest_REDIR_STACK);
   Int    offB_EMNOTE      = offsetof(VexGuestPPC32State,guest_EMNOTE);
   Int    offB_CIA         = offsetof(VexGuestPPC32State,guest_CIA);
   Bool   is64             = False;
   IRType ty_Word          = Ity_I32;
   IROp   op_CmpNE         = Iop_CmpNE32;
   IROp   op_Sar           = Iop_Sar32;
   IROp   op_Sub           = Iop_Sub32;
   IROp   op_Add           = Iop_Add32;
   IRExpr*(*mkU)(UInt)     = mkU32;
   vg_assert(VG_WORDSIZE == 4);
#  endif

   vg_assert(sizeof(void*) == VG_WORDSIZE);
   vg_assert(sizeof(Word)  == VG_WORDSIZE);
   vg_assert(sizeof(Addr)  == VG_WORDSIZE);

   descr = mkIRRegArray( offB_REDIR_STACK, ty_Word, stack_size );
   t1    = newIRTemp( bb->tyenv, ty_Word );
   one   = mkU(1);

   vg_assert(typeOfIRExpr(bb->tyenv, e) == ty_Word);

   /* t1 = guest_REDIR_SP + 1 */
   addStmtToIRSB(
      bb,
      IRStmt_WrTmp(
         t1,
         IRExpr_Binop(op_Add, IRExpr_Get( offB_REDIR_SP, ty_Word ), one)
      )
   );

   /* Bomb out if t1 >=s stack_size, that is, (stack_size-1)-t1 <s 0.
      The destination (0) is a bit bogus but it doesn't matter since
      this is an unrecoverable error and will lead to Valgrind
      shutting down.  _EMNOTE is set regardless - that's harmless
      since it only has a meaning if the exit is taken. */
   addStmtToIRSB(
      bb,
      IRStmt_Put(offB_EMNOTE, mkU32(EmWarn_PPC64_redir_overflow))
   );
   addStmtToIRSB(
      bb,
      IRStmt_Exit(
         IRExpr_Binop(
            op_CmpNE,
            IRExpr_Binop(
               op_Sar,
               IRExpr_Binop(op_Sub,mkU(stack_size-1),IRExpr_RdTmp(t1)),
               mkU8(8 * VG_WORDSIZE - 1)
            ),
            mkU(0)
         ),
         Ijk_EmFail,
         is64 ? IRConst_U64(0) : IRConst_U32(0),
         offB_CIA
      )
   );

   /* guest_REDIR_SP = t1 */
   addStmtToIRSB(bb, IRStmt_Put(offB_REDIR_SP, IRExpr_RdTmp(t1)));

   /* guest_REDIR_STACK[t1+0] = e */
   /* PutI/GetI have I32-typed indexes regardless of guest word size */
   addStmtToIRSB(
      bb,
      IRStmt_PutI(mkIRPutI(descr,
                           narrowTo32(bb->tyenv,IRExpr_RdTmp(t1)), 0, e)));
}
/* Generate code to pop a word-sized value from this thread's redir
   stack, binding it to a new temporary, which is returned.  As with
   gen_PUSH, an overflow check is also performed. */

static IRTemp gen_POP ( IRSB* bb )
{
#  if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
   Int    stack_size       = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
   Int    offB_REDIR_SP    = offsetof(VexGuestPPC64State,guest_REDIR_SP);
   Int    offB_REDIR_STACK = offsetof(VexGuestPPC64State,guest_REDIR_STACK);
   Int    offB_EMNOTE      = offsetof(VexGuestPPC64State,guest_EMNOTE);
   Int    offB_CIA         = offsetof(VexGuestPPC64State,guest_CIA);
   Bool   is64             = True;
   IRType ty_Word          = Ity_I64;
   IROp   op_CmpNE         = Iop_CmpNE64;
   IROp   op_Sar           = Iop_Sar64;
   IROp   op_Sub           = Iop_Sub64;
   IRExpr*(*mkU)(ULong)    = mkU64;
#  else
   Int    stack_size       = VEX_GUEST_PPC32_REDIR_STACK_SIZE;
   Int    offB_REDIR_SP    = offsetof(VexGuestPPC32State,guest_REDIR_SP);
   Int    offB_REDIR_STACK = offsetof(VexGuestPPC32State,guest_REDIR_STACK);
   Int    offB_EMNOTE      = offsetof(VexGuestPPC32State,guest_EMNOTE);
   Int    offB_CIA         = offsetof(VexGuestPPC32State,guest_CIA);
   Bool   is64             = False;
   IRType ty_Word          = Ity_I32;
   IROp   op_CmpNE         = Iop_CmpNE32;
   IROp   op_Sar           = Iop_Sar32;
   IROp   op_Sub           = Iop_Sub32;
   IRExpr*(*mkU)(UInt)     = mkU32;
#  endif

   IRRegArray* descr = mkIRRegArray( offB_REDIR_STACK, ty_Word, stack_size );
   IRTemp      t1    = newIRTemp( bb->tyenv, ty_Word );
   IRTemp      res   = newIRTemp( bb->tyenv, ty_Word );
   IRExpr*     one   = mkU(1);

   vg_assert(sizeof(void*) == VG_WORDSIZE);
   vg_assert(sizeof(Word)  == VG_WORDSIZE);
   vg_assert(sizeof(Addr)  == VG_WORDSIZE);

   /* t1 = guest_REDIR_SP */
   addStmtToIRSB(
      bb,
      IRStmt_WrTmp( t1, IRExpr_Get( offB_REDIR_SP, ty_Word ) )
   );

   /* Bomb out if t1 < 0.  Same comments as gen_PUSH apply. */
   addStmtToIRSB(
      bb,
      IRStmt_Put(offB_EMNOTE, mkU32(EmWarn_PPC64_redir_underflow))
   );
   addStmtToIRSB(
      bb,
      IRStmt_Exit(
         IRExpr_Binop(
            op_CmpNE,
            IRExpr_Binop(
               op_Sar,
               IRExpr_RdTmp(t1),
               mkU8(8 * VG_WORDSIZE - 1)
            ),
            mkU(0)
         ),
         Ijk_EmFail,
         is64 ? IRConst_U64(0) : IRConst_U32(0),
         offB_CIA
      )
   );

   /* res = guest_REDIR_STACK[t1+0] */
   /* PutI/GetI have I32-typed indexes regardless of guest word size */
   addStmtToIRSB(
      bb,
      IRStmt_WrTmp(
         res,
         IRExpr_GetI(descr, narrowTo32(bb->tyenv,IRExpr_RdTmp(t1)), 0)
      )
   );

   /* guest_REDIR_SP = t1-1 */
   addStmtToIRSB(
      bb,
      IRStmt_Put(offB_REDIR_SP, IRExpr_Binop(op_Sub, IRExpr_RdTmp(t1), one))
   );

   return res;
}

#endif
#if defined(VG_PLAT_USES_PPCTOC)

/* Generate code to push LR and R2 onto this thread's redir stack,
   then set R2 to the new value (which is the TOC pointer to be used
   for the duration of the replacement function, as determined by
   m_debuginfo), and set LR to the magic return stub, so we get to
   intercept the return and restore R2 and LR to the values saved
   here. */

static void gen_push_and_set_LR_R2 ( IRSB* bb, Addr new_R2_value )
{
#  if defined(VGP_ppc64be_linux)
   Addr bogus_RA  = (Addr)&VG_(ppctoc_magic_redirect_return_stub);
   Int  offB_GPR2 = offsetof(VexGuestPPC64State,guest_GPR2);
   Int  offB_LR   = offsetof(VexGuestPPC64State,guest_LR);
   gen_PUSH( bb, IRExpr_Get(offB_LR,   Ity_I64) );
   gen_PUSH( bb, IRExpr_Get(offB_GPR2, Ity_I64) );
   addStmtToIRSB( bb, IRStmt_Put( offB_LR,   mkU64( bogus_RA )) );
   addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, mkU64( new_R2_value )) );

#  else
#    error Platform is not TOC-afflicted, fortunately
#  endif
}

#endif
#if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)

static void gen_pop_R2_LR_then_bLR ( IRSB* bb )
{
#  if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
   Int    offB_GPR2 = offsetof(VexGuestPPC64State,guest_GPR2);
   Int    offB_LR   = offsetof(VexGuestPPC64State,guest_LR);
   Int    offB_CIA  = offsetof(VexGuestPPC64State,guest_CIA);
   IRTemp old_R2    = newIRTemp( bb->tyenv, Ity_I64 );
   IRTemp old_LR    = newIRTemp( bb->tyenv, Ity_I64 );
   /* Restore R2 */
   old_R2 = gen_POP( bb );
   addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, IRExpr_RdTmp(old_R2)) );
   /* Restore LR */
   old_LR = gen_POP( bb );
   addStmtToIRSB( bb, IRStmt_Put( offB_LR, IRExpr_RdTmp(old_LR)) );
   /* Branch to LR */
   /* Re the use of Ijk_Boring: we arrived here precisely because a
      wrapped fn did a blr (hence Ijk_Ret); so we should just mark
      this jump as Boring, else one _Call will have resulted in two
      _Rets. */
   bb->jumpkind = Ijk_Boring;
   bb->next     = IRExpr_Binop(Iop_And64, IRExpr_RdTmp(old_LR), mkU64(~(3ULL)));
   bb->offsIP   = offB_CIA;
#  else
#    error Platform is not TOC-afflicted, fortunately
#  endif
}

#endif
#if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)

static
Bool mk_preamble__ppctoc_magic_return_stub ( void* closureV, IRSB* bb )
{
   VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   /* Since we're creating the entire IRSB right here, give it a
      proper IMark, as it won't get one any other way, and cachegrind
      will barf if it doesn't have one (fair enough really). */
   addStmtToIRSB( bb, IRStmt_IMark( closure->readdr, 4, 0 ) );
   /* Generate the magic sequence:
         pop R2 from hidden stack
         pop LR from hidden stack
         goto LR
   */
   gen_pop_R2_LR_then_bLR(bb);
   return True; /* True == this is the entire BB; don't disassemble any
                   real insns into it - just hand it directly to
                   optimiser/instrumenter/backend. */
}
#endif
#if defined(VGP_ppc64le_linux)
/* Generate code to push LR and R2 onto this thread's redir stack.
   Need to save R2 in case we redirect to a global entry point.  The
   value of R2 is not preserved when entering the global entry point.
   Need to make sure R2 gets restored on return.  Set LR to the magic
   return stub, so we get to intercept the return and restore R2 and
   LR to the values saved here.

   The existing infrastructure for the TOC enabled architectures is
   being exploited here.  So, we need to enable a number of the
   code sections used by VG_PLAT_USES_PPCTOC.
*/

static void gen_push_R2_and_set_LR ( IRSB* bb )
{
   Addr bogus_RA  = (Addr)&VG_(ppctoc_magic_redirect_return_stub);
   Int  offB_GPR2 = offsetof(VexGuestPPC64State,guest_GPR2);
   Int  offB_LR   = offsetof(VexGuestPPC64State,guest_LR);
   gen_PUSH( bb, IRExpr_Get(offB_LR,   Ity_I64) );
   gen_PUSH( bb, IRExpr_Get(offB_GPR2, Ity_I64) );
   addStmtToIRSB( bb, IRStmt_Put( offB_LR, mkU64( bogus_RA )) );
}
#  endif
/* --------------- END helpers for with-TOC platforms --------------- */
/* This is the IR preamble generator used for replacement
   functions.  It adds code to set the guest_NRADDR{_GPR2} to zero
   (technically not necessary, but facilitates detecting mixups in
   which a replacement function has been erroneously declared using
   VG_REPLACE_FUNCTION_Z{U,Z} when instead it should have been written
   using VG_WRAP_FUNCTION_Z{U,Z}).

   On with-TOC platforms the following hacks are also done: LR and R2
   are pushed onto a hidden stack, R2 is set to the correct value for
   the replacement function, and LR is set to point at the magic
   return-stub address.  Setting LR causes the return of the
   wrapped/redirected function to lead to our magic return stub, which
   restores LR and R2 from said stack and returns for real.

   VG_(get_StackTrace_wrk) understands that the LR value may point to
   the return stub address, and that in that case it can get the real
   LR value from the hidden stack instead. */
static
Bool mk_preamble__set_NRADDR_to_zero ( void* closureV, IRSB* bb )
{
   Int nraddr_szB
      = sizeof(((VexGuestArchState*)0)->guest_NRADDR);
   vg_assert(nraddr_szB == 4 || nraddr_szB == 8);
   vg_assert(nraddr_szB == sizeof(RegWord));
   addStmtToIRSB(
      bb,
      IRStmt_Put(
         offsetof(VexGuestArchState,guest_NRADDR),
         nraddr_szB == 8 ? mkU64(0) : mkU32(0)
      )
   );
   // t9 needs to be set to point to the start of the redirected function.
#  if defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
   VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   Int offB_GPR25 = offsetof(VexGuestMIPS32State, guest_r25);
   addStmtToIRSB(bb, IRStmt_Put(offB_GPR25, mkU32(closure->readdr)));
#  endif
#  if defined(VGP_mips64_linux)
   VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   Int offB_GPR25 = offsetof(VexGuestMIPS64State, guest_r25);
   addStmtToIRSB(bb, IRStmt_Put(offB_GPR25, mkU64(closure->readdr)));
#  endif
#  if defined(VG_PLAT_USES_PPCTOC)
   { VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
     addStmtToIRSB(
        bb,
        IRStmt_Put(
           offsetof(VexGuestArchState,guest_NRADDR_GPR2),
           VG_WORDSIZE==8 ? mkU64(0) : mkU32(0)
        )
     );
     gen_push_and_set_LR_R2 ( bb, VG_(get_tocptr)( VG_(current_DiEpoch)(),
                                                   closure->readdr ) );
   }
#  endif

#if defined(VGP_ppc64le_linux)
   VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   Int offB_GPR12 = offsetof(VexGuestArchState, guest_GPR12);
   addStmtToIRSB(bb, IRStmt_Put(offB_GPR12, mkU64(closure->readdr)));
   addStmtToIRSB(bb,
      IRStmt_Put(
         offsetof(VexGuestArchState,guest_NRADDR_GPR2),
         VG_WORDSIZE==8 ? mkU64(0) : mkU32(0)
      )
   );
   gen_push_R2_and_set_LR ( bb );
#endif
   return False;
}
/* Ditto, except set guest_NRADDR to nraddr (the un-redirected guest
   address).  This is needed for function wrapping - so the wrapper
   can read _NRADDR and find the address of the function being
   wrapped.  On toc-afflicted platforms we must also snarf r2. */
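/* Illustrative example (not from the original source): a client-side
   wrapper built with the valgrind.h macros consumes the _NRADDR value
   set up below, roughly as in

      int I_WRAP_SONAME_FNNAME_ZU(libcZdsoZa, foo) ( int x )
      {
         OrigFn fn;
         VALGRIND_GET_ORIG_FN(fn);  // reads guest_NRADDR
         int    r;
         CALL_FN_W_W(r, fn, x);     // calls the original 'foo'
         return r;
      }
*/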
static
Bool mk_preamble__set_NRADDR_to_nraddr ( void* closureV, IRSB* bb )
{
   VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   Int nraddr_szB
      = sizeof(((VexGuestArchState*)0)->guest_NRADDR);
   vg_assert(nraddr_szB == 4 || nraddr_szB == 8);
   vg_assert(nraddr_szB == sizeof(RegWord));
   addStmtToIRSB(
      bb,
      IRStmt_Put(
         offsetof(VexGuestArchState,guest_NRADDR),
         nraddr_szB == 8
            ? IRExpr_Const(IRConst_U64( closure->nraddr ))
            : IRExpr_Const(IRConst_U32( (UInt)closure->nraddr ))
      )
   );
   // t9 needs to be set to point to the start of the redirected function.
#  if defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
   Int offB_GPR25 = offsetof(VexGuestMIPS32State, guest_r25);
   addStmtToIRSB(bb, IRStmt_Put(offB_GPR25, mkU32(closure->readdr)));
#  endif
#  if defined(VGP_mips64_linux)
   Int offB_GPR25 = offsetof(VexGuestMIPS64State, guest_r25);
   addStmtToIRSB(bb, IRStmt_Put(offB_GPR25, mkU64(closure->readdr)));
#  endif
#  if defined(VG_PLAT_USES_PPCTOC)
   addStmtToIRSB(
      bb,
      IRStmt_Put(
         offsetof(VexGuestArchState,guest_NRADDR_GPR2),
         IRExpr_Get(offsetof(VexGuestArchState,guest_GPR2),
                    VG_WORDSIZE==8 ? Ity_I64 : Ity_I32)
      )
   );
   gen_push_and_set_LR_R2 ( bb, VG_(get_tocptr)( VG_(current_DiEpoch)(),
                                                 closure->readdr ) );
#  endif
#if defined(VGP_ppc64le_linux)
   /* This saves the r2 before leaving the function.  We need to move
    * guest_NRADDR_GPR2 back to R2 on return.
    */
   Int offB_GPR12 = offsetof(VexGuestArchState, guest_GPR12);
   addStmtToIRSB(
      bb,
      IRStmt_Put(
         offsetof(VexGuestArchState,guest_NRADDR_GPR2),
         IRExpr_Get(offsetof(VexGuestArchState,guest_GPR2),
                    VG_WORDSIZE==8 ? Ity_I64 : Ity_I32)
      )
   );
   addStmtToIRSB(bb, IRStmt_Put(offB_GPR12, mkU64(closure->readdr)));
   gen_push_R2_and_set_LR ( bb );
#endif
   return False;
}
/* --- Helpers to do with PPC related stack redzones. --- */

__attribute__((unused))
static Bool const_True ( Addr guest_addr )
{
   return True;
}
/* --------------- main translation function --------------- */

/* Note: see comments at top of m_redir.c for the Big Picture on how
   redirections are managed. */

typedef
   enum {
      /* normal translation, redir neither requested nor inhibited */
      T_Normal,
      /* redir translation, function-wrap (set _NRADDR) style */
      T_Redir_Wrap,
      /* redir translation, replacement (don't set _NRADDR) style */
      T_Redir_Replace,
      /* a translation in which redir is specifically disallowed */
      T_NoRedir
   }
   T_Kind;
/* Translate the basic block beginning at NRADDR, and add it to the
   translation cache & translation table.  Unless
   DEBUGGING_TRANSLATION is true, in which case the call is being done
   for debugging purposes, so (a) throw away the translation once it
   is made, and (b) produce a load of debugging output.  If
   ALLOW_REDIRECTION is False, do not attempt redirection of NRADDR,
   and also, put the resulting translation into the no-redirect tt/tc
   instead of the normal one.

   TID is the identity of the thread requesting this translation.
*/

Bool VG_(translate) ( ThreadId tid,
                      Addr     nraddr,
                      Bool     debugging_translation,
                      Int      debugging_verbosity,
                      ULong    bbs_done,
                      Bool     allow_redirection )
{
   Addr               addr;
   T_Kind             kind;
   Int                tmpbuf_used, verbosity, i;
   Bool (*preamble_fn)(void*,IRSB*);
   VexArch            vex_arch;
   VexArchInfo        vex_archinfo;
   VexAbiInfo         vex_abiinfo;
   VexGuestExtents    vge;
   VexTranslateArgs   vta;
   VexTranslateResult tres;
   VgCallbackClosure  closure;

   /* Make sure Vex is initialised right. */

   static Bool vex_init_done = False;

   if (!vex_init_done) {
      LibVEX_Init ( &failure_exit, &log_bytes,
                    1,     /* debug_paranoia */
                    &VG_(clo_vex_control) );
      vex_init_done = True;
   }
   /* Establish the translation kind and actual guest address to
      start from.  Sets (addr,kind). */
   if (allow_redirection) {
      Bool isWrap;
      Addr tmp = VG_(redir_do_lookup)( nraddr, &isWrap );
      if (tmp == nraddr) {
         /* no redirection found */
         addr = nraddr;
         kind = T_Normal;
      } else {
         /* found a redirect */
         addr = tmp;
         kind = isWrap ? T_Redir_Wrap : T_Redir_Replace;
      }
   } else {
      addr = nraddr;
      kind = T_NoRedir;
   }

   /* Established: (nraddr, addr, kind) */
   /* Printing redirection info. */

   if ((kind == T_Redir_Wrap || kind == T_Redir_Replace)
       && (VG_(clo_verbosity) >= 2 || VG_(clo_trace_redir))) {
      Bool ok;
      const HChar *buf;
      const HChar *name2;
      const DiEpoch ep = VG_(current_DiEpoch)();

      /* Try also to get the soname (not the filename) of the "from"
         object.  This makes it much easier to debug redirection
         problems. */
      const HChar* nraddr_soname = "???";
      DebugInfo*   nraddr_di     = VG_(find_DebugInfo)(ep, nraddr);
      if (nraddr_di) {
         const HChar* t = VG_(DebugInfo_get_soname)(nraddr_di);
         if (t)
            nraddr_soname = t;
      }

      ok = VG_(get_fnname_w_offset)(ep, nraddr, &buf);
      if (!ok) buf = "???";
      // Stash away name1
      HChar name1[VG_(strlen)(buf) + 1];
      VG_(strcpy)(name1, buf);
      ok = VG_(get_fnname_w_offset)(ep, addr, &name2);
      if (!ok) name2 = "???";

      VG_(message)(Vg_DebugMsg,
                   "REDIR: 0x%lx (%s:%s) redirected to 0x%lx (%s)\n",
                   nraddr, nraddr_soname, name1,
                   addr, name2 );
   }
   if (!debugging_translation)
      VG_TRACK( pre_mem_read, Vg_CoreTranslate,
                tid, "(translator)", addr, 1 );

   /* If doing any code printing, print a basic block start marker */
   if (VG_(clo_trace_flags) || debugging_translation) {
      const HChar* objname = "UNKNOWN_OBJECT";
      OffT         objoff  = 0;
      const DiEpoch ep = VG_(current_DiEpoch)();
      DebugInfo*   di  = VG_(find_DebugInfo)( ep, addr );
      if (di) {
         objname = VG_(DebugInfo_get_filename)(di);
         objoff  = addr - VG_(DebugInfo_get_text_bias)(di);
      }
      vg_assert(objname);

      const HChar *fnname;
      Bool ok = VG_(get_fnname_w_offset)(ep, addr, &fnname);
      if (!ok) fnname = "UNKNOWN_FUNCTION";
      VG_(printf)(
         "==== SB %llu (evchecks %llu) [tid %u] 0x%lx %s %s%c0x%lx\n",
         VG_(get_bbs_translated)(), bbs_done, tid, addr,
         fnname, objname, objoff >= 0 ? '+' : '-',
         (UWord)(objoff >= 0 ? objoff : -objoff)
      );
   }
   /* Are we allowed to translate here? */

   { /* BEGIN new scope specially for 'seg' */
   NSegment const* seg = VG_(am_find_nsegment)(addr);

   if ( (!translations_allowable_from_seg(seg, addr))
        || addr == TRANSTAB_BOGUS_GUEST_ADDR ) {
      if (VG_(clo_trace_signals))
         VG_(message)(Vg_DebugMsg, "translations not allowed here (0x%lx)"
                      " - throwing SEGV\n", addr);
      /* U R busted, sonny.  Place your hands on your head and step
         away from the orig_addr. */
      /* Code address is bad - deliver a signal instead */
      if (seg != NULL) {
         /* There's some kind of segment at the requested place, but we
            aren't allowed to execute code here. */
         if (debugging_translation)
            VG_(printf)("translations not allowed here (segment not executable)"
                        "(0x%lx)\n", addr);
         else
            VG_(synth_fault_perms)(tid, addr);
      } else {
         /* There is no segment at all; we are attempting to execute in
            the middle of nowhere. */
         if (debugging_translation)
            VG_(printf)("translations not allowed here (no segment)"
                        "(0x%lx)\n", addr);
         else
            VG_(synth_fault_mapping)(tid, addr);
      }
      return False;
   }
   /* True if a debug trans., or if bit N set in VG_(clo_trace_codegen). */
   verbosity = 0;
   if (debugging_translation) {
      verbosity = debugging_verbosity;
   }
   else
   if ( (VG_(clo_trace_flags) > 0
        && VG_(get_bbs_translated)() <= VG_(clo_trace_notabove)
        && VG_(get_bbs_translated)() >= VG_(clo_trace_notbelow) )) {
      verbosity = VG_(clo_trace_flags);
   }
   /* Figure out which preamble-mangling callback to send. */
   preamble_fn = NULL;
   if (kind == T_Redir_Replace)
      preamble_fn = mk_preamble__set_NRADDR_to_zero;
   else
   if (kind == T_Redir_Wrap)
      preamble_fn = mk_preamble__set_NRADDR_to_nraddr;

   /* On LE (ppc64le) we set up the LR here too. */
#  if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
   if (nraddr == (Addr)&VG_(ppctoc_magic_redirect_return_stub)) {
      /* If entering the special return stub, this means a wrapped or
         redirected function is returning.  Make this translation one
         which restores R2 and LR from the thread's hidden redir
         stack, and branch to the (restored) link register, thereby
         really causing the function to return. */
      vg_assert(kind == T_Normal);
      vg_assert(nraddr == addr);
      preamble_fn = mk_preamble__ppctoc_magic_return_stub;
   }
#  endif
   /* ------ Actually do the translation. ------ */
   vg_assert2(VG_(tdict).tool_instrument,
              "you forgot to set VgToolInterface function 'tool_instrument'");

   /* Get the CPU info established at startup. */
   VG_(machine_get_VexArchInfo)( &vex_arch, &vex_archinfo );

   /* Set up 'abiinfo' structure with stuff Vex needs to know about
      the guest and host ABIs. */

   LibVEX_default_VexAbiInfo( &vex_abiinfo );
   vex_abiinfo.guest_stack_redzone_size = VG_STACK_REDZONE_SZB;

#  if defined(VGP_amd64_linux)
   vex_abiinfo.guest_amd64_assume_fs_is_const = True;
   vex_abiinfo.guest_amd64_assume_gs_is_const = True;
#  endif

#  if defined(VGP_amd64_freebsd)
   vex_abiinfo.guest_amd64_assume_fs_is_const = True;
   vex_abiinfo.guest_amd64_sigbus_on_misalign = True;
#  endif

#  if defined(VGP_amd64_darwin)
   vex_abiinfo.guest_amd64_assume_gs_is_const = True;
#  endif

#  if defined(VGP_amd64_solaris)
   vex_abiinfo.guest_amd64_assume_fs_is_const = True;
#  endif

#  if defined(VGP_ppc32_linux)
   vex_abiinfo.guest_ppc_zap_RZ_at_blr        = False;
   vex_abiinfo.guest_ppc_zap_RZ_at_bl         = NULL;
#  endif

#  if defined(VGP_ppc64be_linux)
   vex_abiinfo.guest_ppc_zap_RZ_at_blr        = True;
   vex_abiinfo.guest_ppc_zap_RZ_at_bl         = const_True;
   vex_abiinfo.host_ppc_calls_use_fndescrs    = True;
#  endif

#  if defined(VGP_ppc64le_linux)
   vex_abiinfo.guest_ppc_zap_RZ_at_blr        = True;
   vex_abiinfo.guest_ppc_zap_RZ_at_bl         = const_True;
   vex_abiinfo.host_ppc_calls_use_fndescrs    = False;
#  endif
#  if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
   ThreadArchState* arch = &VG_(threads)[tid].arch;
   vex_abiinfo.guest_mips_fp_mode =
      !!(arch->vex.guest_CP0_status & MIPS_CP0_STATUS_FR);
#  if defined(VGP_mips32_linux)
   vex_abiinfo.guest_mips_fp_mode |=
      (!!(arch->vex.guest_CP0_Config5 & MIPS_CONF5_FRE)) << 1;
#  endif
   /* Compute guest__use_fallback_LLSC, overriding any settings of
      VG_(clo_fallback_llsc) that we know would cause the guest to
      fail (loop). */
   if (VEX_MIPS_COMP_ID(vex_archinfo.hwcaps) == VEX_PRID_COMP_CAVIUM) {
      /* We must use the fallback scheme. */
      vex_abiinfo.guest__use_fallback_LLSC = True;
   } else {
      vex_abiinfo.guest__use_fallback_LLSC
         = SimHintiS(SimHint_fallback_llsc, VG_(clo_sim_hints));
   }
#  endif
#  if defined(VGP_nanomips_linux)
   vex_abiinfo.guest__use_fallback_LLSC
      = SimHintiS(SimHint_fallback_llsc, VG_(clo_sim_hints));
#  endif

#  if defined(VGP_arm64_linux)
   vex_abiinfo.guest__use_fallback_LLSC
      = /* The user asked explicitly */
        SimHintiS(SimHint_fallback_llsc, VG_(clo_sim_hints))
        || /* we autodetected that it is necessary */
           vex_archinfo.arm64_requires_fallback_LLSC;
#  endif
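   /* In the fallback scheme, Vex does not map guest
      load-linked/store-conditional pairs onto the host's own LL/SC
      instructions but emulates them in software instead.  On targets
      where the native mapping can loop forever under instrumentation
      it is forced on above; elsewhere the user can request it with
      --sim-hints=fallback-llsc (SimHint_fallback_llsc). */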
   /* Set up closure args. */
   closure.tid    = tid;
   closure.nraddr = nraddr;
   closure.readdr = addr;

   /* Set up args for LibVEX_Translate. */
   vta.arch_guest       = vex_arch;
   vta.archinfo_guest   = vex_archinfo;
   vta.arch_host        = vex_arch;
   vta.archinfo_host    = vex_archinfo;
   vta.abiinfo_both     = vex_abiinfo;
   vta.callback_opaque  = (void*)&closure;
   vta.guest_bytes      = (UChar*)addr;
   vta.guest_bytes_addr = addr;
   vta.chase_into_ok    = chase_into_ok;
   vta.guest_extents    = &vge;
   vta.host_bytes       = tmpbuf;
   vta.host_bytes_size  = N_TMPBUF;
   vta.host_bytes_used  = &tmpbuf_used;
   { /* At this point we have to reconcile Vex's view of the
        instrumentation callback - which takes a void* first argument
        - with Valgrind's view, in which the first arg is a
        VgCallbackClosure*.  Hence the following longwinded casts.
        They are entirely legal but longwinded so as to maximise the
        chance of the C typechecker picking up any type snafus. */
     IRSB*(*f)(VgCallbackClosure*,
               IRSB*,const VexGuestLayout*,const VexGuestExtents*,
               const VexArchInfo*,IRType,IRType)
        = VG_(clo_vgdb) != Vg_VgdbNo
             ? tool_instrument_then_gdbserver_if_needed
             : VG_(tdict).tool_instrument;
     IRSB*(*g)(void*,
               IRSB*,const VexGuestLayout*,const VexGuestExtents*,
               const VexArchInfo*,IRType,IRType) = (__typeof__(g)) f;
     vta.instrument1 = g;
   }
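   /* To make the flow concrete: Vex will invoke
         vta.instrument1(vta.callback_opaque, sb, ...)
      with callback_opaque set to &closure above, so the tool's
      instrument function receives its VgCallbackClosure* back through
      the void* first argument which the casts above re-type. */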
   /* No need for type kludgery here. */
   vta.instrument2       = need_to_handle_SP_assignment()
                              ? vg_SP_update_pass
                              : NULL;
   vta.finaltidy         = VG_(needs).final_IR_tidy_pass
                              ? VG_(tdict).tool_final_IR_tidy_pass
                              : NULL;
   vta.needs_self_check  = needs_self_check;
   vta.preamble_function = preamble_fn;
   vta.traceflags        = verbosity;
   vta.sigill_diag       = VG_(clo_sigill_diag);
   vta.addProfInc        = VG_(clo_profyle_sbs) && kind != T_NoRedir;
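   /* addProfInc asks Vex to plant a profile-counter increment in the
      translation.  It is enabled only for SB profiling
      (VG_(clo_profyle_sbs)) and never for no-redir translations,
      whose offs_profInc is asserted to be unset further down. */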
   /* Set up the dispatch continuation-point info.  If this is a
      no-redir translation then it cannot be chained, and the chain-me
      points are set to NULL to indicate that.  The indir point must
      also be NULL, since we can't allow this translation to do an
      indir transfer -- that would take it back into the main
      translation cache too.

      All this is because no-redir translations live outside the main
      translation cache (in a secondary one) and chaining them would
      involve more administrative complexity than it's worth, because
      we don't expect them to be used often.  So don't bother. */
   if (allow_redirection) {
      vta.disp_cp_chain_me_to_slowEP
         = VG_(fnptr_to_fnentry)( &VG_(disp_cp_chain_me_to_slowEP) );
      vta.disp_cp_chain_me_to_fastEP
         = VG_(fnptr_to_fnentry)( &VG_(disp_cp_chain_me_to_fastEP) );
      vta.disp_cp_xindir
         = VG_(fnptr_to_fnentry)( &VG_(disp_cp_xindir) );
   } else {
      vta.disp_cp_chain_me_to_slowEP = NULL;
      vta.disp_cp_chain_me_to_fastEP = NULL;
      vta.disp_cp_xindir             = NULL;
   }
   /* This doesn't involve chaining and so is always allowable. */
   vta.disp_cp_xassisted
      = VG_(fnptr_to_fnentry)( &VG_(disp_cp_xassisted) );
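   /* Summarising the four continuation points: the two chain-me points
      are patchable call sites that let this translation jump directly
      to a successor's slow or fast entry point, xindir handles
      indirect transfers through the fast translation lookup, and
      xassisted always returns control to the scheduler -- which is
      why it is safe even for no-redir translations. */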
   /* Sheesh.  Finally, actually _do_ the translation! */
   tres = LibVEX_Translate ( &vta );

   vg_assert(tres.status == VexTransOK);
   vg_assert(tres.n_sc_extents >= 0 && tres.n_sc_extents <= 3);
   vg_assert(tmpbuf_used <= N_TMPBUF);
   vg_assert(tmpbuf_used > 0);

   n_TRACE_total_constructed += 1;
   n_TRACE_total_guest_insns += tres.n_guest_instrs;
   n_TRACE_total_uncond_branches_followed += tres.n_uncond_in_trace;
   n_TRACE_total_cond_branches_followed   += tres.n_cond_in_trace;
   } /* END new scope specially for 'seg' */
   /* Tell aspacem of all segments that have had translations taken
      from them. */
   for (i = 0; i < vge.n_used; i++) {
      VG_(am_set_segment_hasT)( vge.base[i] );
   }
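   /* The hasT flag records that a segment has had translations taken
      from it, so the core knows to discard those translations if the
      segment is later unmapped or modified. */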
   /* Copy data at trans_addr into the translation cache. */
   vg_assert(tmpbuf_used > 0 && tmpbuf_used < 65536);

   // If debugging, don't do anything with the translated block; we
   // only did this for the debugging output produced along the way.
   if (!debugging_translation) {

      if (kind != T_NoRedir) {
         // Put it into the normal TT/TC structures.  This is the
         // normal case.

         // Note that we use nraddr (the non-redirected address), not
         // addr, which might have been changed by the redirection.
         VG_(add_to_transtab)( &vge,
                               nraddr,
                               (Addr)(&tmpbuf[0]),
                               tmpbuf_used,
                               tres.n_sc_extents > 0,
                               tres.offs_profInc,
                               tres.n_guest_instrs );
      } else {
         vg_assert(tres.offs_profInc == -1); /* -1 == unset */
         VG_(add_to_unredir_transtab)( &vge,
                                       nraddr,
                                       (Addr)(&tmpbuf[0]),
                                       tmpbuf_used );
      }
   }

   return True;
}
/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/