syswrap openat2 for all linux arches
[valgrind.git] / drd / drd_load_store.c
blob80d326a0e2a91354aaf61fcb3c70716a1f3f5c5c
1 /*
2 This file is part of drd, a thread error detector.
4 Copyright (C) 2006-2020 Bart Van Assche <bvanassche@acm.org>.
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2 of the
9 License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, see <http://www.gnu.org/licenses/>.
19 The GNU General Public License is contained in the file COPYING.
23 #include "drd_bitmap.h"
24 #include "drd_thread_bitmap.h"
25 #include "drd_vc.h" /* DRD_(vc_snprint)() */
27 /* Include several source files here in order to allow the compiler to */
28 /* do more inlining. */
29 #include "drd_bitmap.c"
30 #include "drd_load_store.h"
31 #include "drd_segment.c"
32 #include "drd_thread.c"
33 #include "drd_vc.c"
34 #include "libvex_guest_offsets.h"
/* STACK_POINTER_OFFSET: VEX register offset for the stack pointer register.
   Used by is_stack_access() to recognize address expressions of the form
   (SP) or <offset>(SP). */
#if defined(VGA_x86)
#define STACK_POINTER_OFFSET OFFSET_x86_ESP
#elif defined(VGA_amd64)
#define STACK_POINTER_OFFSET OFFSET_amd64_RSP
#elif defined(VGA_ppc32)
#define STACK_POINTER_OFFSET OFFSET_ppc32_GPR1
#elif defined(VGA_ppc64be) || defined(VGA_ppc64le)
#define STACK_POINTER_OFFSET OFFSET_ppc64_GPR1
#elif defined(VGA_arm)
#define STACK_POINTER_OFFSET OFFSET_arm_R13
#elif defined(VGA_arm64)
#define STACK_POINTER_OFFSET OFFSET_arm64_XSP
#elif defined(VGA_s390x)
#define STACK_POINTER_OFFSET OFFSET_s390x_r15
#elif defined(VGA_mips32) || defined(VGA_nanomips)
#define STACK_POINTER_OFFSET OFFSET_mips32_r29
#elif defined(VGA_mips64)
#define STACK_POINTER_OFFSET OFFSET_mips64_r29
#else
#error Unknown architecture.
#endif
/* Local variables. */

/* Whether accesses to stack variables are checked for races
   (--check-stack-var; see also drd_report_race()). */
static Bool s_check_stack_accesses = False;
/* When True, a suppression is installed after the first race reported on an
   address range, so later races on that range are not reported again. */
static Bool s_first_race_only = False;
67 /* Function definitions. */
69 Bool DRD_(get_check_stack_accesses)(void)
71 return s_check_stack_accesses;
74 void DRD_(set_check_stack_accesses)(const Bool c)
76 tl_assert(c == False || c == True);
77 s_check_stack_accesses = c;
80 Bool DRD_(get_first_race_only)(void)
82 return s_first_race_only;
85 void DRD_(set_first_race_only)(const Bool fro)
87 tl_assert(fro == False || fro == True);
88 s_first_race_only = fro;
/**
 * Print a trace message for a memory access that falls inside one of the
 * address ranges selected with --trace-addr.
 *
 * @param addr            Start address of the access.
 * @param size            Size of the access in bytes.
 * @param access_type     eLoad, eStore, eStart or eEnd.
 * @param stored_value_hi Upper 32 bits of the stored value; only meaningful
 *                        for stores wider than a HWord on 32-bit hosts.
 * @param stored_value_lo Stored value (or its lower half).
 */
void DRD_(trace_mem_access)(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type,
                            const HWord stored_value_hi,
                            const HWord stored_value_lo)
{
   if (DRD_(is_any_traced)(addr, addr + size))
   {
      HChar* vc;

      /* Vector clock of the running thread, printed with each message. */
      vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(DRD_(thread_get_running_tid)()));
      if (access_type == eStore && size <= sizeof(HWord)) {
         /* Store that fits in a single HWord: print the value directly. */
         DRD_(trace_msg_w_bt)("store 0x%lx size %lu val %lu/0x%lx (thread %u /"
                              " vc %s)", addr, size, stored_value_lo,
                              stored_value_lo, DRD_(thread_get_running_tid)(),
                              vc);
      } else if (access_type == eStore && size > sizeof(HWord)) {
         ULong sv;

         /* Only reached on 32-bit hosts: a 64-bit store was split over the
            _hi/_lo arguments; recombine it for printing. */
         tl_assert(sizeof(HWord) == 4);
         sv = ((ULong)stored_value_hi << 32) | stored_value_lo;
         DRD_(trace_msg_w_bt)("store 0x%lx size %lu val %llu/0x%llx (thread %u"
                              " / vc %s)", addr, size, sv, sv,
                              DRD_(thread_get_running_tid)(), vc);
      } else {
         /* Loads and start/end markers: no value to print. */
         DRD_(trace_msg_w_bt)("%s 0x%lx size %lu (thread %u / vc %s)",
                              access_type == eLoad ? "load "
                              : access_type == eStore ? "store"
                              : access_type == eStart ? "start"
                              : access_type == eEnd ? "end " : "????",
                              addr, size, DRD_(thread_get_running_tid)(), vc);
      }
      VG_(free)(vc);
      /* Consistency check: DRD's notion of the running thread must match
         Valgrind's. */
      tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)())
                == VG_(get_running_tid)());
   }
}
128 static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr, const SizeT size)
130 return DRD_(trace_mem_access)(addr, size, eLoad, 0, 0);
133 static VG_REGPARM(3) void drd_trace_mem_store(const Addr addr,const SizeT size,
134 const HWord stored_value_hi,
135 const HWord stored_value_lo)
137 return DRD_(trace_mem_access)(addr, size, eStore, stored_value_hi,
138 stored_value_lo);
/**
 * Report a data race on the address range [addr, addr+size[ to Valgrind's
 * error-management machinery.
 *
 * If stack-access checking is disabled and the address lies on any thread
 * stack, the race is silently ignored (the #if 0 block shows a disabled
 * diagnostic for that case). With --first-race-only, a suppression is
 * installed so subsequent races on the same range are not reported again.
 */
static void drd_report_race(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
   ThreadId vg_tid;

   vg_tid = VG_(get_running_tid)();
   if (!DRD_(get_check_stack_accesses)()
       && DRD_(thread_address_on_any_stack)(addr)) {
#if 0
      GenericErrInfo GEI = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = addr,
      };
      VG_(maybe_record_error)(vg_tid, GenericErr, VG_(get_IP)(vg_tid),
                              "--check-stack-var=no skips checking stack"
                              " variables shared over threads",
                              &GEI);
#endif
   } else {
      DataRaceErrInfo drei = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = addr,
         .size = size,
         .access_type = access_type,
      };
      VG_(maybe_record_error)(vg_tid, DataRaceErr, VG_(get_IP)(vg_tid),
                              "Conflicting access", &drei);

      if (s_first_race_only)
         DRD_(start_suppression)(addr, addr + size, "first race only");
   }
}
174 VG_REGPARM(2) void DRD_(trace_load)(Addr addr, SizeT size)
176 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
177 /* The assert below has been commented out because of performance reasons.*/
178 tl_assert(DRD_(thread_get_running_tid)()
179 == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid)()));
180 #endif
182 if (DRD_(running_thread_is_recording_loads)()
183 && (s_check_stack_accesses
184 || ! DRD_(thread_address_on_stack)(addr))
185 && bm_access_load_triggers_conflict(addr, addr + size)
186 && ! DRD_(is_suppressed)(addr, addr + size))
188 drd_report_race(addr, size, eLoad);
192 static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
194 if (DRD_(running_thread_is_recording_loads)()
195 && (s_check_stack_accesses
196 || ! DRD_(thread_address_on_stack)(addr))
197 && bm_access_load_1_triggers_conflict(addr)
198 && ! DRD_(is_suppressed)(addr, addr + 1))
200 drd_report_race(addr, 1, eLoad);
204 static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
206 if (DRD_(running_thread_is_recording_loads)()
207 && (s_check_stack_accesses
208 || ! DRD_(thread_address_on_stack)(addr))
209 && bm_access_load_2_triggers_conflict(addr)
210 && ! DRD_(is_suppressed)(addr, addr + 2))
212 drd_report_race(addr, 2, eLoad);
216 static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
218 if (DRD_(running_thread_is_recording_loads)()
219 && (s_check_stack_accesses
220 || ! DRD_(thread_address_on_stack)(addr))
221 && bm_access_load_4_triggers_conflict(addr)
222 && ! DRD_(is_suppressed)(addr, addr + 4))
224 drd_report_race(addr, 4, eLoad);
228 static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
230 if (DRD_(running_thread_is_recording_loads)()
231 && (s_check_stack_accesses
232 || ! DRD_(thread_address_on_stack)(addr))
233 && bm_access_load_8_triggers_conflict(addr)
234 && ! DRD_(is_suppressed)(addr, addr + 8))
236 drd_report_race(addr, 8, eLoad);
240 VG_REGPARM(2) void DRD_(trace_store)(Addr addr, SizeT size)
242 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
243 /* The assert below has been commented out because of performance reasons.*/
244 tl_assert(DRD_(thread_get_running_tid)()
245 == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid)()));
246 #endif
248 if (DRD_(running_thread_is_recording_stores)()
249 && (s_check_stack_accesses
250 || ! DRD_(thread_address_on_stack)(addr))
251 && bm_access_store_triggers_conflict(addr, addr + size)
252 && ! DRD_(is_suppressed)(addr, addr + size))
254 drd_report_race(addr, size, eStore);
258 static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
260 if (DRD_(running_thread_is_recording_stores)()
261 && (s_check_stack_accesses
262 || ! DRD_(thread_address_on_stack)(addr))
263 && bm_access_store_1_triggers_conflict(addr)
264 && ! DRD_(is_suppressed)(addr, addr + 1))
266 drd_report_race(addr, 1, eStore);
270 static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
272 if (DRD_(running_thread_is_recording_stores)()
273 && (s_check_stack_accesses
274 || ! DRD_(thread_address_on_stack)(addr))
275 && bm_access_store_2_triggers_conflict(addr)
276 && ! DRD_(is_suppressed)(addr, addr + 2))
278 drd_report_race(addr, 2, eStore);
282 static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
284 if (DRD_(running_thread_is_recording_stores)()
285 && (s_check_stack_accesses
286 || !DRD_(thread_address_on_stack)(addr))
287 && bm_access_store_4_triggers_conflict(addr)
288 && !DRD_(is_suppressed)(addr, addr + 4))
290 drd_report_race(addr, 4, eStore);
294 static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
296 if (DRD_(running_thread_is_recording_stores)()
297 && (s_check_stack_accesses
298 || ! DRD_(thread_address_on_stack)(addr))
299 && bm_access_store_8_triggers_conflict(addr)
300 && ! DRD_(is_suppressed)(addr, addr + 8))
302 drd_report_race(addr, 8, eStore);
/*
 * Return true if and only if addr_expr matches the pattern (SP) or
 * <offset>(SP).
 */
static Bool is_stack_access(IRSB* const bb, IRExpr* const addr_expr)
{
   Bool result = False;

   if (addr_expr->tag == Iex_RdTmp)
   {
      int i;
      /* Scan the superblock for the statement that defines the temporary
         used as the address, and check whether that definition is a read
         of the guest stack pointer register. */
      for (i = 0; i < bb->stmts_used; i++)
      {
         if (bb->stmts[i]
             && bb->stmts[i]->tag == Ist_WrTmp
             && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp)
         {
            IRExpr* e = bb->stmts[i]->Ist.WrTmp.data;
            if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET)
            {
               result = True;
            }

            //ppIRExpr(e);
            //VG_(printf)(" (%s)\n", result ? "True" : "False");

            /* The defining statement was found; no need to scan further. */
            break;
         }
      }
   }
   return result;
}
/* Unsigned widening operations, indexed by [source type - Ity_I1] and by
   sizeof(HWord) (4 or 8). Entries not listed are zero, which callers map
   to Iop_INVALID. */
static const IROp u_widen_irop[5][9] = {
   [Ity_I1 - Ity_I1] = { [4] = Iop_1Uto32, [8] = Iop_1Uto64 },
   [Ity_I8 - Ity_I1] = { [4] = Iop_8Uto32, [8] = Iop_8Uto64 },
   [Ity_I16 - Ity_I1] = { [4] = Iop_16Uto32, [8] = Iop_16Uto64 },
   [Ity_I32 - Ity_I1] = { [8] = Iop_32Uto64 },
};
/*
 * Instrument the client code to trace a memory load (--trace-addr).
 *
 * The address expression is first materialized into a temporary so the
 * same value can be returned to the caller and passed to the dirty call.
 * Returns the (possibly rebound) address expression.
 */
static IRExpr* instr_trace_mem_load(IRSB* const bb, IRExpr* addr_expr,
                                    const HWord size,
                                    IRExpr* const guard/* NULL => True */)
{
   IRTemp tmp;

   tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
   addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
   addr_expr = IRExpr_RdTmp(tmp);
   IRDirty* di
      = unsafeIRDirty_0_N(/*regparms*/2,
                          "drd_trace_mem_load",
                          VG_(fnptr_to_fnentry)
                          (drd_trace_mem_load),
                          mkIRExprVec_2(addr_expr, mkIRExpr_HWord(size)));
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));

   return addr_expr;
}
/*
 * Instrument the client code to trace a memory store (--trace-addr).
 *
 * data_expr_hi, when non-NULL, holds the upper 32 bits of a store that is
 * wider than a HWord (asserted to be Ity_I32); data_expr_lo holds the value
 * or its lower half. The value is converted/widened to HWord-sized integer
 * expressions before being passed to the drd_trace_mem_store dirty call.
 */
static void instr_trace_mem_store(IRSB* const bb, IRExpr* const addr_expr,
                                  IRExpr* data_expr_hi, IRExpr* data_expr_lo,
                                  IRExpr* const guard/* NULL => True */)
{
   IRType ty_data_expr;
   HWord size;

   tl_assert(sizeof(HWord) == 4 || sizeof(HWord) == 8);
   tl_assert(!data_expr_hi || typeOfIRExpr(bb->tyenv, data_expr_hi) == Ity_I32);

   ty_data_expr = typeOfIRExpr(bb->tyenv, data_expr_lo);
   size = sizeofIRType(ty_data_expr);

#if 0
   // Test code
   if (ty_data_expr == Ity_I32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F32);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI32asF32, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F32;
   } else if (ty_data_expr == Ity_I64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F64);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI64asF64, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F64;
   }
#endif

   /* Reinterpret single/double-precision floats as same-sized integers so
      the generic widening logic below applies. */
   if (ty_data_expr == Ity_F32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I32);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF32asI32,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I32;
   } else if (ty_data_expr == Ity_F64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I64);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF64asI64,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I64;
   }

   if (size == sizeof(HWord)
       && (ty_data_expr == Ity_I32 || ty_data_expr == Ity_I64))
   {
      /* No conversion necessary */
   } else {
      IROp widen_op;

      /* Look up an unsigned widening op for narrow integer types; types
         outside the table's range get Iop_INVALID. */
      if (Ity_I1 <= ty_data_expr
          && ty_data_expr
          < Ity_I1 + sizeof(u_widen_irop)/sizeof(u_widen_irop[0]))
      {
         widen_op = u_widen_irop[ty_data_expr - Ity_I1][sizeof(HWord)];
         if (!widen_op)
            widen_op = Iop_INVALID;
      } else {
         widen_op = Iop_INVALID;
      }
      if (widen_op != Iop_INVALID) {
         IRTemp tmp;

         /* Widen the integer expression to a HWord */
         tmp = newIRTemp(bb->tyenv, sizeof(HWord) == 4 ? Ity_I32 : Ity_I64);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp, IRExpr_Unop(widen_op, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else if (size > sizeof(HWord) && !data_expr_hi
                 && ty_data_expr == Ity_I64) {
         IRTemp tmp;

         /* 32-bit host storing a 64-bit value: split it into the _hi/_lo
            halves expected by drd_trace_mem_store(). */
         tl_assert(sizeof(HWord) == 4);
         tl_assert(size == 8);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp,
                                    IRExpr_Unop(Iop_64HIto32, data_expr_lo)));
         data_expr_hi = IRExpr_RdTmp(tmp);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb, IRStmt_WrTmp(tmp,
                                        IRExpr_Unop(Iop_64to32, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else {
         /* Value cannot be represented (e.g. vector types): trace the
            address and size but pass zero as the value. */
         data_expr_lo = mkIRExpr_HWord(0);
      }
   }
   IRDirty* di
      = unsafeIRDirty_0_N(/*regparms*/3,
                          "drd_trace_mem_store",
                          VG_(fnptr_to_fnentry)(drd_trace_mem_store),
                          mkIRExprVec_4(addr_expr, mkIRExpr_HWord(size),
                                        data_expr_hi ? data_expr_hi
                                        : mkIRExpr_HWord(0), data_expr_lo));
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di) );
}
471 static void instrument_load(IRSB* const bb, IRExpr* const addr_expr,
472 const HWord size,
473 IRExpr* const guard/* NULL => True */)
475 IRExpr* size_expr;
476 IRExpr** argv;
477 IRDirty* di;
479 if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
480 return;
482 switch (size)
484 case 1:
485 argv = mkIRExprVec_1(addr_expr);
486 di = unsafeIRDirty_0_N(/*regparms*/1,
487 "drd_trace_load_1",
488 VG_(fnptr_to_fnentry)(drd_trace_load_1),
489 argv);
490 break;
491 case 2:
492 argv = mkIRExprVec_1(addr_expr);
493 di = unsafeIRDirty_0_N(/*regparms*/1,
494 "drd_trace_load_2",
495 VG_(fnptr_to_fnentry)(drd_trace_load_2),
496 argv);
497 break;
498 case 4:
499 argv = mkIRExprVec_1(addr_expr);
500 di = unsafeIRDirty_0_N(/*regparms*/1,
501 "drd_trace_load_4",
502 VG_(fnptr_to_fnentry)(drd_trace_load_4),
503 argv);
504 break;
505 case 8:
506 argv = mkIRExprVec_1(addr_expr);
507 di = unsafeIRDirty_0_N(/*regparms*/1,
508 "drd_trace_load_8",
509 VG_(fnptr_to_fnentry)(drd_trace_load_8),
510 argv);
511 break;
512 default:
513 size_expr = mkIRExpr_HWord(size);
514 argv = mkIRExprVec_2(addr_expr, size_expr);
515 di = unsafeIRDirty_0_N(/*regparms*/2,
516 "drd_trace_load",
517 VG_(fnptr_to_fnentry)(DRD_(trace_load)),
518 argv);
519 break;
521 if (guard) di->guard = guard;
522 addStmtToIRSB(bb, IRStmt_Dirty(di));
525 static void instrument_store(IRSB* const bb, IRExpr* addr_expr,
526 IRExpr* const data_expr,
527 IRExpr* const guard_expr/* NULL => True */)
529 IRExpr* size_expr;
530 IRExpr** argv;
531 IRDirty* di;
532 HWord size;
534 size = sizeofIRType(typeOfIRExpr(bb->tyenv, data_expr));
536 if (UNLIKELY(DRD_(any_address_is_traced)())) {
537 IRTemp tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
538 addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
539 addr_expr = IRExpr_RdTmp(tmp);
540 instr_trace_mem_store(bb, addr_expr, NULL, data_expr, guard_expr);
543 if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
544 return;
546 switch (size)
548 case 1:
549 argv = mkIRExprVec_1(addr_expr);
550 di = unsafeIRDirty_0_N(/*regparms*/1,
551 "drd_trace_store_1",
552 VG_(fnptr_to_fnentry)(drd_trace_store_1),
553 argv);
554 break;
555 case 2:
556 argv = mkIRExprVec_1(addr_expr);
557 di = unsafeIRDirty_0_N(/*regparms*/1,
558 "drd_trace_store_2",
559 VG_(fnptr_to_fnentry)(drd_trace_store_2),
560 argv);
561 break;
562 case 4:
563 argv = mkIRExprVec_1(addr_expr);
564 di = unsafeIRDirty_0_N(/*regparms*/1,
565 "drd_trace_store_4",
566 VG_(fnptr_to_fnentry)(drd_trace_store_4),
567 argv);
568 break;
569 case 8:
570 argv = mkIRExprVec_1(addr_expr);
571 di = unsafeIRDirty_0_N(/*regparms*/1,
572 "drd_trace_store_8",
573 VG_(fnptr_to_fnentry)(drd_trace_store_8),
574 argv);
575 break;
576 default:
577 size_expr = mkIRExpr_HWord(size);
578 argv = mkIRExprVec_2(addr_expr, size_expr);
579 di = unsafeIRDirty_0_N(/*regparms*/2,
580 "drd_trace_store",
581 VG_(fnptr_to_fnentry)(DRD_(trace_store)),
582 argv);
583 break;
585 if (guard_expr) di->guard = guard_expr;
586 addStmtToIRSB(bb, IRStmt_Dirty(di));
/**
 * DRD's main instrumentation callback: copy bb_in into a new superblock,
 * inserting race-check (and, with --trace-addr, tracing) dirty calls before
 * every statement that can reference client memory.
 */
IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
                       IRSB* const bb_in,
                       const VexGuestLayout* const layout,
                       const VexGuestExtents* const vge,
                       const VexArchInfo* archinfo_host,
                       IRType const gWordTy,
                       IRType const hWordTy)
{
   IRDirty* di;
   Int      i;
   IRSB*    bb;
   IRExpr** argv;
   Bool     instrument = True;

   /* Set up BB */
   bb           = emptyIRSB();
   bb->tyenv    = deepCopyIRTypeEnv(bb_in->tyenv);
   bb->next     = deepCopyIRExpr(bb_in->next);
   bb->jumpkind = bb_in->jumpkind;
   bb->offsIP   = bb_in->offsIP;

   for (i = 0; i < bb_in->stmts_used; i++)
   {
      IRStmt* const st = bb_in->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));

      switch (st->tag)
      {
         /* Note: the code for not instrumenting the code in .plt */
         /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21 */
         /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4). */
         /* This is because on this platform dynamic library symbols are */
         /* relocated in another way than by later binutils versions. The */
         /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */
      case Ist_IMark:
         /* Toggle instrumentation per guest instruction: code in .plt
            sections is copied through without race checking. */
         instrument = VG_(DebugInfo_sect_kind)(NULL, st->Ist.IMark.addr)
            != Vg_SectPLT;
         addStmtToIRSB(bb, st);
         break;

      case Ist_MBE:
         switch (st->Ist.MBE.event)
         {
         case Imbe_Fence:
            break; /* not interesting to DRD */
         case Imbe_CancelReservation:
            break; /* not interesting to DRD */
         default:
            tl_assert(0);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Store:
         if (instrument)
            instrument_store(bb, st->Ist.Store.addr, st->Ist.Store.data,
                             NULL/* no guard */);
         addStmtToIRSB(bb, st);
         break;

      case Ist_StoreG: {
         /* Guarded store: the store's guard is propagated to the check. */
         IRStoreG* sg = st->Ist.StoreG.details;
         IRExpr* data = sg->data;
         IRExpr* addr = sg->addr;
         if (instrument)
            instrument_store(bb, addr, data, sg->guard);
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_LoadG: {
         /* Guarded load: check the pre-widening loaded size. */
         IRLoadG* lg = st->Ist.LoadG.details;
         IRType type = Ity_INVALID; /* loaded type */
         IRType typeWide = Ity_INVALID; /* after implicit widening */
         IRExpr* addr_expr = lg->addr;
         typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
         tl_assert(type != Ity_INVALID);
         if (UNLIKELY(DRD_(any_address_is_traced)())) {
            addr_expr = instr_trace_mem_load(bb, addr_expr,
                                             sizeofIRType(type), lg->guard);
         }
         /* NOTE(review): lg->addr is passed here rather than the rebound
            addr_expr from instr_trace_mem_load() — presumably harmless
            since both denote the same address; confirm this is intended. */
         instrument_load(bb, lg->addr,
                         sizeofIRType(type), lg->guard);
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_WrTmp:
         if (instrument) {
            const IRExpr* const data = st->Ist.WrTmp.data;
            /* NOTE(review): data->Iex.Load.addr is read before the tag is
               checked on the next line; the union member is only valid when
               data->tag == Iex_Load. The value is not used unless the tag
               check passes — consider moving the read inside the if. */
            IRExpr* addr_expr = data->Iex.Load.addr;
            if (data->tag == Iex_Load) {
               if (UNLIKELY(DRD_(any_address_is_traced)())) {
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                sizeofIRType(data->Iex.Load.ty),
                                                NULL/* no guard */);
               }
               instrument_load(bb, addr_expr, sizeofIRType(data->Iex.Load.ty),
                               NULL/* no guard */);
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Dirty:
         if (instrument) {
            IRDirty* d = st->Ist.Dirty.details;
            IREffect const mFx = d->mFx;
            /* Helper calls that declare memory effects are checked as a
               generic load and/or store over [mAddr, mAddr+mSize[. */
            switch (mFx) {
            case Ifx_None:
               break;
            case Ifx_Read:
            case Ifx_Write:
            case Ifx_Modify:
               tl_assert(d->mAddr);
               tl_assert(d->mSize > 0);
               argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
               if (mFx == Ifx_Read || mFx == Ifx_Modify) {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_load",
                          VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               if (mFx == Ifx_Write || mFx == Ifx_Modify)
               {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_store",
                          VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               break;
            default:
               tl_assert(0);
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_CAS:
         if (instrument) {
            /*
             * Treat compare-and-swap as a read. By handling atomic
             * instructions as read instructions no data races are reported
             * between conflicting atomic operations nor between atomic
             * operations and non-atomic reads. Conflicts between atomic
             * operations and non-atomic write operations are still reported
             * however.
             */
            Int dataSize;
            IRCAS* cas = st->Ist.CAS.details;

            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataSize = sizeofIRType(typeOfIRExpr(bb->tyenv, cas->dataLo));
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */

            if (UNLIKELY(DRD_(any_address_is_traced)()))
               instr_trace_mem_store(bb, cas->addr, cas->dataHi, cas->dataLo,
                                     NULL/* no guard */);

            instrument_load(bb, cas->addr, dataSize, NULL/*no guard*/);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_LLSC: {
         /*
          * Ignore store-conditionals (except for tracing), and handle
          * load-linked's exactly like normal loads.
          */
         IRType dataTy;

         if (st->Ist.LLSC.storedata == NULL) {
            /* LL */
            dataTy = typeOfIRTemp(bb_in->tyenv, st->Ist.LLSC.result);
            if (instrument) {
               IRExpr* addr_expr = st->Ist.LLSC.addr;
               if (UNLIKELY(DRD_(any_address_is_traced)()))
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                   sizeofIRType(dataTy),
                                                   NULL /* no guard */);

               instrument_load(bb, addr_expr, sizeofIRType(dataTy),
                               NULL/*no guard*/);
            }
         } else {
            /* SC: traced only; no race check (see comment above). */
            instr_trace_mem_store(bb, st->Ist.LLSC.addr, NULL,
                                  st->Ist.LLSC.storedata,
                                  NULL/* no guard */);
         }
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_NoOp:
      case Ist_AbiHint:
      case Ist_Put:
      case Ist_PutI:
      case Ist_Exit:
         /* None of these can contain any memory references. */
         addStmtToIRSB(bb, st);
         break;

      default:
         ppIRStmt(st);
         tl_assert(0);
      }
   }

   return bb;
}