/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2020 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, see <http://www.gnu.org/licenses/>.

  The GNU General Public License is contained in the file COPYING.
*/
#ifndef __DRD_THREAD_BITMAP_H
#define __DRD_THREAD_BITMAP_H

#include "drd_bitmap.h"
#include "drd_thread.h" /* running_thread_get_segment() */
#include "pub_drd_bitmap.h"
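
/* Record a one-byte load in the running thread's segment bitmap and
   report whether it conflicts with the thread's conflict set. */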
static __inline__
Bool bm_access_load_1_triggers_conflict(const Addr a1)
{
   DRD_(bm_access_load_1)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1);
   return DRD_(bm_load_1_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                            a1);
}
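
/* Record a two-byte load. Aligned accesses take the fast aligned path;
   misaligned accesses fall back to the generic range functions. */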
static __inline__
Bool bm_access_load_2_triggers_conflict(const Addr a1)
{
   if ((a1 & 1) == 0)
   {
      bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, 2);
      return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(),
                                               a1, 2);
   }
   else
   {
      DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                            a1, a1 + 2, eLoad);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 2, eLoad);
   }
}
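
/* Record a four-byte load. Four-byte-aligned accesses take the fast
   aligned path; misaligned accesses fall back to the range functions. */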
static __inline__
Bool bm_access_load_4_triggers_conflict(const Addr a1)
{
   if ((a1 & 3) == 0)
   {
      bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, 4);
      return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(),
                                               a1, 4);
   }
   else
   {
      DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                            a1, a1 + 4, eLoad);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 4, eLoad);
   }
}
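
/* Record an eight-byte load. Eight-byte-aligned accesses take the fast
   aligned path, four-byte-aligned accesses are recorded as two aligned
   four-byte loads, and all other accesses use the range functions. */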
static __inline__
Bool bm_access_load_8_triggers_conflict(const Addr a1)
{
   if ((a1 & 7) == 0)
   {
      bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, 8);
      return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(),
                                               a1, 8);
   }
   else if ((a1 & 3) == 0)
   {
      bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1 + 0, 4);
      bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1 + 4, 4);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 8, eLoad);
   }
   else
   {
      DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                            a1, a1 + 8, eLoad);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 8, eLoad);
   }
}
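
/* Record a load from the address range [a1, a2) and report whether it
   conflicts with the thread's conflict set. */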
static __inline__
Bool bm_access_load_triggers_conflict(const Addr a1, const Addr a2)
{
   DRD_(bm_access_range_load)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, a2);
   return DRD_(bm_load_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                          a1, a2);
}
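
/* Store counterpart of bm_access_load_1_triggers_conflict(). */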
static __inline__
Bool bm_access_store_1_triggers_conflict(const Addr a1)
{
   DRD_(bm_access_store_1)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1);
   return DRD_(bm_store_1_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                             a1);
}
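
/* Record a two-byte store. Aligned accesses take the fast aligned path;
   misaligned accesses fall back to the generic range functions. */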
static __inline__
Bool bm_access_store_2_triggers_conflict(const Addr a1)
{
   if ((a1 & 1) == 0)
   {
      bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, 2);
      return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(),
                                                a1, 2);
   }
   else
   {
      DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                            a1, a1 + 2, eStore);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 2, eStore);
   }
}
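
/* Record a four-byte store. Four-byte-aligned accesses take the fast
   aligned path; misaligned accesses fall back to the range functions. */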
static __inline__
Bool bm_access_store_4_triggers_conflict(const Addr a1)
{
   if ((a1 & 3) == 0)
   {
      bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, 4);
      return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(),
                                                a1, 4);
   }
   else
   {
      DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                            a1, a1 + 4, eStore);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 4, eStore);
   }
}
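
/* Record an eight-byte store. Eight-byte-aligned accesses take the fast
   aligned path, four-byte-aligned accesses are recorded as two aligned
   four-byte stores, and all other accesses use the range functions. */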
static __inline__
Bool bm_access_store_8_triggers_conflict(const Addr a1)
{
   if ((a1 & 7) == 0)
   {
      bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, 8);
      return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(),
                                                a1, 8);
   }
   else if ((a1 & 3) == 0)
   {
      bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                              a1 + 0, 4);
      bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                              a1 + 4, 4);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 8, eStore);
   }
   else
   {
      DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                            a1, a1 + 8, eStore);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 8, eStore);
   }
}
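
/* Record a store to the address range [a1, a2) and report whether it
   conflicts with the thread's conflict set. */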
static __inline__
Bool bm_access_store_triggers_conflict(const Addr a1, const Addr a2)
{
   DRD_(bm_access_range_store)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, a2);
   return DRD_(bm_store_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                           a1, a2);
}
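
/*
 * Illustrative sketch of a caller, showing how drd's memory access
 * instrumentation might use these helpers. The names drd_trace_load_4(),
 * drd_report_race() and the two guard functions below are assumptions for
 * illustration and are not declared in this header:
 *
 *   static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
 *   {
 *      // Report a race only if loads are being recorded for the running
 *      // thread, the access conflicts with another thread's accesses and
 *      // the address range is not suppressed (assumed helpers).
 *      if (DRD_(running_thread_is_recording_loads)()
 *          && bm_access_load_4_triggers_conflict(addr)
 *          && !DRD_(is_suppressed)(addr, addr + 4))
 *      {
 *         drd_report_race(addr, 4, eLoad);
 *      }
 *   }
 */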

#endif // __DRD_THREAD_BITMAP_H