/* Copyright (C) 2011-2018 Free Software Foundation, Inc.
   Contributed by Torvald Riegel <triegel@redhat.com>.

   This file is part of the GNU Transactional Memory Library (libitm).

   Libitm is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef DISPATCH_H
#define DISPATCH_H 1

#include "libitm.h"
#include "common.h"

// Creates ABI load/store methods (can be made virtual or static using M,
// use M2 to create separate method names for virtual and static).
// The _PV variants are for the pure-virtual methods in the base class.
#define ITM_READ_M(T, LSMOD, M, M2)                                          \
  M _ITM_TYPE_##T ITM_REGPARM ITM_##LSMOD##T##M2 (const _ITM_TYPE_##T *ptr)  \
  {                                                                          \
    return load(ptr, abi_dispatch::LSMOD);                                   \
  }

#define ITM_READ_M_PV(T, LSMOD, M, M2)                                       \
  M _ITM_TYPE_##T ITM_REGPARM ITM_##LSMOD##T##M2 (const _ITM_TYPE_##T *ptr)  \
  = 0;

#define ITM_WRITE_M(T, LSMOD, M, M2)                          \
  M void ITM_REGPARM ITM_##LSMOD##T##M2 (_ITM_TYPE_##T *ptr,  \
                                          _ITM_TYPE_##T val)  \
  {                                                           \
    store(ptr, val, abi_dispatch::LSMOD);                     \
  }

#define ITM_WRITE_M_PV(T, LSMOD, M, M2)                       \
  M void ITM_REGPARM ITM_##LSMOD##T##M2 (_ITM_TYPE_##T *ptr,  \
                                          _ITM_TYPE_##T val)  \
  = 0;

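// Illustration only (not part of the ABI): with T = U4, LSMOD = R,
// M = virtual and an empty M2, ITM_READ_M expands roughly to
//   virtual _ITM_TYPE_U4 ITM_REGPARM ITM_RU4 (const _ITM_TYPE_U4 *ptr)
//   { return load(ptr, abi_dispatch::R); }
// i.e. one accessor per type and per load/store modifier, delegating to the
// dispatch class's load()/store() helpers.
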
// Creates ABI load/store methods for all load/store modifiers for a
// particular type.
#define CREATE_DISPATCH_METHODS_T(T, M, M2)  \
  ITM_READ_M(T, R, M, M2)                    \
  ITM_READ_M(T, RaR, M, M2)                  \
  ITM_READ_M(T, RaW, M, M2)                  \
  ITM_READ_M(T, RfW, M, M2)                  \
  ITM_WRITE_M(T, W, M, M2)                   \
  ITM_WRITE_M(T, WaR, M, M2)                 \
  ITM_WRITE_M(T, WaW, M, M2)
#define CREATE_DISPATCH_METHODS_T_PV(T, M, M2)  \
  ITM_READ_M_PV(T, R, M, M2)                    \
  ITM_READ_M_PV(T, RaR, M, M2)                  \
  ITM_READ_M_PV(T, RaW, M, M2)                  \
  ITM_READ_M_PV(T, RfW, M, M2)                  \
  ITM_WRITE_M_PV(T, W, M, M2)                   \
  ITM_WRITE_M_PV(T, WaR, M, M2)                 \
  ITM_WRITE_M_PV(T, WaW, M, M2)

// Creates ABI load/store methods for all types.
// See CREATE_DISPATCH_FUNCTIONS for comments.
#define CREATE_DISPATCH_METHODS(M, M2)   \
  CREATE_DISPATCH_METHODS_T (U1, M, M2)  \
  CREATE_DISPATCH_METHODS_T (U2, M, M2)  \
  CREATE_DISPATCH_METHODS_T (U4, M, M2)  \
  CREATE_DISPATCH_METHODS_T (U8, M, M2)  \
  CREATE_DISPATCH_METHODS_T (F, M, M2)   \
  CREATE_DISPATCH_METHODS_T (D, M, M2)   \
  CREATE_DISPATCH_METHODS_T (E, M, M2)   \
  CREATE_DISPATCH_METHODS_T (CF, M, M2)  \
  CREATE_DISPATCH_METHODS_T (CD, M, M2)  \
  CREATE_DISPATCH_METHODS_T (CE, M, M2)
#define CREATE_DISPATCH_METHODS_PV(M, M2)   \
  CREATE_DISPATCH_METHODS_T_PV (U1, M, M2)  \
  CREATE_DISPATCH_METHODS_T_PV (U2, M, M2)  \
  CREATE_DISPATCH_METHODS_T_PV (U4, M, M2)  \
  CREATE_DISPATCH_METHODS_T_PV (U8, M, M2)  \
  CREATE_DISPATCH_METHODS_T_PV (F, M, M2)   \
  CREATE_DISPATCH_METHODS_T_PV (D, M, M2)   \
  CREATE_DISPATCH_METHODS_T_PV (E, M, M2)   \
  CREATE_DISPATCH_METHODS_T_PV (CF, M, M2)  \
  CREATE_DISPATCH_METHODS_T_PV (CD, M, M2)  \
  CREATE_DISPATCH_METHODS_T_PV (CE, M, M2)

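// (The type suffixes follow the ITM ABI: U1/U2/U4/U8 are 1-, 2-, 4- and
//  8-byte unsigned integers, F/D/E are float, double and long double, and
//  CF/CD/CE are the corresponding complex types.)
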
// Creates memcpy/memmove/memset methods.
#define CREATE_DISPATCH_METHODS_MEM()  \
virtual void memtransfer(void *dst, const void* src, size_t size,       \
    bool may_overlap, ls_modifier dst_mod, ls_modifier src_mod)         \
{                                                                        \
  if (size > 0)                                                          \
    memtransfer_static(dst, src, size, may_overlap, dst_mod, src_mod);  \
}                                                                        \
virtual void memset(void *dst, int c, size_t size, ls_modifier mod)     \
{                                                                        \
  if (size > 0)                                                          \
    memset_static(dst, c, size, mod);                                    \
}

#define CREATE_DISPATCH_METHODS_MEM_PV()  \
virtual void memtransfer(void *dst, const void* src, size_t size,     \
    bool may_overlap, ls_modifier dst_mod, ls_modifier src_mod) = 0;  \
virtual void memset(void *dst, int c, size_t size, ls_modifier mod) = 0;

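// Illustration only (a sketch, not part of this header): a concrete
// dispatch class is expected to provide per-type load()/store() helpers and
// static memtransfer_static()/memset_static() barriers with the signatures
// implied by the calls above, and then instantiate the macros, roughly:
//
//   class example_dispatch : public abi_dispatch   // hypothetical name
//   {
//     // ... load()/store() overloads and the *_static memory barriers ...
//   public:
//     CREATE_DISPATCH_METHODS(virtual, )
//     CREATE_DISPATCH_METHODS_MEM()
//   };
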
// Creates ABI load/store functions that can target either a class or an
// object.
#define ITM_READ(T, LSMOD, TARGET, M2)                                   \
  _ITM_TYPE_##T ITM_REGPARM _ITM_##LSMOD##T (const _ITM_TYPE_##T *ptr)   \
  {                                                                      \
    return TARGET ITM_##LSMOD##T##M2(ptr);                               \
  }

#define ITM_WRITE(T, LSMOD, TARGET, M2)                                      \
  void ITM_REGPARM _ITM_##LSMOD##T (_ITM_TYPE_##T *ptr, _ITM_TYPE_##T val)   \
  {                                                                          \
    TARGET ITM_##LSMOD##T##M2(ptr, val);                                     \
  }

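// Illustration only (TARGET and M2 are chosen by the macro's user; the
// object-style target below is an assumption): ITM_READ(U4, R, disp->, )
// expands roughly to
//   _ITM_TYPE_U4 ITM_REGPARM _ITM_RU4 (const _ITM_TYPE_U4 *ptr)
//   { return disp-> ITM_RU4(ptr); }
// whereas a class name as TARGET plus "_static" as M2 yields a direct call
// to the corresponding ITM_RU4_static method.
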
// Creates ABI load/store functions for all load/store modifiers for a
// particular type.
#define CREATE_DISPATCH_FUNCTIONS_T(T, TARGET, M2)  \
  ITM_READ(T, R, TARGET, M2)                        \
  ITM_READ(T, RaR, TARGET, M2)                      \
  ITM_READ(T, RaW, TARGET, M2)                      \
  ITM_READ(T, RfW, TARGET, M2)                      \
  ITM_WRITE(T, W, TARGET, M2)                       \
  ITM_WRITE(T, WaR, TARGET, M2)                     \
  ITM_WRITE(T, WaW, TARGET, M2)

// Creates ABI memcpy/memmove/memset functions.
#define ITM_MEMTRANSFER_DEF(TARGET, M2, NAME, READ, WRITE)                     \
void ITM_REGPARM _ITM_memcpy##NAME(void *dst, const void *src, size_t size)   \
{                                                                              \
  TARGET memtransfer##M2 (dst, src, size,                                      \
      false, GTM::abi_dispatch::WRITE, GTM::abi_dispatch::READ);               \
}                                                                              \
void ITM_REGPARM _ITM_memmove##NAME(void *dst, const void *src, size_t size)  \
{                                                                              \
  TARGET memtransfer##M2 (dst, src, size,                                      \
      GTM::abi_dispatch::memmove_overlap_check(dst, src, size,                 \
          GTM::abi_dispatch::WRITE, GTM::abi_dispatch::READ),                  \
      GTM::abi_dispatch::WRITE, GTM::abi_dispatch::READ);                      \
}

#define ITM_MEMSET_DEF(TARGET, M2, WRITE)                            \
void ITM_REGPARM _ITM_memset##WRITE(void *dst, int c, size_t size)  \
{                                                                    \
  TARGET memset##M2 (dst, c, size, GTM::abi_dispatch::WRITE);       \
}

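// Illustration only (the object-style target is an assumption):
// ITM_MEMSET_DEF(disp->, , W) expands roughly to
//   void ITM_REGPARM _ITM_memsetW(void *dst, int c, size_t size)
//   { disp-> memset (dst, c, size, GTM::abi_dispatch::W); }
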
// ??? The number of virtual methods is large (7*4 for integers, 7*6 for FP,
// 7*3 for vectors).  Is the cache footprint so costly that we should go for
// a small table instead (i.e., only have two virtual load/store methods for
// each supported type)?  Note that this doesn't affect custom code paths at
// all because these use only direct calls.
// A large cache footprint could especially decrease HTM performance (due
// to HTM capacity).  We could add the modifier (RaR etc.) as a parameter,
// which would give us just 4*2+6*2+3*2 functions (so we'd just need one line
// for the integer loads/stores), but then the modifier could be checked only
// at runtime.
// For memcpy/memmove/memset, we just have two virtual methods (memtransfer
// and memset).
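// (For reference, that trade-off is 7*(4+6+3) = 91 virtual accessors in the
//  current scheme versus 2*(4+6+3) = 26 if the modifier became a runtime
//  parameter.)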
#define CREATE_DISPATCH_FUNCTIONS(TARGET, M2)  \
  CREATE_DISPATCH_FUNCTIONS_T (U1, TARGET, M2) \
  CREATE_DISPATCH_FUNCTIONS_T (U2, TARGET, M2) \
  CREATE_DISPATCH_FUNCTIONS_T (U4, TARGET, M2) \
  CREATE_DISPATCH_FUNCTIONS_T (U8, TARGET, M2) \
  CREATE_DISPATCH_FUNCTIONS_T (F, TARGET, M2)  \
  CREATE_DISPATCH_FUNCTIONS_T (D, TARGET, M2)  \
  CREATE_DISPATCH_FUNCTIONS_T (E, TARGET, M2)  \
  CREATE_DISPATCH_FUNCTIONS_T (CF, TARGET, M2) \
  CREATE_DISPATCH_FUNCTIONS_T (CD, TARGET, M2) \
  CREATE_DISPATCH_FUNCTIONS_T (CE, TARGET, M2) \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RnWt,     NONTXNAL, W)    \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RnWtaR,   NONTXNAL, WaR)  \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RnWtaW,   NONTXNAL, WaW)  \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RtWn,     R, NONTXNAL)    \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RtWt,     R, W)           \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RtWtaR,   R, WaR)         \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RtWtaW,   R, WaW)         \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RtaRWn,   RaR, NONTXNAL)  \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RtaRWt,   RaR, W)         \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RtaRWtaR, RaR, WaR)       \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RtaRWtaW, RaR, WaW)       \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RtaWWn,   RaW, NONTXNAL)  \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RtaWWt,   RaW, W)         \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RtaWWtaR, RaW, WaR)       \
  ITM_MEMTRANSFER_DEF(TARGET, M2, RtaWWtaW, RaW, WaW)       \
  ITM_MEMSET_DEF(TARGET, M2, W)   \
  ITM_MEMSET_DEF(TARGET, M2, WaR) \
  ITM_MEMSET_DEF(TARGET, M2, WaW)

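// (The function name suffixes encode the memcpy/memmove modifiers: e.g.
//  _ITM_memcpyRnWt reads nontransactionally and writes transactionally,
//  while _ITM_memcpyRtaRWtaW reads "transactional after read" and writes
//  "transactional after write", matching the READ/WRITE arguments above.)
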
// Creates ABI load/store functions that delegate to a transactional memcpy.
#define ITM_READ_MEMCPY(T, LSMOD, TARGET, M2)                            \
  _ITM_TYPE_##T ITM_REGPARM _ITM_##LSMOD##T (const _ITM_TYPE_##T *ptr)   \
  {                                                                      \
    _ITM_TYPE_##T v;                                                     \
    TARGET memtransfer##M2(&v, ptr, sizeof(_ITM_TYPE_##T), false,        \
        GTM::abi_dispatch::NONTXNAL, GTM::abi_dispatch::LSMOD);          \
    return v;                                                            \
  }

#define ITM_WRITE_MEMCPY(T, LSMOD, TARGET, M2)                               \
  void ITM_REGPARM _ITM_##LSMOD##T (_ITM_TYPE_##T *ptr, _ITM_TYPE_##T val)   \
  {                                                                          \
    TARGET memtransfer##M2(ptr, &val, sizeof(_ITM_TYPE_##T), false,          \
        GTM::abi_dispatch::LSMOD, GTM::abi_dispatch::NONTXNAL);              \
  }

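// Illustration only (the object-style target is an assumption):
// ITM_READ_MEMCPY(U4, R, disp->, ) expands roughly to
//   _ITM_TYPE_U4 ITM_REGPARM _ITM_RU4 (const _ITM_TYPE_U4 *ptr)
//   {
//     _ITM_TYPE_U4 v;
//     disp-> memtransfer(&v, ptr, sizeof(_ITM_TYPE_U4), false,
//         GTM::abi_dispatch::NONTXNAL, GTM::abi_dispatch::R);
//     return v;
//   }
// i.e. the typed load is routed through the generic memtransfer barrier.
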
#define CREATE_DISPATCH_FUNCTIONS_T_MEMCPY(T, TARGET, M2)  \
  ITM_READ_MEMCPY(T, R, TARGET, M2)                        \
  ITM_READ_MEMCPY(T, RaR, TARGET, M2)                      \
  ITM_READ_MEMCPY(T, RaW, TARGET, M2)                      \
  ITM_READ_MEMCPY(T, RfW, TARGET, M2)                      \
  ITM_WRITE_MEMCPY(T, W, TARGET, M2)                       \
  ITM_WRITE_MEMCPY(T, WaR, TARGET, M2)                     \
  ITM_WRITE_MEMCPY(T, WaW, TARGET, M2)

namespace GTM HIDDEN {

struct gtm_transaction_cp;

struct method_group
{
  // Start using a TM method from this group.  This constructs required meta
  // data on demand when this method group is actually used.  Will be called
  // either on first use or after a previous call to fini().
  virtual void init() = 0;
  // Stop using any method from this group for now.  This can be used to
  // destruct meta data as soon as this method group is not used anymore.
  virtual void fini() = 0;
  // This can be overridden to implement more light-weight re-initialization.
  virtual void reinit()
  {
    fini();
    init();
  }
};

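// (Illustration only, not part of this header: a concrete method group
//  would typically allocate shared metadata -- e.g. an ownership-record
//  table -- in init() and release it again in fini().)
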
// This is the base interface that all TM methods have to implement.
struct abi_dispatch
{
public:
  enum ls_modifier { NONTXNAL, R, RaR, RaW, RfW, W, WaR, WaW };

private:
  // Disallow copies.
  abi_dispatch(const abi_dispatch &) = delete;
  abi_dispatch& operator=(const abi_dispatch &) = delete;

public:
  // Starts or restarts a transaction.  Is called right before executing the
  // transactional application code (by either returning from
  // gtm_thread::begin_transaction or doing the longjmp when restarting).
  // Returns NO_RESTART if the transaction started successfully.  Returns
  // a real restart reason if it couldn't start and does need to abort.  This
  // allows TM methods to just give up and delegate ensuring progress to the
  // restart mechanism.  If it returns a restart reason, this call must be
  // idempotent because it will trigger the restart mechanism, which could
  // switch to a different TM method.
  virtual gtm_restart_reason begin_or_restart() = 0;
  // Tries to commit the transaction.  Iff this returns true, the transaction
  // got committed and all per-transaction data will have been reset.
  // Currently, this is called only for the commit of the outermost
  // transaction, or when switching to serial mode (which can happen in a
  // nested transaction).
  // If privatization safety must be ensured in a quiescence-based way, set
  // priv_time to a value different from 0.  Nontransactional code will not
  // be executed after this commit until all registered threads' shared_state
  // is larger than or equal to this value.
  virtual bool trycommit(gtm_word& priv_time) = 0;
  // Rolls back a transaction.  Called on abort or after trycommit() returned
  // false.
  virtual void rollback(gtm_transaction_cp *cp = 0) = 0;
  // Returns true iff the snapshot is most recent, which will be the case if
  // this transaction cannot be the reason why other transactions cannot
  // ensure privatization safety.
  virtual bool snapshot_most_recent() = 0;

  // Returns an alternative method that is compatible with the current
  // method but supports closed nesting.  Returns zero if there is none.
  // Note that to be compatible, it must be possible to switch to this other
  // method at the beginning of a nested transaction without committing or
  // restarting the parent method.
  virtual abi_dispatch* closed_nesting_alternative() { return 0; }
  // Returns true iff this method group supports the current situation.
  // NUMBER_OF_THREADS is the current number of threads that might execute
  // transactions.
  virtual bool supports(unsigned number_of_threads) { return true; }

  bool read_only () const { return m_read_only; }
  bool write_through() const { return m_write_through; }
  bool can_run_uninstrumented_code() const
  {
    return m_can_run_uninstrumented_code;
  }
  // Returns true iff this TM method supports closed nesting.
  bool closed_nesting() const { return m_closed_nesting; }
  // Returns STATE_SERIAL or STATE_SERIAL | STATE_IRREVOCABLE iff the TM
  // method only works for serial-mode transactions.
  uint32_t requires_serial() const { return m_requires_serial; }
  method_group* get_method_group() const { return m_method_group; }

  static void *operator new(size_t s) { return xmalloc (s); }
  static void operator delete(void *p) { free (p); }

  static bool memmove_overlap_check(void *dst, const void *src, size_t size,
      ls_modifier dst_mod, ls_modifier src_mod);

  // Creates the ABI dispatch methods for loads and stores.
  // ??? Should the dispatch table instead be embedded in the dispatch object
  // to avoid the indirect lookup in the vtable?
  CREATE_DISPATCH_METHODS_PV(virtual, )
  // Creates the ABI dispatch methods for memcpy/memmove/memset.
  CREATE_DISPATCH_METHODS_MEM_PV()

protected:
  const bool m_read_only;
  const bool m_write_through;
  const bool m_can_run_uninstrumented_code;
  const bool m_closed_nesting;
  const uint32_t m_requires_serial;
  method_group* const m_method_group;

  abi_dispatch(bool ro, bool wt, bool uninstrumented, bool closed_nesting,
      uint32_t requires_serial, method_group* mg) :
    m_read_only(ro), m_write_through(wt),
    m_can_run_uninstrumented_code(uninstrumented),
    m_closed_nesting(closed_nesting), m_requires_serial(requires_serial),
    m_method_group(mg)
  { }
};