/* Copyright (C) 2008, 2009, 2011, 2012 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Transactional Memory Library (libitm).

   Libitm is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "libitm_i.h"
#include <pthread.h>

using namespace GTM;

#if !defined(HAVE_ARCH_GTM_THREAD) || !defined(HAVE_ARCH_GTM_THREAD_DISP)
extern __thread gtm_thread_tls _gtm_thr_tls;
#endif
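
// The serial lock protects the list of threads below; serial-mode
// transactions acquire it for writing, all other transactions for reading.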
gtm_rwlock GTM::gtm_thread::serial_lock;
gtm_thread *GTM::gtm_thread::list_of_threads = 0;
unsigned GTM::gtm_thread::number_of_threads = 0;
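
// Global STM lock array and global version-number clock shared by the
// TM method implementations.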
gtm_stmlock GTM::gtm_stmlock_array[LOCK_ARRAY_SIZE];
atomic<gtm_version> GTM::gtm_clock;

/* ??? Move elsewhere when we figure out library initialization.  */
uint64_t GTM::gtm_spin_count_var = 1000;
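
// Source of transaction IDs.  Threads grab blocks of IDs from here (see
// begin_transaction) so that most transaction begins avoid an atomic
// update of this shared counter.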
#ifdef HAVE_64BIT_SYNC_BUILTINS
static atomic<_ITM_transactionId_t> global_tid;
#else
static _ITM_transactionId_t global_tid;
static pthread_mutex_t global_tid_lock = PTHREAD_MUTEX_INITIALIZER;
#endif

// Provides an on-thread-exit callback used to release per-thread data.
static pthread_key_t thr_release_key;
static pthread_once_t thr_release_once = PTHREAD_ONCE_INIT;

/* Allocate a transaction structure.  */
void *
GTM::gtm_thread::operator new (size_t s)
{
  void *tx;

  assert(s == sizeof(gtm_thread));

  tx = xmalloc (sizeof (gtm_thread), true);
  memset (tx, 0, sizeof (gtm_thread));

  return tx;
}

/* Free the given transaction. Raises an error if the transaction is still
   in use.  */
void
GTM::gtm_thread::operator delete(void *tx)
{
  free(tx);
}
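
// TLS destructor: deletes the per-thread transaction object when its
// thread exits with one still installed.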
static void
thread_exit_handler(void *)
{
  gtm_thread *thr = gtm_thr();
  if (thr)
    delete thr;
  set_gtm_thr(0);
}
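
// One-time setup (via pthread_once) of the TLS key whose destructor
// releases per-thread data.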
static void
thread_exit_init()
{
  if (pthread_key_create(&thr_release_key, thread_exit_handler))
    GTM_fatal("Creating thread release TLS key failed.");
}

GTM::gtm_thread::~gtm_thread()
{
  if (nesting > 0)
    GTM_fatal("Thread exit while a transaction is still active.");

  // Deregister this transaction.
  serial_lock.write_lock ();
  gtm_thread **prev = &list_of_threads;
  for (; *prev; prev = &(*prev)->next_thread)
    {
      if (*prev == this)
        {
          *prev = (*prev)->next_thread;
          break;
        }
    }
  number_of_threads--;
  number_of_threads_changed(number_of_threads + 1, number_of_threads);
  serial_lock.write_unlock ();
}

GTM::gtm_thread::gtm_thread ()
{
  // This object's memory has been set to zero by operator new, so no need
  // to initialize any of the other primitive-type members that do not have
  // constructors.
  shared_state.store(-1, memory_order_relaxed);

  // Register this transaction with the list of all threads' transactions.
  serial_lock.write_lock ();
  next_thread = list_of_threads;
  list_of_threads = this;
  number_of_threads++;
  number_of_threads_changed(number_of_threads - 1, number_of_threads);
  serial_lock.write_unlock ();

  if (pthread_once(&thr_release_once, thread_exit_init))
    GTM_fatal("Initializing thread release TLS key failed.");
  // Any non-null value is sufficient to trigger destruction of this
  // transaction when the current thread terminates.
  if (pthread_setspecific(thr_release_key, this))
    GTM_fatal("Setting thread release TLS key failed.");
}
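
// Decide which code path the compiler-generated caller should run: the
// uninstrumented path if one was provided and the current method can run
// it, otherwise the instrumented path.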
static inline uint32_t
choose_code_path(uint32_t prop, abi_dispatch *disp)
{
  if ((prop & pr_uninstrumentedCode) && disp->can_run_uninstrumented_code())
    return a_runUninstrumentedCode;
  else
    return a_runInstrumentedCode;
}
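
// Begin or elide a transaction with properties PROP, using the caller's
// register checkpoint *JB for restarts and aborts.  Returns the code-path
// and save-live-variables actions for the compiler-generated caller.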
uint32_t
GTM::gtm_thread::begin_transaction (uint32_t prop, const gtm_jmpbuf *jb)
{
  static const _ITM_transactionId_t tid_block_size = 1 << 16;

  gtm_thread *tx;
  abi_dispatch *disp;
  uint32_t ret;

  // ??? pr_undoLogCode is not properly defined in the ABI. Are barriers
  // omitted because they are not necessary (e.g., a transaction on thread-
  // local data) or because the compiler thinks that some kind of global
  // synchronization might perform better?
  if (unlikely(prop & pr_undoLogCode))
    GTM_fatal("pr_undoLogCode not supported");

  tx = gtm_thr();
  if (unlikely(tx == NULL))
    {
      // Create the thread object. The constructor will also set up automatic
      // deletion on thread termination.
      tx = new gtm_thread();
      set_gtm_thr(tx);
    }

  if (tx->nesting > 0)
    {
      // This is a nested transaction.
      // Check prop compatibility:
      // The ABI requires pr_hasNoFloatUpdate, pr_hasNoVectorUpdate,
      // pr_hasNoIrrevocable, pr_aWBarriersOmitted, pr_RaRBarriersOmitted, and
      // pr_hasNoSimpleReads to hold for the full dynamic scope of a
      // transaction. We could check that these are set for the nested
      // transaction if they are also set for the parent transaction, but the
      // ABI does not require these flags to be set if they could be set,
      // so the check could be too strict.
      // ??? For pr_readOnly, lexical or dynamic scope is unspecified.

      if (prop & pr_hasNoAbort)
        {
          // We can use flat nesting, so elide this transaction.
          if (!(prop & pr_instrumentedCode))
            {
              if (!(tx->state & STATE_SERIAL) ||
                  !(tx->state & STATE_IRREVOCABLE))
                tx->serialirr_mode();
            }
          // Increment nesting level after checking that we have a method that
          // allows us to continue.
          tx->nesting++;
          return choose_code_path(prop, abi_disp());
        }

      // The transaction might abort, so use closed nesting if possible.
      // pr_hasNoAbort has lexical scope, so the compiler should really have
      // generated an instrumented code path.
      assert(prop & pr_instrumentedCode);

      // Create a checkpoint of the current transaction.
      gtm_transaction_cp *cp = tx->parent_txns.push();
      cp->save(tx);
      new (&tx->alloc_actions) aa_tree<uintptr_t, gtm_alloc_action>();

      // Check whether the current method actually supports closed nesting.
      // If we can switch to another one, do so.
      // If not, we assume that actual aborts are infrequent, and rather
      // restart in _ITM_abortTransaction when we really have to.
      disp = abi_disp();
      if (!disp->closed_nesting())
        {
          // ??? Should we elide the transaction if there is no alternative
          // method that supports closed nesting? If we do, we need to set
          // some flag to prevent _ITM_abortTransaction from aborting the
          // wrong transaction (i.e., some parent transaction).
          abi_dispatch *cn_disp = disp->closed_nesting_alternative();
          if (cn_disp)
            {
              disp = cn_disp;
              set_abi_disp(disp);
            }
        }
    }
  else
    {
      // Outermost transaction
      disp = tx->decide_begin_dispatch (prop);
      if (disp == dispatch_serialirr() || disp == dispatch_serial())
        {
          tx->state = STATE_SERIAL;
          if (disp == dispatch_serialirr())
            tx->state |= STATE_IRREVOCABLE;
          serial_lock.write_lock ();
        }
      else
        serial_lock.read_lock (tx);

      set_abi_disp (disp);
    }

  // Initialization that is common for outermost and nested transactions.
  tx->prop = prop;
  tx->nesting++;

  tx->jb = *jb;

  // As long as we have not exhausted a previously allocated block of TIDs,
  // we can avoid an atomic operation on a shared cacheline.
  if (tx->local_tid & (tid_block_size - 1))
    tx->id = tx->local_tid++;
  else
    {
#ifdef HAVE_64BIT_SYNC_BUILTINS
      // We don't really care which block of TIDs we get but only that we
      // acquire one atomically; therefore, relaxed memory order is
      // sufficient.
      tx->id = global_tid.fetch_add(tid_block_size, memory_order_relaxed);
      tx->local_tid = tx->id + 1;
#else
      pthread_mutex_lock (&global_tid_lock);
      global_tid += tid_block_size;
      tx->id = global_tid;
      tx->local_tid = tx->id + 1;
      pthread_mutex_unlock (&global_tid_lock);
#endif
    }

  // Run dispatch-specific restart code. Retry until we succeed.
  GTM::gtm_restart_reason rr;
  while ((rr = disp->begin_or_restart()) != NO_RESTART)
    {
      tx->decide_retry_strategy(rr);
      disp = abi_disp();
    }

  // Determine the code path to run. Only irrevocable transactions cannot be
  // restarted, so all other transactions need to save live variables.
  ret = choose_code_path(prop, disp);
  if (!(tx->state & STATE_IRREVOCABLE))
    ret |= a_saveLiveVariables;
  return ret;
}

void
GTM::gtm_transaction_cp::save(gtm_thread* tx)
{
  // Save everything that we might have to restore on restarts or aborts.
  jb = tx->jb;
  undolog_size = tx->undolog.size();
  memcpy(&alloc_actions, &tx->alloc_actions, sizeof(alloc_actions));
  user_actions_size = tx->user_actions.size();
  id = tx->id;
  prop = tx->prop;
  cxa_catch_count = tx->cxa_catch_count;
  cxa_unthrown = tx->cxa_unthrown;
  disp = abi_disp();
  nesting = tx->nesting;
}

void
GTM::gtm_transaction_cp::commit(gtm_thread* tx)
{
  // Restore state that is not persistent across commits.  Exception handling
  // information, nesting level, and any logs do not need to be restored on
  // commits of nested transactions.  Allocation actions must be committed
  // before committing the snapshot.
  tx->jb = jb;
  memcpy(&tx->alloc_actions, &alloc_actions, sizeof(alloc_actions));
  tx->id = id;
  tx->prop = prop;
}
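
// Roll back the transaction, either to the checkpoint CP of a closed-nested
// transaction (only valid when aborting) or, if CP is null, to the
// outermost transaction.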
void
GTM::gtm_thread::rollback (gtm_transaction_cp *cp, bool aborting)
{
  // The undo log is special in that it is used for both thread-local and
  // shared data.  Because of the latter, we have to roll it back before any
  // dispatch-specific rollback (which handles synchronization with other
  // transactions).
  undolog.rollback (this, cp ? cp->undolog_size : 0);

  // Perform dispatch-specific rollback.
  abi_disp()->rollback (cp);

  // Roll back all actions that are supposed to happen around the transaction.
  rollback_user_actions (cp ? cp->user_actions_size : 0);
  commit_allocations (true, (cp ? &cp->alloc_actions : 0));
  revert_cpp_exceptions (cp);

  if (cp)
    {
      // We do not yet handle restarts of nested transactions. To do that, we
      // would have to restore some state (jb, id, prop, nesting) not to the
      // checkpoint but to the transaction that was started from this
      // checkpoint (e.g., nesting = cp->nesting + 1);
      assert(aborting);
      // Roll back the rest of the state to the checkpoint.
      jb = cp->jb;
      id = cp->id;
      prop = cp->prop;
      if (cp->disp != abi_disp())
        set_abi_disp(cp->disp);
      memcpy(&alloc_actions, &cp->alloc_actions, sizeof(alloc_actions));
      nesting = cp->nesting;
    }
  else
    {
      // Roll back to the outermost transaction.
      // Restore the jump buffer and transaction properties, which we will
      // need for the longjmp used to restart or abort the transaction.
      if (parent_txns.size() > 0)
        {
          jb = parent_txns[0].jb;
          id = parent_txns[0].id;
          prop = parent_txns[0].prop;
        }
      // Reset the transaction. Do not reset this->state, which is handled by
      // the callers. Note that if we are not aborting, we reset the
      // transaction to the point after having executed begin_transaction
      // (we will return from it), so the nesting level must be one, not zero.
      nesting = (aborting ? 0 : 1);
      parent_txns.clear();
    }

  if (this->eh_in_flight)
    {
      _Unwind_DeleteException ((_Unwind_Exception *) this->eh_in_flight);
      this->eh_in_flight = NULL;
    }
}
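
// ABI entry point for an explicit user abort: rolls back the innermost
// transaction, or the outermost one if outerAbort is set, and jumps back
// to the begin of the aborted transaction.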
void ITM_REGPARM
_ITM_abortTransaction (_ITM_abortReason reason)
{
  gtm_thread *tx = gtm_thr();

  assert (reason == userAbort || reason == (userAbort | outerAbort));
  assert ((tx->prop & pr_hasNoAbort) == 0);

  if (tx->state & gtm_thread::STATE_IRREVOCABLE)
    abort ();

  // Roll back to innermost transaction.
  if (tx->parent_txns.size() > 0 && !(reason & outerAbort))
    {
      // If the current method does not support closed nesting but we are
      // nested and must only roll back the innermost transaction, then
      // restart with a method that supports closed nesting.
      abi_dispatch *disp = abi_disp();
      if (!disp->closed_nesting())
        tx->restart(RESTART_CLOSED_NESTING);

      // The innermost transaction is a closed nested transaction.
      gtm_transaction_cp *cp = tx->parent_txns.pop();
      uint32_t longjmp_prop = tx->prop;
      gtm_jmpbuf longjmp_jb = tx->jb;

      tx->rollback (cp, true);

      // Jump to nested transaction (use the saved jump buffer).
      GTM_longjmp (a_abortTransaction | a_restoreLiveVariables,
                   &longjmp_jb, longjmp_prop);
    }
  else
    {
      // There is no nested transaction or an abort of the outermost
      // transaction was requested, so roll back to the outermost transaction.
      tx->rollback (0, true);

      // Aborting an outermost transaction finishes execution of the whole
      // transaction. Therefore, reset transaction state.
      if (tx->state & gtm_thread::STATE_SERIAL)
        gtm_thread::serial_lock.write_unlock ();
      else
        gtm_thread::serial_lock.read_unlock (tx);
      tx->state = 0;

      GTM_longjmp (a_abortTransaction | a_restoreLiveVariables,
                   &tx->jb, tx->prop);
    }
}
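
// Commit the innermost transaction.  Nested and elided commits always
// succeed; for an outermost transaction, returns false if the
// dispatch-specific commit fails and the transaction must be restarted.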
bool
GTM::gtm_thread::trycommit ()
{
  nesting--;

  // Skip any real commit for elided transactions.
  if (nesting > 0 && (parent_txns.size() == 0 ||
      nesting > parent_txns[parent_txns.size() - 1].nesting))
    return true;

  if (nesting > 0)
    {
      // Commit of a closed-nested transaction. Remove one checkpoint and add
      // any effects of this transaction to the parent transaction.
      gtm_transaction_cp *cp = parent_txns.pop();
      commit_allocations(false, &cp->alloc_actions);
      cp->commit(this);
      return true;
    }

  // Commit of an outermost transaction.
  gtm_word priv_time = 0;
  if (abi_disp()->trycommit (priv_time))
    {
      // The transaction is now inactive. Everything that we still have to do
      // will not synchronize with other transactions anymore.
      if (state & gtm_thread::STATE_SERIAL)
        {
          gtm_thread::serial_lock.write_unlock ();
          // There are no other active transactions, so there's no need to
          // enforce privatization safety.
          priv_time = 0;
        }
      else
        gtm_thread::serial_lock.read_unlock (this);
      state = 0;

      // We can commit the undo log after dispatch-specific commit and after
      // making the transaction inactive because we only have to reset
      // gtm_thread state.
      undolog.commit ();
      // Reset further transaction state.
      cxa_catch_count = 0;
      cxa_unthrown = NULL;
      restart_total = 0;

      // Ensure privatization safety, if necessary.
      if (priv_time)
        {
          // There must be a seq_cst fence between the following loads of the
          // other transactions' shared_state and the dispatch-specific stores
          // that signal updates by this transaction (e.g., lock
          // acquisitions).  This ensures that if we read prior to other
          // reader transactions setting their shared_state to 0, then those
          // readers will observe our updates.  We can reuse the seq_cst fence
          // in serial_lock.read_unlock() however, so we don't need another
          // one here.
          // TODO Don't just spin but also block using cond vars / futexes
          // here. Should probably be integrated with the serial lock code.
          for (gtm_thread *it = gtm_thread::list_of_threads; it != 0;
              it = it->next_thread)
            {
              if (it == this) continue;
              // We need to load other threads' shared_state using acquire
              // semantics (matching the release semantics of the respective
              // updates).  This is necessary to ensure that the other
              // threads' memory accesses happen before our actions that
              // assume privatization safety.
              // TODO Are there any platform-specific optimizations (e.g.,
              // merging barriers)?
              while (it->shared_state.load(memory_order_acquire) < priv_time)
                cpu_relax();
            }
        }

      // After ensuring privatization safety, we execute potentially
      // privatizing actions (e.g., calling free()). User actions are first.
      commit_user_actions ();
      commit_allocations (false, 0);

      return true;
    }
  return false;
}
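
// Roll back to the outermost transaction, pick a retry strategy for
// restart reason R, and jump back to the transaction's begin.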
void ITM_NORETURN
GTM::gtm_thread::restart (gtm_restart_reason r, bool finish_serial_upgrade)
{
  // Roll back to outermost transaction. Do not reset transaction state because
  // we will continue executing this transaction.
  rollback ();

  // If we have to restart while an upgrade of the serial lock is happening,
  // we need to finish this here, after rollback (to ensure privatization
  // safety despite undo writes) and before deciding about the retry strategy
  // (which could switch to/from serial mode).
  if (finish_serial_upgrade)
    gtm_thread::serial_lock.write_upgrade_finish(this);

  decide_retry_strategy (r);

  // Run dispatch-specific restart code. Retry until we succeed.
  abi_dispatch* disp = abi_disp();
  GTM::gtm_restart_reason rr;
  while ((rr = disp->begin_or_restart()) != NO_RESTART)
    {
      decide_retry_strategy(rr);
      disp = abi_disp();
    }

  GTM_longjmp (choose_code_path(prop, disp) | a_restoreLiveVariables,
               &jb, prop);
}
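
// ABI entry points for committing the innermost transaction.  The EH
// variant records the in-flight exception so that a failed commit's
// rollback can dispose of it.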
void ITM_REGPARM
_ITM_commitTransaction(void)
{
  gtm_thread *tx = gtm_thr();
  if (!tx->trycommit ())
    tx->restart (RESTART_VALIDATE_COMMIT);
}

void ITM_REGPARM
_ITM_commitTransactionEH(void *exc_ptr)
{
  gtm_thread *tx = gtm_thr();
  if (!tx->trycommit ())
    {
      tx->eh_in_flight = exc_ptr;
      tx->restart (RESTART_VALIDATE_COMMIT);
    }
}