/* libitm/beginend.cc — transaction begin/commit/abort entry points of the
   GNU Transactional Memory Library (libitm).  */
1 /* Copyright (C) 2008-2013 Free Software Foundation, Inc.
2 Contributed by Richard Henderson <rth@redhat.com>.
4 This file is part of the GNU Transactional Memory Library (libitm).
6 Libitm is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
16 Under Section 7 of GPL version 3, you are granted additional
17 permissions described in the GCC Runtime Library Exception, version
18 3.1, as published by the Free Software Foundation.
20 You should have received a copy of the GNU General Public License and
21 a copy of the GCC Runtime Library Exception along with this program;
22 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 <http://www.gnu.org/licenses/>. */
25 #include "libitm_i.h"
26 #include <pthread.h>
using namespace GTM;

// Fallback TLS slot for the per-thread gtm_thread pointer, used when the
// target architecture does not provide its own TLS access mechanism.
#if !defined(HAVE_ARCH_GTM_THREAD) || !defined(HAVE_ARCH_GTM_THREAD_DISP)
extern __thread gtm_thread_tls _gtm_thr_tls;
#endif

// Reader/writer lock that serializes serial-mode transactions against all
// other transactions; its writer side also guards the thread registry below.
gtm_rwlock GTM::gtm_thread::serial_lock;
gtm_thread *GTM::gtm_thread::list_of_threads = 0;
unsigned GTM::gtm_thread::number_of_threads = 0;

// STM lock array and the global version clock used by the dispatch methods.
gtm_stmlock GTM::gtm_stmlock_array[LOCK_ARRAY_SIZE];
atomic<gtm_version> GTM::gtm_clock;

/* ??? Move elsewhere when we figure out library initialization. */
uint64_t GTM::gtm_spin_count_var = 1000;

// Source of transaction IDs.  IDs are handed out to threads in blocks (see
// begin_transaction) so that most begins avoid touching this shared state.
#ifdef HAVE_64BIT_SYNC_BUILTINS
static atomic<_ITM_transactionId_t> global_tid;
#else
static _ITM_transactionId_t global_tid;
static pthread_mutex_t global_tid_lock = PTHREAD_MUTEX_INITIALIZER;
#endif

// Provides a on-thread-exit callback used to release per-thread data.
static pthread_key_t thr_release_key;
static pthread_once_t thr_release_once = PTHREAD_ONCE_INIT;

// See gtm_thread::begin_transaction.
uint32_t GTM::htm_fastpath = 0;
60 /* Allocate a transaction structure. */
61 void *
62 GTM::gtm_thread::operator new (size_t s)
64 void *tx;
66 assert(s == sizeof(gtm_thread));
68 tx = xmalloc (sizeof (gtm_thread), true);
69 memset (tx, 0, sizeof (gtm_thread));
71 return tx;
74 /* Free the given transaction. Raises an error if the transaction is still
75 in use. */
76 void
77 GTM::gtm_thread::operator delete(void *tx)
79 free(tx);
82 static void
83 thread_exit_handler(void *)
85 gtm_thread *thr = gtm_thr();
86 if (thr)
87 delete thr;
88 set_gtm_thr(0);
91 static void
92 thread_exit_init()
94 if (pthread_key_create(&thr_release_key, thread_exit_handler))
95 GTM_fatal("Creating thread release TLS key failed.");
99 GTM::gtm_thread::~gtm_thread()
101 if (nesting > 0)
102 GTM_fatal("Thread exit while a transaction is still active.");
104 // Deregister this transaction.
105 serial_lock.write_lock ();
106 gtm_thread **prev = &list_of_threads;
107 for (; *prev; prev = &(*prev)->next_thread)
109 if (*prev == this)
111 *prev = (*prev)->next_thread;
112 break;
115 number_of_threads--;
116 number_of_threads_changed(number_of_threads + 1, number_of_threads);
117 serial_lock.write_unlock ();
120 GTM::gtm_thread::gtm_thread ()
122 // This object's memory has been set to zero by operator new, so no need
123 // to initialize any of the other primitive-type members that do not have
124 // constructors.
125 shared_state.store(-1, memory_order_relaxed);
127 // Register this transaction with the list of all threads' transactions.
128 serial_lock.write_lock ();
129 next_thread = list_of_threads;
130 list_of_threads = this;
131 number_of_threads++;
132 number_of_threads_changed(number_of_threads - 1, number_of_threads);
133 serial_lock.write_unlock ();
135 if (pthread_once(&thr_release_once, thread_exit_init))
136 GTM_fatal("Initializing thread release TLS key failed.");
137 // Any non-null value is sufficient to trigger destruction of this
138 // transaction when the current thread terminates.
139 if (pthread_setspecific(thr_release_key, this))
140 GTM_fatal("Setting thread release TLS key failed.");
143 static inline uint32_t
144 choose_code_path(uint32_t prop, abi_dispatch *disp)
146 if ((prop & pr_uninstrumentedCode) && disp->can_run_uninstrumented_code())
147 return a_runUninstrumentedCode;
148 else
149 return a_runInstrumentedCode;
/* ABI entry point invoked at the start of every transaction.  Creates the
   calling thread's gtm_thread on first use, chooses a dispatch method (or
   the HTM fastpath), assigns a transaction ID, and returns the action
   flags telling the compiled code which code path to run.  PROP carries
   the compiler-provided transaction properties; JB is the jump buffer
   used later to restart or abort the transaction.  */
uint32_t
GTM::gtm_thread::begin_transaction (uint32_t prop, const gtm_jmpbuf *jb)
{
  // TIDs are taken from global_tid in blocks of this size (a power of two,
  // so block-exhaustion can be tested with a mask below).
  static const _ITM_transactionId_t tid_block_size = 1 << 16;

  gtm_thread *tx;
  abi_dispatch *disp;
  uint32_t ret;

  // ??? pr_undoLogCode is not properly defined in the ABI. Are barriers
  // omitted because they are not necessary (e.g., a transaction on thread-
  // local data) or because the compiler thinks that some kind of global
  // synchronization might perform better?
  if (unlikely(prop & pr_undoLogCode))
    GTM_fatal("pr_undoLogCode not supported");

#if defined(USE_HTM_FASTPATH) && !defined(HTM_CUSTOM_FASTPATH)
  // HTM fastpath. Only chosen in the absence of transaction_cancel to allow
  // using an uninstrumented code path.
  // The fastpath is enabled only by dispatch_htm's method group, which uses
  // serial-mode methods as fallback. Serial-mode transactions cannot execute
  // concurrently with HW transactions because the latter monitor the serial
  // lock's writer flag and thus abort if another thread is or becomes a
  // serial transaction. Therefore, if the fastpath is enabled, then a
  // transaction is not executing as a HW transaction iff the serial lock is
  // write-locked. This allows us to use htm_fastpath and the serial lock's
  // writer flag to reliable determine whether the current thread runs a HW
  // transaction, and thus we do not need to maintain this information in
  // per-thread state.
  // If an uninstrumented code path is not available, we can still run
  // instrumented code from a HW transaction because the HTM fastpath kicks
  // in early in both begin and commit, and the transaction is not canceled.
  // HW transactions might get requests to switch to serial-irrevocable mode,
  // but these can be ignored because the HTM provides all necessary
  // correctness guarantees. Transactions cannot detect whether they are
  // indeed in serial mode, and HW transactions should never need serial mode
  // for any internal changes (e.g., they never abort visibly to the STM code
  // and thus do not trigger the standard retry handling).
  if (likely(htm_fastpath && (prop & pr_hasNoAbort)))
    {
      // htm_fastpath doubles as the retry budget for HW transactions.
      for (uint32_t t = htm_fastpath; t; t--)
	{
	  uint32_t ret = htm_begin();
	  if (htm_begin_success(ret))
	    {
	      // We are executing a transaction now.
	      // Monitor the writer flag in the serial-mode lock, and abort
	      // if there is an active or waiting serial-mode transaction.
	      // Note that this can also happen due to an enclosing
	      // serial-mode transaction; we handle this case below.
	      if (unlikely(serial_lock.is_write_locked()))
		htm_abort();
	      else
		// We do not need to set a_saveLiveVariables because of HTM.
		return (prop & pr_uninstrumentedCode) ?
		    a_runUninstrumentedCode : a_runInstrumentedCode;
	    }

	  // The transaction has aborted. Don't retry if it's unlikely that
	  // retrying the transaction will be successful.
	  if (!htm_abort_should_retry(ret))
	    break;
	  // Wait until any concurrent serial-mode transactions have finished.
	  // This is an empty critical section, but won't be elided.
	  if (serial_lock.is_write_locked())
	    {
	      tx = gtm_thr();
	      if (unlikely(tx == NULL))
		{
		  // See below.
		  tx = new gtm_thread();
		  set_gtm_thr(tx);
		}
	      // Check whether there is an enclosing serial-mode transaction;
	      // if so, we just continue as a nested transaction and don't
	      // try to use the HTM fastpath. This case can happen when an
	      // outermost relaxed transaction calls unsafe code that starts
	      // a transaction.
	      if (tx->nesting > 0)
		break;
	      // Another thread is running a serial-mode transaction. Wait.
	      serial_lock.read_lock(tx);
	      serial_lock.read_unlock(tx);
	      // TODO We should probably reset the retry count t here, unless
	      // we have retried so often that we should go serial to avoid
	      // starvation.
	    }
	}
    }
#endif

  tx = gtm_thr();
  if (unlikely(tx == NULL))
    {
      // Create the thread object. The constructor will also set up automatic
      // deletion on thread termination.
      tx = new gtm_thread();
      set_gtm_thr(tx);
    }

  if (tx->nesting > 0)
    {
      // This is a nested transaction.
      // Check prop compatibility:
      // The ABI requires pr_hasNoFloatUpdate, pr_hasNoVectorUpdate,
      // pr_hasNoIrrevocable, pr_aWBarriersOmitted, pr_RaRBarriersOmitted, and
      // pr_hasNoSimpleReads to hold for the full dynamic scope of a
      // transaction. We could check that these are set for the nested
      // transaction if they are also set for the parent transaction, but the
      // ABI does not require these flags to be set if they could be set,
      // so the check could be too strict.
      // ??? For pr_readOnly, lexical or dynamic scope is unspecified.

      if (prop & pr_hasNoAbort)
	{
	  // We can use flat nesting, so elide this transaction.
	  if (!(prop & pr_instrumentedCode))
	    {
	      // Uninstrumented nested code requires serial-irrevocable mode.
	      if (!(tx->state & STATE_SERIAL) ||
		  !(tx->state & STATE_IRREVOCABLE))
		tx->serialirr_mode();
	    }
	  // Increment nesting level after checking that we have a method that
	  // allows us to continue.
	  tx->nesting++;
	  return choose_code_path(prop, abi_disp());
	}

      // The transaction might abort, so use closed nesting if possible.
      // pr_hasNoAbort has lexical scope, so the compiler should really have
      // generated an instrumented code path.
      assert(prop & pr_instrumentedCode);

      // Create a checkpoint of the current transaction.
      gtm_transaction_cp *cp = tx->parent_txns.push();
      cp->save(tx);
      // The checkpoint took a bitwise copy of alloc_actions; start the
      // nested transaction with a freshly-constructed, empty tree.
      new (&tx->alloc_actions) aa_tree<uintptr_t, gtm_alloc_action>();

      // Check whether the current method actually supports closed nesting.
      // If we can switch to another one, do so.
      // If not, we assume that actual aborts are infrequent, and rather
      // restart in _ITM_abortTransaction when we really have to.
      disp = abi_disp();
      if (!disp->closed_nesting())
	{
	  // ??? Should we elide the transaction if there is no alternative
	  // method that supports closed nesting? If we do, we need to set
	  // some flag to prevent _ITM_abortTransaction from aborting the
	  // wrong transaction (i.e., some parent transaction).
	  abi_dispatch *cn_disp = disp->closed_nesting_alternative();
	  if (cn_disp)
	    {
	      disp = cn_disp;
	      set_abi_disp(disp);
	    }
	}
    }
  else
    {
      // Outermost transaction
      disp = tx->decide_begin_dispatch (prop);
      set_abi_disp (disp);
    }

  // Initialization that is common for outermost and nested transactions.
  tx->prop = prop;
  tx->nesting++;

  tx->jb = *jb;

  // As long as we have not exhausted a previously allocated block of TIDs,
  // we can avoid an atomic operation on a shared cacheline.
  if (tx->local_tid & (tid_block_size - 1))
    tx->id = tx->local_tid++;
  else
    {
#ifdef HAVE_64BIT_SYNC_BUILTINS
      // We don't really care which block of TIDs we get but only that we
      // acquire one atomically; therefore, relaxed memory order is
      // sufficient.
      tx->id = global_tid.fetch_add(tid_block_size, memory_order_relaxed);
      tx->local_tid = tx->id + 1;
#else
      pthread_mutex_lock (&global_tid_lock);
      global_tid += tid_block_size;
      tx->id = global_tid;
      tx->local_tid = tx->id + 1;
      pthread_mutex_unlock (&global_tid_lock);
#endif
    }

  // Run dispatch-specific restart code. Retry until we succeed.
  GTM::gtm_restart_reason rr;
  while ((rr = disp->begin_or_restart()) != NO_RESTART)
    {
      tx->decide_retry_strategy(rr);
      disp = abi_disp();
    }

  // Determine the code path to run. Only irrevocable transactions cannot be
  // restarted, so all other transactions need to save live variables.
  ret = choose_code_path(prop, disp);
  if (!(tx->state & STATE_IRREVOCABLE))
    ret |= a_saveLiveVariables;
  return ret;
}
359 void
360 GTM::gtm_transaction_cp::save(gtm_thread* tx)
362 // Save everything that we might have to restore on restarts or aborts.
363 jb = tx->jb;
364 undolog_size = tx->undolog.size();
365 memcpy(&alloc_actions, &tx->alloc_actions, sizeof(alloc_actions));
366 user_actions_size = tx->user_actions.size();
367 id = tx->id;
368 prop = tx->prop;
369 cxa_catch_count = tx->cxa_catch_count;
370 cxa_unthrown = tx->cxa_unthrown;
371 disp = abi_disp();
372 nesting = tx->nesting;
375 void
376 GTM::gtm_transaction_cp::commit(gtm_thread* tx)
378 // Restore state that is not persistent across commits. Exception handling,
379 // information, nesting level, and any logs do not need to be restored on
380 // commits of nested transactions. Allocation actions must be committed
381 // before committing the snapshot.
382 tx->jb = jb;
383 memcpy(&tx->alloc_actions, &alloc_actions, sizeof(alloc_actions));
384 tx->id = id;
385 tx->prop = prop;
/* Undo the current transaction's effects and restore the thread's state.
   With a non-null CP, only the innermost nested transaction is rolled back
   to that checkpoint (currently only supported when ABORTING); with a null
   CP, everything is rolled back to the outermost transaction.  */
void
GTM::gtm_thread::rollback (gtm_transaction_cp *cp, bool aborting)
{
  // The undo log is special in that it used for both thread-local and shared
  // data. Because of the latter, we have to roll it back before any
  // dispatch-specific rollback (which handles synchronization with other
  // transactions).
  undolog.rollback (this, cp ? cp->undolog_size : 0);

  // Perform dispatch-specific rollback.
  abi_disp()->rollback (cp);

  // Roll back all actions that are supposed to happen around the transaction.
  rollback_user_actions (cp ? cp->user_actions_size : 0);
  commit_allocations (true, (cp ? &cp->alloc_actions : 0));
  revert_cpp_exceptions (cp);

  if (cp)
    {
      // We do not yet handle restarts of nested transactions. To do that, we
      // would have to restore some state (jb, id, prop, nesting) not to the
      // checkpoint but to the transaction that was started from this
      // checkpoint (e.g., nesting = cp->nesting + 1);
      assert(aborting);
      // Roll back the rest of the state to the checkpoint.
      jb = cp->jb;
      id = cp->id;
      prop = cp->prop;
      if (cp->disp != abi_disp())
	set_abi_disp(cp->disp);
      memcpy(&alloc_actions, &cp->alloc_actions, sizeof(alloc_actions));
      nesting = cp->nesting;
    }
  else
    {
      // Roll back to the outermost transaction.
      // Restore the jump buffer and transaction properties, which we will
      // need for the longjmp used to restart or abort the transaction.
      if (parent_txns.size() > 0)
	{
	  jb = parent_txns[0].jb;
	  id = parent_txns[0].id;
	  prop = parent_txns[0].prop;
	}
      // Reset the transaction. Do not reset this->state, which is handled by
      // the callers. Note that if we are not aborting, we reset the
      // transaction to the point after having executed begin_transaction
      // (we will return from it), so the nesting level must be one, not zero.
      nesting = (aborting ? 0 : 1);
      parent_txns.clear();
    }

  // Dispose of any C++ exception that was propagating through the
  // transaction when it was torn down.
  if (this->eh_in_flight)
    {
      _Unwind_DeleteException ((_Unwind_Exception *) this->eh_in_flight);
      this->eh_in_flight = NULL;
    }
}
/* ABI entry point for a user-requested transaction abort.  Rolls back the
   innermost transaction (or the outermost one if outerAbort is set) and
   longjmps back to the corresponding begin.  Does not return.  */
void ITM_REGPARM
_ITM_abortTransaction (_ITM_abortReason reason)
{
  gtm_thread *tx = gtm_thr();

  assert (reason == userAbort || reason == (userAbort | outerAbort));
  assert ((tx->prop & pr_hasNoAbort) == 0);

  // Irrevocable transactions cannot be rolled back at all.
  if (tx->state & gtm_thread::STATE_IRREVOCABLE)
    abort ();

  // Roll back to innermost transaction.
  if (tx->parent_txns.size() > 0 && !(reason & outerAbort))
    {
      // If the current method does not support closed nesting but we are
      // nested and must only roll back the innermost transaction, then
      // restart with a method that supports closed nesting.
      abi_dispatch *disp = abi_disp();
      if (!disp->closed_nesting())
	tx->restart(RESTART_CLOSED_NESTING);

      // The innermost transaction is a closed nested transaction.
      gtm_transaction_cp *cp = tx->parent_txns.pop();
      // Copy jb/prop before rollback overwrites them with the checkpoint's
      // values; the longjmp must use the aborted transaction's own buffer.
      uint32_t longjmp_prop = tx->prop;
      gtm_jmpbuf longjmp_jb = tx->jb;

      tx->rollback (cp, true);

      // Jump to nested transaction (use the saved jump buffer).
      GTM_longjmp (a_abortTransaction | a_restoreLiveVariables,
		   &longjmp_jb, longjmp_prop);
    }
  else
    {
      // There is no nested transaction or an abort of the outermost
      // transaction was requested, so roll back to the outermost transaction.
      tx->rollback (0, true);

      // Aborting an outermost transaction finishes execution of the whole
      // transaction. Therefore, reset transaction state.
      if (tx->state & gtm_thread::STATE_SERIAL)
	gtm_thread::serial_lock.write_unlock ();
      else
	gtm_thread::serial_lock.read_unlock (tx);
      tx->state = 0;

      GTM_longjmp (a_abortTransaction | a_restoreLiveVariables,
		   &tx->jb, tx->prop);
    }
}
/* Try to commit the innermost active transaction.  Returns true on
   success (including elided and closed-nested commits); false means the
   dispatch method failed to commit and the caller must restart.  */
bool
GTM::gtm_thread::trycommit ()
{
  nesting--;

  // Skip any real commit for elided transactions: still nested, and either
  // no checkpoint exists at all or the innermost checkpoint belongs to a
  // shallower nesting level (i.e., this level was flat-nested).
  if (nesting > 0 && (parent_txns.size() == 0 ||
      nesting > parent_txns[parent_txns.size() - 1].nesting))
    return true;

  if (nesting > 0)
    {
      // Commit of a closed-nested transaction. Remove one checkpoint and add
      // any effects of this transaction to the parent transaction.
      gtm_transaction_cp *cp = parent_txns.pop();
      commit_allocations(false, &cp->alloc_actions);
      cp->commit(this);
      return true;
    }

  // Commit of an outermost transaction.
  gtm_word priv_time = 0;
  if (abi_disp()->trycommit (priv_time))
    {
      // The transaction is now inactive. Everything that we still have to do
      // will not synchronize with other transactions anymore.
      if (state & gtm_thread::STATE_SERIAL)
	{
	  gtm_thread::serial_lock.write_unlock ();
	  // There are no other active transactions, so there's no need to
	  // enforce privatization safety.
	  priv_time = 0;
	}
      else
	gtm_thread::serial_lock.read_unlock (this);
      state = 0;

      // We can commit the undo log after dispatch-specific commit and after
      // making the transaction inactive because we only have to reset
      // gtm_thread state.
      undolog.commit ();
      // Reset further transaction state.
      cxa_catch_count = 0;
      cxa_unthrown = NULL;
      restart_total = 0;

      // Ensure privatization safety, if necessary.
      if (priv_time)
	{
	  // There must be a seq_cst fence between the following loads of the
	  // other transactions' shared_state and the dispatch-specific stores
	  // that signal updates by this transaction (e.g., lock
	  // acquisitions). This ensures that if we read prior to other
	  // reader transactions setting their shared_state to 0, then those
	  // readers will observe our updates. We can reuse the seq_cst fence
	  // in serial_lock.read_unlock() however, so we don't need another
	  // one here.
	  // TODO Don't just spin but also block using cond vars / futexes
	  // here. Should probably be integrated with the serial lock code.
	  for (gtm_thread *it = gtm_thread::list_of_threads; it != 0;
	      it = it->next_thread)
	    {
	      if (it == this) continue;
	      // We need to load other threads' shared_state using acquire
	      // semantics (matching the release semantics of the respective
	      // updates). This is necessary to ensure that the other
	      // threads' memory accesses happen before our actions that
	      // assume privatization safety.
	      // TODO Are there any platform-specific optimizations (e.g.,
	      // merging barriers)?
	      while (it->shared_state.load(memory_order_acquire) < priv_time)
		cpu_relax();
	    }
	}

      // After ensuring privatization safety, we execute potentially
      // privatizing actions (e.g., calling free()). User actions are first.
      commit_user_actions ();
      commit_allocations (false, 0);

      return true;
    }
  return false;
}
/* Restart the current (outermost) transaction for reason R: roll back,
   pick a retry strategy, re-run the dispatch begin code, and longjmp back
   to the transaction's begin.  Does not return.  */
void ITM_NORETURN
GTM::gtm_thread::restart (gtm_restart_reason r, bool finish_serial_upgrade)
{
  // Roll back to outermost transaction. Do not reset transaction state because
  // we will continue executing this transaction.
  rollback ();

  // If we have to restart while an upgrade of the serial lock is happening,
  // we need to finish this here, after rollback (to ensure privatization
  // safety despite undo writes) and before deciding about the retry strategy
  // (which could switch to/from serial mode).
  if (finish_serial_upgrade)
    gtm_thread::serial_lock.write_upgrade_finish(this);

  decide_retry_strategy (r);

  // Run dispatch-specific restart code. Retry until we succeed.
  abi_dispatch* disp = abi_disp();
  GTM::gtm_restart_reason rr;
  while ((rr = disp->begin_or_restart()) != NO_RESTART)
    {
      decide_retry_strategy(rr);
      disp = abi_disp();
    }

  // Re-enter the transaction body on the appropriate code path, restoring
  // the live variables saved at begin.
  GTM_longjmp (choose_code_path(prop, disp) | a_restoreLiveVariables,
	       &jb, prop);
}
/* ABI entry point that commits the innermost transaction, restarting it
   if the commit fails validation.  */
void ITM_REGPARM
_ITM_commitTransaction(void)
{
#if defined(USE_HTM_FASTPATH)
  // HTM fastpath. If we are not executing a HW transaction, then we will be
  // a serial-mode transaction. If we are, then there will be no other
  // concurrent serial-mode transaction.
  // See gtm_thread::begin_transaction.
  if (likely(htm_fastpath && !gtm_thread::serial_lock.is_write_locked()))
    {
      htm_commit();
      return;
    }
#endif
  gtm_thread *tx = gtm_thr();
  if (!tx->trycommit ())
    tx->restart (RESTART_VALIDATE_COMMIT);
}
/* Like _ITM_commitTransaction, but used when committing from within a C++
   exception cleanup path: EXC_PTR is the in-flight exception, recorded so
   a restart can dispose of it (see gtm_thread::rollback).  */
void ITM_REGPARM
_ITM_commitTransactionEH(void *exc_ptr)
{
#if defined(USE_HTM_FASTPATH)
  // See _ITM_commitTransaction.
  if (likely(htm_fastpath && !gtm_thread::serial_lock.is_write_locked()))
    {
      htm_commit();
      return;
    }
#endif
  gtm_thread *tx = gtm_thr();
  if (!tx->trycommit ())
    {
      tx->eh_in_flight = exc_ptr;
      tx->restart (RESTART_VALIDATE_COMMIT);
    }
}