// libitm/beginend.cc — transaction begin/commit/abort for the GNU
// Transactional Memory Library (from official-gcc.git).
1 /* Copyright (C) 2008-2016 Free Software Foundation, Inc.
2 Contributed by Richard Henderson <rth@redhat.com>.
4 This file is part of the GNU Transactional Memory Library (libitm).
6 Libitm is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
16 Under Section 7 of GPL version 3, you are granted additional
17 permissions described in the GCC Runtime Library Exception, version
18 3.1, as published by the Free Software Foundation.
20 You should have received a copy of the GNU General Public License and
21 a copy of the GCC Runtime Library Exception along with this program;
22 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 <http://www.gnu.org/licenses/>. */
25 #include "libitm_i.h"
26 #include <pthread.h>
29 using namespace GTM;
31 #if !defined(HAVE_ARCH_GTM_THREAD) || !defined(HAVE_ARCH_GTM_THREAD_DISP)
32 extern __thread gtm_thread_tls _gtm_thr_tls;
33 #endif
35 // Put this at the start of a cacheline so that serial_lock's writers and
36 // htm_fastpath fields are on the same cacheline, so that HW transactions
37 // only have to pay one cacheline capacity to monitor both.
38 gtm_rwlock GTM::gtm_thread::serial_lock
39 __attribute__((aligned(HW_CACHELINE_SIZE)));
40 gtm_thread *GTM::gtm_thread::list_of_threads = 0;
41 unsigned GTM::gtm_thread::number_of_threads = 0;
43 /* ??? Move elsewhere when we figure out library initialization. */
44 uint64_t GTM::gtm_spin_count_var = 1000;
46 #ifdef HAVE_64BIT_SYNC_BUILTINS
47 static atomic<_ITM_transactionId_t> global_tid;
48 #else
49 static _ITM_transactionId_t global_tid;
50 static pthread_mutex_t global_tid_lock = PTHREAD_MUTEX_INITIALIZER;
51 #endif
54 // Provides a on-thread-exit callback used to release per-thread data.
55 static pthread_key_t thr_release_key;
56 static pthread_once_t thr_release_once = PTHREAD_ONCE_INIT;
58 /* Allocate a transaction structure. */
59 void *
60 GTM::gtm_thread::operator new (size_t s)
62 void *tx;
64 assert(s == sizeof(gtm_thread));
66 tx = xmalloc (sizeof (gtm_thread), true);
67 memset (tx, 0, sizeof (gtm_thread));
69 return tx;
72 /* Free the given transaction. Raises an error if the transaction is still
73 in use. */
74 void
75 GTM::gtm_thread::operator delete(void *tx)
77 free(tx);
80 static void
81 thread_exit_handler(void *)
83 gtm_thread *thr = gtm_thr();
84 if (thr)
85 delete thr;
86 set_gtm_thr(0);
89 static void
90 thread_exit_init()
92 if (pthread_key_create(&thr_release_key, thread_exit_handler))
93 GTM_fatal("Creating thread release TLS key failed.");
97 GTM::gtm_thread::~gtm_thread()
99 if (nesting > 0)
100 GTM_fatal("Thread exit while a transaction is still active.");
102 // Deregister this transaction.
103 serial_lock.write_lock ();
104 gtm_thread **prev = &list_of_threads;
105 for (; *prev; prev = &(*prev)->next_thread)
107 if (*prev == this)
109 *prev = (*prev)->next_thread;
110 break;
113 number_of_threads--;
114 number_of_threads_changed(number_of_threads + 1, number_of_threads);
115 serial_lock.write_unlock ();
118 GTM::gtm_thread::gtm_thread ()
120 // This object's memory has been set to zero by operator new, so no need
121 // to initialize any of the other primitive-type members that do not have
122 // constructors.
123 shared_state.store(-1, memory_order_relaxed);
125 // Register this transaction with the list of all threads' transactions.
126 serial_lock.write_lock ();
127 next_thread = list_of_threads;
128 list_of_threads = this;
129 number_of_threads++;
130 number_of_threads_changed(number_of_threads - 1, number_of_threads);
131 serial_lock.write_unlock ();
133 init_cpp_exceptions ();
135 if (pthread_once(&thr_release_once, thread_exit_init))
136 GTM_fatal("Initializing thread release TLS key failed.");
137 // Any non-null value is sufficient to trigger destruction of this
138 // transaction when the current thread terminates.
139 if (pthread_setspecific(thr_release_key, this))
140 GTM_fatal("Setting thread release TLS key failed.");
143 static inline uint32_t
144 choose_code_path(uint32_t prop, abi_dispatch *disp)
146 if ((prop & pr_uninstrumentedCode) && disp->can_run_uninstrumented_code())
147 return a_runUninstrumentedCode;
148 else
149 return a_runInstrumentedCode;
152 uint32_t
153 GTM::gtm_thread::begin_transaction (uint32_t prop, const gtm_jmpbuf *jb)
155 static const _ITM_transactionId_t tid_block_size = 1 << 16;
157 gtm_thread *tx;
158 abi_dispatch *disp;
159 uint32_t ret;
161 // ??? pr_undoLogCode is not properly defined in the ABI. Are barriers
162 // omitted because they are not necessary (e.g., a transaction on thread-
163 // local data) or because the compiler thinks that some kind of global
164 // synchronization might perform better?
165 if (unlikely(prop & pr_undoLogCode))
166 GTM_fatal("pr_undoLogCode not supported");
168 #ifdef USE_HTM_FASTPATH
169 // HTM fastpath. Only chosen in the absence of transaction_cancel to allow
170 // using an uninstrumented code path.
171 // The fastpath is enabled only by dispatch_htm's method group, which uses
172 // serial-mode methods as fallback. Serial-mode transactions cannot execute
173 // concurrently with HW transactions because the latter monitor the serial
174 // lock's writer flag and thus abort if another thread is or becomes a
175 // serial transaction. Therefore, if the fastpath is enabled, then a
176 // transaction is not executing as a HW transaction iff the serial lock is
177 // write-locked. Also, HW transactions monitor the fastpath control
178 // variable, so that they will only execute if dispatch_htm is still the
179 // current method group. This allows us to use htm_fastpath and the serial
180 // lock's writers flag to reliable determine whether the current thread runs
181 // a HW transaction, and thus we do not need to maintain this information in
182 // per-thread state.
183 // If an uninstrumented code path is not available, we can still run
184 // instrumented code from a HW transaction because the HTM fastpath kicks
185 // in early in both begin and commit, and the transaction is not canceled.
186 // HW transactions might get requests to switch to serial-irrevocable mode,
187 // but these can be ignored because the HTM provides all necessary
188 // correctness guarantees. Transactions cannot detect whether they are
189 // indeed in serial mode, and HW transactions should never need serial mode
190 // for any internal changes (e.g., they never abort visibly to the STM code
191 // and thus do not trigger the standard retry handling).
192 #ifndef HTM_CUSTOM_FASTPATH
193 if (likely(serial_lock.get_htm_fastpath() && (prop & pr_hasNoAbort)))
195 // Note that the snapshot of htm_fastpath that we take here could be
196 // outdated, and a different method group than dispatch_htm may have
197 // been chosen in the meantime. Therefore, take care not not touch
198 // anything besides the serial lock, which is independent of method
199 // groups.
200 for (uint32_t t = serial_lock.get_htm_fastpath(); t; t--)
202 uint32_t ret = htm_begin();
203 if (htm_begin_success(ret))
205 // We are executing a transaction now.
206 // Monitor the writer flag in the serial-mode lock, and abort
207 // if there is an active or waiting serial-mode transaction.
208 // Also checks that htm_fastpath is still nonzero and thus
209 // HW transactions are allowed to run.
210 // Note that this can also happen due to an enclosing
211 // serial-mode transaction; we handle this case below.
212 if (unlikely(serial_lock.htm_fastpath_disabled()))
213 htm_abort();
214 else
215 // We do not need to set a_saveLiveVariables because of HTM.
216 return (prop & pr_uninstrumentedCode) ?
217 a_runUninstrumentedCode : a_runInstrumentedCode;
219 // The transaction has aborted. Don't retry if it's unlikely that
220 // retrying the transaction will be successful.
221 if (!htm_abort_should_retry(ret))
222 break;
223 // Check whether the HTM fastpath has been disabled.
224 if (!serial_lock.get_htm_fastpath())
225 break;
226 // Wait until any concurrent serial-mode transactions have finished.
227 // This is an empty critical section, but won't be elided.
228 if (serial_lock.htm_fastpath_disabled())
230 tx = gtm_thr();
231 if (unlikely(tx == NULL))
233 // See below.
234 tx = new gtm_thread();
235 set_gtm_thr(tx);
237 // Check whether there is an enclosing serial-mode transaction;
238 // if so, we just continue as a nested transaction and don't
239 // try to use the HTM fastpath. This case can happen when an
240 // outermost relaxed transaction calls unsafe code that starts
241 // a transaction.
242 if (tx->nesting > 0)
243 break;
244 // Another thread is running a serial-mode transaction. Wait.
245 serial_lock.read_lock(tx);
246 serial_lock.read_unlock(tx);
247 // TODO We should probably reset the retry count t here, unless
248 // we have retried so often that we should go serial to avoid
249 // starvation.
253 #else
254 // If we have a custom HTM fastpath in ITM_beginTransaction, we implement
255 // just the retry policy here. We communicate with the custom fastpath
256 // through additional property bits and return codes, and either transfer
257 // control back to the custom fastpath or run the fallback mechanism. The
258 // fastpath synchronization algorithm itself is the same.
259 // pr_HTMRetryableAbort states that a HW transaction started by the custom
260 // HTM fastpath aborted, and that we thus have to decide whether to retry
261 // the fastpath (returning a_tryHTMFastPath) or just proceed with the
262 // fallback method.
263 if (likely(serial_lock.get_htm_fastpath() && (prop & pr_HTMRetryableAbort)))
265 tx = gtm_thr();
266 if (unlikely(tx == NULL))
268 // See below.
269 tx = new gtm_thread();
270 set_gtm_thr(tx);
272 // If this is the first abort, reset the retry count. We abuse
273 // restart_total for the retry count, which is fine because our only
274 // other fallback will use serial transactions, which don't use
275 // restart_total but will reset it when committing.
276 if (!(prop & pr_HTMRetriedAfterAbort))
277 tx->restart_total = gtm_thread::serial_lock.get_htm_fastpath();
279 if (--tx->restart_total > 0)
281 // Wait until any concurrent serial-mode transactions have finished.
282 // Essentially the same code as above.
283 if (!serial_lock.get_htm_fastpath())
284 goto stop_custom_htm_fastpath;
285 if (serial_lock.htm_fastpath_disabled())
287 if (tx->nesting > 0)
288 goto stop_custom_htm_fastpath;
289 serial_lock.read_lock(tx);
290 serial_lock.read_unlock(tx);
292 // Let ITM_beginTransaction retry the custom HTM fastpath.
293 return a_tryHTMFastPath;
296 stop_custom_htm_fastpath:
297 #endif
298 #endif
300 tx = gtm_thr();
301 if (unlikely(tx == NULL))
303 // Create the thread object. The constructor will also set up automatic
304 // deletion on thread termination.
305 tx = new gtm_thread();
306 set_gtm_thr(tx);
309 if (tx->nesting > 0)
311 // This is a nested transaction.
312 // Check prop compatibility:
313 // The ABI requires pr_hasNoFloatUpdate, pr_hasNoVectorUpdate,
314 // pr_hasNoIrrevocable, pr_aWBarriersOmitted, pr_RaRBarriersOmitted, and
315 // pr_hasNoSimpleReads to hold for the full dynamic scope of a
316 // transaction. We could check that these are set for the nested
317 // transaction if they are also set for the parent transaction, but the
318 // ABI does not require these flags to be set if they could be set,
319 // so the check could be too strict.
320 // ??? For pr_readOnly, lexical or dynamic scope is unspecified.
322 if (prop & pr_hasNoAbort)
324 // We can use flat nesting, so elide this transaction.
325 if (!(prop & pr_instrumentedCode))
327 if (!(tx->state & STATE_SERIAL) ||
328 !(tx->state & STATE_IRREVOCABLE))
329 tx->serialirr_mode();
331 // Increment nesting level after checking that we have a method that
332 // allows us to continue.
333 tx->nesting++;
334 return choose_code_path(prop, abi_disp());
337 // The transaction might abort, so use closed nesting if possible.
338 // pr_hasNoAbort has lexical scope, so the compiler should really have
339 // generated an instrumented code path.
340 assert(prop & pr_instrumentedCode);
342 // Create a checkpoint of the current transaction.
343 gtm_transaction_cp *cp = tx->parent_txns.push();
344 cp->save(tx);
345 new (&tx->alloc_actions) aa_tree<uintptr_t, gtm_alloc_action>();
347 // Check whether the current method actually supports closed nesting.
348 // If we can switch to another one, do so.
349 // If not, we assume that actual aborts are infrequent, and rather
350 // restart in _ITM_abortTransaction when we really have to.
351 disp = abi_disp();
352 if (!disp->closed_nesting())
354 // ??? Should we elide the transaction if there is no alternative
355 // method that supports closed nesting? If we do, we need to set
356 // some flag to prevent _ITM_abortTransaction from aborting the
357 // wrong transaction (i.e., some parent transaction).
358 abi_dispatch *cn_disp = disp->closed_nesting_alternative();
359 if (cn_disp)
361 disp = cn_disp;
362 set_abi_disp(disp);
366 else
368 // Outermost transaction
369 disp = tx->decide_begin_dispatch (prop);
370 set_abi_disp (disp);
373 // Initialization that is common for outermost and nested transactions.
374 tx->prop = prop;
375 tx->nesting++;
377 tx->jb = *jb;
379 // As long as we have not exhausted a previously allocated block of TIDs,
380 // we can avoid an atomic operation on a shared cacheline.
381 if (tx->local_tid & (tid_block_size - 1))
382 tx->id = tx->local_tid++;
383 else
385 #ifdef HAVE_64BIT_SYNC_BUILTINS
386 // We don't really care which block of TIDs we get but only that we
387 // acquire one atomically; therefore, relaxed memory order is
388 // sufficient.
389 tx->id = global_tid.fetch_add(tid_block_size, memory_order_relaxed);
390 tx->local_tid = tx->id + 1;
391 #else
392 pthread_mutex_lock (&global_tid_lock);
393 global_tid += tid_block_size;
394 tx->id = global_tid;
395 tx->local_tid = tx->id + 1;
396 pthread_mutex_unlock (&global_tid_lock);
397 #endif
400 // Log the number of uncaught exceptions if we might have to roll back this
401 // state.
402 if (tx->cxa_uncaught_count_ptr != 0)
403 tx->cxa_uncaught_count = *tx->cxa_uncaught_count_ptr;
405 // Run dispatch-specific restart code. Retry until we succeed.
406 GTM::gtm_restart_reason rr;
407 while ((rr = disp->begin_or_restart()) != NO_RESTART)
409 tx->decide_retry_strategy(rr);
410 disp = abi_disp();
413 // Determine the code path to run. Only irrevocable transactions cannot be
414 // restarted, so all other transactions need to save live variables.
415 ret = choose_code_path(prop, disp);
416 if (!(tx->state & STATE_IRREVOCABLE))
417 ret |= a_saveLiveVariables;
418 return ret;
422 void
423 GTM::gtm_transaction_cp::save(gtm_thread* tx)
425 // Save everything that we might have to restore on restarts or aborts.
426 jb = tx->jb;
427 undolog_size = tx->undolog.size();
428 memcpy(&alloc_actions, &tx->alloc_actions, sizeof(alloc_actions));
429 user_actions_size = tx->user_actions.size();
430 id = tx->id;
431 prop = tx->prop;
432 cxa_catch_count = tx->cxa_catch_count;
433 cxa_uncaught_count = tx->cxa_uncaught_count;
434 disp = abi_disp();
435 nesting = tx->nesting;
438 void
439 GTM::gtm_transaction_cp::commit(gtm_thread* tx)
441 // Restore state that is not persistent across commits. Exception handling,
442 // information, nesting level, and any logs do not need to be restored on
443 // commits of nested transactions. Allocation actions must be committed
444 // before committing the snapshot.
445 tx->jb = jb;
446 memcpy(&tx->alloc_actions, &alloc_actions, sizeof(alloc_actions));
447 tx->id = id;
448 tx->prop = prop;
452 void
453 GTM::gtm_thread::rollback (gtm_transaction_cp *cp, bool aborting)
455 // The undo log is special in that it used for both thread-local and shared
456 // data. Because of the latter, we have to roll it back before any
457 // dispatch-specific rollback (which handles synchronization with other
458 // transactions).
459 undolog.rollback (this, cp ? cp->undolog_size : 0);
461 // Perform dispatch-specific rollback.
462 abi_disp()->rollback (cp);
464 // Roll back all actions that are supposed to happen around the transaction.
465 rollback_user_actions (cp ? cp->user_actions_size : 0);
466 commit_allocations (true, (cp ? &cp->alloc_actions : 0));
467 revert_cpp_exceptions (cp);
469 if (cp)
471 // We do not yet handle restarts of nested transactions. To do that, we
472 // would have to restore some state (jb, id, prop, nesting) not to the
473 // checkpoint but to the transaction that was started from this
474 // checkpoint (e.g., nesting = cp->nesting + 1);
475 assert(aborting);
476 // Roll back the rest of the state to the checkpoint.
477 jb = cp->jb;
478 id = cp->id;
479 prop = cp->prop;
480 if (cp->disp != abi_disp())
481 set_abi_disp(cp->disp);
482 memcpy(&alloc_actions, &cp->alloc_actions, sizeof(alloc_actions));
483 nesting = cp->nesting;
485 else
487 // Roll back to the outermost transaction.
488 // Restore the jump buffer and transaction properties, which we will
489 // need for the longjmp used to restart or abort the transaction.
490 if (parent_txns.size() > 0)
492 jb = parent_txns[0].jb;
493 id = parent_txns[0].id;
494 prop = parent_txns[0].prop;
496 // Reset the transaction. Do not reset this->state, which is handled by
497 // the callers. Note that if we are not aborting, we reset the
498 // transaction to the point after having executed begin_transaction
499 // (we will return from it), so the nesting level must be one, not zero.
500 nesting = (aborting ? 0 : 1);
501 parent_txns.clear();
504 if (this->eh_in_flight)
506 _Unwind_DeleteException ((_Unwind_Exception *) this->eh_in_flight);
507 this->eh_in_flight = NULL;
511 void ITM_REGPARM
512 _ITM_abortTransaction (_ITM_abortReason reason)
514 gtm_thread *tx = gtm_thr();
516 assert (reason == userAbort || reason == (userAbort | outerAbort));
517 assert ((tx->prop & pr_hasNoAbort) == 0);
519 if (tx->state & gtm_thread::STATE_IRREVOCABLE)
520 abort ();
522 // Roll back to innermost transaction.
523 if (tx->parent_txns.size() > 0 && !(reason & outerAbort))
525 // If the current method does not support closed nesting but we are
526 // nested and must only roll back the innermost transaction, then
527 // restart with a method that supports closed nesting.
528 abi_dispatch *disp = abi_disp();
529 if (!disp->closed_nesting())
530 tx->restart(RESTART_CLOSED_NESTING);
532 // The innermost transaction is a closed nested transaction.
533 gtm_transaction_cp *cp = tx->parent_txns.pop();
534 uint32_t longjmp_prop = tx->prop;
535 gtm_jmpbuf longjmp_jb = tx->jb;
537 tx->rollback (cp, true);
539 // Jump to nested transaction (use the saved jump buffer).
540 GTM_longjmp (a_abortTransaction | a_restoreLiveVariables,
541 &longjmp_jb, longjmp_prop);
543 else
545 // There is no nested transaction or an abort of the outermost
546 // transaction was requested, so roll back to the outermost transaction.
547 tx->rollback (0, true);
549 // Aborting an outermost transaction finishes execution of the whole
550 // transaction. Therefore, reset transaction state.
551 if (tx->state & gtm_thread::STATE_SERIAL)
552 gtm_thread::serial_lock.write_unlock ();
553 else
554 gtm_thread::serial_lock.read_unlock (tx);
555 tx->state = 0;
557 GTM_longjmp (a_abortTransaction | a_restoreLiveVariables,
558 &tx->jb, tx->prop);
562 bool
563 GTM::gtm_thread::trycommit ()
565 nesting--;
567 // Skip any real commit for elided transactions.
568 if (nesting > 0 && (parent_txns.size() == 0 ||
569 nesting > parent_txns[parent_txns.size() - 1].nesting))
570 return true;
572 if (nesting > 0)
574 // Commit of a closed-nested transaction. Remove one checkpoint and add
575 // any effects of this transaction to the parent transaction.
576 gtm_transaction_cp *cp = parent_txns.pop();
577 commit_allocations(false, &cp->alloc_actions);
578 cp->commit(this);
579 return true;
582 // Commit of an outermost transaction.
583 gtm_word priv_time = 0;
584 if (abi_disp()->trycommit (priv_time))
586 // The transaction is now finished but we will still access some shared
587 // data if we have to ensure privatization safety.
588 bool do_read_unlock = false;
589 if (state & gtm_thread::STATE_SERIAL)
591 gtm_thread::serial_lock.write_unlock ();
592 // There are no other active transactions, so there's no need to
593 // enforce privatization safety.
594 priv_time = 0;
596 else
598 // If we have to ensure privatization safety, we must not yet
599 // release the read lock and become inactive because (1) we still
600 // have to go through the list of all transactions, which can be
601 // modified by serial mode threads, and (2) we interpret each
602 // transactions' shared_state in the context of what we believe to
603 // be the current method group (and serial mode transactions can
604 // change the method group). Therefore, if we have to ensure
605 // privatization safety, delay becoming inactive but set a maximum
606 // snapshot time (we have committed and thus have an empty snapshot,
607 // so it will always be most recent). Use release MO so that this
608 // synchronizes with other threads observing our snapshot time.
609 if (priv_time)
611 do_read_unlock = true;
612 shared_state.store((~(typeof gtm_thread::shared_state)0) - 1,
613 memory_order_release);
615 else
616 gtm_thread::serial_lock.read_unlock (this);
618 state = 0;
620 // We can commit the undo log after dispatch-specific commit and after
621 // making the transaction inactive because we only have to reset
622 // gtm_thread state.
623 undolog.commit ();
624 // Reset further transaction state.
625 cxa_catch_count = 0;
626 restart_total = 0;
628 // Ensure privatization safety, if necessary.
629 if (priv_time)
631 // There must be a seq_cst fence between the following loads of the
632 // other transactions' shared_state and the dispatch-specific stores
633 // that signal updates by this transaction (e.g., lock
634 // acquisitions). This ensures that if we read prior to other
635 // reader transactions setting their shared_state to 0, then those
636 // readers will observe our updates. We can reuse the seq_cst fence
637 // in serial_lock.read_unlock() if we performed that; if not, we
638 // issue the fence.
639 if (do_read_unlock)
640 atomic_thread_fence (memory_order_seq_cst);
641 // TODO Don't just spin but also block using cond vars / futexes
642 // here. Should probably be integrated with the serial lock code.
643 for (gtm_thread *it = gtm_thread::list_of_threads; it != 0;
644 it = it->next_thread)
646 if (it == this) continue;
647 // We need to load other threads' shared_state using acquire
648 // semantics (matching the release semantics of the respective
649 // updates). This is necessary to ensure that the other
650 // threads' memory accesses happen before our actions that
651 // assume privatization safety.
652 // TODO Are there any platform-specific optimizations (e.g.,
653 // merging barriers)?
654 while (it->shared_state.load(memory_order_acquire) < priv_time)
655 cpu_relax();
659 // After ensuring privatization safety, we are now truly inactive and
660 // thus can release the read lock. We will also execute potentially
661 // privatizing actions (e.g., calling free()). User actions are first.
662 if (do_read_unlock)
663 gtm_thread::serial_lock.read_unlock (this);
664 commit_user_actions ();
665 commit_allocations (false, 0);
667 return true;
669 return false;
672 void ITM_NORETURN
673 GTM::gtm_thread::restart (gtm_restart_reason r, bool finish_serial_upgrade)
675 // Roll back to outermost transaction. Do not reset transaction state because
676 // we will continue executing this transaction.
677 rollback ();
679 // If we have to restart while an upgrade of the serial lock is happening,
680 // we need to finish this here, after rollback (to ensure privatization
681 // safety despite undo writes) and before deciding about the retry strategy
682 // (which could switch to/from serial mode).
683 if (finish_serial_upgrade)
684 gtm_thread::serial_lock.write_upgrade_finish(this);
686 decide_retry_strategy (r);
688 // Run dispatch-specific restart code. Retry until we succeed.
689 abi_dispatch* disp = abi_disp();
690 GTM::gtm_restart_reason rr;
691 while ((rr = disp->begin_or_restart()) != NO_RESTART)
693 decide_retry_strategy(rr);
694 disp = abi_disp();
697 GTM_longjmp (choose_code_path(prop, disp) | a_restoreLiveVariables,
698 &jb, prop);
701 void ITM_REGPARM
702 _ITM_commitTransaction(void)
704 #if defined(USE_HTM_FASTPATH)
705 // HTM fastpath. If we are not executing a HW transaction, then we will be
706 // a serial-mode transaction. If we are, then there will be no other
707 // concurrent serial-mode transaction.
708 // See gtm_thread::begin_transaction.
709 if (likely(!gtm_thread::serial_lock.htm_fastpath_disabled()))
711 htm_commit();
712 return;
714 #endif
715 gtm_thread *tx = gtm_thr();
716 if (!tx->trycommit ())
717 tx->restart (RESTART_VALIDATE_COMMIT);
720 void ITM_REGPARM
721 _ITM_commitTransactionEH(void *exc_ptr)
723 #if defined(USE_HTM_FASTPATH)
724 // See _ITM_commitTransaction.
725 if (likely(!gtm_thread::serial_lock.htm_fastpath_disabled()))
727 htm_commit();
728 return;
730 #endif
731 gtm_thread *tx = gtm_thr();
732 if (!tx->trycommit ())
734 tx->eh_in_flight = exc_ptr;
735 tx->restart (RESTART_VALIDATE_COMMIT);