1 /* Passes for transactional memory support.
2 Copyright (C) 2008-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "hash-table.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "calls.h"
27 #include "function.h"
28 #include "rtl.h"
29 #include "emit-rtl.h"
30 #include "gimplify.h"
31 #include "gimple-iterator.h"
32 #include "gimplify-me.h"
33 #include "gimple-walk.h"
34 #include "gimple-ssa.h"
35 #include "cgraph.h"
36 #include "tree-cfg.h"
37 #include "stringpool.h"
38 #include "tree-ssanames.h"
39 #include "tree-into-ssa.h"
40 #include "tree-pass.h"
41 #include "tree-inline.h"
42 #include "diagnostic-core.h"
43 #include "demangle.h"
44 #include "output.h"
45 #include "trans-mem.h"
46 #include "params.h"
47 #include "target.h"
48 #include "langhooks.h"
49 #include "gimple-pretty-print.h"
50 #include "cfgloop.h"
51 #include "tree-ssa-address.h"
54 #define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 2000 - 1)
55 #define PROB_VERY_LIKELY (PROB_ALWAYS - PROB_VERY_UNLIKELY)
56 #define PROB_UNLIKELY (REG_BR_PROB_BASE / 5 - 1)
57 #define PROB_LIKELY (PROB_ALWAYS - PROB_UNLIKELY)
58 #define PROB_ALWAYS (REG_BR_PROB_BASE)
60 #define A_RUNINSTRUMENTEDCODE 0x0001
61 #define A_RUNUNINSTRUMENTEDCODE 0x0002
62 #define A_SAVELIVEVARIABLES 0x0004
63 #define A_RESTORELIVEVARIABLES 0x0008
64 #define A_ABORTTRANSACTION 0x0010
66 #define AR_USERABORT 0x0001
67 #define AR_USERRETRY 0x0002
68 #define AR_TMCONFLICT 0x0004
69 #define AR_EXCEPTIONBLOCKABORT 0x0008
70 #define AR_OUTERABORT 0x0010
72 #define MODE_SERIALIRREVOCABLE 0x0000
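/* Descriptive note (a sketch, not a definition): the A_* values above
   mirror the action bits returned by the TM runtime's begin-transaction
   entry point (a_runInstrumentedCode, a_saveLiveVariables, etc., as used
   in the example further below), and the AR_* values are the abort
   reasons passed to the TM abort builtin (see build_tm_abort_call).  The
   TM runtime ABI headers are the authoritative source for these values.  */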
75 /* The representation of a transaction changes several times during the
76 lowering process. In the beginning, in the front-end we have the
77 GENERIC tree TRANSACTION_EXPR. For example,
79 __transaction {
80 local++;
81 if (++global == 10)
82 __tm_abort;
85 During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
86 trivially replaced with a GIMPLE_TRANSACTION node.
88 During pass_lower_tm, we examine the body of transactions looking
89 for aborts. Transactions that do not contain an abort may be
90 merged into an outer transaction. We also add a TRY-FINALLY node
91 to arrange for the transaction to be committed on any exit.
93 [??? Think about how this arrangement affects throw-with-commit
94 and throw-with-abort operations. In this case we want the TRY to
95 handle gotos, but not to catch any exceptions because the transaction
96 will already be closed.]
98 GIMPLE_TRANSACTION [label=NULL] {
99 try {
100 local = local + 1;
101 t0 = global;
102 t1 = t0 + 1;
103 global = t1;
104 if (t1 == 10)
105 __builtin___tm_abort ();
106 } finally {
107 __builtin___tm_commit ();
111 During pass_lower_eh, we create EH regions for the transactions,
112 intermixed with the regular EH stuff. This gives us a nice persistent
113 mapping (all the way through rtl) from transactional memory operation
114 back to the transaction, which allows us to get the abnormal edges
115 correct to model transaction aborts and restarts:
117 GIMPLE_TRANSACTION [label=over]
118 local = local + 1;
119 t0 = global;
120 t1 = t0 + 1;
121 global = t1;
122 if (t1 == 10)
123 __builtin___tm_abort ();
124 __builtin___tm_commit ();
125 over:
127 This is the end of all_lowering_passes, and so is what is present
128 during the IPA passes, and through all of the optimization passes.
130 During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
131 functions and mark functions for cloning.
133 At the end of gimple optimization, before exiting SSA form,
134 pass_tm_edges replaces statements that perform transactional
135 memory operations with the appropriate TM builtins, and swap
136 out function calls with their transactional clones. At this
137 point we introduce the abnormal transaction restart edges and
138 complete lowering of the GIMPLE_TRANSACTION node.
140 x = __builtin___tm_start (MAY_ABORT);
141 eh_label:
142 if (x & abort_transaction)
143 goto over;
144 local = local + 1;
145 t0 = __builtin___tm_load (global);
146 t1 = t0 + 1;
147 __builtin___tm_store (&global, t1);
148 if (t1 == 10)
149 __builtin___tm_abort ();
150 __builtin___tm_commit ();
151 over:
154 static void *expand_regions (struct tm_region *,
155 void *(*callback)(struct tm_region *, void *),
156 void *, bool);
159 /* Return the attributes we want to examine for X, or NULL if it's not
160 something we examine. We look at function types, but also accept
161 function decls and pointers to function types, peeking through to them. */
163 static tree
164 get_attrs_for (const_tree x)
166 switch (TREE_CODE (x))
168 case FUNCTION_DECL:
169 return TYPE_ATTRIBUTES (TREE_TYPE (x));
170 break;
172 default:
173 if (TYPE_P (x))
174 return NULL;
175 x = TREE_TYPE (x);
176 if (TREE_CODE (x) != POINTER_TYPE)
177 return NULL;
178 /* FALLTHRU */
180 case POINTER_TYPE:
181 x = TREE_TYPE (x);
182 if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
183 return NULL;
184 /* FALLTHRU */
186 case FUNCTION_TYPE:
187 case METHOD_TYPE:
188 return TYPE_ATTRIBUTES (x);
192 /* Return true if X has been marked TM_PURE. */
194 bool
195 is_tm_pure (const_tree x)
197 unsigned flags;
199 switch (TREE_CODE (x))
201 case FUNCTION_DECL:
202 case FUNCTION_TYPE:
203 case METHOD_TYPE:
204 break;
206 default:
207 if (TYPE_P (x))
208 return false;
209 x = TREE_TYPE (x);
210 if (TREE_CODE (x) != POINTER_TYPE)
211 return false;
212 /* FALLTHRU */
214 case POINTER_TYPE:
215 x = TREE_TYPE (x);
216 if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
217 return false;
218 break;
221 flags = flags_from_decl_or_type (x);
222 return (flags & ECF_TM_PURE) != 0;
225 /* Return true if X has been marked TM_IRREVOCABLE. */
227 static bool
228 is_tm_irrevocable (tree x)
230 tree attrs = get_attrs_for (x);
232 if (attrs && lookup_attribute ("transaction_unsafe", attrs))
233 return true;
235 /* A call to the irrevocable builtin is, by definition,
236 irrevocable. */
237 if (TREE_CODE (x) == ADDR_EXPR)
238 x = TREE_OPERAND (x, 0);
239 if (TREE_CODE (x) == FUNCTION_DECL
240 && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
241 && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
242 return true;
244 return false;
247 /* Return true if X has been marked TM_SAFE. */
249 bool
250 is_tm_safe (const_tree x)
252 if (flag_tm)
254 tree attrs = get_attrs_for (x);
255 if (attrs)
257 if (lookup_attribute ("transaction_safe", attrs))
258 return true;
259 if (lookup_attribute ("transaction_may_cancel_outer", attrs))
260 return true;
263 return false;
266 /* Return true if CALL is const, or tm_pure. */
268 static bool
269 is_tm_pure_call (gimple call)
271 tree fn = gimple_call_fn (call);
273 if (TREE_CODE (fn) == ADDR_EXPR)
275 fn = TREE_OPERAND (fn, 0);
276 gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
278 else
279 fn = TREE_TYPE (fn);
281 return is_tm_pure (fn);
284 /* Return true if X has been marked TM_CALLABLE. */
286 static bool
287 is_tm_callable (tree x)
289 tree attrs = get_attrs_for (x);
290 if (attrs)
292 if (lookup_attribute ("transaction_callable", attrs))
293 return true;
294 if (lookup_attribute ("transaction_safe", attrs))
295 return true;
296 if (lookup_attribute ("transaction_may_cancel_outer", attrs))
297 return true;
299 return false;
302 /* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER. */
304 bool
305 is_tm_may_cancel_outer (tree x)
307 tree attrs = get_attrs_for (x);
308 if (attrs)
309 return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
310 return false;
313 /* Return true for built-in functions that "end" a transaction. */
315 bool
316 is_tm_ending_fndecl (tree fndecl)
318 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
319 switch (DECL_FUNCTION_CODE (fndecl))
321 case BUILT_IN_TM_COMMIT:
322 case BUILT_IN_TM_COMMIT_EH:
323 case BUILT_IN_TM_ABORT:
324 case BUILT_IN_TM_IRREVOCABLE:
325 return true;
326 default:
327 break;
330 return false;
333 /* Return true if STMT is a built-in function call that "ends" a
334 transaction. */
336 bool
337 is_tm_ending (gimple stmt)
339 tree fndecl;
341 if (gimple_code (stmt) != GIMPLE_CALL)
342 return false;
344 fndecl = gimple_call_fndecl (stmt);
345 return (fndecl != NULL_TREE
346 && is_tm_ending_fndecl (fndecl));
349 /* Return true if STMT is a TM load. */
351 static bool
352 is_tm_load (gimple stmt)
354 tree fndecl;
356 if (gimple_code (stmt) != GIMPLE_CALL)
357 return false;
359 fndecl = gimple_call_fndecl (stmt);
360 return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
361 && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
364 /* Same as above, but for simple TM loads, that is, not the
365 after-write, after-read, etc optimized variants. */
367 static bool
368 is_tm_simple_load (gimple stmt)
370 tree fndecl;
372 if (gimple_code (stmt) != GIMPLE_CALL)
373 return false;
375 fndecl = gimple_call_fndecl (stmt);
376 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
378 enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
379 return (fcode == BUILT_IN_TM_LOAD_1
380 || fcode == BUILT_IN_TM_LOAD_2
381 || fcode == BUILT_IN_TM_LOAD_4
382 || fcode == BUILT_IN_TM_LOAD_8
383 || fcode == BUILT_IN_TM_LOAD_FLOAT
384 || fcode == BUILT_IN_TM_LOAD_DOUBLE
385 || fcode == BUILT_IN_TM_LOAD_LDOUBLE
386 || fcode == BUILT_IN_TM_LOAD_M64
387 || fcode == BUILT_IN_TM_LOAD_M128
388 || fcode == BUILT_IN_TM_LOAD_M256);
390 return false;
393 /* Return true if STMT is a TM store. */
395 static bool
396 is_tm_store (gimple stmt)
398 tree fndecl;
400 if (gimple_code (stmt) != GIMPLE_CALL)
401 return false;
403 fndecl = gimple_call_fndecl (stmt);
404 return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
405 && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
408 /* Same as above, but for simple TM stores, that is, not the
409 after-write, after-read, etc optimized variants. */
411 static bool
412 is_tm_simple_store (gimple stmt)
414 tree fndecl;
416 if (gimple_code (stmt) != GIMPLE_CALL)
417 return false;
419 fndecl = gimple_call_fndecl (stmt);
420 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
422 enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
423 return (fcode == BUILT_IN_TM_STORE_1
424 || fcode == BUILT_IN_TM_STORE_2
425 || fcode == BUILT_IN_TM_STORE_4
426 || fcode == BUILT_IN_TM_STORE_8
427 || fcode == BUILT_IN_TM_STORE_FLOAT
428 || fcode == BUILT_IN_TM_STORE_DOUBLE
429 || fcode == BUILT_IN_TM_STORE_LDOUBLE
430 || fcode == BUILT_IN_TM_STORE_M64
431 || fcode == BUILT_IN_TM_STORE_M128
432 || fcode == BUILT_IN_TM_STORE_M256);
434 return false;
437 /* Return true if FNDECL is BUILT_IN_TM_ABORT. */
439 static bool
440 is_tm_abort (tree fndecl)
442 return (fndecl
443 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
444 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
447 /* Build a GENERIC tree for a user abort. This is called by front ends
448 while transforming the __tm_abort statement. */
450 tree
451 build_tm_abort_call (location_t loc, bool is_outer)
453 return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
454 build_int_cst (integer_type_node,
455 AR_USERABORT
456 | (is_outer ? AR_OUTERABORT : 0)));
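/* For example (an illustrative sketch, not taken from the testsuite): a
   user abort such as __transaction_cancel becomes a call to the
   BUILT_IN_TM_ABORT builtin with argument AR_USERABORT, and an outer
   cancel becomes one with argument AR_USERABORT | AR_OUTERABORT.  */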
459 /* Common gating function for several of the TM passes. */
461 static bool
462 gate_tm (void)
464 return flag_tm;
467 /* Map for arbitrary function replacement under TM, as created
468 by the tm_wrap attribute. */
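/* For example (an illustrative sketch): given declarations along the
   lines of

     void foo (void);
     void tm_foo (void) __attribute__ ((transaction_wrap (foo)));

   the front end records the pair via record_tm_replacement (foo, tm_foo),
   and calls to foo() within a transaction are later redirected to
   tm_foo().  */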
470 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
471 htab_t tm_wrap_map;
473 void
474 record_tm_replacement (tree from, tree to)
476 struct tree_map **slot, *h;
478 /* Do not inline wrapper functions that will get replaced in the TM
479 pass.
481 Suppose you have foo() that will get replaced into tmfoo(). Make
482 sure the inliner doesn't try to outsmart us and inline foo()
483 before we get a chance to do the TM replacement. */
484 DECL_UNINLINABLE (from) = 1;
486 if (tm_wrap_map == NULL)
487 tm_wrap_map = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);
489 h = ggc_alloc_tree_map ();
490 h->hash = htab_hash_pointer (from);
491 h->base.from = from;
492 h->to = to;
494 slot = (struct tree_map **)
495 htab_find_slot_with_hash (tm_wrap_map, h, h->hash, INSERT);
496 *slot = h;
499 /* Return a TM-aware replacement function for DECL. */
501 static tree
502 find_tm_replacement_function (tree fndecl)
504 if (tm_wrap_map)
506 struct tree_map *h, in;
508 in.base.from = fndecl;
509 in.hash = htab_hash_pointer (fndecl);
510 h = (struct tree_map *) htab_find_with_hash (tm_wrap_map, &in, in.hash);
511 if (h)
512 return h->to;
515 /* ??? We may well want TM versions of most of the common <string.h>
516 functions. For now, we already have these defined. */
517 /* Adjust expand_call_tm() attributes as necessary for the cases
518 handled here: */
519 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
520 switch (DECL_FUNCTION_CODE (fndecl))
522 case BUILT_IN_MEMCPY:
523 return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
524 case BUILT_IN_MEMMOVE:
525 return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
526 case BUILT_IN_MEMSET:
527 return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
528 default:
529 return NULL;
532 return NULL;
535 /* When appropriate, record TM replacement for memory allocation functions.
537 FROM is the FNDECL to wrap. */
538 void
539 tm_malloc_replacement (tree from)
541 const char *str;
542 tree to;
544 if (TREE_CODE (from) != FUNCTION_DECL)
545 return;
547 /* If we have a previous replacement, the user must be explicitly
548 wrapping malloc/calloc/free. They better know what they're
549 doing... */
550 if (find_tm_replacement_function (from))
551 return;
553 str = IDENTIFIER_POINTER (DECL_NAME (from));
555 if (!strcmp (str, "malloc"))
556 to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
557 else if (!strcmp (str, "calloc"))
558 to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
559 else if (!strcmp (str, "free"))
560 to = builtin_decl_explicit (BUILT_IN_TM_FREE);
561 else
562 return;
564 TREE_NOTHROW (to) = 0;
566 record_tm_replacement (from, to);
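/* For example (illustrative): with the replacement recorded above, a call
   such as

     p = malloc (n);

   inside a transaction is later redirected to the BUILT_IN_TM_MALLOC
   builtin, so that a transaction restart can release the memory again
   (see the comment about malloc in requires_barrier below).  */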
569 /* Diagnostics for tm_safe functions/regions. Called by the front end
570 once we've lowered the function to high-gimple. */
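/* For example (an illustrative sketch, not from the testsuite):

     void bad (void) __attribute__ ((transaction_unsafe));

     void f (void)
     {
       __transaction_atomic {
         bad ();        // error: unsafe function call 'bad' within
                        //        atomic transaction
         __asm__ ("");  // error: asm not allowed in atomic transaction
       }
     }
*/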
572 /* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
573 Process exactly one statement. WI->INFO is set to non-null when in
574 the context of a tm_safe function, and null for a __transaction block. */
576 #define DIAG_TM_OUTER 1
577 #define DIAG_TM_SAFE 2
578 #define DIAG_TM_RELAXED 4
580 struct diagnose_tm
582 unsigned int summary_flags : 8;
583 unsigned int block_flags : 8;
584 unsigned int func_flags : 8;
585 unsigned int saw_volatile : 1;
586 gimple stmt;
589 /* Return true if T is a volatile variable of some kind. */
591 static bool
592 volatile_var_p (tree t)
594 return (SSA_VAR_P (t)
595 && TREE_THIS_VOLATILE (TREE_TYPE (t)));
598 /* Tree callback function for diagnose_tm pass. */
600 static tree
601 diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
602 void *data)
604 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
605 struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
607 if (volatile_var_p (*tp)
608 && d->block_flags & DIAG_TM_SAFE
609 && !d->saw_volatile)
611 d->saw_volatile = 1;
612 error_at (gimple_location (d->stmt),
613 "invalid volatile use of %qD inside transaction",
614 *tp);
617 return NULL_TREE;
620 static inline bool
621 is_tm_safe_or_pure (const_tree x)
623 return is_tm_safe (x) || is_tm_pure (x);
626 static tree
627 diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
628 struct walk_stmt_info *wi)
630 gimple stmt = gsi_stmt (*gsi);
631 struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
633 /* Save stmt for use in leaf analysis. */
634 d->stmt = stmt;
636 switch (gimple_code (stmt))
638 case GIMPLE_CALL:
640 tree fn = gimple_call_fn (stmt);
642 if ((d->summary_flags & DIAG_TM_OUTER) == 0
643 && is_tm_may_cancel_outer (fn))
644 error_at (gimple_location (stmt),
645 "%<transaction_may_cancel_outer%> function call not within"
646 " outer transaction or %<transaction_may_cancel_outer%>");
648 if (d->summary_flags & DIAG_TM_SAFE)
650 bool is_safe, direct_call_p;
651 tree replacement;
653 if (TREE_CODE (fn) == ADDR_EXPR
654 && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
656 direct_call_p = true;
657 replacement = TREE_OPERAND (fn, 0);
658 replacement = find_tm_replacement_function (replacement);
659 if (replacement)
660 fn = replacement;
662 else
664 direct_call_p = false;
665 replacement = NULL_TREE;
668 if (is_tm_safe_or_pure (fn))
669 is_safe = true;
670 else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
672 /* A function explicitly marked transaction_callable as
673 opposed to transaction_safe is being defined to be
674 unsafe as part of its ABI, regardless of its contents. */
675 is_safe = false;
677 else if (direct_call_p)
679 if (flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
680 is_safe = true;
681 else if (replacement)
683 /* ??? At present we consider replacements to be merely
684 transaction_callable, and therefore they might enter
685 irrevocable mode. The tm_wrap attribute has not
686 yet made it into the new language spec. */
687 is_safe = false;
689 else
691 /* ??? Diagnostics for unmarked direct calls moved into
692 the IPA pass. Section 3.2 of the spec details how
693 unmarked functions should be considered "implicitly
694 safe" once their bodies have been examined. */
695 is_safe = true;
698 else
700 /* An unmarked indirect call. Consider it unsafe even
701 though optimization may yet figure out how to inline. */
702 is_safe = false;
705 if (!is_safe)
707 if (TREE_CODE (fn) == ADDR_EXPR)
708 fn = TREE_OPERAND (fn, 0);
709 if (d->block_flags & DIAG_TM_SAFE)
711 if (direct_call_p)
712 error_at (gimple_location (stmt),
713 "unsafe function call %qD within "
714 "atomic transaction", fn);
715 else
717 if (!DECL_P (fn) || DECL_NAME (fn))
718 error_at (gimple_location (stmt),
719 "unsafe function call %qE within "
720 "atomic transaction", fn);
721 else
722 error_at (gimple_location (stmt),
723 "unsafe indirect function call within "
724 "atomic transaction");
727 else
729 if (direct_call_p)
730 error_at (gimple_location (stmt),
731 "unsafe function call %qD within "
732 "%<transaction_safe%> function", fn);
733 else
735 if (!DECL_P (fn) || DECL_NAME (fn))
736 error_at (gimple_location (stmt),
737 "unsafe function call %qE within "
738 "%<transaction_safe%> function", fn);
739 else
740 error_at (gimple_location (stmt),
741 "unsafe indirect function call within "
742 "%<transaction_safe%> function");
748 break;
750 case GIMPLE_ASM:
751 /* ??? We ought to come up with a way to add attributes to
752 asm statements, and then add "transaction_safe" to it.
753 Either that or get the language spec to resurrect __tm_waiver. */
754 if (d->block_flags & DIAG_TM_SAFE)
755 error_at (gimple_location (stmt),
756 "asm not allowed in atomic transaction");
757 else if (d->func_flags & DIAG_TM_SAFE)
758 error_at (gimple_location (stmt),
759 "asm not allowed in %<transaction_safe%> function");
760 break;
762 case GIMPLE_TRANSACTION:
764 unsigned char inner_flags = DIAG_TM_SAFE;
766 if (gimple_transaction_subcode (stmt) & GTMA_IS_RELAXED)
768 if (d->block_flags & DIAG_TM_SAFE)
769 error_at (gimple_location (stmt),
770 "relaxed transaction in atomic transaction");
771 else if (d->func_flags & DIAG_TM_SAFE)
772 error_at (gimple_location (stmt),
773 "relaxed transaction in %<transaction_safe%> function");
774 inner_flags = DIAG_TM_RELAXED;
776 else if (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)
778 if (d->block_flags)
779 error_at (gimple_location (stmt),
780 "outer transaction in transaction");
781 else if (d->func_flags & DIAG_TM_OUTER)
782 error_at (gimple_location (stmt),
783 "outer transaction in "
784 "%<transaction_may_cancel_outer%> function");
785 else if (d->func_flags & DIAG_TM_SAFE)
786 error_at (gimple_location (stmt),
787 "outer transaction in %<transaction_safe%> function");
788 inner_flags |= DIAG_TM_OUTER;
791 *handled_ops_p = true;
792 if (gimple_transaction_body (stmt))
794 struct walk_stmt_info wi_inner;
795 struct diagnose_tm d_inner;
797 memset (&d_inner, 0, sizeof (d_inner));
798 d_inner.func_flags = d->func_flags;
799 d_inner.block_flags = d->block_flags | inner_flags;
800 d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;
802 memset (&wi_inner, 0, sizeof (wi_inner));
803 wi_inner.info = &d_inner;
805 walk_gimple_seq (gimple_transaction_body (stmt),
806 diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
809 break;
811 default:
812 break;
815 return NULL_TREE;
818 static unsigned int
819 diagnose_tm_blocks (void)
821 struct walk_stmt_info wi;
822 struct diagnose_tm d;
824 memset (&d, 0, sizeof (d));
825 if (is_tm_may_cancel_outer (current_function_decl))
826 d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
827 else if (is_tm_safe (current_function_decl))
828 d.func_flags = DIAG_TM_SAFE;
829 d.summary_flags = d.func_flags;
831 memset (&wi, 0, sizeof (wi));
832 wi.info = &d;
834 walk_gimple_seq (gimple_body (current_function_decl),
835 diagnose_tm_1, diagnose_tm_1_op, &wi);
837 return 0;
840 namespace {
842 const pass_data pass_data_diagnose_tm_blocks =
844 GIMPLE_PASS, /* type */
845 "*diagnose_tm_blocks", /* name */
846 OPTGROUP_NONE, /* optinfo_flags */
847 true, /* has_gate */
848 true, /* has_execute */
849 TV_TRANS_MEM, /* tv_id */
850 PROP_gimple_any, /* properties_required */
851 0, /* properties_provided */
852 0, /* properties_destroyed */
853 0, /* todo_flags_start */
854 0, /* todo_flags_finish */
857 class pass_diagnose_tm_blocks : public gimple_opt_pass
859 public:
860 pass_diagnose_tm_blocks (gcc::context *ctxt)
861 : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
864 /* opt_pass methods: */
865 bool gate () { return gate_tm (); }
866 unsigned int execute () { return diagnose_tm_blocks (); }
868 }; // class pass_diagnose_tm_blocks
870 } // anon namespace
872 gimple_opt_pass *
873 make_pass_diagnose_tm_blocks (gcc::context *ctxt)
875 return new pass_diagnose_tm_blocks (ctxt);
878 /* Instead of instrumenting thread private memory, we save the
879 addresses in a log, which we later use to save/restore them
880 upon transaction start/restart.
882 The log is keyed by address, where each element contains individual
883 statements among different code paths that perform the store.
885 This log is later used to generate either plain save/restore of the
886 addresses upon transaction start/restart, or calls to the ITM_L*
887 logging functions.
889 So for something like:
891 struct large { int x[1000]; };
892 struct large lala = { 0 };
893 __transaction {
894 lala.x[i] = 123;
898 We can either save/restore:
900 lala = { 0 };
901 trxn = _ITM_startTransaction ();
902 if (trxn & a_saveLiveVariables)
903 tmp_lala1 = lala.x[i];
904 else if (a & a_restoreLiveVariables)
905 lala.x[i] = tmp_lala1;
907 or use the logging functions:
909 lala = { 0 };
910 trxn = _ITM_startTransaction ();
911 _ITM_LU4 (&lala.x[i]);
913 Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
914 far up the dominator tree as possible to shadow all of the writes to
915 a given location (thus reducing the total number of logging calls), but not
916 so high as to be called on a path that does not perform a
917 write. */
919 /* One individual log entry. We may have multiple statements for the
920 same location if none of them dominates another (they are on
921 different execution paths). */
922 typedef struct tm_log_entry
924 /* Address to save. */
925 tree addr;
926 /* Entry block for the transaction this address occurs in. */
927 basic_block entry_block;
928 /* Dominating statements the store occurs in. */
929 gimple_vec stmts;
930 /* Initially, while we are building the log, we place a nonzero
931 value here to mean that this address *will* be saved with a
932 save/restore sequence. Later, when generating the save sequence
933 we place the SSA temp generated here. */
934 tree save_var;
935 } *tm_log_entry_t;
938 /* Log entry hashtable helpers. */
940 struct log_entry_hasher
942 typedef tm_log_entry value_type;
943 typedef tm_log_entry compare_type;
944 static inline hashval_t hash (const value_type *);
945 static inline bool equal (const value_type *, const compare_type *);
946 static inline void remove (value_type *);
949 /* Htab support. Return hash value for a `tm_log_entry'. */
950 inline hashval_t
951 log_entry_hasher::hash (const value_type *log)
953 return iterative_hash_expr (log->addr, 0);
956 /* Htab support. Return true if two log entries are the same. */
957 inline bool
958 log_entry_hasher::equal (const value_type *log1, const compare_type *log2)
960 /* FIXME:
962 rth: I suggest that we get rid of the component refs etc.
963 I.e. resolve the reference to base + offset.
965 We may need to actually finish a merge with mainline for this,
966 since we'd like to be presented with Richi's MEM_REF_EXPRs more
967 often than not. But in the meantime your tm_log_entry could save
968 the results of get_inner_reference.
970 See: g++.dg/tm/pr46653.C
973 /* Special case plain equality because operand_equal_p() below will
974 return FALSE if the addresses are equal but they have
975 side-effects (e.g. a volatile address). */
976 if (log1->addr == log2->addr)
977 return true;
979 return operand_equal_p (log1->addr, log2->addr, 0);
982 /* Htab support. Free one tm_log_entry. */
983 inline void
984 log_entry_hasher::remove (value_type *lp)
986 lp->stmts.release ();
987 free (lp);
991 /* The actual log. */
992 static hash_table <log_entry_hasher> tm_log;
994 /* Addresses to log with a save/restore sequence. These should be in
995 dominator order. */
996 static vec<tree> tm_log_save_addresses;
998 enum thread_memory_type
1000 mem_non_local = 0,
1001 mem_thread_local,
1002 mem_transaction_local,
1003 mem_max
1006 typedef struct tm_new_mem_map
1008 /* SSA_NAME being dereferenced. */
1009 tree val;
1010 enum thread_memory_type local_new_memory;
1011 } tm_new_mem_map_t;
1013 /* Hashtable helpers. */
1015 struct tm_mem_map_hasher : typed_free_remove <tm_new_mem_map_t>
1017 typedef tm_new_mem_map_t value_type;
1018 typedef tm_new_mem_map_t compare_type;
1019 static inline hashval_t hash (const value_type *);
1020 static inline bool equal (const value_type *, const compare_type *);
1023 inline hashval_t
1024 tm_mem_map_hasher::hash (const value_type *v)
1026 return (intptr_t)v->val >> 4;
1029 inline bool
1030 tm_mem_map_hasher::equal (const value_type *v, const compare_type *c)
1032 return v->val == c->val;
1035 /* Map for an SSA_NAME originally pointing to a non-aliased new piece
1036 of memory (malloc, alloca, etc.). */
1037 static hash_table <tm_mem_map_hasher> tm_new_mem_hash;
1039 /* Initialize logging data structures. */
1040 static void
1041 tm_log_init (void)
1043 tm_log.create (10);
1044 tm_new_mem_hash.create (5);
1045 tm_log_save_addresses.create (5);
1048 /* Free logging data structures. */
1049 static void
1050 tm_log_delete (void)
1052 tm_log.dispose ();
1053 tm_new_mem_hash.dispose ();
1054 tm_log_save_addresses.release ();
1057 /* Return true if MEM is a transaction invariant memory for the TM
1058 region starting at REGION_ENTRY_BLOCK. */
1059 static bool
1060 transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
1062 if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
1063 && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
1065 basic_block def_bb;
1067 def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
1068 return def_bb != region_entry_block
1069 && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
1072 mem = strip_invariant_refs (mem);
1073 return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
1076 /* Given an address ADDR in STMT, find it in the memory log or add it,
1077 making sure to keep only the addresses highest in the dominator
1078 tree.
1080 ENTRY_BLOCK is the entry block for the transaction, or NULL if unknown.
1082 If we find the address in the log, make sure it's either the same
1083 address, or an equivalent one that dominates ADDR.
1085 If we find the address, but neither ADDR dominates the found
1086 address, nor the found one dominates ADDR, we're on different
1087 execution paths. Add it. */
1091 static void
1092 tm_log_add (basic_block entry_block, tree addr, gimple stmt)
1094 tm_log_entry **slot;
1095 struct tm_log_entry l, *lp;
1097 l.addr = addr;
1098 slot = tm_log.find_slot (&l, INSERT);
1099 if (!*slot)
1101 tree type = TREE_TYPE (addr);
1103 lp = XNEW (struct tm_log_entry);
1104 lp->addr = addr;
1105 *slot = lp;
1107 /* Small invariant addresses can be handled as save/restores. */
1108 if (entry_block
1109 && transaction_invariant_address_p (lp->addr, entry_block)
1110 && TYPE_SIZE_UNIT (type) != NULL
1111 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
1112 && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
1113 < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
1114 /* We must be able to copy this type normally. I.e., no
1115 special constructors and the like. */
1116 && !TREE_ADDRESSABLE (type))
1118 lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
1119 lp->stmts.create (0);
1120 lp->entry_block = entry_block;
1121 /* Save addresses separately in dominator order so we don't
1122 get confused by overlapping addresses in the save/restore
1123 sequence. */
1124 tm_log_save_addresses.safe_push (lp->addr);
1126 else
1128 /* Use the logging functions. */
1129 lp->stmts.create (5);
1130 lp->stmts.quick_push (stmt);
1131 lp->save_var = NULL;
1134 else
1136 size_t i;
1137 gimple oldstmt;
1139 lp = *slot;
1141 /* If we're generating a save/restore sequence, we don't care
1142 about statements. */
1143 if (lp->save_var)
1144 return;
1146 for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
1148 if (stmt == oldstmt)
1149 return;
1150 /* We already have a store to the same address, higher up the
1151 dominator tree. Nothing to do. */
1152 if (dominated_by_p (CDI_DOMINATORS,
1153 gimple_bb (stmt), gimple_bb (oldstmt)))
1154 return;
1155 /* We should be processing blocks in dominator tree order. */
1156 gcc_assert (!dominated_by_p (CDI_DOMINATORS,
1157 gimple_bb (oldstmt), gimple_bb (stmt)));
1159 /* Store is on a different code path. */
1160 lp->stmts.safe_push (stmt);
1164 /* Gimplify the address of a TARGET_MEM_REF. Return the SSA_NAME
1165 result, insert the new statements before GSI. */
1167 static tree
1168 gimplify_addr (gimple_stmt_iterator *gsi, tree x)
1170 if (TREE_CODE (x) == TARGET_MEM_REF)
1171 x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
1172 else
1173 x = build_fold_addr_expr (x);
1174 return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
1177 /* Instrument one address with the logging functions.
1178 ADDR is the address to save.
1179 STMT is the statement before which to place it. */
1180 static void
1181 tm_log_emit_stmt (tree addr, gimple stmt)
1183 tree type = TREE_TYPE (addr);
1184 tree size = TYPE_SIZE_UNIT (type);
1185 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1186 gimple log;
1187 enum built_in_function code = BUILT_IN_TM_LOG;
1189 if (type == float_type_node)
1190 code = BUILT_IN_TM_LOG_FLOAT;
1191 else if (type == double_type_node)
1192 code = BUILT_IN_TM_LOG_DOUBLE;
1193 else if (type == long_double_type_node)
1194 code = BUILT_IN_TM_LOG_LDOUBLE;
1195 else if (tree_fits_uhwi_p (size))
1197 unsigned int n = tree_to_uhwi (size);
1198 switch (n)
1200 case 1:
1201 code = BUILT_IN_TM_LOG_1;
1202 break;
1203 case 2:
1204 code = BUILT_IN_TM_LOG_2;
1205 break;
1206 case 4:
1207 code = BUILT_IN_TM_LOG_4;
1208 break;
1209 case 8:
1210 code = BUILT_IN_TM_LOG_8;
1211 break;
1212 default:
1213 code = BUILT_IN_TM_LOG;
1214 if (TREE_CODE (type) == VECTOR_TYPE)
1216 if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
1217 code = BUILT_IN_TM_LOG_M64;
1218 else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
1219 code = BUILT_IN_TM_LOG_M128;
1220 else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
1221 code = BUILT_IN_TM_LOG_M256;
1223 break;
1227 addr = gimplify_addr (&gsi, addr);
1228 if (code == BUILT_IN_TM_LOG)
1229 log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
1230 else
1231 log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
1232 gsi_insert_before (&gsi, log, GSI_SAME_STMT);
1235 /* Go through the log and instrument the addresses that must be instrumented
1236 with the logging functions. Leave the save/restore addresses for
1237 later. */
1238 static void
1239 tm_log_emit (void)
1241 hash_table <log_entry_hasher>::iterator hi;
1242 struct tm_log_entry *lp;
1244 FOR_EACH_HASH_TABLE_ELEMENT (tm_log, lp, tm_log_entry_t, hi)
1246 size_t i;
1247 gimple stmt;
1249 if (dump_file)
1251 fprintf (dump_file, "TM thread private mem logging: ");
1252 print_generic_expr (dump_file, lp->addr, 0);
1253 fprintf (dump_file, "\n");
1256 if (lp->save_var)
1258 if (dump_file)
1259 fprintf (dump_file, "DUMPING to variable\n");
1260 continue;
1262 else
1264 if (dump_file)
1265 fprintf (dump_file, "DUMPING with logging functions\n");
1266 for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
1267 tm_log_emit_stmt (lp->addr, stmt);
1272 /* Emit the save sequence for the corresponding addresses in the log.
1273 ENTRY_BLOCK is the entry block for the transaction.
1274 BB is the basic block to insert the code in. */
1275 static void
1276 tm_log_emit_saves (basic_block entry_block, basic_block bb)
1278 size_t i;
1279 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1280 gimple stmt;
1281 struct tm_log_entry l, *lp;
1283 for (i = 0; i < tm_log_save_addresses.length (); ++i)
1285 l.addr = tm_log_save_addresses[i];
1286 lp = *(tm_log.find_slot (&l, NO_INSERT));
1287 gcc_assert (lp->save_var != NULL);
1289 /* We only care about variables in the current transaction. */
1290 if (lp->entry_block != entry_block)
1291 continue;
1293 stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));
1295 /* Make sure we can create an SSA_NAME for this type. For
1296 instance, aggregates aren't allowed, in which case the system
1297 will create a VOP for us and everything will just work. */
1298 if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
1300 lp->save_var = make_ssa_name (lp->save_var, stmt);
1301 gimple_assign_set_lhs (stmt, lp->save_var);
1304 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1308 /* Emit the restore sequence for the corresponding addresses in the log.
1309 ENTRY_BLOCK is the entry block for the transaction.
1310 BB is the basic block to insert the code in. */
1311 static void
1312 tm_log_emit_restores (basic_block entry_block, basic_block bb)
1314 int i;
1315 struct tm_log_entry l, *lp;
1316 gimple_stmt_iterator gsi;
1317 gimple stmt;
1319 for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
1321 l.addr = tm_log_save_addresses[i];
1322 lp = *(tm_log.find_slot (&l, NO_INSERT));
1323 gcc_assert (lp->save_var != NULL);
1325 /* We only care about variables in the current transaction. */
1326 if (lp->entry_block != entry_block)
1327 continue;
1329 /* Restores are in LIFO order from the saves in case we have
1330 overlaps. */
1331 gsi = gsi_start_bb (bb);
1333 stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
1334 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1339 static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
1340 struct walk_stmt_info *);
1341 static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
1342 struct walk_stmt_info *);
1344 /* Evaluate an address X being dereferenced and determine if it
1345 originally points to a non-aliased new chunk of memory (malloc,
1346 alloca, etc).
1348 Return MEM_THREAD_LOCAL if it points to a thread-local address.
1349 Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
1350 Return MEM_NON_LOCAL otherwise.
1352 ENTRY_BLOCK is the entry block to the transaction containing the
1353 dereference of X. */
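/* For example (an illustrative sketch in pseudo-source):

     __transaction_atomic {
       int *p = malloc (sizeof *p);  // allocated inside the region:
       *p = 1;                       //   mem_transaction_local
     }

     int *q = malloc (sizeof *q);    // allocation dominates the entry block
     __transaction_atomic {
       *q = 1;                       //   mem_thread_local: may be logged
     }                               //   rather than fully instrumented

   and any pointer whose dereference may alias global memory yields
   mem_non_local.  */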
1354 static enum thread_memory_type
1355 thread_private_new_memory (basic_block entry_block, tree x)
1357 gimple stmt = NULL;
1358 enum tree_code code;
1359 tm_new_mem_map_t **slot;
1360 tm_new_mem_map_t elt, *elt_p;
1361 tree val = x;
1362 enum thread_memory_type retval = mem_transaction_local;
1364 if (!entry_block
1365 || TREE_CODE (x) != SSA_NAME
1366 /* Possible uninitialized use, or a function argument. In
1367 either case, we don't care. */
1368 || SSA_NAME_IS_DEFAULT_DEF (x))
1369 return mem_non_local;
1371 /* Look in cache first. */
1372 elt.val = x;
1373 slot = tm_new_mem_hash.find_slot (&elt, INSERT);
1374 elt_p = *slot;
1375 if (elt_p)
1376 return elt_p->local_new_memory;
1378 /* Optimistically assume the memory is transaction local during
1379 processing. This catches recursion into this variable. */
1380 *slot = elt_p = XNEW (tm_new_mem_map_t);
1381 elt_p->val = val;
1382 elt_p->local_new_memory = mem_transaction_local;
1384 /* Search DEF chain to find the original definition of this address. */
1387 if (ptr_deref_may_alias_global_p (x))
1389 /* Address escapes. This is not thread-private. */
1390 retval = mem_non_local;
1391 goto new_memory_ret;
1394 stmt = SSA_NAME_DEF_STMT (x);
1396 /* If the malloc call is outside the transaction, this is
1397 thread-local. */
1398 if (retval != mem_thread_local
1399 && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
1400 retval = mem_thread_local;
1402 if (is_gimple_assign (stmt))
1404 code = gimple_assign_rhs_code (stmt);
1405 /* x = foo ==> foo */
1406 if (code == SSA_NAME)
1407 x = gimple_assign_rhs1 (stmt);
1408 /* x = foo + n ==> foo */
1409 else if (code == POINTER_PLUS_EXPR)
1410 x = gimple_assign_rhs1 (stmt);
1411 /* x = (cast*) foo ==> foo */
1412 else if (code == VIEW_CONVERT_EXPR || code == NOP_EXPR)
1413 x = gimple_assign_rhs1 (stmt);
1414 /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
1415 else if (code == COND_EXPR)
1417 tree op1 = gimple_assign_rhs2 (stmt);
1418 tree op2 = gimple_assign_rhs3 (stmt);
1419 enum thread_memory_type mem;
1420 retval = thread_private_new_memory (entry_block, op1);
1421 if (retval == mem_non_local)
1422 goto new_memory_ret;
1423 mem = thread_private_new_memory (entry_block, op2);
1424 retval = MIN (retval, mem);
1425 goto new_memory_ret;
1427 else
1429 retval = mem_non_local;
1430 goto new_memory_ret;
1433 else
1435 if (gimple_code (stmt) == GIMPLE_PHI)
1437 unsigned int i;
1438 enum thread_memory_type mem;
1439 tree phi_result = gimple_phi_result (stmt);
1441 /* If any of the ancestors are non-local, we are sure to
1442 be non-local. Otherwise we can avoid doing anything
1443 and inherit what has already been generated. */
1444 retval = mem_max;
1445 for (i = 0; i < gimple_phi_num_args (stmt); ++i)
1447 tree op = PHI_ARG_DEF (stmt, i);
1449 /* Exclude self-assignment. */
1450 if (phi_result == op)
1451 continue;
1453 mem = thread_private_new_memory (entry_block, op);
1454 if (mem == mem_non_local)
1456 retval = mem;
1457 goto new_memory_ret;
1459 retval = MIN (retval, mem);
1461 goto new_memory_ret;
1463 break;
1466 while (TREE_CODE (x) == SSA_NAME);
1468 if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
1469 /* Thread-local or transaction-local. */
1471 else
1472 retval = mem_non_local;
1474 new_memory_ret:
1475 elt_p->local_new_memory = retval;
1476 return retval;
1479 /* Determine whether X has to be instrumented using a read
1480 or write barrier.
1482 ENTRY_BLOCK is the entry block for the region in which STMT resides,
1483 or NULL if unknown.
1485 STMT is the statement in which X occurs. It is used for thread
1486 private memory instrumentation. If no TPM instrumentation is
1487 desired, STMT should be null. */
1488 static bool
1489 requires_barrier (basic_block entry_block, tree x, gimple stmt)
1491 tree orig = x;
1492 while (handled_component_p (x))
1493 x = TREE_OPERAND (x, 0);
1495 switch (TREE_CODE (x))
1497 case INDIRECT_REF:
1498 case MEM_REF:
1500 enum thread_memory_type ret;
1502 ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
1503 if (ret == mem_non_local)
1504 return true;
1505 if (stmt && ret == mem_thread_local)
1506 /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */
1507 tm_log_add (entry_block, orig, stmt);
1509 /* Transaction-locals require nothing at all. For malloc, a
1510 transaction restart frees the memory and we reallocate.
1511 For alloca, the stack pointer gets reset by the retry and
1512 we reallocate. */
1513 return false;
1516 case TARGET_MEM_REF:
1517 if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
1518 return true;
1519 x = TREE_OPERAND (TMR_BASE (x), 0);
1520 if (TREE_CODE (x) == PARM_DECL)
1521 return false;
1522 gcc_assert (TREE_CODE (x) == VAR_DECL);
1523 /* FALLTHRU */
1525 case PARM_DECL:
1526 case RESULT_DECL:
1527 case VAR_DECL:
1528 if (DECL_BY_REFERENCE (x))
1530 /* ??? This value is a pointer, but aggregate_value_p has been
1531 jigged to return true which confuses needs_to_live_in_memory.
1532 This ought to be cleaned up generically.
1534 FIXME: Verify this still happens after the next mainline
1535 merge. Testcase is g++.dg/tm/pr47554.C.
1537 return false;
1540 if (is_global_var (x))
1541 return !TREE_READONLY (x);
1542 if (/* FIXME: This condition should actually go below in the
1543 tm_log_add() call, however is_call_clobbered() depends on
1544 aliasing info which is not available during
1545 gimplification. Since requires_barrier() gets called
1546 during lower_sequence_tm/gimplification, leave the call
1547 to needs_to_live_in_memory until we eliminate
1548 lower_sequence_tm altogether. */
1549 needs_to_live_in_memory (x))
1550 return true;
1551 else
1553 /* For local memory that doesn't escape (aka thread private
1554 memory), we can either save the value at the beginning of
1555 the transaction and restore on restart, or call a tm
1556 function to dynamically save and restore on restart
1557 (ITM_L*). */
1558 if (stmt)
1559 tm_log_add (entry_block, orig, stmt);
1560 return false;
1563 default:
1564 return false;
1568 /* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
1569 a transaction region. */
1571 static void
1572 examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
1574 gimple stmt = gsi_stmt (*gsi);
1576 if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
1577 *state |= GTMA_HAVE_LOAD;
1578 if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
1579 *state |= GTMA_HAVE_STORE;
1582 /* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */
1584 static void
1585 examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
1587 gimple stmt = gsi_stmt (*gsi);
1588 tree fn;
1590 if (is_tm_pure_call (stmt))
1591 return;
1593 /* Check if this call is a transaction abort. */
1594 fn = gimple_call_fndecl (stmt);
1595 if (is_tm_abort (fn))
1596 *state |= GTMA_HAVE_ABORT;
1598 /* Note that something may happen. */
1599 *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
1602 /* Lower a GIMPLE_TRANSACTION statement. */
1604 static void
1605 lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
1607 gimple g, stmt = gsi_stmt (*gsi);
1608 unsigned int *outer_state = (unsigned int *) wi->info;
1609 unsigned int this_state = 0;
1610 struct walk_stmt_info this_wi;
1612 /* First, lower the body. The scanning that we do inside gives
1613 us some idea of what we're dealing with. */
1614 memset (&this_wi, 0, sizeof (this_wi));
1615 this_wi.info = (void *) &this_state;
1616 walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
1617 lower_sequence_tm, NULL, &this_wi);
1619 /* If there was absolutely nothing transaction related inside the
1620 transaction, we may elide it. Likewise if this is a nested
1621 transaction and does not contain an abort. */
1622 if (this_state == 0
1623 || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
1625 if (outer_state)
1626 *outer_state |= this_state;
1628 gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
1629 GSI_SAME_STMT);
1630 gimple_transaction_set_body (stmt, NULL);
1632 gsi_remove (gsi, true);
1633 wi->removed_stmt = true;
1634 return;
1637 /* Wrap the body of the transaction in a try-finally node so that
1638 the commit call is always executed. */
1639 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
1640 if (flag_exceptions)
1642 tree ptr;
1643 gimple_seq n_seq, e_seq;
1645 n_seq = gimple_seq_alloc_with_stmt (g);
1646 e_seq = NULL;
1648 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
1649 1, integer_zero_node);
1650 ptr = create_tmp_var (ptr_type_node, NULL);
1651 gimple_call_set_lhs (g, ptr);
1652 gimple_seq_add_stmt (&e_seq, g);
1654 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
1655 1, ptr);
1656 gimple_seq_add_stmt (&e_seq, g);
1658 g = gimple_build_eh_else (n_seq, e_seq);
1661 g = gimple_build_try (gimple_transaction_body (stmt),
1662 gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
1663 gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
1665 gimple_transaction_set_body (stmt, NULL);
1667 /* If the transaction calls abort or if this is an outer transaction,
1668 add an "over" label afterwards. */
1669 if ((this_state & (GTMA_HAVE_ABORT))
1670 || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
1672 tree label = create_artificial_label (UNKNOWN_LOCATION);
1673 gimple_transaction_set_label (stmt, label);
1674 gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
1677 /* Record the set of operations found for use later. */
1678 this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
1679 gimple_transaction_set_subcode (stmt, this_state);
1682 /* Iterate through the statements in the sequence, lowering them all
1683 as appropriate for being in a transaction. */
1685 static tree
1686 lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1687 struct walk_stmt_info *wi)
1689 unsigned int *state = (unsigned int *) wi->info;
1690 gimple stmt = gsi_stmt (*gsi);
1692 *handled_ops_p = true;
1693 switch (gimple_code (stmt))
1695 case GIMPLE_ASSIGN:
1696 /* Only memory reads/writes need to be instrumented. */
1697 if (gimple_assign_single_p (stmt))
1698 examine_assign_tm (state, gsi);
1699 break;
1701 case GIMPLE_CALL:
1702 examine_call_tm (state, gsi);
1703 break;
1705 case GIMPLE_ASM:
1706 *state |= GTMA_MAY_ENTER_IRREVOCABLE;
1707 break;
1709 case GIMPLE_TRANSACTION:
1710 lower_transaction (gsi, wi);
1711 break;
1713 default:
1714 *handled_ops_p = !gimple_has_substatements (stmt);
1715 break;
1718 return NULL_TREE;
1721 /* Iterate through the statements in the sequence, lowering them all
1722 as appropriate for being outside of a transaction. */
1724 static tree
1725 lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1726 struct walk_stmt_info * wi)
1728 gimple stmt = gsi_stmt (*gsi);
1730 if (gimple_code (stmt) == GIMPLE_TRANSACTION)
1732 *handled_ops_p = true;
1733 lower_transaction (gsi, wi);
1735 else
1736 *handled_ops_p = !gimple_has_substatements (stmt);
1738 return NULL_TREE;
1741 /* Main entry point for flattening GIMPLE_TRANSACTION constructs. After
1742 this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
1743 been moved out, and all the data required for constructing a proper
1744 CFG has been recorded. */
1746 static unsigned int
1747 execute_lower_tm (void)
1749 struct walk_stmt_info wi;
1750 gimple_seq body;
1752 /* Transactional clones aren't created until a later pass. */
1753 gcc_assert (!decl_is_tm_clone (current_function_decl));
1755 body = gimple_body (current_function_decl);
1756 memset (&wi, 0, sizeof (wi));
1757 walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
1758 gimple_set_body (current_function_decl, body);
1760 return 0;
1763 namespace {
1765 const pass_data pass_data_lower_tm =
1767 GIMPLE_PASS, /* type */
1768 "tmlower", /* name */
1769 OPTGROUP_NONE, /* optinfo_flags */
1770 true, /* has_gate */
1771 true, /* has_execute */
1772 TV_TRANS_MEM, /* tv_id */
1773 PROP_gimple_lcf, /* properties_required */
1774 0, /* properties_provided */
1775 0, /* properties_destroyed */
1776 0, /* todo_flags_start */
1777 0, /* todo_flags_finish */
1780 class pass_lower_tm : public gimple_opt_pass
1782 public:
1783 pass_lower_tm (gcc::context *ctxt)
1784 : gimple_opt_pass (pass_data_lower_tm, ctxt)
1787 /* opt_pass methods: */
1788 bool gate () { return gate_tm (); }
1789 unsigned int execute () { return execute_lower_tm (); }
1791 }; // class pass_lower_tm
1793 } // anon namespace
1795 gimple_opt_pass *
1796 make_pass_lower_tm (gcc::context *ctxt)
1798 return new pass_lower_tm (ctxt);
1801 /* Collect region information for each transaction. */
1803 struct tm_region
1805 /* Link to the next unnested transaction. */
1806 struct tm_region *next;
1808 /* Link to the next inner transaction. */
1809 struct tm_region *inner;
1811 /* Link to the next outer transaction. */
1812 struct tm_region *outer;
1814 /* The GIMPLE_TRANSACTION statement beginning this transaction.
1815 After TM_MARK, this gets replaced by a call to
1816 BUILT_IN_TM_START. */
1817 gimple transaction_stmt;
1819 /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
1820 BUILT_IN_TM_START, this field is true if the transaction is an
1821 outer transaction. */
1822 bool original_transaction_was_outer;
1824 /* Return value from BUILT_IN_TM_START. */
1825 tree tm_state;
1827 /* The entry block to this region. This will always be the first
1828 block of the body of the transaction. */
1829 basic_block entry_block;
1831 /* The first block after an expanded call to _ITM_beginTransaction. */
1832 basic_block restart_block;
1834 /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
1835 These blocks are still a part of the region (i.e., the border is
1836 inclusive). Note that this set is only complete for paths in the CFG
1837 starting at ENTRY_BLOCK, and that there is no exit block recorded for
1838 the edge to the "over" label. */
1839 bitmap exit_blocks;
1841 /* The set of all blocks that have a TM_IRREVOCABLE call. */
1842 bitmap irr_blocks;
1845 typedef struct tm_region *tm_region_p;
1847 /* True if there are pending edge statements to be committed for the
1848 current function being scanned in the tmmark pass. */
1849 bool pending_edge_inserts_p;
1851 static struct tm_region *all_tm_regions;
1852 static bitmap_obstack tm_obstack;
1855 /* A subroutine of tm_region_init. Record the existence of the
1856 GIMPLE_TRANSACTION statement in a tree of tm_region elements. */
1858 static struct tm_region *
1859 tm_region_init_0 (struct tm_region *outer, basic_block bb, gimple stmt)
1861 struct tm_region *region;
1863 region = (struct tm_region *)
1864 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
1866 if (outer)
1868 region->next = outer->inner;
1869 outer->inner = region;
1871 else
1873 region->next = all_tm_regions;
1874 all_tm_regions = region;
1876 region->inner = NULL;
1877 region->outer = outer;
1879 region->transaction_stmt = stmt;
1880 region->original_transaction_was_outer = false;
1881 region->tm_state = NULL;
1883 /* There are either one or two edges out of the block containing
1884 the GIMPLE_TRANSACTION, one to the actual region and one to the
1885 "over" label if the region contains an abort. The former will
1886 always be the one marked FALLTHRU. */
1887 region->entry_block = FALLTHRU_EDGE (bb)->dest;
1889 region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
1890 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
1892 return region;
1895 /* A subroutine of tm_region_init. Record all the exit and
1896 irrevocable blocks in BB into the region's exit_blocks and
1897 irr_blocks bitmaps. Returns the new region being scanned. */
1899 static struct tm_region *
1900 tm_region_init_1 (struct tm_region *region, basic_block bb)
1902 gimple_stmt_iterator gsi;
1903 gimple g;
1905 if (!region
1906 || (!region->irr_blocks && !region->exit_blocks))
1907 return region;
1909 /* Check to see if this is the end of a region by seeing if it
1910 contains a call to __builtin_tm_commit{,_eh}. Note that the
1911 outermost region for DECL_IS_TM_CLONE need not collect this. */
1912 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
1914 g = gsi_stmt (gsi);
1915 if (gimple_code (g) == GIMPLE_CALL)
1917 tree fn = gimple_call_fndecl (g);
1918 if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
1920 if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
1921 || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
1922 && region->exit_blocks)
1924 bitmap_set_bit (region->exit_blocks, bb->index);
1925 region = region->outer;
1926 break;
1928 if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
1929 bitmap_set_bit (region->irr_blocks, bb->index);
1933 return region;
1936 /* Collect all of the transaction regions within the current function
1937 and record them in ALL_TM_REGIONS. The REGION parameter may specify
1938 an "outermost" region for use by tm clones. */
1940 static void
1941 tm_region_init (struct tm_region *region)
1943 gimple g;
1944 edge_iterator ei;
1945 edge e;
1946 basic_block bb;
1947 auto_vec<basic_block> queue;
1948 bitmap visited_blocks = BITMAP_ALLOC (NULL);
1949 struct tm_region *old_region;
1950 auto_vec<tm_region_p> bb_regions;
1952 all_tm_regions = region;
1953 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
1955 /* We could store this information in bb->aux, but we may get called
1956 through get_all_tm_blocks() from another pass that may already be
1957 using bb->aux. */
1958 bb_regions.safe_grow_cleared (last_basic_block);
1960 queue.safe_push (bb);
1961 bb_regions[bb->index] = region;
1964 bb = queue.pop ();
1965 region = bb_regions[bb->index];
1966 bb_regions[bb->index] = NULL;
1968 /* Record exit and irrevocable blocks. */
1969 region = tm_region_init_1 (region, bb);
1971 /* Check for the last statement in the block beginning a new region. */
1972 g = last_stmt (bb);
1973 old_region = region;
1974 if (g && gimple_code (g) == GIMPLE_TRANSACTION)
1975 region = tm_region_init_0 (region, bb, g);
1977 /* Process subsequent blocks. */
1978 FOR_EACH_EDGE (e, ei, bb->succs)
1979 if (!bitmap_bit_p (visited_blocks, e->dest->index))
1981 bitmap_set_bit (visited_blocks, e->dest->index);
1982 queue.safe_push (e->dest);
1984 /* If the current block started a new region, make sure that only
1985 the entry block of the new region is associated with this region.
1986 Other successors are still part of the old region. */
1987 if (old_region != region && e->dest != region->entry_block)
1988 bb_regions[e->dest->index] = old_region;
1989 else
1990 bb_regions[e->dest->index] = region;
1993 while (!queue.is_empty ());
1994 BITMAP_FREE (visited_blocks);
1997 /* The "gate" function for all transactional memory expansion and optimization
1998 passes. We collect region information for each top-level transaction, and
1999 if we don't find any, we skip all of the TM passes. Each region will have
2000 all of the exit blocks recorded, and the originating statement. */
2002 static bool
2003 gate_tm_init (void)
2005 if (!flag_tm)
2006 return false;
2008 calculate_dominance_info (CDI_DOMINATORS);
2009 bitmap_obstack_initialize (&tm_obstack);
2011 /* If the function is a TM_CLONE, then the entire function is the region. */
2012 if (decl_is_tm_clone (current_function_decl))
2014 struct tm_region *region = (struct tm_region *)
2015 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
2016 memset (region, 0, sizeof (*region));
2017 region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2018 /* For a clone, the entire function is the region. But even if
2019 we don't need to record any exit blocks, we may need to
2020 record irrevocable blocks. */
2021 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
2023 tm_region_init (region);
2025 else
2027 tm_region_init (NULL);
2029 /* If we didn't find any regions, cleanup and skip the whole tree
2030 of tm-related optimizations. */
2031 if (all_tm_regions == NULL)
2033 bitmap_obstack_release (&tm_obstack);
2034 return false;
2038 return true;
2041 namespace {
2043 const pass_data pass_data_tm_init =
2045 GIMPLE_PASS, /* type */
2046 "*tminit", /* name */
2047 OPTGROUP_NONE, /* optinfo_flags */
2048 true, /* has_gate */
2049 false, /* has_execute */
2050 TV_TRANS_MEM, /* tv_id */
2051 ( PROP_ssa | PROP_cfg ), /* properties_required */
2052 0, /* properties_provided */
2053 0, /* properties_destroyed */
2054 0, /* todo_flags_start */
2055 0, /* todo_flags_finish */
2058 class pass_tm_init : public gimple_opt_pass
2060 public:
2061 pass_tm_init (gcc::context *ctxt)
2062 : gimple_opt_pass (pass_data_tm_init, ctxt)
2065 /* opt_pass methods: */
2066 bool gate () { return gate_tm_init (); }
2068 }; // class pass_tm_init
2070 } // anon namespace
2072 gimple_opt_pass *
2073 make_pass_tm_init (gcc::context *ctxt)
2075 return new pass_tm_init (ctxt);
2078 /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
2079 represented by REGION. */
2081 static inline void
2082 transaction_subcode_ior (struct tm_region *region, unsigned flags)
2084 if (region && region->transaction_stmt)
2086 flags |= gimple_transaction_subcode (region->transaction_stmt);
2087 gimple_transaction_set_subcode (region->transaction_stmt, flags);
2091 /* Construct a memory load in a transactional context. Return the
2092 gimple statement performing the load, or NULL if there is no
2093 TM_LOAD builtin of the appropriate size to do the load.
2095 LOC is the location to use for the new statement(s). */
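/* For illustration only (the builtin actually chosen depends on the
   type, its size, and the target's vector hooks):

     x = *p;      // 4-byte scalar load inside a transaction

   becomes roughly

     x = <BUILT_IN_TM_LOAD_4> (p);

   with an extra VIEW_CONVERT_EXPR through a temporary when the
   builtin's return type does not match the type of the load.  */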
2097 static gimple
2098 build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2100 enum built_in_function code = END_BUILTINS;
2101 tree t, type = TREE_TYPE (rhs), decl;
2102 gimple gcall;
2104 if (type == float_type_node)
2105 code = BUILT_IN_TM_LOAD_FLOAT;
2106 else if (type == double_type_node)
2107 code = BUILT_IN_TM_LOAD_DOUBLE;
2108 else if (type == long_double_type_node)
2109 code = BUILT_IN_TM_LOAD_LDOUBLE;
2110 else if (TYPE_SIZE_UNIT (type) != NULL
2111 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
2113 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
2115 case 1:
2116 code = BUILT_IN_TM_LOAD_1;
2117 break;
2118 case 2:
2119 code = BUILT_IN_TM_LOAD_2;
2120 break;
2121 case 4:
2122 code = BUILT_IN_TM_LOAD_4;
2123 break;
2124 case 8:
2125 code = BUILT_IN_TM_LOAD_8;
2126 break;
2130 if (code == END_BUILTINS)
2132 decl = targetm.vectorize.builtin_tm_load (type);
2133 if (!decl)
2134 return NULL;
2136 else
2137 decl = builtin_decl_explicit (code);
2139 t = gimplify_addr (gsi, rhs);
2140 gcall = gimple_build_call (decl, 1, t);
2141 gimple_set_location (gcall, loc);
2143 t = TREE_TYPE (TREE_TYPE (decl));
2144 if (useless_type_conversion_p (type, t))
2146 gimple_call_set_lhs (gcall, lhs);
2147 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2149 else
2151 gimple g;
2152 tree temp;
2154 temp = create_tmp_reg (t, NULL);
2155 gimple_call_set_lhs (gcall, temp);
2156 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2158 t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
2159 g = gimple_build_assign (lhs, t);
2160 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2163 return gcall;
2167 /* Similarly for storing TYPE in a transactional context. */
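/* Illustrative sketch of the store case:

     *p = y;      // 4-byte scalar store inside a transaction

   becomes roughly

     <BUILT_IN_TM_STORE_4> (p, y);

   where Y is first VIEW_CONVERT_EXPRed into the builtin's argument type
   if needed, and an empty CONSTRUCTOR on the RHS is rewritten as a
   plain zero constant.  */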
2169 static gimple
2170 build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2172 enum built_in_function code = END_BUILTINS;
2173 tree t, fn, type = TREE_TYPE (rhs), simple_type;
2174 gimple gcall;
2176 if (type == float_type_node)
2177 code = BUILT_IN_TM_STORE_FLOAT;
2178 else if (type == double_type_node)
2179 code = BUILT_IN_TM_STORE_DOUBLE;
2180 else if (type == long_double_type_node)
2181 code = BUILT_IN_TM_STORE_LDOUBLE;
2182 else if (TYPE_SIZE_UNIT (type) != NULL
2183 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
2185 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
2187 case 1:
2188 code = BUILT_IN_TM_STORE_1;
2189 break;
2190 case 2:
2191 code = BUILT_IN_TM_STORE_2;
2192 break;
2193 case 4:
2194 code = BUILT_IN_TM_STORE_4;
2195 break;
2196 case 8:
2197 code = BUILT_IN_TM_STORE_8;
2198 break;
2202 if (code == END_BUILTINS)
2204 fn = targetm.vectorize.builtin_tm_store (type);
2205 if (!fn)
2206 return NULL;
2208 else
2209 fn = builtin_decl_explicit (code);
2211 simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
2213 if (TREE_CODE (rhs) == CONSTRUCTOR)
2215 /* Handle the easy initialization to zero. */
2216 if (!CONSTRUCTOR_ELTS (rhs))
2217 rhs = build_int_cst (simple_type, 0);
2218 else
2220 /* ...otherwise punt to the caller and probably use
2221 BUILT_IN_TM_MEMMOVE, because we can't wrap a
2222 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
2223 valid gimple. */
2224 return NULL;
2227 else if (!useless_type_conversion_p (simple_type, type))
2229 gimple g;
2230 tree temp;
2232 temp = create_tmp_reg (simple_type, NULL);
2233 t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
2234 g = gimple_build_assign (temp, t);
2235 gimple_set_location (g, loc);
2236 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2238 rhs = temp;
2241 t = gimplify_addr (gsi, lhs);
2242 gcall = gimple_build_call (fn, 2, t, rhs);
2243 gimple_set_location (gcall, loc);
2244 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2246 return gcall;
2250 /* Expand an assignment statement into transactional builtins. */
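/* The cases handled below, sketched at the source level (illustrative):

     x = *p;    ==>  x = <tm_load> (p);                  // load barrier
     *p = y;    ==>  <tm_store> (p, y);                  // store barrier
     *p = *q;   ==>  <tm_memmove> (p, q, sizeof (*p));   // both barriers,
                                                         // no sized builtin

   Assignments that need neither barrier are left untouched, apart from
   logging thread-private addresses via requires_barrier.  */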
2252 static void
2253 expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
2255 gimple stmt = gsi_stmt (*gsi);
2256 location_t loc = gimple_location (stmt);
2257 tree lhs = gimple_assign_lhs (stmt);
2258 tree rhs = gimple_assign_rhs1 (stmt);
2259 bool store_p = requires_barrier (region->entry_block, lhs, NULL);
2260 bool load_p = requires_barrier (region->entry_block, rhs, NULL);
2261 gimple gcall = NULL;
2263 if (!load_p && !store_p)
2265 /* Add thread private addresses to log if applicable. */
2266 requires_barrier (region->entry_block, lhs, stmt);
2267 gsi_next (gsi);
2268 return;
2271 // Remove original load/store statement.
2272 gsi_remove (gsi, true);
2274 if (load_p && !store_p)
2276 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2277 gcall = build_tm_load (loc, lhs, rhs, gsi);
2279 else if (store_p && !load_p)
2281 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2282 gcall = build_tm_store (loc, lhs, rhs, gsi);
2284 if (!gcall)
2286 tree lhs_addr, rhs_addr, tmp;
2288 if (load_p)
2289 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2290 if (store_p)
2291 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2293 /* ??? Figure out if there's any possible overlap between the LHS
2294 and the RHS and if not, use MEMCPY. */
2296 if (load_p && is_gimple_reg (lhs))
2298 tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
2299 lhs_addr = build_fold_addr_expr (tmp);
2301 else
2303 tmp = NULL_TREE;
2304 lhs_addr = gimplify_addr (gsi, lhs);
2306 rhs_addr = gimplify_addr (gsi, rhs);
2307 gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
2308 3, lhs_addr, rhs_addr,
2309 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
2310 gimple_set_location (gcall, loc);
2311 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2313 if (tmp)
2315 gcall = gimple_build_assign (lhs, tmp);
2316 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2320 /* Now that we have the load/store in its instrumented form, add
2321 thread private addresses to the log if applicable. */
2322 if (!store_p)
2323 requires_barrier (region->entry_block, lhs, gcall);
2325 // The calls to build_tm_{store,load} above inserted the instrumented
2326 // call into the stream.
2327 // gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2331 /* Expand a call statement as appropriate for a transaction. That is,
2332 either verify that the call does not affect the transaction, or
2333 redirect the call to a clone that handles transactions, or change
2334 the transaction state to IRREVOCABLE. Return true if the call is
2335 one of the builtins that end a transaction. */
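/* Sketch of the LHS handling done at the end of this function: inside a
   transaction,

     x = foo (y);         // x lives in TM-visible memory

   becomes

     tmp = foo (y);
     x = tmp;             // this copy is then instrumented as a TM store

   unless the return-slot optimization applies, in which case the callee
   has already performed the instrumented store.  */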
2337 static bool
2338 expand_call_tm (struct tm_region *region,
2339 gimple_stmt_iterator *gsi)
2341 gimple stmt = gsi_stmt (*gsi);
2342 tree lhs = gimple_call_lhs (stmt);
2343 tree fn_decl;
2344 struct cgraph_node *node;
2345 bool retval = false;
2347 fn_decl = gimple_call_fndecl (stmt);
2349 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
2350 || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
2351 transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
2352 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
2353 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2355 if (is_tm_pure_call (stmt))
2356 return false;
2358 if (fn_decl)
2359 retval = is_tm_ending_fndecl (fn_decl);
2360 if (!retval)
2362 /* Assume all non-const/pure calls write to memory, except
2363 transaction ending builtins. */
2364 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2367 /* For indirect calls, we already generated a call into the runtime. */
2368 if (!fn_decl)
2370 tree fn = gimple_call_fn (stmt);
2372 /* We are guaranteed never to go irrevocable on a safe or pure
2373 call, and the pure call was handled above. */
2374 if (is_tm_safe (fn))
2375 return false;
2376 else
2377 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2379 return false;
2382 node = cgraph_get_node (fn_decl);
2383 /* All calls should have cgraph here. */
2384 if (!node)
2386 /* We can have a nodeless call here if some pass after IPA-tm
2387 added uninstrumented calls. For example, loop distribution
2388 can transform certain loop constructs into __builtin_mem*
2389 calls. In this case, see if we have a suitable TM
2390 replacement and fill in the gaps. */
2391 gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
2392 enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
2393 gcc_assert (code == BUILT_IN_MEMCPY
2394 || code == BUILT_IN_MEMMOVE
2395 || code == BUILT_IN_MEMSET);
2397 tree repl = find_tm_replacement_function (fn_decl);
2398 if (repl)
2400 gimple_call_set_fndecl (stmt, repl);
2401 update_stmt (stmt);
2402 node = cgraph_create_node (repl);
2403 node->local.tm_may_enter_irr = false;
2404 return expand_call_tm (region, gsi);
2406 gcc_unreachable ();
2408 if (node->local.tm_may_enter_irr)
2409 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2411 if (is_tm_abort (fn_decl))
2413 transaction_subcode_ior (region, GTMA_HAVE_ABORT);
2414 return true;
2417 /* Instrument the store if needed.
2419 If the assignment happens inside the function call (return slot
2420 optimization), there is no instrumentation to be done, since
2421 the callee should have done the right thing. */
2422 if (lhs && requires_barrier (region->entry_block, lhs, stmt)
2423 && !gimple_call_return_slot_opt_p (stmt))
2425 tree tmp = create_tmp_reg (TREE_TYPE (lhs), NULL);
2426 location_t loc = gimple_location (stmt);
2427 edge fallthru_edge = NULL;
2429 /* Remember if the call was going to throw. */
2430 if (stmt_can_throw_internal (stmt))
2432 edge_iterator ei;
2433 edge e;
2434 basic_block bb = gimple_bb (stmt);
2436 FOR_EACH_EDGE (e, ei, bb->succs)
2437 if (e->flags & EDGE_FALLTHRU)
2439 fallthru_edge = e;
2440 break;
2444 gimple_call_set_lhs (stmt, tmp);
2445 update_stmt (stmt);
2446 stmt = gimple_build_assign (lhs, tmp);
2447 gimple_set_location (stmt, loc);
2449 /* We cannot throw in the middle of a BB. If the call was going
2450 to throw, place the instrumentation on the fallthru edge, so
2451 the call remains the last statement in the block. */
2452 if (fallthru_edge)
2454 gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (stmt);
2455 gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
2456 expand_assign_tm (region, &fallthru_gsi);
2457 gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
2458 pending_edge_inserts_p = true;
2460 else
2462 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
2463 expand_assign_tm (region, gsi);
2466 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2469 return retval;
2473 /* Expand all statements in BB as appropriate for being inside
2474 a transaction. */
2476 static void
2477 expand_block_tm (struct tm_region *region, basic_block bb)
2479 gimple_stmt_iterator gsi;
2481 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2483 gimple stmt = gsi_stmt (gsi);
2484 switch (gimple_code (stmt))
2486 case GIMPLE_ASSIGN:
2487 /* Only memory reads/writes need to be instrumented. */
2488 if (gimple_assign_single_p (stmt)
2489 && !gimple_clobber_p (stmt))
2491 expand_assign_tm (region, &gsi);
2492 continue;
2494 break;
2496 case GIMPLE_CALL:
2497 if (expand_call_tm (region, &gsi))
2498 return;
2499 break;
2501 case GIMPLE_ASM:
2502 gcc_unreachable ();
2504 default:
2505 break;
2507 if (!gsi_end_p (gsi))
2508 gsi_next (&gsi);
2512 /* Return the list of basic-blocks in REGION.
2514 STOP_AT_IRREVOCABLE_P is true if the caller is uninterested in blocks
2515 following a TM_IRREVOCABLE call.
2517 INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the
2518 uninstrumented code path blocks in the list of basic blocks
2519 returned, false otherwise. */
2521 static vec<basic_block>
2522 get_tm_region_blocks (basic_block entry_block,
2523 bitmap exit_blocks,
2524 bitmap irr_blocks,
2525 bitmap all_region_blocks,
2526 bool stop_at_irrevocable_p,
2527 bool include_uninstrumented_p = true)
2529 vec<basic_block> bbs = vNULL;
2530 unsigned i;
2531 edge e;
2532 edge_iterator ei;
2533 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2535 i = 0;
2536 bbs.safe_push (entry_block);
2537 bitmap_set_bit (visited_blocks, entry_block->index);
2541 basic_block bb = bbs[i++];
2543 if (exit_blocks &&
2544 bitmap_bit_p (exit_blocks, bb->index))
2545 continue;
2547 if (stop_at_irrevocable_p
2548 && irr_blocks
2549 && bitmap_bit_p (irr_blocks, bb->index))
2550 continue;
2552 FOR_EACH_EDGE (e, ei, bb->succs)
2553 if ((include_uninstrumented_p
2554 || !(e->flags & EDGE_TM_UNINSTRUMENTED))
2555 && !bitmap_bit_p (visited_blocks, e->dest->index))
2557 bitmap_set_bit (visited_blocks, e->dest->index);
2558 bbs.safe_push (e->dest);
2561 while (i < bbs.length ());
2563 if (all_region_blocks)
2564 bitmap_ior_into (all_region_blocks, visited_blocks);
2566 BITMAP_FREE (visited_blocks);
2567 return bbs;
2570 // Callback data for collect_bb2reg.
2571 struct bb2reg_stuff
2573 vec<tm_region_p> *bb2reg;
2574 bool include_uninstrumented_p;
2577 // Callback for expand_regions, collect innermost region data for each bb.
2578 static void *
2579 collect_bb2reg (struct tm_region *region, void *data)
2581 struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
2582 vec<tm_region_p> *bb2reg = stuff->bb2reg;
2583 vec<basic_block> queue;
2584 unsigned int i;
2585 basic_block bb;
2587 queue = get_tm_region_blocks (region->entry_block,
2588 region->exit_blocks,
2589 region->irr_blocks,
2590 NULL,
2591 /*stop_at_irr_p=*/true,
2592 stuff->include_uninstrumented_p);
2594 // We expect expand_region to perform a post-order traversal of the region
2595 // tree. Therefore the last region seen for any bb is the innermost.
2596 FOR_EACH_VEC_ELT (queue, i, bb)
2597 (*bb2reg)[bb->index] = region;
2599 queue.release ();
2600 return NULL;
2603 // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
2604 // which a basic block belongs. Note that we only consider the instrumented
2605 // code paths for the region; the uninstrumented code paths are ignored if
2606 // INCLUDE_UNINSTRUMENTED_P is false.
2608 // ??? This data is very similar to the bb_regions array that is collected
2609 // during tm_region_init. Or, rather, this data is similar to what could
2610 // be used within tm_region_init. The actual computation in tm_region_init
2611 // begins and ends with bb_regions entirely full of NULL pointers, due to
2612 // the way in which pointers are swapped in and out of the array.
2614 // ??? Our callers expect that blocks are not shared between transactions.
2615 // When the optimizers get too smart, and blocks are shared, then during
2616 // the tm_mark phase we'll add log entries to only one of the two transactions,
2617 // and in the tm_edge phase we'll add edges to the CFG that create invalid
2618 // cycles. The symptom is SSA defs that do not dominate their uses.
2619 // Note that the optimizers were locally correct with their transformation,
2620 // as we have no info within the program that suggests that the blocks cannot
2621 // be shared.
2623 // ??? There is currently a hack inside tree-ssa-pre.c to work around the
2624 // only known instance of this block sharing.
2626 static vec<tm_region_p>
2627 get_bb_regions_instrumented (bool traverse_clones,
2628 bool include_uninstrumented_p)
2630 unsigned n = last_basic_block;
2631 struct bb2reg_stuff stuff;
2632 vec<tm_region_p> ret;
2634 ret.create (n);
2635 ret.safe_grow_cleared (n);
2636 stuff.bb2reg = &ret;
2637 stuff.include_uninstrumented_p = include_uninstrumented_p;
2638 expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);
2640 return ret;
2643 /* Set the IN_TRANSACTION for all gimple statements that appear in a
2644 transaction. */
2646 void
2647 compute_transaction_bits (void)
2649 struct tm_region *region;
2650 vec<basic_block> queue;
2651 unsigned int i;
2652 basic_block bb;
2654 /* ?? Perhaps we need to abstract gate_tm_init further, because we
2655 certainly don't need it to calculate CDI_DOMINATOR info. */
2656 gate_tm_init ();
2658 FOR_EACH_BB (bb)
2659 bb->flags &= ~BB_IN_TRANSACTION;
2661 for (region = all_tm_regions; region; region = region->next)
2663 queue = get_tm_region_blocks (region->entry_block,
2664 region->exit_blocks,
2665 region->irr_blocks,
2666 NULL,
2667 /*stop_at_irr_p=*/true);
2668 for (i = 0; queue.iterate (i, &bb); ++i)
2669 bb->flags |= BB_IN_TRANSACTION;
2670 queue.release ();
2673 if (all_tm_regions)
2674 bitmap_obstack_release (&tm_obstack);
2677 /* Replace the GIMPLE_TRANSACTION in this region with the corresponding
2678 call to BUILT_IN_TM_START. */
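/* Schematically (a sketch; the PR_* flags and the test blocks below are
   only generated when needed):

     __transaction_atomic { ...body... }

   is replaced by roughly

     tm_state = BUILT_IN_TM_START (PR_INSTRUMENTEDCODE | ...);
     if (tm_state & A_RESTORELIVEVARIABLES)   <emit log restores>
     if (tm_state & A_ABORTTRANSACTION)       <take the abort edge>
     if (tm_state & A_RUNUNINSTRUMENTEDCODE)  <take the uninstrumented path>
     ...body...                                                          */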
2680 static void *
2681 expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2683 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2684 basic_block transaction_bb = gimple_bb (region->transaction_stmt);
2685 tree tm_state = region->tm_state;
2686 tree tm_state_type = TREE_TYPE (tm_state);
2687 edge abort_edge = NULL;
2688 edge inst_edge = NULL;
2689 edge uninst_edge = NULL;
2690 edge fallthru_edge = NULL;
2692 // Identify the various successors of the transaction start.
2694 edge_iterator i;
2695 edge e;
2696 FOR_EACH_EDGE (e, i, transaction_bb->succs)
2698 if (e->flags & EDGE_TM_ABORT)
2699 abort_edge = e;
2700 else if (e->flags & EDGE_TM_UNINSTRUMENTED)
2701 uninst_edge = e;
2702 else
2703 inst_edge = e;
2704 if (e->flags & EDGE_FALLTHRU)
2705 fallthru_edge = e;
2709 /* ??? There are plenty of bits here we're not computing. */
2711 int subcode = gimple_transaction_subcode (region->transaction_stmt);
2712 int flags = 0;
2713 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2714 flags |= PR_DOESGOIRREVOCABLE;
2715 if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
2716 flags |= PR_HASNOIRREVOCABLE;
2717 /* If the transaction does not have an abort in lexical scope and is not
2718 marked as an outer transaction, then it will never abort. */
2719 if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
2720 flags |= PR_HASNOABORT;
2721 if ((subcode & GTMA_HAVE_STORE) == 0)
2722 flags |= PR_READONLY;
2723 if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
2724 flags |= PR_INSTRUMENTEDCODE;
2725 if (uninst_edge)
2726 flags |= PR_UNINSTRUMENTEDCODE;
2727 if (subcode & GTMA_IS_OUTER)
2728 region->original_transaction_was_outer = true;
2729 tree t = build_int_cst (tm_state_type, flags);
2730 gimple call = gimple_build_call (tm_start, 1, t);
2731 gimple_call_set_lhs (call, tm_state);
2732 gimple_set_location (call, gimple_location (region->transaction_stmt));
2734 // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
2735 gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
2736 gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
2737 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2738 gsi_remove (&gsi, true);
2739 region->transaction_stmt = call;
2742 // Generate log saves.
2743 if (!tm_log_save_addresses.is_empty ())
2744 tm_log_emit_saves (region->entry_block, transaction_bb);
2746 // In the beginning, we've no tests to perform on transaction restart.
2747 // Note that after this point, transaction_bb becomes the "most recent
2748 // block containing tests for the transaction".
2749 region->restart_block = region->entry_block;
2751 // Generate log restores.
2752 if (!tm_log_save_addresses.is_empty ())
2754 basic_block test_bb = create_empty_bb (transaction_bb);
2755 basic_block code_bb = create_empty_bb (test_bb);
2756 basic_block join_bb = create_empty_bb (code_bb);
2757 if (current_loops && transaction_bb->loop_father)
2759 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2760 add_bb_to_loop (code_bb, transaction_bb->loop_father);
2761 add_bb_to_loop (join_bb, transaction_bb->loop_father);
2763 if (region->restart_block == region->entry_block)
2764 region->restart_block = test_bb;
2766 tree t1 = create_tmp_reg (tm_state_type, NULL);
2767 tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
2768 gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
2769 tm_state, t2);
2770 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2771 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2773 t2 = build_int_cst (tm_state_type, 0);
2774 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2775 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2777 tm_log_emit_restores (region->entry_block, code_bb);
2779 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2780 edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
2781 edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
2782 redirect_edge_pred (fallthru_edge, join_bb);
2784 join_bb->frequency = test_bb->frequency = transaction_bb->frequency;
2785 join_bb->count = test_bb->count = transaction_bb->count;
2787 ei->probability = PROB_ALWAYS;
2788 et->probability = PROB_LIKELY;
2789 ef->probability = PROB_UNLIKELY;
2790 et->count = apply_probability (test_bb->count, et->probability);
2791 ef->count = apply_probability (test_bb->count, ef->probability);
2793 code_bb->count = et->count;
2794 code_bb->frequency = EDGE_FREQUENCY (et);
2796 transaction_bb = join_bb;
2799 // If we have an ABORT edge, create a test to perform the abort.
2800 if (abort_edge)
2802 basic_block test_bb = create_empty_bb (transaction_bb);
2803 if (current_loops && transaction_bb->loop_father)
2804 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2805 if (region->restart_block == region->entry_block)
2806 region->restart_block = test_bb;
2808 tree t1 = create_tmp_reg (tm_state_type, NULL);
2809 tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
2810 gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
2811 tm_state, t2);
2812 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2813 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2815 t2 = build_int_cst (tm_state_type, 0);
2816 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2817 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2819 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2820 test_bb->frequency = transaction_bb->frequency;
2821 test_bb->count = transaction_bb->count;
2822 ei->probability = PROB_ALWAYS;
2824 // Not an abort edge. If both are live, choose one at random as we'll
2825 // be fixing that up below.
2826 redirect_edge_pred (fallthru_edge, test_bb);
2827 fallthru_edge->flags = EDGE_FALSE_VALUE;
2828 fallthru_edge->probability = PROB_VERY_LIKELY;
2829 fallthru_edge->count
2830 = apply_probability (test_bb->count, fallthru_edge->probability);
2832 // Abort/over edge.
2833 redirect_edge_pred (abort_edge, test_bb);
2834 abort_edge->flags = EDGE_TRUE_VALUE;
2835 abort_edge->probability = PROB_VERY_UNLIKELY;
2836 abort_edge->count
2837 = apply_probability (test_bb->count, abort_edge->probability);
2839 transaction_bb = test_bb;
2842 // If we have both instrumented and uninstrumented code paths, select one.
2843 if (inst_edge && uninst_edge)
2845 basic_block test_bb = create_empty_bb (transaction_bb);
2846 if (current_loops && transaction_bb->loop_father)
2847 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2848 if (region->restart_block == region->entry_block)
2849 region->restart_block = test_bb;
2851 tree t1 = create_tmp_reg (tm_state_type, NULL);
2852 tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);
2854 gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
2855 tm_state, t2);
2856 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2857 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2859 t2 = build_int_cst (tm_state_type, 0);
2860 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2861 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2863 // Create the edge into test_bb first, as we want to copy values
2864 // out of the fallthru edge.
2865 edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
2866 e->probability = fallthru_edge->probability;
2867 test_bb->count = e->count = fallthru_edge->count;
2868 test_bb->frequency = EDGE_FREQUENCY (e);
2870 // Now update the edges to the inst/uninst implementations.
2871 // For now assume that the paths are equally likely. When using HTM,
2872 // we'll try the uninst path first and fall back to the inst path if HTM
2873 // buffers are exceeded. Without HTM we start with the inst path and
2874 // use the uninst path when falling back to serial mode.
2875 redirect_edge_pred (inst_edge, test_bb);
2876 inst_edge->flags = EDGE_FALSE_VALUE;
2877 inst_edge->probability = REG_BR_PROB_BASE / 2;
2878 inst_edge->count
2879 = apply_probability (test_bb->count, inst_edge->probability);
2881 redirect_edge_pred (uninst_edge, test_bb);
2882 uninst_edge->flags = EDGE_TRUE_VALUE;
2883 uninst_edge->probability = REG_BR_PROB_BASE / 2;
2884 uninst_edge->count
2885 = apply_probability (test_bb->count, uninst_edge->probability);
2888 // If we have no previous special cases, and we have PHIs at the beginning
2889 // of the atomic region, this means we have a loop at the beginning of the
2890 // atomic region that shares the first block. This can cause problems with
2891 // the abnormal transaction-restart edges that are added in the tm_edges pass.
2892 // Solve this by adding a new empty block to receive the abnormal edges.
2893 if (region->restart_block == region->entry_block
2894 && phi_nodes (region->entry_block))
2896 basic_block empty_bb = create_empty_bb (transaction_bb);
2897 region->restart_block = empty_bb;
2898 if (current_loops && transaction_bb->loop_father)
2899 add_bb_to_loop (empty_bb, transaction_bb->loop_father);
2901 redirect_edge_pred (fallthru_edge, empty_bb);
2902 make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
2905 return NULL;
2908 /* Generate the temporary to be used for the return value of
2909 BUILT_IN_TM_START. */
2911 static void *
2912 generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2914 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2915 region->tm_state =
2916 create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
2918 // Reset the subcode, post optimizations. We'll fill this in
2919 // again as we process blocks.
2920 if (region->exit_blocks)
2922 unsigned int subcode
2923 = gimple_transaction_subcode (region->transaction_stmt);
2925 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2926 subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
2927 | GTMA_MAY_ENTER_IRREVOCABLE
2928 | GTMA_HAS_NO_INSTRUMENTATION);
2929 else
2930 subcode &= GTMA_DECLARATION_MASK;
2931 gimple_transaction_set_subcode (region->transaction_stmt, subcode);
2934 return NULL;
2937 // Propagate flags from inner transactions outwards.
2938 static void
2939 propagate_tm_flags_out (struct tm_region *region)
2941 if (region == NULL)
2942 return;
2943 propagate_tm_flags_out (region->inner);
2945 if (region->outer && region->outer->transaction_stmt)
2947 unsigned s = gimple_transaction_subcode (region->transaction_stmt);
2948 s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
2949 | GTMA_MAY_ENTER_IRREVOCABLE);
2950 s |= gimple_transaction_subcode (region->outer->transaction_stmt);
2951 gimple_transaction_set_subcode (region->outer->transaction_stmt, s);
2954 propagate_tm_flags_out (region->next);
2957 /* Entry point to the MARK phase of TM expansion. Here we replace
2958 transactional memory statements with calls to builtins, and function
2959 calls with their transactional clones (if available). But we don't
2960 yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */
2962 static unsigned int
2963 execute_tm_mark (void)
2965 pending_edge_inserts_p = false;
2967 expand_regions (all_tm_regions, generate_tm_state, NULL,
2968 /*traverse_clones=*/true);
2970 tm_log_init ();
2972 vec<tm_region_p> bb_regions
2973 = get_bb_regions_instrumented (/*traverse_clones=*/true,
2974 /*include_uninstrumented_p=*/false);
2975 struct tm_region *r;
2976 unsigned i;
2978 // Expand memory operations into calls into the runtime.
2979 // This collects log entries as well.
2980 FOR_EACH_VEC_ELT (bb_regions, i, r)
2982 if (r != NULL)
2984 if (r->transaction_stmt)
2986 unsigned sub = gimple_transaction_subcode (r->transaction_stmt);
2988 /* If we're sure to go irrevocable, there won't be
2989 anything to expand, since the run-time will go
2990 irrevocable right away. */
2991 if (sub & GTMA_DOES_GO_IRREVOCABLE
2992 && sub & GTMA_MAY_ENTER_IRREVOCABLE)
2993 continue;
2995 expand_block_tm (r, BASIC_BLOCK (i));
2999 bb_regions.release ();
3001 // Propagate flags from inner transactions outwards.
3002 propagate_tm_flags_out (all_tm_regions);
3004 // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
3005 expand_regions (all_tm_regions, expand_transaction, NULL,
3006 /*traverse_clones=*/false);
3008 tm_log_emit ();
3009 tm_log_delete ();
3011 if (pending_edge_inserts_p)
3012 gsi_commit_edge_inserts ();
3013 free_dominance_info (CDI_DOMINATORS);
3014 return 0;
3017 namespace {
3019 const pass_data pass_data_tm_mark =
3021 GIMPLE_PASS, /* type */
3022 "tmmark", /* name */
3023 OPTGROUP_NONE, /* optinfo_flags */
3024 false, /* has_gate */
3025 true, /* has_execute */
3026 TV_TRANS_MEM, /* tv_id */
3027 ( PROP_ssa | PROP_cfg ), /* properties_required */
3028 0, /* properties_provided */
3029 0, /* properties_destroyed */
3030 0, /* todo_flags_start */
3031 ( TODO_update_ssa | TODO_verify_ssa ), /* todo_flags_finish */
3034 class pass_tm_mark : public gimple_opt_pass
3036 public:
3037 pass_tm_mark (gcc::context *ctxt)
3038 : gimple_opt_pass (pass_data_tm_mark, ctxt)
3041 /* opt_pass methods: */
3042 unsigned int execute () { return execute_tm_mark (); }
3044 }; // class pass_tm_mark
3046 } // anon namespace
3048 gimple_opt_pass *
3049 make_pass_tm_mark (gcc::context *ctxt)
3051 return new pass_tm_mark (ctxt);
3055 /* Create an abnormal edge from STMT at ITER, splitting the block
3056 as necessary. Adjust *PNEXT as needed for the split block. */
3058 static inline void
3059 split_bb_make_tm_edge (gimple stmt, basic_block dest_bb,
3060 gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
3062 basic_block bb = gimple_bb (stmt);
3063 if (!gsi_one_before_end_p (iter))
3065 edge e = split_block (bb, stmt);
3066 *pnext = gsi_start_bb (e->dest);
3068 make_edge (bb, dest_bb, EDGE_ABNORMAL);
3070 // Record the need for the edge for the benefit of the rtl passes.
3071 if (cfun->gimple_df->tm_restart == NULL)
3072 cfun->gimple_df->tm_restart = htab_create_ggc (31, struct_ptr_hash,
3073 struct_ptr_eq, ggc_free);
3075 struct tm_restart_node dummy;
3076 dummy.stmt = stmt;
3077 dummy.label_or_list = gimple_block_label (dest_bb);
3079 void **slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, INSERT);
3080 struct tm_restart_node *n = (struct tm_restart_node *) *slot;
3081 if (n == NULL)
3083 n = ggc_alloc_tm_restart_node ();
3084 *n = dummy;
3086 else
3088 tree old = n->label_or_list;
3089 if (TREE_CODE (old) == LABEL_DECL)
3090 old = tree_cons (NULL, old, NULL);
3091 n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
3095 /* Split block BB as necessary for every builtin function we added, and
3096 wire up the abnormal back edges implied by the transaction restart. */
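/* E.g. (a sketch): once instrumentation has produced

     ... <tm_store> (p, y); more-statements ...

   the block is split after the builtin call and an abnormal edge is
   wired from it to the restart block of the outermost region (or, for
   BUILT_IN_TM_ABORT, to the transaction being aborted), modelling the
   transaction restart performed by the runtime.  */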
3098 static void
3099 expand_block_edges (struct tm_region *const region, basic_block bb)
3101 gimple_stmt_iterator gsi, next_gsi;
3103 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
3105 gimple stmt = gsi_stmt (gsi);
3107 next_gsi = gsi;
3108 gsi_next (&next_gsi);
3110 // ??? Shouldn't we split for any non-pure, non-irrevocable function?
3111 if (gimple_code (stmt) != GIMPLE_CALL
3112 || (gimple_call_flags (stmt) & ECF_TM_BUILTIN) == 0)
3113 continue;
3115 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)) == BUILT_IN_TM_ABORT)
3117 // If we have a ``__transaction_cancel [[outer]]'', there is only
3118 // one abnormal edge: to the transaction marked OUTER.
3119 // All compiler-generated instances of BUILT_IN_TM_ABORT have a
3120 // constant argument, which we can examine here. Users invoking
3121 // TM_ABORT directly get what they deserve.
3122 tree arg = gimple_call_arg (stmt, 0);
3123 if (TREE_CODE (arg) == INTEGER_CST
3124 && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
3125 && !decl_is_tm_clone (current_function_decl))
3127 // Find the GTMA_IS_OUTER transaction.
3128 for (struct tm_region *o = region; o; o = o->outer)
3129 if (o->original_transaction_was_outer)
3131 split_bb_make_tm_edge (stmt, o->restart_block,
3132 gsi, &next_gsi);
3133 break;
3136 // Otherwise, the front-end should have semantically checked
3137 // outer aborts, but in either case the target region is not
3138 // within this function.
3139 continue;
3142 // Non-outer TM aborts have an abnormal edge to the innermost
3143 // transaction, the one being aborted.
3144 split_bb_make_tm_edge (stmt, region->restart_block, gsi, &next_gsi);
3147 // All TM builtins have an abnormal edge to the outer-most transaction.
3148 // We never restart inner transactions. For tm clones, we know a priori
3149 // that the outer-most transaction is outside the function.
3150 if (decl_is_tm_clone (current_function_decl))
3151 continue;
3153 if (cfun->gimple_df->tm_restart == NULL)
3154 cfun->gimple_df->tm_restart
3155 = htab_create_ggc (31, struct_ptr_hash, struct_ptr_eq, ggc_free);
3157 // All TM builtins have an abnormal edge to the outer-most transaction.
3158 // We never restart inner transactions.
3159 for (struct tm_region *o = region; o; o = o->outer)
3160 if (!o->outer)
3162 split_bb_make_tm_edge (stmt, o->restart_block, gsi, &next_gsi);
3163 break;
3166 // Delete any tail-call annotation that may have been added.
3167 // The tail-call pass may have mis-identified the commit as being
3168 // a candidate because we had not yet added this restart edge.
3169 gimple_call_set_tail (stmt, false);
3173 /* Entry point to the final expansion of transactional nodes. */
3175 static unsigned int
3176 execute_tm_edges (void)
3178 vec<tm_region_p> bb_regions
3179 = get_bb_regions_instrumented (/*traverse_clones=*/false,
3180 /*include_uninstrumented_p=*/true);
3181 struct tm_region *r;
3182 unsigned i;
3184 FOR_EACH_VEC_ELT (bb_regions, i, r)
3185 if (r != NULL)
3186 expand_block_edges (r, BASIC_BLOCK (i));
3188 bb_regions.release ();
3190 /* We've got to release the dominance info now, to indicate that it
3191 must be rebuilt completely. Otherwise we'll crash trying to update
3192 the SSA web in the TODO section following this pass. */
3193 free_dominance_info (CDI_DOMINATORS);
3194 bitmap_obstack_release (&tm_obstack);
3195 all_tm_regions = NULL;
3197 return 0;
3200 namespace {
3202 const pass_data pass_data_tm_edges =
3204 GIMPLE_PASS, /* type */
3205 "tmedge", /* name */
3206 OPTGROUP_NONE, /* optinfo_flags */
3207 false, /* has_gate */
3208 true, /* has_execute */
3209 TV_TRANS_MEM, /* tv_id */
3210 ( PROP_ssa | PROP_cfg ), /* properties_required */
3211 0, /* properties_provided */
3212 0, /* properties_destroyed */
3213 0, /* todo_flags_start */
3214 ( TODO_update_ssa | TODO_verify_ssa ), /* todo_flags_finish */
3217 class pass_tm_edges : public gimple_opt_pass
3219 public:
3220 pass_tm_edges (gcc::context *ctxt)
3221 : gimple_opt_pass (pass_data_tm_edges, ctxt)
3224 /* opt_pass methods: */
3225 unsigned int execute () { return execute_tm_edges (); }
3227 }; // class pass_tm_edges
3229 } // anon namespace
3231 gimple_opt_pass *
3232 make_pass_tm_edges (gcc::context *ctxt)
3234 return new pass_tm_edges (ctxt);
3237 /* Helper function for expand_regions. Expand REGION and recurse to
3238 the inner region. Call CALLBACK on each region. CALLBACK returns
3239 NULL to continue the traversal, otherwise a non-null value which
3240 this function will return as well. TRAVERSE_CLONES is true if we
3241 should traverse transactional clones. */
3243 static void *
3244 expand_regions_1 (struct tm_region *region,
3245 void *(*callback)(struct tm_region *, void *),
3246 void *data,
3247 bool traverse_clones)
3249 void *retval = NULL;
3250 if (region->exit_blocks
3251 || (traverse_clones && decl_is_tm_clone (current_function_decl)))
3253 retval = callback (region, data);
3254 if (retval)
3255 return retval;
3257 if (region->inner)
3259 retval = expand_regions (region->inner, callback, data, traverse_clones);
3260 if (retval)
3261 return retval;
3263 return retval;
3266 /* Traverse the regions enclosed and including REGION. Execute
3267 CALLBACK for each region, passing DATA. CALLBACK returns NULL to
3268 continue the traversal, otherwise a non-null value which this
3269 function will return as well. TRAVERSE_CLONES is true if we should
3270 traverse transactional clones. */
3272 static void *
3273 expand_regions (struct tm_region *region,
3274 void *(*callback)(struct tm_region *, void *),
3275 void *data,
3276 bool traverse_clones)
3278 void *retval = NULL;
3279 while (region)
3281 retval = expand_regions_1 (region, callback, data, traverse_clones);
3282 if (retval)
3283 return retval;
3284 region = region->next;
3286 return retval;
3290 /* A unique TM memory operation. */
3291 typedef struct tm_memop
3293 /* Unique ID that all memory operations to the same location have. */
3294 unsigned int value_id;
3295 /* Address of load/store. */
3296 tree addr;
3297 } *tm_memop_t;
3299 /* TM memory operation hashtable helpers. */
3301 struct tm_memop_hasher : typed_free_remove <tm_memop>
3303 typedef tm_memop value_type;
3304 typedef tm_memop compare_type;
3305 static inline hashval_t hash (const value_type *);
3306 static inline bool equal (const value_type *, const compare_type *);
3309 /* Htab support. Return a hash value for a `tm_memop'. */
3310 inline hashval_t
3311 tm_memop_hasher::hash (const value_type *mem)
3313 tree addr = mem->addr;
3314 /* We drill down to the SSA_NAME/DECL for the hash, but equality is
3315 actually done with operand_equal_p (see tm_memop_eq). */
3316 if (TREE_CODE (addr) == ADDR_EXPR)
3317 addr = TREE_OPERAND (addr, 0);
3318 return iterative_hash_expr (addr, 0);
3321 /* Htab support. Return true if two tm_memop's are the same. */
3322 inline bool
3323 tm_memop_hasher::equal (const value_type *mem1, const compare_type *mem2)
3325 return operand_equal_p (mem1->addr, mem2->addr, 0);
3328 /* Sets for solving data flow equations in the memory optimization pass. */
3329 struct tm_memopt_bitmaps
3331 /* Stores available to this BB upon entry. Basically, stores that
3332 dominate this BB. */
3333 bitmap store_avail_in;
3334 /* Stores available at the end of this BB. */
3335 bitmap store_avail_out;
3336 bitmap store_antic_in;
3337 bitmap store_antic_out;
3338 /* Reads available to this BB upon entry. Basically, reads that
3339 dominate this BB. */
3340 bitmap read_avail_in;
3341 /* Reads available at the end of this BB. */
3342 bitmap read_avail_out;
3343 /* Reads performed in this BB. */
3344 bitmap read_local;
3345 /* Writes performed in this BB. */
3346 bitmap store_local;
3348 /* Temporary storage for pass. */
3349 /* Is the current BB in the worklist? */
3350 bool avail_in_worklist_p;
3351 /* Have we visited this BB? */
3352 bool visited_p;
3355 static bitmap_obstack tm_memopt_obstack;
3357 /* Unique counter for TM loads and stores. Loads and stores of the
3358 same address get the same ID. */
3359 static unsigned int tm_memopt_value_id;
3360 static hash_table <tm_memop_hasher> tm_memopt_value_numbers;
3362 #define STORE_AVAIL_IN(BB) \
3363 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
3364 #define STORE_AVAIL_OUT(BB) \
3365 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
3366 #define STORE_ANTIC_IN(BB) \
3367 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
3368 #define STORE_ANTIC_OUT(BB) \
3369 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
3370 #define READ_AVAIL_IN(BB) \
3371 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
3372 #define READ_AVAIL_OUT(BB) \
3373 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
3374 #define READ_LOCAL(BB) \
3375 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
3376 #define STORE_LOCAL(BB) \
3377 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
3378 #define AVAIL_IN_WORKLIST_P(BB) \
3379 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
3380 #define BB_VISITED_P(BB) \
3381 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
3383 /* Given a TM load/store in STMT, return the value number for the address
3384 it accesses. */
3386 static unsigned int
3387 tm_memopt_value_number (gimple stmt, enum insert_option op)
3389 struct tm_memop tmpmem, *mem;
3390 tm_memop **slot;
3392 gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
3393 tmpmem.addr = gimple_call_arg (stmt, 0);
3394 slot = tm_memopt_value_numbers.find_slot (&tmpmem, op);
3395 if (*slot)
3396 mem = *slot;
3397 else if (op == INSERT)
3399 mem = XNEW (struct tm_memop);
3400 *slot = mem;
3401 mem->value_id = tm_memopt_value_id++;
3402 mem->addr = tmpmem.addr;
3404 else
3405 gcc_unreachable ();
3406 return mem->value_id;
3409 /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
3411 static void
3412 tm_memopt_accumulate_memops (basic_block bb)
3414 gimple_stmt_iterator gsi;
3416 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3418 gimple stmt = gsi_stmt (gsi);
3419 bitmap bits;
3420 unsigned int loc;
3422 if (is_tm_store (stmt))
3423 bits = STORE_LOCAL (bb);
3424 else if (is_tm_load (stmt))
3425 bits = READ_LOCAL (bb);
3426 else
3427 continue;
3429 loc = tm_memopt_value_number (stmt, INSERT);
3430 bitmap_set_bit (bits, loc);
3431 if (dump_file)
3433 fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
3434 is_tm_load (stmt) ? "LOAD" : "STORE", loc,
3435 gimple_bb (stmt)->index);
3436 print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
3437 fprintf (dump_file, "\n");
3442 /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */
3444 static void
3445 dump_tm_memopt_set (const char *set_name, bitmap bits)
3447 unsigned i;
3448 bitmap_iterator bi;
3449 const char *comma = "";
3451 fprintf (dump_file, "TM memopt: %s: [", set_name);
3452 EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
3454 hash_table <tm_memop_hasher>::iterator hi;
3455 struct tm_memop *mem = NULL;
3457 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
3458 FOR_EACH_HASH_TABLE_ELEMENT (tm_memopt_value_numbers, mem, tm_memop_t, hi)
3459 if (mem->value_id == i)
3460 break;
3461 gcc_assert (mem->value_id == i);
3462 fprintf (dump_file, "%s", comma);
3463 comma = ", ";
3464 print_generic_expr (dump_file, mem->addr, 0);
3466 fprintf (dump_file, "]\n");
3469 /* Prettily dump all of the memopt sets in BLOCKS. */
3471 static void
3472 dump_tm_memopt_sets (vec<basic_block> blocks)
3474 size_t i;
3475 basic_block bb;
3477 for (i = 0; blocks.iterate (i, &bb); ++i)
3479 fprintf (dump_file, "------------BB %d---------\n", bb->index);
3480 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
3481 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
3482 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
3483 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
3484 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
3485 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
3489 /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
3491 static void
3492 tm_memopt_compute_avin (basic_block bb)
3494 edge e;
3495 unsigned ix;
3497 /* Seed with the AVOUT of any predecessor. */
3498 for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
3500 e = EDGE_PRED (bb, ix);
3501 /* Make sure we have already visited this BB, and that it is thus
3502 initialized.
3504 If e->src->aux is NULL, this predecessor is actually on an
3505 enclosing transaction. We only care about the current
3506 transaction, so ignore it. */
3507 if (e->src->aux && BB_VISITED_P (e->src))
3509 bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3510 bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3511 break;
3515 for (; ix < EDGE_COUNT (bb->preds); ix++)
3517 e = EDGE_PRED (bb, ix);
3518 if (e->src->aux && BB_VISITED_P (e->src))
3520 bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3521 bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3525 BB_VISITED_P (bb) = true;
3528 /* Compute the STORE_ANTIC_IN for the basic block BB. */
3530 static void
3531 tm_memopt_compute_antin (basic_block bb)
3533 edge e;
3534 unsigned ix;
3536 /* Seed with the ANTIC_OUT of any successor. */
3537 for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
3539 e = EDGE_SUCC (bb, ix);
3540 /* Make sure we have already visited this BB, and that it is thus
3541 initialized. */
3542 if (BB_VISITED_P (e->dest))
3544 bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3545 break;
3549 for (; ix < EDGE_COUNT (bb->succs); ix++)
3551 e = EDGE_SUCC (bb, ix);
3552 if (BB_VISITED_P (e->dest))
3553 bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3556 BB_VISITED_P (bb) = true;
3559 /* Compute the AVAIL sets for every basic block in BLOCKS.
3561 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3563 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3564 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
3566 This is basically what we do in lcm's compute_available(), but here
3567 we calculate two sets of sets (one for STOREs and one for READs),
3568 and we work on a region instead of the entire CFG.
3570 REGION is the TM region.
3571 BLOCKS are the basic blocks in the region. */
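/* A tiny worked example (illustrative).  For a diamond region

          BB1  (writes *p)
         /    \
       BB2    BB3  (writes *q)
         \    /
          BB4

   STORE_AVAIL_OUT(BB2) = {p}, STORE_AVAIL_OUT(BB3) = {p,q}, and
   STORE_AVAIL_IN(BB4) = {p}: only *p is known to have been written on
   every path reaching BB4.  */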
3573 static void
3574 tm_memopt_compute_available (struct tm_region *region,
3575 vec<basic_block> blocks)
3577 edge e;
3578 basic_block *worklist, *qin, *qout, *qend, bb;
3579 unsigned int qlen, i;
3580 edge_iterator ei;
3581 bool changed;
3583 /* Allocate a worklist array/queue. Entries are only added to the
3584 list if they were not already on the list. So the size is
3585 bounded by the number of basic blocks in the region. */
3586 qlen = blocks.length () - 1;
3587 qin = qout = worklist =
3588 XNEWVEC (basic_block, qlen);
3590 /* Put every block in the region on the worklist. */
3591 for (i = 0; blocks.iterate (i, &bb); ++i)
3593 /* Seed AVAIL_OUT with the LOCAL set. */
3594 bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
3595 bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
3597 AVAIL_IN_WORKLIST_P (bb) = true;
3598 /* No need to insert the entry block, since it has an AVIN of
3599 null, and an AVOUT that has already been seeded in. */
3600 if (bb != region->entry_block)
3601 *qin++ = bb;
3604 /* The entry block has been initialized with the local sets. */
3605 BB_VISITED_P (region->entry_block) = true;
3607 qin = worklist;
3608 qend = &worklist[qlen];
3610 /* Iterate until the worklist is empty. */
3611 while (qlen)
3613 /* Take the first entry off the worklist. */
3614 bb = *qout++;
3615 qlen--;
3617 if (qout >= qend)
3618 qout = worklist;
3620 /* This block can be added to the worklist again if necessary. */
3621 AVAIL_IN_WORKLIST_P (bb) = false;
3622 tm_memopt_compute_avin (bb);
3624 /* Note: We do not add the LOCAL sets here because we already
3625 seeded the AVAIL_OUT sets with them. */
3626 changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
3627 changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
3628 if (changed
3629 && (region->exit_blocks == NULL
3630 || !bitmap_bit_p (region->exit_blocks, bb->index)))
3631 /* If the out state of this block changed, then we need to add
3632 its successors to the worklist if they are not already in. */
3633 FOR_EACH_EDGE (e, ei, bb->succs)
3634 if (!AVAIL_IN_WORKLIST_P (e->dest)
3635 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3637 *qin++ = e->dest;
3638 AVAIL_IN_WORKLIST_P (e->dest) = true;
3639 qlen++;
3641 if (qin >= qend)
3642 qin = worklist;
3646 free (worklist);
3648 if (dump_file)
3649 dump_tm_memopt_sets (blocks);
3652 /* Compute ANTIC sets for every basic block in BLOCKS.
3654 We compute STORE_ANTIC_OUT as follows:
3656 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
3657 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
3659 REGION is the TM region.
3660 BLOCKS are the basic blocks in the region. */
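/* Continuing the diamond example above (illustrative): if only BB3
   writes *q then STORE_ANTIC_IN(BB1) is empty, but if both BB2 and BB3
   write *q then STORE_ANTIC_IN(BB1) = {q}; *q is written on every path
   leaving BB1, which is what allows a read of *q in BB1 to become a
   read-for-write below.  */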
3662 static void
3663 tm_memopt_compute_antic (struct tm_region *region,
3664 vec<basic_block> blocks)
3666 edge e;
3667 basic_block *worklist, *qin, *qout, *qend, bb;
3668 unsigned int qlen;
3669 int i;
3670 edge_iterator ei;
3672 /* Allocate a worklist array/queue. Entries are only added to the
3673 list if they were not already on the list. So the size is
3674 bounded by the number of basic blocks in the region. */
3675 qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());
3677 for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
3679 bb = blocks[i];
3681 /* Seed ANTIC_OUT with the LOCAL set. */
3682 bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
3684 /* Put every block in the region on the worklist. */
3685 AVAIL_IN_WORKLIST_P (bb) = true;
3686 /* No need to insert exit blocks, since their ANTIC_IN is NULL,
3687 and their ANTIC_OUT has already been seeded in. */
3688 if (region->exit_blocks
3689 && !bitmap_bit_p (region->exit_blocks, bb->index))
3691 qlen++;
3692 *qin++ = bb;
3696 /* The exit blocks have been initialized with the local sets. */
3697 if (region->exit_blocks)
3699 unsigned int i;
3700 bitmap_iterator bi;
3701 EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
3702 BB_VISITED_P (BASIC_BLOCK (i)) = true;
3705 qin = worklist;
3706 qend = &worklist[qlen];
3708 /* Iterate until the worklist is empty. */
3709 while (qlen)
3711 /* Take the first entry off the worklist. */
3712 bb = *qout++;
3713 qlen--;
3715 if (qout >= qend)
3716 qout = worklist;
3718 /* This block can be added to the worklist again if necessary. */
3719 AVAIL_IN_WORKLIST_P (bb) = false;
3720 tm_memopt_compute_antin (bb);
3722 /* Note: We do not add the LOCAL sets here because we already
3723 seeded the ANTIC_OUT sets with them. */
3724 if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
3725 && bb != region->entry_block)
3726 /* If the out state of this block changed, then we need to add
3727 its predecessors to the worklist if they are not already in. */
3728 FOR_EACH_EDGE (e, ei, bb->preds)
3729 if (!AVAIL_IN_WORKLIST_P (e->src))
3731 *qin++ = e->src;
3732 AVAIL_IN_WORKLIST_P (e->src) = true;
3733 qlen++;
3735 if (qin >= qend)
3736 qin = worklist;
3740 free (worklist);
3742 if (dump_file)
3743 dump_tm_memopt_sets (blocks);
3746 /* Offsets of load variants from TM_LOAD. For example,
3747 BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
3748 See gtm-builtins.def. */
3749 #define TRANSFORM_RAR 1
3750 #define TRANSFORM_RAW 2
3751 #define TRANSFORM_RFW 3
3752 /* Offsets of store variants from TM_STORE. */
3753 #define TRANSFORM_WAR 1
3754 #define TRANSFORM_WAW 2
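/* So, illustratively, a load whose address is already in STORE_AVAIL is
   rewritten as TM_LOAD + TRANSFORM_RAW (read-after-write), and a store
   over an address in READ_AVAIL as TM_STORE + TRANSFORM_WAR
   (write-after-read); the rewrite simply bumps the builtin's function
   code by the offset, relying on the ordering in gtm-builtins.def.  */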
3756 /* Inform about a load/store optimization. */
3758 static void
3759 dump_tm_memopt_transform (gimple stmt)
3761 if (dump_file)
3763 fprintf (dump_file, "TM memopt: transforming: ");
3764 print_gimple_stmt (dump_file, stmt, 0, 0);
3765 fprintf (dump_file, "\n");
3769 /* Perform a read/write optimization. Replaces the TM builtin in STMT
3770 by a builtin that is OFFSET entries down in the builtins table in
3771 gtm-builtins.def. */
3773 static void
3774 tm_memopt_transform_stmt (unsigned int offset,
3775 gimple stmt,
3776 gimple_stmt_iterator *gsi)
3778 tree fn = gimple_call_fn (stmt);
3779 gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
3780 TREE_OPERAND (fn, 0)
3781 = builtin_decl_explicit ((enum built_in_function)
3782 (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
3783 + offset));
3784 gimple_call_set_fn (stmt, fn);
3785 gsi_replace (gsi, stmt, true);
3786 dump_tm_memopt_transform (stmt);
3789 /* Perform the actual TM memory optimization transformations in the
3790 basic blocks in BLOCKS. */
3792 static void
3793 tm_memopt_transform_blocks (vec<basic_block> blocks)
3795 size_t i;
3796 basic_block bb;
3797 gimple_stmt_iterator gsi;
3799 for (i = 0; blocks.iterate (i, &bb); ++i)
3801 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3803 gimple stmt = gsi_stmt (gsi);
3804 bitmap read_avail = READ_AVAIL_IN (bb);
3805 bitmap store_avail = STORE_AVAIL_IN (bb);
3806 bitmap store_antic = STORE_ANTIC_OUT (bb);
3807 unsigned int loc;
3809 if (is_tm_simple_load (stmt))
3811 loc = tm_memopt_value_number (stmt, NO_INSERT);
3812 if (store_avail && bitmap_bit_p (store_avail, loc))
3813 tm_memopt_transform_stmt (TRANSFORM_RAW, stmt, &gsi);
3814 else if (store_antic && bitmap_bit_p (store_antic, loc))
3816 tm_memopt_transform_stmt (TRANSFORM_RFW, stmt, &gsi);
3817 bitmap_set_bit (store_avail, loc);
3819 else if (read_avail && bitmap_bit_p (read_avail, loc))
3820 tm_memopt_transform_stmt (TRANSFORM_RAR, stmt, &gsi);
3821 else
3822 bitmap_set_bit (read_avail, loc);
3824 else if (is_tm_simple_store (stmt))
3826 loc = tm_memopt_value_number (stmt, NO_INSERT);
3827 if (store_avail && bitmap_bit_p (store_avail, loc))
3828 tm_memopt_transform_stmt (TRANSFORM_WAW, stmt, &gsi);
3829 else
3831 if (read_avail && bitmap_bit_p (read_avail, loc))
3832 tm_memopt_transform_stmt (TRANSFORM_WAR, stmt, &gsi);
3833 bitmap_set_bit (store_avail, loc);
3840 /* Return a new set of bitmaps for a BB. */
3842 static struct tm_memopt_bitmaps *
3843 tm_memopt_init_sets (void)
3845 struct tm_memopt_bitmaps *b
3846 = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
3847 b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3848 b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3849 b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
3850 b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
3852 b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3853 b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3854 b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
3855 b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
3856 return b;
3859 /* Free sets computed for each BB. */
3861 static void
3862 tm_memopt_free_sets (vec<basic_block> blocks)
3864 size_t i;
3865 basic_block bb;
3867 for (i = 0; blocks.iterate (i, &bb); ++i)
3868 bb->aux = NULL;
3871 /* Clear the visited bit for every basic block in BLOCKS. */
3873 static void
3874 tm_memopt_clear_visited (vec<basic_block> blocks)
3876 size_t i;
3877 basic_block bb;
3879 for (i = 0; blocks.iterate (i, &bb); ++i)
3880 BB_VISITED_P (bb) = false;
3883 /* Replace TM load/stores with hints for the runtime. We handle
3884 things like read-after-write, write-after-read, read-after-read,
3885 read-for-write, etc. */
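/* For instance (a sketch), within one transaction

     ... = *p;      // plain TM load
     *p  = ...;     // makes *p available as a store
     ... = *p;      // can become the read-after-write variant

   the last load's builtin is replaced by its RAW counterpart, giving
   the runtime a read-after-write hint for that location.  */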
3887 static unsigned int
3888 execute_tm_memopt (void)
3890 struct tm_region *region;
3891 vec<basic_block> bbs;
3893 tm_memopt_value_id = 0;
3894 tm_memopt_value_numbers.create (10);
3896 for (region = all_tm_regions; region; region = region->next)
3898 /* All the TM stores/loads in the current region. */
3899 size_t i;
3900 basic_block bb;
3902 bitmap_obstack_initialize (&tm_memopt_obstack);
3904 /* Save all BBs for the current region. */
3905 bbs = get_tm_region_blocks (region->entry_block,
3906 region->exit_blocks,
3907 region->irr_blocks,
3908 NULL,
3909 false);
3911 /* Collect all the memory operations. */
3912 for (i = 0; bbs.iterate (i, &bb); ++i)
3914 bb->aux = tm_memopt_init_sets ();
3915 tm_memopt_accumulate_memops (bb);
3918 /* Solve data flow equations and transform each block accordingly. */
3919 tm_memopt_clear_visited (bbs);
3920 tm_memopt_compute_available (region, bbs);
3921 tm_memopt_clear_visited (bbs);
3922 tm_memopt_compute_antic (region, bbs);
3923 tm_memopt_transform_blocks (bbs);
3925 tm_memopt_free_sets (bbs);
3926 bbs.release ();
3927 bitmap_obstack_release (&tm_memopt_obstack);
3928 tm_memopt_value_numbers.empty ();
3931 tm_memopt_value_numbers.dispose ();
3932 return 0;
3935 static bool
3936 gate_tm_memopt (void)
3938 return flag_tm && optimize > 0;
3941 namespace {
3943 const pass_data pass_data_tm_memopt =
3945 GIMPLE_PASS, /* type */
3946 "tmmemopt", /* name */
3947 OPTGROUP_NONE, /* optinfo_flags */
3948 true, /* has_gate */
3949 true, /* has_execute */
3950 TV_TRANS_MEM, /* tv_id */
3951 ( PROP_ssa | PROP_cfg ), /* properties_required */
3952 0, /* properties_provided */
3953 0, /* properties_destroyed */
3954 0, /* todo_flags_start */
3955 0, /* todo_flags_finish */
3958 class pass_tm_memopt : public gimple_opt_pass
3960 public:
3961 pass_tm_memopt (gcc::context *ctxt)
3962 : gimple_opt_pass (pass_data_tm_memopt, ctxt)
3965 /* opt_pass methods: */
3966 bool gate () { return gate_tm_memopt (); }
3967 unsigned int execute () { return execute_tm_memopt (); }
3969 }; // class pass_tm_memopt
3971 } // anon namespace
3973 gimple_opt_pass *
3974 make_pass_tm_memopt (gcc::context *ctxt)
3976 return new pass_tm_memopt (ctxt);
3980 /* Interprocedural analysis for the creation of transactional clones.
3981 The aim of this pass is to find which functions are referenced in
3982 a non-irrevocable transaction context, and for those over which
3983 we have control (or user directive), create a version of the
3984 function which uses only the transactional interface to reference
3985 protected memories. This analysis proceeds in several steps:
3987 (1) Collect the set of all possible transactional clones:
3989 (a) For each local public function marked tm_callable, push
3990 it onto the tm_callee queue.
3992 (b) For all local functions, scan for calls in transaction blocks.
3993 Push the caller and callee onto the tm_caller and tm_callee
3994 queues. Count the number of callers for each callee.
3996 (c) For each local function on the callee list, assume we will
3997 create a transactional clone. Push *all* calls onto the
3998 callee queues; count the number of clone callers separately
3999 from the number of original callers.
4001 (2) Propagate irrevocable status up the dominator tree:
4003 (a) Any external function on the callee list that is not marked
4004 tm_callable is irrevocable. Push all callers of such onto
4005 a worklist.
4007 (b) For each function on the worklist, mark each block that
4008 contains an irrevocable call. Use the AND operator to
4009 propagate that mark up the dominator tree.
4011 (c) If we reach the entry block for a possible transactional
4012 clone, then the transactional clone is irrevocable, and
4013 we should not create the clone after all. Push all
4014 callers onto the worklist.
4016 (d) Place tm_irrevocable calls at the beginning of the relevant
4017 blocks. A special case is the entry block for the entire
4018 transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
4019 the library to begin the region in serial mode. Decrement
4020 the call count for all callees in the irrevocable region.
4022 (3) Create the transactional clones:
4024 Any tm_callee that still has a non-zero call count is cloned.
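/* A worked example of the above (a sketch; the clone's assembler name
   comes from tm_mangle below and is shown for a plain C symbol):

     __attribute__((transaction_callable))
     void foo (void) { ... }

     void bar (void)
     {
       __transaction_relaxed { foo (); }
     }

   Step (1) queues foo as a candidate for a transactional clone, step (2)
   finds nothing irrevocable in its body, and step (3) therefore creates
   a clone (roughly "_ZGTt3foo") whose body uses the instrumented TM
   interface; the call inside bar's transaction is then redirected to
   that clone by ipa_tm_transform_transaction.  */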
4027 /* This structure is stored in the AUX field of each cgraph_node. */
4028 struct tm_ipa_cg_data
4030 /* The clone of the function that got created. */
4031 struct cgraph_node *clone;
4033 /* The tm regions in the normal function. */
4034 struct tm_region *all_tm_regions;
4036 /* The blocks of the normal/clone functions that contain irrevocable
4037 calls, or blocks that are post-dominated by irrevocable calls. */
4038 bitmap irrevocable_blocks_normal;
4039 bitmap irrevocable_blocks_clone;
4041 /* The blocks of the normal function that are involved in transactions. */
4042 bitmap transaction_blocks_normal;
4044 /* The number of callers to the transactional clone of this function
4045 from normal and transactional clones respectively. */
4046 unsigned tm_callers_normal;
4047 unsigned tm_callers_clone;
4049 /* True if all calls to this function's transactional clone
4050 are irrevocable. Also automatically true if the function
4051 has no transactional clone. */
4052 bool is_irrevocable;
4054 /* Flags indicating the presence of this function in various queues. */
4055 bool in_callee_queue;
4056 bool in_worklist;
4058 /* Flags indicating the kind of scan desired while in the worklist. */
4059 bool want_irr_scan_normal;
4062 typedef vec<cgraph_node_ptr> cgraph_node_queue;
4064 /* Return the ipa data associated with NODE, allocating zeroed memory
4065 if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
4066 and set *NODE accordingly. */
4068 static struct tm_ipa_cg_data *
4069 get_cg_data (struct cgraph_node **node, bool traverse_aliases)
4071 struct tm_ipa_cg_data *d;
4073 if (traverse_aliases && (*node)->alias)
4074 *node = cgraph_alias_target (*node);
4076 d = (struct tm_ipa_cg_data *) (*node)->aux;
4078 if (d == NULL)
4080 d = (struct tm_ipa_cg_data *)
4081 obstack_alloc (&tm_obstack.obstack, sizeof (*d));
4082 (*node)->aux = (void *) d;
4083 memset (d, 0, sizeof (*d));
4086 return d;
4089 /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
4090 it is already present. */
4092 static void
4093 maybe_push_queue (struct cgraph_node *node,
4094 cgraph_node_queue *queue_p, bool *in_queue_p)
4096 if (!*in_queue_p)
4098 *in_queue_p = true;
4099 queue_p->safe_push (node);
4103 /* Duplicate the basic blocks in QUEUE for use in the uninstrumented
4104 code path. QUEUE are the basic blocks inside the transaction
4105 represented in REGION.
4107 Later in split_code_paths() we will add the conditional to choose
4108 between the two alternatives. */
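/* For example (a sketch): given

     __transaction_relaxed { x++; }

   the blocks making up the transaction body are duplicated and the copy
   is reached from the transaction's block via an EDGE_TM_UNINSTRUMENTED
   edge.  The original body will later receive instrumented memory
   accesses while the copy will not; the run-time choice between the two
   paths is the conditional mentioned above.  */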
4110 static void
4111 ipa_uninstrument_transaction (struct tm_region *region,
4112 vec<basic_block> queue)
4114 gimple transaction = region->transaction_stmt;
4115 basic_block transaction_bb = gimple_bb (transaction);
4116 int n = queue.length ();
4117 basic_block *new_bbs = XNEWVEC (basic_block, n);
4119 copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb,
4120 true);
4121 edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED);
4122 add_phi_args_after_copy (new_bbs, n, e);
4124 // Now we will have a GIMPLE_TRANSACTION with 3 possible edges out of it.
4125 // a) EDGE_FALLTHRU into the transaction
4126 // b) EDGE_TM_ABORT out of the transaction
4127 // c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks.
4129 free (new_bbs);
4132 /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
4133 Queue all callees within block BB. */
4135 static void
4136 ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
4137 basic_block bb, bool for_clone)
4139 gimple_stmt_iterator gsi;
4141 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4143 gimple stmt = gsi_stmt (gsi);
4144 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4146 tree fndecl = gimple_call_fndecl (stmt);
4147 if (fndecl)
4149 struct tm_ipa_cg_data *d;
4150 unsigned *pcallers;
4151 struct cgraph_node *node;
4153 if (is_tm_ending_fndecl (fndecl))
4154 continue;
4155 if (find_tm_replacement_function (fndecl))
4156 continue;
4158 node = cgraph_get_node (fndecl);
4159 gcc_assert (node != NULL);
4160 d = get_cg_data (&node, true);
4162 pcallers = (for_clone ? &d->tm_callers_clone
4163 : &d->tm_callers_normal);
4164 *pcallers += 1;
4166 maybe_push_queue (node, callees_p, &d->in_callee_queue);
4172 /* Scan all calls in NODE that are within a transaction region,
4173 and push the resulting nodes into the callee queue. */
4175 static void
4176 ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
4177 cgraph_node_queue *callees_p)
4179 struct tm_region *r;
4181 d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
4182 d->all_tm_regions = all_tm_regions;
4184 for (r = all_tm_regions; r; r = r->next)
4186 vec<basic_block> bbs;
4187 basic_block bb;
4188 unsigned i;
4190 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
4191 d->transaction_blocks_normal, false);
4193 // Generate the uninstrumented code path for this transaction.
4194 ipa_uninstrument_transaction (r, bbs);
4196 FOR_EACH_VEC_ELT (bbs, i, bb)
4197 ipa_tm_scan_calls_block (callees_p, bb, false);
4199 bbs.release ();
4202 // ??? copy_bbs should maintain cgraph edges for the blocks as it is
4203 // copying them, rather than forcing us to do this externally.
4204 rebuild_cgraph_edges ();
4206 // ??? In ipa_uninstrument_transaction we don't try to update dominators
4207 // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects.
4208 // Instead, just release dominators here so update_ssa recomputes them.
4209 free_dominance_info (CDI_DOMINATORS);
4211 // When building the uninstrumented code path, copy_bbs will have invoked
4212 // create_new_def_for starting an "ssa update context". There is only one
4213 // instance of this context, so resolve ssa updates before moving on to
4214 // the next function.
4215 update_ssa (TODO_update_ssa);
4218 /* Scan all calls in NODE as if this is the transactional clone,
4219 and push the destinations into the callee queue. */
4221 static void
4222 ipa_tm_scan_calls_clone (struct cgraph_node *node,
4223 cgraph_node_queue *callees_p)
4225 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
4226 basic_block bb;
4228 FOR_EACH_BB_FN (bb, fn)
4229 ipa_tm_scan_calls_block (callees_p, bb, true);
4232 /* The function NODE has been detected to be irrevocable. Push all
4233 of its callers onto WORKLIST for the purpose of re-scanning them. */
4235 static void
4236 ipa_tm_note_irrevocable (struct cgraph_node *node,
4237 cgraph_node_queue *worklist_p)
4239 struct tm_ipa_cg_data *d = get_cg_data (&node, true);
4240 struct cgraph_edge *e;
4242 d->is_irrevocable = true;
4244 for (e = node->callers; e ; e = e->next_caller)
4246 basic_block bb;
4247 struct cgraph_node *caller;
4249 /* Don't examine recursive calls. */
4250 if (e->caller == node)
4251 continue;
4252 /* Even if we think we can go irrevocable, believe the user
4253 above all. */
4254 if (is_tm_safe_or_pure (e->caller->decl))
4255 continue;
4257 caller = e->caller;
4258 d = get_cg_data (&caller, true);
4260 /* Check if the call site is within a transactional region of the
4261 caller. If so, schedule the caller for normal re-scan as well. */
4262 bb = gimple_bb (e->call_stmt);
4263 gcc_assert (bb != NULL);
4264 if (d->transaction_blocks_normal
4265 && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
4266 d->want_irr_scan_normal = true;
4268 maybe_push_queue (caller, worklist_p, &d->in_worklist);
4272 /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
4273 within the block is irrevocable. */
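/* For example (a sketch):

     volatile int busy;
     ...
     __transaction_relaxed { busy = 1; }

   The volatile store makes the block irrevocable, so the enclosing
   transaction will be set up to run in serial irrevocable mode rather
   than with instrumented accesses.  An asm statement has the same
   effect (and is additionally an error in a transaction_safe function,
   as handled below).  */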
4275 static bool
4276 ipa_tm_scan_irr_block (basic_block bb)
4278 gimple_stmt_iterator gsi;
4279 tree fn;
4281 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4283 gimple stmt = gsi_stmt (gsi);
4284 switch (gimple_code (stmt))
4286 case GIMPLE_ASSIGN:
4287 if (gimple_assign_single_p (stmt))
4289 tree lhs = gimple_assign_lhs (stmt);
4290 tree rhs = gimple_assign_rhs1 (stmt);
4291 if (volatile_var_p (lhs) || volatile_var_p (rhs))
4292 return true;
4294 break;
4296 case GIMPLE_CALL:
4298 tree lhs = gimple_call_lhs (stmt);
4299 if (lhs && volatile_var_p (lhs))
4300 return true;
4302 if (is_tm_pure_call (stmt))
4303 break;
4305 fn = gimple_call_fn (stmt);
4307 /* Functions with the attribute are by definition irrevocable. */
4308 if (is_tm_irrevocable (fn))
4309 return true;
4311 /* For direct function calls, go ahead and check for replacement
4312 functions, or transitive irrevocable functions. For indirect
4313 functions, we'll ask the runtime. */
4314 if (TREE_CODE (fn) == ADDR_EXPR)
4316 struct tm_ipa_cg_data *d;
4317 struct cgraph_node *node;
4319 fn = TREE_OPERAND (fn, 0);
4320 if (is_tm_ending_fndecl (fn))
4321 break;
4322 if (find_tm_replacement_function (fn))
4323 break;
4325 node = cgraph_get_node (fn);
4326 d = get_cg_data (&node, true);
4328 /* Return true if irrevocable, but above all, believe
4329 the user. */
4330 if (d->is_irrevocable
4331 && !is_tm_safe_or_pure (fn))
4332 return true;
4334 break;
4337 case GIMPLE_ASM:
4338 /* ??? The Approved Method of indicating that an inline
4339 assembly statement is not relevant to the transaction
4340 is to wrap it in a __tm_waiver block. This is not
4341 yet implemented, so we can't check for it. */
4342 if (is_tm_safe (current_function_decl))
4344 tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
4345 SET_EXPR_LOCATION (t, gimple_location (stmt));
4346 error ("%Kasm not allowed in %<transaction_safe%> function", t);
4348 return true;
4350 default:
4351 break;
4355 return false;
4358 /* For each of the blocks seeded within PQUEUE, walk the CFG looking
4359 for new irrevocable blocks, marking them in NEW_IRR. Don't bother
4360 scanning past OLD_IRR or EXIT_BLOCKS. */
4362 static bool
4363 ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
4364 bitmap old_irr, bitmap exit_blocks)
4366 bool any_new_irr = false;
4367 edge e;
4368 edge_iterator ei;
4369 bitmap visited_blocks = BITMAP_ALLOC (NULL);
4373 basic_block bb = pqueue->pop ();
4375 /* Don't re-scan blocks we know already are irrevocable. */
4376 if (old_irr && bitmap_bit_p (old_irr, bb->index))
4377 continue;
4379 if (ipa_tm_scan_irr_block (bb))
4381 bitmap_set_bit (new_irr, bb->index);
4382 any_new_irr = true;
4384 else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
4386 FOR_EACH_EDGE (e, ei, bb->succs)
4387 if (!bitmap_bit_p (visited_blocks, e->dest->index))
4389 bitmap_set_bit (visited_blocks, e->dest->index);
4390 pqueue->safe_push (e->dest);
4394 while (!pqueue->is_empty ());
4396 BITMAP_FREE (visited_blocks);
4398 return any_new_irr;
4401 /* Propagate the irrevocable property both up and down the dominator tree.
4402 ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
4403 are its exit blocks; OLD_IRR are the results of a previous scan of the dominator
4404 tree which has been fully propagated; NEW_IRR is the set of new blocks
4405 which are gaining the irrevocable property during the current scan. */
4407 static void
4408 ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
4409 bitmap old_irr, bitmap exit_blocks)
4411 vec<basic_block> bbs;
4412 bitmap all_region_blocks;
4414 /* If this block is in the old set, no need to rescan. */
4415 if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
4416 return;
4418 all_region_blocks = BITMAP_ALLOC (&tm_obstack);
4419 bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
4420 all_region_blocks, false);
4423 basic_block bb = bbs.pop ();
4424 bool this_irr = bitmap_bit_p (new_irr, bb->index);
4425 bool all_son_irr = false;
4426 edge_iterator ei;
4427 edge e;
4429 /* Propagate up. If my children are, I am too, but we must have
4430 at least one child that is. */
4431 if (!this_irr)
4433 FOR_EACH_EDGE (e, ei, bb->succs)
4435 if (!bitmap_bit_p (new_irr, e->dest->index))
4437 all_son_irr = false;
4438 break;
4440 else
4441 all_son_irr = true;
4443 if (all_son_irr)
4445 /* Add block to new_irr if it hasn't already been processed. */
4446 if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
4448 bitmap_set_bit (new_irr, bb->index);
4449 this_irr = true;
4454 /* Propagate down to everyone we immediately dominate. */
4455 if (this_irr)
4457 basic_block son;
4458 for (son = first_dom_son (CDI_DOMINATORS, bb);
4459 son;
4460 son = next_dom_son (CDI_DOMINATORS, son))
4462 /* Make sure block is actually in a TM region, and it
4463 isn't already in old_irr. */
4464 if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
4465 && bitmap_bit_p (all_region_blocks, son->index))
4466 bitmap_set_bit (new_irr, son->index);
4470 while (!bbs.is_empty ());
4472 BITMAP_FREE (all_region_blocks);
4473 bbs.release ();
4476 static void
4477 ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
4479 gimple_stmt_iterator gsi;
4481 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4483 gimple stmt = gsi_stmt (gsi);
4484 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4486 tree fndecl = gimple_call_fndecl (stmt);
4487 if (fndecl)
4489 struct tm_ipa_cg_data *d;
4490 unsigned *pcallers;
4491 struct cgraph_node *tnode;
4493 if (is_tm_ending_fndecl (fndecl))
4494 continue;
4495 if (find_tm_replacement_function (fndecl))
4496 continue;
4498 tnode = cgraph_get_node (fndecl);
4499 d = get_cg_data (&tnode, true);
4501 pcallers = (for_clone ? &d->tm_callers_clone
4502 : &d->tm_callers_normal);
4504 gcc_assert (*pcallers > 0);
4505 *pcallers -= 1;
4511 /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
4512 as well as other irrevocable actions such as inline assembly. Mark all
4513 such blocks as irrevocable and decrement the number of calls to
4514 transactional clones. Return true if, for the transactional clone, the
4515 entire function is irrevocable. */
4517 static bool
4518 ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
4520 struct tm_ipa_cg_data *d;
4521 bitmap new_irr, old_irr;
4522 bool ret = false;
4524 /* Builtin operators (operator new, and such). */
4525 if (DECL_STRUCT_FUNCTION (node->decl) == NULL
4526 || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
4527 return false;
4529 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4530 calculate_dominance_info (CDI_DOMINATORS);
4532 d = get_cg_data (&node, true);
4533 stack_vec<basic_block, 10> queue;
4534 new_irr = BITMAP_ALLOC (&tm_obstack);
4536 /* Scan each tm region, propagating irrevocable status through the tree. */
4537 if (for_clone)
4539 old_irr = d->irrevocable_blocks_clone;
4540 queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4541 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
4543 ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
4544 new_irr,
4545 old_irr, NULL);
4546 ret = bitmap_bit_p (new_irr,
4547 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
4550 else
4552 struct tm_region *region;
4554 old_irr = d->irrevocable_blocks_normal;
4555 for (region = d->all_tm_regions; region; region = region->next)
4557 queue.quick_push (region->entry_block);
4558 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
4559 region->exit_blocks))
4560 ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
4561 region->exit_blocks);
4565 /* If we found any new irrevocable blocks, reduce the call count for
4566 transactional clones within the irrevocable blocks. Save the new
4567 set of irrevocable blocks for next time. */
4568 if (!bitmap_empty_p (new_irr))
4570 bitmap_iterator bmi;
4571 unsigned i;
4573 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4574 ipa_tm_decrement_clone_counts (BASIC_BLOCK (i), for_clone);
4576 if (old_irr)
4578 bitmap_ior_into (old_irr, new_irr);
4579 BITMAP_FREE (new_irr);
4581 else if (for_clone)
4582 d->irrevocable_blocks_clone = new_irr;
4583 else
4584 d->irrevocable_blocks_normal = new_irr;
4586 if (dump_file && new_irr)
4588 const char *dname;
4589 bitmap_iterator bmi;
4590 unsigned i;
4592 dname = lang_hooks.decl_printable_name (current_function_decl, 2);
4593 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4594 fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
4597 else
4598 BITMAP_FREE (new_irr);
4600 pop_cfun ();
4602 return ret;
4605 /* Return true if, for the transactional clone of NODE, any call
4606 may enter irrevocable mode. */
4608 static bool
4609 ipa_tm_mayenterirr_function (struct cgraph_node *node)
4611 struct tm_ipa_cg_data *d;
4612 tree decl;
4613 unsigned flags;
4615 d = get_cg_data (&node, true);
4616 decl = node->decl;
4617 flags = flags_from_decl_or_type (decl);
4619 /* Handle some TM builtins. Ordinarily these aren't actually generated
4620 at this point, but handling these functions when written in by the
4621 user makes it easier to build unit tests. */
4622 if (flags & ECF_TM_BUILTIN)
4623 return false;
4625 /* Filter out all functions that are marked. */
4626 if (flags & ECF_TM_PURE)
4627 return false;
4628 if (is_tm_safe (decl))
4629 return false;
4630 if (is_tm_irrevocable (decl))
4631 return true;
4632 if (is_tm_callable (decl))
4633 return true;
4634 if (find_tm_replacement_function (decl))
4635 return true;
4637 /* If we aren't seeing the final version of the function we don't
4638 know what it will contain at runtime. */
4639 if (cgraph_function_body_availability (node) < AVAIL_AVAILABLE)
4640 return true;
4642 /* If the function must go irrevocable, then of course true. */
4643 if (d->is_irrevocable)
4644 return true;
4646 /* If there are any blocks marked irrevocable, then the function
4647 as a whole may enter irrevocable. */
4648 if (d->irrevocable_blocks_clone)
4649 return true;
4651 /* We may have previously marked this function as tm_may_enter_irr;
4652 see pass_diagnose_tm_blocks. */
4653 if (node->local.tm_may_enter_irr)
4654 return true;
4656 /* Recurse on the main body for aliases. In general, this will
4657 result in one of the bits above being set so that we will not
4658 have to recurse next time. */
4659 if (node->alias)
4660 return ipa_tm_mayenterirr_function (cgraph_get_node (node->thunk.alias));
4662 /* What remains are unmarked local functions containing nothing that forces
4663 them to go irrevocable. */
4664 return false;
4667 /* Diagnose calls from transaction_safe functions to unmarked
4668 functions that are determined to not be safe. */
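/* For example (a sketch; assumes unlocked_io has been determined to
   possibly enter irrevocable mode and carries no TM attribute):

     extern void unlocked_io (void);

     __attribute__((transaction_safe))
     void logger (void) { unlocked_io (); }

   The call in logger is diagnosed with "unsafe function call ...
   within 'transaction_safe' function"; marking unlocked_io
   transaction_callable avoids this particular diagnostic.  */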
4670 static void
4671 ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
4673 struct cgraph_edge *e;
4675 for (e = node->callees; e ; e = e->next_callee)
4676 if (!is_tm_callable (e->callee->decl)
4677 && e->callee->local.tm_may_enter_irr)
4678 error_at (gimple_location (e->call_stmt),
4679 "unsafe function call %qD within "
4680 "%<transaction_safe%> function", e->callee->decl);
4683 /* Diagnose calls from atomic transactions to unmarked functions
4684 that are determined to not be safe. */
4686 static void
4687 ipa_tm_diagnose_transaction (struct cgraph_node *node,
4688 struct tm_region *all_tm_regions)
4690 struct tm_region *r;
4692 for (r = all_tm_regions; r ; r = r->next)
4693 if (gimple_transaction_subcode (r->transaction_stmt) & GTMA_IS_RELAXED)
4695 /* Atomic transactions can be nested inside relaxed. */
4696 if (r->inner)
4697 ipa_tm_diagnose_transaction (node, r->inner);
4699 else
4701 vec<basic_block> bbs;
4702 gimple_stmt_iterator gsi;
4703 basic_block bb;
4704 size_t i;
4706 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
4707 r->irr_blocks, NULL, false);
4709 for (i = 0; bbs.iterate (i, &bb); ++i)
4710 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4712 gimple stmt = gsi_stmt (gsi);
4713 tree fndecl;
4715 if (gimple_code (stmt) == GIMPLE_ASM)
4717 error_at (gimple_location (stmt),
4718 "asm not allowed in atomic transaction");
4719 continue;
4722 if (!is_gimple_call (stmt))
4723 continue;
4724 fndecl = gimple_call_fndecl (stmt);
4726 /* Indirect function calls have been diagnosed already. */
4727 if (!fndecl)
4728 continue;
4730 /* Stop at the end of the transaction. */
4731 if (is_tm_ending_fndecl (fndecl))
4733 if (bitmap_bit_p (r->exit_blocks, bb->index))
4734 break;
4735 continue;
4738 /* Marked functions have been diagnosed already. */
4739 if (is_tm_pure_call (stmt))
4740 continue;
4741 if (is_tm_callable (fndecl))
4742 continue;
4744 if (cgraph_local_info (fndecl)->tm_may_enter_irr)
4745 error_at (gimple_location (stmt),
4746 "unsafe function call %qD within "
4747 "atomic transaction", fndecl);
4750 bbs.release ();
4754 /* Return a transactionally mangled name for the assembler name OLD_ASM_ID
4755 (typically the DECL_ASSEMBLER_NAME of the decl being cloned). The
4756 returned value is a fresh IDENTIFIER_NODE; nothing needs to be freed. */
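/* For example: a plain C symbol "foo" goes through the unencoded path
   below and becomes "_ZGTt3foo", while an already-mangled C++ name
   such as "_Z3foov" becomes "_ZGTt3foov", i.e. "GTt" is inserted
   after the leading "_Z".  */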
4758 static tree
4759 tm_mangle (tree old_asm_id)
4761 const char *old_asm_name;
4762 char *tm_name;
4763 void *alloc = NULL;
4764 struct demangle_component *dc;
4765 tree new_asm_id;
4767 /* Determine if the symbol is already a valid C++ mangled name. Do this
4768 even for C, which might be interfacing with C++ code via appropriately
4769 ugly identifiers. */
4770 /* ??? We could probably do just as well checking for "_Z" and be done. */
4771 old_asm_name = IDENTIFIER_POINTER (old_asm_id);
4772 dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
4774 if (dc == NULL)
4776 char length[8];
4778 do_unencoded:
4779 sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
4780 tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
4782 else
4784 old_asm_name += 2; /* Skip _Z */
4786 switch (dc->type)
4788 case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
4789 case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
4790 /* Don't play silly games, you! */
4791 goto do_unencoded;
4793 case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
4794 /* I'd really like to know if we can ever be passed one of
4795 these from the C++ front end. The Logical Thing would
4796 seem that hidden-alias should be outer-most, so that we
4797 get hidden-alias of a transaction-clone and not vice-versa. */
4798 old_asm_name += 2;
4799 break;
4801 default:
4802 break;
4805 tm_name = concat ("_ZGTt", old_asm_name, NULL);
4807 free (alloc);
4809 new_asm_id = get_identifier (tm_name);
4810 free (tm_name);
4812 return new_asm_id;
4815 static inline void
4816 ipa_tm_mark_force_output_node (struct cgraph_node *node)
4818 cgraph_mark_force_output_node (node);
4819 node->analyzed = true;
4822 static inline void
4823 ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
4825 node->forced_by_abi = true;
4826 node->analyzed = true;
4829 /* Callback data for ipa_tm_create_version_alias. */
4830 struct create_version_alias_info
4832 struct cgraph_node *old_node;
4833 tree new_decl;
4836 /* A subroutine of ipa_tm_create_version, called via
4837 cgraph_for_node_and_aliases. Create new tm clones for each of
4838 the existing aliases. */
4839 static bool
4840 ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
4842 struct create_version_alias_info *info
4843 = (struct create_version_alias_info *)data;
4844 tree old_decl, new_decl, tm_name;
4845 struct cgraph_node *new_node;
4847 if (!node->cpp_implicit_alias)
4848 return false;
4850 old_decl = node->decl;
4851 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4852 new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
4853 TREE_CODE (old_decl), tm_name,
4854 TREE_TYPE (old_decl));
4856 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4857 SET_DECL_RTL (new_decl, NULL);
4859 /* Based loosely on C++'s make_alias_for(). */
4860 TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
4861 DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
4862 DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
4863 TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
4864 DECL_EXTERNAL (new_decl) = 0;
4865 DECL_ARTIFICIAL (new_decl) = 1;
4866 TREE_ADDRESSABLE (new_decl) = 1;
4867 TREE_USED (new_decl) = 1;
4868 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4870 /* Perform the same remapping to the comdat group. */
4871 if (DECL_ONE_ONLY (new_decl))
4872 DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
4874 new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl);
4875 new_node->tm_clone = true;
4876 new_node->externally_visible = info->old_node->externally_visible;
4877 /* ?? Do not traverse aliases here. */
4878 get_cg_data (&node, false)->clone = new_node;
4880 record_tm_clone_pair (old_decl, new_decl);
4882 if (info->old_node->force_output
4883 || ipa_ref_list_first_referring (&info->old_node->ref_list))
4884 ipa_tm_mark_force_output_node (new_node);
4885 if (info->old_node->forced_by_abi)
4886 ipa_tm_mark_forced_by_abi_node (new_node);
4887 return false;
4890 /* Create a copy of the function (possibly declaration only) of OLD_NODE,
4891 appropriate for the transactional clone. */
4893 static void
4894 ipa_tm_create_version (struct cgraph_node *old_node)
4896 tree new_decl, old_decl, tm_name;
4897 struct cgraph_node *new_node;
4899 old_decl = old_node->decl;
4900 new_decl = copy_node (old_decl);
4902 /* DECL_ASSEMBLER_NAME needs to be set before we call
4903 cgraph_copy_node_for_versioning below, because cgraph_node will
4904 fill the assembler_name_hash. */
4905 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4906 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4907 SET_DECL_RTL (new_decl, NULL);
4908 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4910 /* Perform the same remapping to the comdat group. */
4911 if (DECL_ONE_ONLY (new_decl))
4912 DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
4914 new_node = cgraph_copy_node_for_versioning (old_node, new_decl, vNULL, NULL);
4915 new_node->local.local = false;
4916 new_node->externally_visible = old_node->externally_visible;
4917 new_node->lowered = true;
4918 new_node->tm_clone = 1;
4919 get_cg_data (&old_node, true)->clone = new_node;
4921 if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE)
4923 /* Remap extern inline to static inline. */
4924 /* ??? Is it worth trying to use make_decl_one_only? */
4925 if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
4927 DECL_EXTERNAL (new_decl) = 0;
4928 TREE_PUBLIC (new_decl) = 0;
4929 DECL_WEAK (new_decl) = 0;
4932 tree_function_versioning (old_decl, new_decl,
4933 NULL, false, NULL,
4934 false, NULL, NULL);
4937 record_tm_clone_pair (old_decl, new_decl);
4939 cgraph_call_function_insertion_hooks (new_node);
4940 if (old_node->force_output
4941 || ipa_ref_list_first_referring (&old_node->ref_list))
4942 ipa_tm_mark_force_output_node (new_node);
4943 if (old_node->forced_by_abi)
4944 ipa_tm_mark_forced_by_abi_node (new_node);
4946 /* Do the same thing, but for any aliases of the original node. */
4948 struct create_version_alias_info data;
4949 data.old_node = old_node;
4950 data.new_decl = new_decl;
4951 cgraph_for_node_and_aliases (old_node, ipa_tm_create_version_alias,
4952 &data, true);
4956 /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */
4958 static void
4959 ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
4960 basic_block bb)
4962 gimple_stmt_iterator gsi;
4963 gimple g;
4965 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
4967 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
4968 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
4970 split_block_after_labels (bb);
4971 gsi = gsi_after_labels (bb);
4972 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
4974 cgraph_create_edge (node,
4975 cgraph_get_create_node
4976 (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
4977 g, 0,
4978 compute_call_stmt_bb_frequency (node->decl,
4979 gimple_bb (g)));
4982 /* Construct a call to TM_GETTMCLONE and insert it before GSI. */
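/* For example (a sketch): an indirect call inside a transaction,

     fnptr (x);

   is rewritten along the lines of

     tmp = BUILT_IN_TM_GETTMCLONE_IRR (fnptr);  // or the _SAFE variant
                                                // if fnptr's type is safe
     callfn = (fn_type) tmp;
     callfn (x);

   so that the runtime returns the transactional clone of whatever
   function fnptr designates, or arranges to go irrevocable when no
   clone exists.  */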
4984 static bool
4985 ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
4986 struct tm_region *region,
4987 gimple_stmt_iterator *gsi, gimple stmt)
4989 tree gettm_fn, ret, old_fn, callfn;
4990 gimple g, g2;
4991 bool safe;
4993 old_fn = gimple_call_fn (stmt);
4995 if (TREE_CODE (old_fn) == ADDR_EXPR)
4997 tree fndecl = TREE_OPERAND (old_fn, 0);
4998 tree clone = get_tm_clone_pair (fndecl);
5000 /* By transforming the call into a TM_GETTMCLONE, we are
5001 technically taking the address of the original function and
5002 its clone. Explain this so inlining will know this function
5003 is needed. */
5004 cgraph_mark_address_taken_node (cgraph_get_node (fndecl));
5005 if (clone)
5006 cgraph_mark_address_taken_node (cgraph_get_node (clone));
5009 safe = is_tm_safe (TREE_TYPE (old_fn));
5010 gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
5011 : BUILT_IN_TM_GETTMCLONE_IRR);
5012 ret = create_tmp_var (ptr_type_node, NULL);
5014 if (!safe)
5015 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
5017 /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */
5018 if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
5019 old_fn = OBJ_TYPE_REF_EXPR (old_fn);
5021 g = gimple_build_call (gettm_fn, 1, old_fn);
5022 ret = make_ssa_name (ret, g);
5023 gimple_call_set_lhs (g, ret);
5025 gsi_insert_before (gsi, g, GSI_SAME_STMT);
5027 cgraph_create_edge (node, cgraph_get_create_node (gettm_fn), g, 0,
5028 compute_call_stmt_bb_frequency (node->decl,
5029 gimple_bb (g)));
5031 /* Cast return value from tm_gettmclone* into appropriate function
5032 pointer. */
5033 callfn = create_tmp_var (TREE_TYPE (old_fn), NULL);
5034 g2 = gimple_build_assign (callfn,
5035 fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
5036 callfn = make_ssa_name (callfn, g2);
5037 gimple_assign_set_lhs (g2, callfn);
5038 gsi_insert_before (gsi, g2, GSI_SAME_STMT);
5040 /* ??? This is a hack to preserve the NOTHROW bit on the call,
5041 which we would have derived from the decl. Failure to save
5042 this bit means we might have to split the basic block. */
5043 if (gimple_call_nothrow_p (stmt))
5044 gimple_call_set_nothrow (stmt, true);
5046 gimple_call_set_fn (stmt, callfn);
5048 /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
5049 for a call statement. Fix it. */
5051 tree lhs = gimple_call_lhs (stmt);
5052 tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
5053 if (lhs
5054 && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
5056 tree temp;
5058 temp = create_tmp_reg (rettype, 0);
5059 gimple_call_set_lhs (stmt, temp);
5061 g2 = gimple_build_assign (lhs,
5062 fold_build1 (VIEW_CONVERT_EXPR,
5063 TREE_TYPE (lhs), temp));
5064 gsi_insert_after (gsi, g2, GSI_SAME_STMT);
5068 update_stmt (stmt);
5070 return true;
5073 /* Helper function for ipa_tm_transform_calls*. Given a call
5074 statement in GSI which resides inside transaction REGION, redirect
5075 the call to either its wrapper function, or its clone. */
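/* For example (a sketch): inside a transaction,

     memcpy (dst, src, n);   // has a runtime replacement: redirected to
                             //   the corresponding TM entry point
     foo (x);                // local function with a clone: redirected
                             //   to the clone created earlier
     fnptr (x);              // nothing known statically: handled via
                             //   ipa_tm_insert_gettmclone_call

   A user-supplied wrapper registered with the transaction_wrap
   attribute is found by find_tm_replacement_function and used the
   same way as the built-in replacements.  */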
5077 static void
5078 ipa_tm_transform_calls_redirect (struct cgraph_node *node,
5079 struct tm_region *region,
5080 gimple_stmt_iterator *gsi,
5081 bool *need_ssa_rename_p)
5083 gimple stmt = gsi_stmt (*gsi);
5084 struct cgraph_node *new_node;
5085 struct cgraph_edge *e = cgraph_edge (node, stmt);
5086 tree fndecl = gimple_call_fndecl (stmt);
5088 /* For indirect calls, pass the address through the runtime. */
5089 if (fndecl == NULL)
5091 *need_ssa_rename_p |=
5092 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5093 return;
5096 /* Handle some TM builtins. Ordinarily these aren't actually generated
5097 at this point, but handling these functions when written in by the
5098 user makes it easier to build unit tests. */
5099 if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
5100 return;
5102 /* Fixup recursive calls inside clones. */
5103 /* ??? Why did cgraph_copy_node_for_versioning update the call edges
5104 for recursion but not update the call statements themselves? */
5105 if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
5107 gimple_call_set_fndecl (stmt, current_function_decl);
5108 return;
5111 /* If there is a replacement, use it. */
5112 fndecl = find_tm_replacement_function (fndecl);
5113 if (fndecl)
5115 new_node = cgraph_get_create_node (fndecl);
5117 /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
5119 We can't do this earlier in record_tm_replacement because
5120 cgraph_remove_unreachable_nodes is called before we inject
5121 references to the node. Further, we can't do this in some
5122 nice central place in ipa_tm_execute because we don't have
5123 the exact list of wrapper functions that would be used.
5124 Marking more wrappers than necessary results in the creation
5125 of unnecessary cgraph_nodes, which can cause some of the
5126 other IPA passes to crash.
5128 We do need to mark these nodes so that we get the proper
5129 result in expand_call_tm. */
5130 /* ??? This seems broken. How is it that we're marking the
5131 CALLEE as may_enter_irr? Surely we should be marking the
5132 CALLER. Also note that find_tm_replacement_function also
5133 contains mappings into the TM runtime, e.g. memcpy. These
5134 we know won't go irrevocable. */
5135 new_node->local.tm_may_enter_irr = 1;
5137 else
5139 struct tm_ipa_cg_data *d;
5140 struct cgraph_node *tnode = e->callee;
5142 d = get_cg_data (&tnode, true);
5143 new_node = d->clone;
5145 /* As we've already skipped pure calls and appropriate builtins,
5146 and we've already marked irrevocable blocks, if we can't come
5147 up with a static replacement, then ask the runtime. */
5148 if (new_node == NULL)
5150 *need_ssa_rename_p |=
5151 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5152 return;
5155 fndecl = new_node->decl;
5158 cgraph_redirect_edge_callee (e, new_node);
5159 gimple_call_set_fndecl (stmt, fndecl);
5162 /* Helper function for ipa_tm_transform_calls. For a given BB,
5163 install calls to tm_irrevocable when IRR_BLOCKS are reached,
5164 redirect other calls to the generated transactional clone. */
5166 static bool
5167 ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
5168 basic_block bb, bitmap irr_blocks)
5170 gimple_stmt_iterator gsi;
5171 bool need_ssa_rename = false;
5173 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5175 ipa_tm_insert_irr_call (node, region, bb);
5176 return true;
5179 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5181 gimple stmt = gsi_stmt (gsi);
5183 if (!is_gimple_call (stmt))
5184 continue;
5185 if (is_tm_pure_call (stmt))
5186 continue;
5188 /* Redirect edges to the appropriate replacement or clone. */
5189 ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
5192 return need_ssa_rename;
5195 /* Walk the CFG for REGION, beginning at BB. Install calls to
5196 tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
5197 the generated transactional clone. */
5199 static bool
5200 ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
5201 basic_block bb, bitmap irr_blocks)
5203 bool need_ssa_rename = false;
5204 edge e;
5205 edge_iterator ei;
5206 auto_vec<basic_block> queue;
5207 bitmap visited_blocks = BITMAP_ALLOC (NULL);
5209 queue.safe_push (bb);
5212 bb = queue.pop ();
5214 need_ssa_rename |=
5215 ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
5217 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5218 continue;
5220 if (region && bitmap_bit_p (region->exit_blocks, bb->index))
5221 continue;
5223 FOR_EACH_EDGE (e, ei, bb->succs)
5224 if (!bitmap_bit_p (visited_blocks, e->dest->index))
5226 bitmap_set_bit (visited_blocks, e->dest->index);
5227 queue.safe_push (e->dest);
5230 while (!queue.is_empty ());
5232 BITMAP_FREE (visited_blocks);
5234 return need_ssa_rename;
5237 /* Transform the calls within the TM regions within NODE. */
5239 static void
5240 ipa_tm_transform_transaction (struct cgraph_node *node)
5242 struct tm_ipa_cg_data *d;
5243 struct tm_region *region;
5244 bool need_ssa_rename = false;
5246 d = get_cg_data (&node, true);
5248 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5249 calculate_dominance_info (CDI_DOMINATORS);
5251 for (region = d->all_tm_regions; region; region = region->next)
5253 /* If we're sure to go irrevocable, don't transform anything. */
5254 if (d->irrevocable_blocks_normal
5255 && bitmap_bit_p (d->irrevocable_blocks_normal,
5256 region->entry_block->index))
5258 transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
5259 | GTMA_MAY_ENTER_IRREVOCABLE
5260 | GTMA_HAS_NO_INSTRUMENTATION);
5261 continue;
5264 need_ssa_rename |=
5265 ipa_tm_transform_calls (node, region, region->entry_block,
5266 d->irrevocable_blocks_normal);
5269 if (need_ssa_rename)
5270 update_ssa (TODO_update_ssa_only_virtuals);
5272 pop_cfun ();
5275 /* Transform the calls within the transactional clone of NODE. */
5277 static void
5278 ipa_tm_transform_clone (struct cgraph_node *node)
5280 struct tm_ipa_cg_data *d;
5281 bool need_ssa_rename;
5283 d = get_cg_data (&node, true);
5285 /* If this function makes no calls and has no irrevocable blocks,
5286 then there's nothing to do. */
5287 /* ??? Remove non-aborting top-level transactions. */
5288 if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
5289 return;
5291 push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
5292 calculate_dominance_info (CDI_DOMINATORS);
5294 need_ssa_rename =
5295 ipa_tm_transform_calls (d->clone, NULL,
5296 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
5297 d->irrevocable_blocks_clone);
5299 if (need_ssa_rename)
5300 update_ssa (TODO_update_ssa_only_virtuals);
5302 pop_cfun ();
5305 /* Main entry point for the transactional memory IPA pass. */
5307 static unsigned int
5308 ipa_tm_execute (void)
5310 cgraph_node_queue tm_callees = cgraph_node_queue ();
5311 /* List of functions that will go irrevocable. */
5312 cgraph_node_queue irr_worklist = cgraph_node_queue ();
5314 struct cgraph_node *node;
5315 struct tm_ipa_cg_data *d;
5316 enum availability a;
5317 unsigned int i;
5319 #ifdef ENABLE_CHECKING
5320 verify_cgraph ();
5321 #endif
5323 bitmap_obstack_initialize (&tm_obstack);
5324 initialize_original_copy_tables ();
5326 /* For all local functions marked tm_callable, queue them. */
5327 FOR_EACH_DEFINED_FUNCTION (node)
5328 if (is_tm_callable (node->decl)
5329 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
5331 d = get_cg_data (&node, true);
5332 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5335 /* For all local reachable functions... */
5336 FOR_EACH_DEFINED_FUNCTION (node)
5337 if (node->lowered
5338 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
5340 /* ... marked tm_pure, record that fact for the runtime by
5341 indicating that the pure function is its own tm_callable.
5342 No need to do this if the function's address can't be taken. */
5343 if (is_tm_pure (node->decl))
5345 if (!node->local.local)
5346 record_tm_clone_pair (node->decl, node->decl);
5347 continue;
5350 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5351 calculate_dominance_info (CDI_DOMINATORS);
5353 tm_region_init (NULL);
5354 if (all_tm_regions)
5356 d = get_cg_data (&node, true);
5358 /* Scan for calls that are in each transaction, and
5359 generate the uninstrumented code path. */
5360 ipa_tm_scan_calls_transaction (d, &tm_callees);
5362 /* Put it in the worklist so we can scan the function
5363 later (ipa_tm_scan_irr_function) and mark the
5364 irrevocable blocks. */
5365 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5366 d->want_irr_scan_normal = true;
5369 pop_cfun ();
5372 /* For every local function on the callee list, scan as if we will be
5373 creating a transactional clone, queueing all new functions we find
5374 along the way. */
5375 for (i = 0; i < tm_callees.length (); ++i)
5377 node = tm_callees[i];
5378 a = cgraph_function_body_availability (node);
5379 d = get_cg_data (&node, true);
5381 /* Put it in the worklist so we can scan the function later
5382 (ipa_tm_scan_irr_function) and mark the irrevocable
5383 blocks. */
5384 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5386 /* Some callees cannot be arbitrarily cloned. These will always be
5387 irrevocable. Mark these now, so that we need not scan them. */
5388 if (is_tm_irrevocable (node->decl))
5389 ipa_tm_note_irrevocable (node, &irr_worklist);
5390 else if (a <= AVAIL_NOT_AVAILABLE
5391 && !is_tm_safe_or_pure (node->decl))
5392 ipa_tm_note_irrevocable (node, &irr_worklist);
5393 else if (a >= AVAIL_OVERWRITABLE)
5395 if (!tree_versionable_function_p (node->decl))
5396 ipa_tm_note_irrevocable (node, &irr_worklist);
5397 else if (!d->is_irrevocable)
5399 /* If this is an alias, make sure its base is queued as well;
5400 we need not scan the callees now, as the base will do. */
5401 if (node->alias)
5403 node = cgraph_get_node (node->thunk.alias);
5404 d = get_cg_data (&node, true);
5405 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5406 continue;
5409 /* Add all nodes called by this function into
5410 tm_callees as well. */
5411 ipa_tm_scan_calls_clone (node, &tm_callees);
5416 /* Iterate scans until no more work to be done. Prefer not to use
5417 vec::pop because the worklist tends to follow a breadth-first
5418 search of the callgraph, which should allow convergence with a
5419 minimum number of scans. But we also don't want the worklist
5420 array to grow without bound, so we shift the array up periodically. */
5421 for (i = 0; i < irr_worklist.length (); ++i)
5423 if (i > 256 && i == irr_worklist.length () / 8)
5425 irr_worklist.block_remove (0, i);
5426 i = 0;
5429 node = irr_worklist[i];
5430 d = get_cg_data (&node, true);
5431 d->in_worklist = false;
5433 if (d->want_irr_scan_normal)
5435 d->want_irr_scan_normal = false;
5436 ipa_tm_scan_irr_function (node, false);
5438 if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
5439 ipa_tm_note_irrevocable (node, &irr_worklist);
5442 /* For every function on the callee list, collect the tm_may_enter_irr
5443 bit on the node. */
5444 irr_worklist.truncate (0);
5445 for (i = 0; i < tm_callees.length (); ++i)
5447 node = tm_callees[i];
5448 if (ipa_tm_mayenterirr_function (node))
5450 d = get_cg_data (&node, true);
5451 gcc_assert (d->in_worklist == false);
5452 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5456 /* Propagate the tm_may_enter_irr bit to callers until stable. */
5457 for (i = 0; i < irr_worklist.length (); ++i)
5459 struct cgraph_node *caller;
5460 struct cgraph_edge *e;
5461 struct ipa_ref *ref;
5462 unsigned j;
5464 if (i > 256 && i == irr_worklist.length () / 8)
5466 irr_worklist.block_remove (0, i);
5467 i = 0;
5470 node = irr_worklist[i];
5471 d = get_cg_data (&node, true);
5472 d->in_worklist = false;
5473 node->local.tm_may_enter_irr = true;
5475 /* Propagate back to normal callers. */
5476 for (e = node->callers; e ; e = e->next_caller)
5478 caller = e->caller;
5479 if (!is_tm_safe_or_pure (caller->decl)
5480 && !caller->local.tm_may_enter_irr)
5482 d = get_cg_data (&caller, true);
5483 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5487 /* Propagate back to referring aliases as well. */
5488 for (j = 0; ipa_ref_list_referring_iterate (&node->ref_list, j, ref); j++)
5490 caller = cgraph (ref->referring);
5491 if (ref->use == IPA_REF_ALIAS
5492 && !caller->local.tm_may_enter_irr)
5494 /* ?? Do not traverse aliases here. */
5495 d = get_cg_data (&caller, false);
5496 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5501 /* Now validate all tm_safe functions, and all atomic regions in
5502 other functions. */
5503 FOR_EACH_DEFINED_FUNCTION (node)
5504 if (node->lowered
5505 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
5507 d = get_cg_data (&node, true);
5508 if (is_tm_safe (node->decl))
5509 ipa_tm_diagnose_tm_safe (node);
5510 else if (d->all_tm_regions)
5511 ipa_tm_diagnose_transaction (node, d->all_tm_regions);
5514 /* Create clones. Do those that are not irrevocable and have a
5515 positive call count. Do those publicly visible functions that
5516 the user directed us to clone. */
5517 for (i = 0; i < tm_callees.length (); ++i)
5519 bool doit = false;
5521 node = tm_callees[i];
5522 if (node->cpp_implicit_alias)
5523 continue;
5525 a = cgraph_function_body_availability (node);
5526 d = get_cg_data (&node, true);
5528 if (a <= AVAIL_NOT_AVAILABLE)
5529 doit = is_tm_callable (node->decl);
5530 else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
5531 doit = true;
5532 else if (!d->is_irrevocable
5533 && d->tm_callers_normal + d->tm_callers_clone > 0)
5534 doit = true;
5536 if (doit)
5537 ipa_tm_create_version (node);
5540 /* Redirect calls to the new clones, and insert irrevocable marks. */
5541 for (i = 0; i < tm_callees.length (); ++i)
5543 node = tm_callees[i];
5544 if (node->analyzed)
5546 d = get_cg_data (&node, true);
5547 if (d->clone)
5548 ipa_tm_transform_clone (node);
5551 FOR_EACH_DEFINED_FUNCTION (node)
5552 if (node->lowered
5553 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
5555 d = get_cg_data (&node, true);
5556 if (d->all_tm_regions)
5557 ipa_tm_transform_transaction (node);
5560 /* Free and clear all data structures. */
5561 tm_callees.release ();
5562 irr_worklist.release ();
5563 bitmap_obstack_release (&tm_obstack);
5564 free_original_copy_tables ();
5566 FOR_EACH_FUNCTION (node)
5567 node->aux = NULL;
5569 #ifdef ENABLE_CHECKING
5570 verify_cgraph ();
5571 #endif
5573 return 0;
5576 namespace {
5578 const pass_data pass_data_ipa_tm =
5580 SIMPLE_IPA_PASS, /* type */
5581 "tmipa", /* name */
5582 OPTGROUP_NONE, /* optinfo_flags */
5583 true, /* has_gate */
5584 true, /* has_execute */
5585 TV_TRANS_MEM, /* tv_id */
5586 ( PROP_ssa | PROP_cfg ), /* properties_required */
5587 0, /* properties_provided */
5588 0, /* properties_destroyed */
5589 0, /* todo_flags_start */
5590 0, /* todo_flags_finish */
5593 class pass_ipa_tm : public simple_ipa_opt_pass
5595 public:
5596 pass_ipa_tm (gcc::context *ctxt)
5597 : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
5600 /* opt_pass methods: */
5601 bool gate () { return gate_tm (); }
5602 unsigned int execute () { return ipa_tm_execute (); }
5604 }; // class pass_ipa_tm
5606 } // anon namespace
5608 simple_ipa_opt_pass *
5609 make_pass_ipa_tm (gcc::context *ctxt)
5611 return new pass_ipa_tm (ctxt);
5614 #include "gt-trans-mem.h"