/* Passes for transactional memory support.
   Copyright (C) 2008-2017 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>
   and Aldy Hernandez <aldyh@redhat.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "tree-eh.h"
#include "calls.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-inline.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
#include "params.h"
#include "langhooks.h"
#include "cfgloop.h"
#include "tree-ssa-address.h"
#include "stringpool.h"
#include "attribs.h"

#define A_RUNINSTRUMENTEDCODE	0x0001
#define A_RUNUNINSTRUMENTEDCODE	0x0002
#define A_SAVELIVEVARIABLES	0x0004
#define A_RESTORELIVEVARIABLES	0x0008
#define A_ABORTTRANSACTION	0x0010

#define AR_USERABORT		0x0001
#define AR_USERRETRY		0x0002
#define AR_TMCONFLICT		0x0004
#define AR_EXCEPTIONBLOCKABORT	0x0008
#define AR_OUTERABORT		0x0010

#define MODE_SERIALIRREVOCABLE	0x0000

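/* For orientation: the A_* bits above are the actions encoded in the
   value returned by the transaction-start call, and the AR_* bits are
   the abort reasons passed to the abort builtin.  A sketch of the
   dispatch the expanded code performs (illustrative only, using the
   builtin names from the examples later in this file, not the exact
   libitm entry points):

	status = __builtin___tm_start (MAY_ABORT);
	if (status & A_ABORTTRANSACTION)
	  goto over;
	else if (status & A_RUNUNINSTRUMENTEDCODE)
	  goto uninstrumented_code_path;
	... otherwise run the instrumented code path ...  */
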
/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction {
	  local++;
	  if (++global == 10)
	    __tm_abort;
	}

   During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
   trivially replaced with a GIMPLE_TRANSACTION node.

   During pass_lower_tm, we examine the body of transactions looking
   for aborts.  Transactions that do not contain an abort may be
   merged into an outer transaction.  We also add a TRY-FINALLY node
   to arrange for the transaction to be committed on any exit.

   [??? Think about how this arrangement affects throw-with-commit
   and throw-with-abort operations.  In this case we want the TRY to
   handle gotos, but not to catch any exceptions because the transaction
   will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    local = local + 1;
	    t0 = global;
	    t1 = t0 + 1;
	    global = t1;
	    if (t1 == 10)
	      __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

   During pass_lower_eh, we create EH regions for the transactions,
   intermixed with the regular EH stuff.  This gives us a nice persistent
   mapping (all the way through rtl) from transactional memory operation
   back to the transaction, which allows us to get the abnormal edges
   correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	local = local + 1;
	t0 = global;
	t1 = t0 + 1;
	global = t1;
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:

   This is the end of all_lowering_passes, and so is what is present
   during the IPA passes, and through all of the optimization passes.

   During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
   functions and mark functions for cloning.

   At the end of gimple optimization, before exiting SSA form,
   pass_tm_edges replaces statements that perform transactional
   memory operations with the appropriate TM builtins, and swaps
   out function calls with their transactional clones.  At this
   point we introduce the abnormal transaction restart edges and
   complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	eh_label:
	if (x & abort_transaction)
	  goto over;
	local = local + 1;
	t0 = __builtin___tm_load (global);
	t1 = t0 + 1;
	__builtin___tm_store (&global, t1);
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:
*/

static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);

/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  if (x == NULL_TREE)
    return NULL_TREE;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));

    default:
      if (TYPE_P (x))
	return NULL_TREE;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}

/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}

/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is, by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}

/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}

/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple *call)
{
  if (gimple_call_internal_p (call))
    return (gimple_call_flags (call) & (ECF_CONST | ECF_TM_PURE)) != 0;

  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}

/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}

/* Return true for built-in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}

/* Return true if STMT is a built-in function call that "ends" a
   transaction.  */

bool
is_tm_ending (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl != NULL_TREE
	  && is_tm_ending_fndecl (fndecl));
}

/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_load (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}

/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_store (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}

/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */
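/* For example (illustrative): for a user-level __transaction_cancel
   the front end builds the equivalent of

	__builtin___tm_abort (AR_USERABORT);

   and for a cancel of an outer transaction

	__builtin___tm_abort (AR_USERABORT | AR_OUTERABORT);  */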
tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}

/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map>
{
  static inline hashval_t hash (tree_map *m) { return m->hash; }

  static inline bool
  equal (tree_map *a, tree_map *b)
  {
    return a->base.from == b->base.from;
  }

  static int
  keep_cache_entry (tree_map *&m)
  {
    return ggc_marked_p (m->base.from);
  }
};

static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;

void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);

  h = ggc_alloc<tree_map> ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
  *slot = h;
}

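/* For illustration, entries in TM_WRAP_MAP typically come from a
   user-level wrapper declaration (a sketch):

	extern void foo (void);
	extern void tm_foo (void) __attribute__ ((transaction_wrap (foo)));

   after which calls to foo within a transaction are redirected to
   tm_foo by find_tm_replacement_function below.  */
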
/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = tm_wrap_map->find_with_hash (&in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we already have these defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here:  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	return NULL;
      }

  return NULL;
}

/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */
void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing...  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}

/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple *stmt;
};

/* Return true if T is a volatile lvalue of some kind.  */

static bool
volatile_lvalue_p (tree t)
{
  return ((SSA_VAR_P (t) || REFERENCE_CLASS_P (t))
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}

/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = false;
  else if (volatile_lvalue_p (*tp)
	   && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (d->stmt),
		  "invalid use of volatile lvalue inside transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (d->stmt),
		  "invalid use of volatile lvalue inside "
		  "%<transaction_safe%> function");
    }

  return NULL_TREE;
}

static inline bool
is_tm_safe_or_pure (const_tree x)
{
  return is_tm_safe (x) || is_tm_pure (x);
}

static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi);
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  /* Save stmt for use in leaf analysis.  */
  d->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fn = gimple_call_fn (stmt);

	if ((d->summary_flags & DIAG_TM_OUTER) == 0
	    && is_tm_may_cancel_outer (fn))
	  error_at (gimple_location (stmt),
		    "%<transaction_may_cancel_outer%> function call not within"
		    " outer transaction or %<transaction_may_cancel_outer%>");

	if (d->summary_flags & DIAG_TM_SAFE)
	  {
	    bool is_safe, direct_call_p;
	    tree replacement;

	    if (TREE_CODE (fn) == ADDR_EXPR
		&& TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
	      {
		direct_call_p = true;
		replacement = TREE_OPERAND (fn, 0);
		replacement = find_tm_replacement_function (replacement);
		if (replacement)
		  fn = replacement;
	      }
	    else
	      {
		direct_call_p = false;
		replacement = NULL_TREE;
	      }

	    if (is_tm_safe_or_pure (fn))
	      is_safe = true;
	    else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
	      {
		/* A function explicitly marked transaction_callable as
		   opposed to transaction_safe is being defined to be
		   unsafe as part of its ABI, regardless of its contents.  */
		is_safe = false;
	      }
	    else if (direct_call_p)
	      {
		if (IS_TYPE_OR_DECL_P (fn)
		    && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
		  is_safe = true;
		else if (replacement)
		  {
		    /* ??? At present we've been considering replacements
		       merely transaction_callable, and therefore might
		       enter irrevocable.  The tm_wrap attribute has not
		       yet made it into the new language spec.  */
		    is_safe = false;
		  }
		else
		  {
		    /* ??? Diagnostics for unmarked direct calls moved into
		       the IPA pass.  Section 3.2 of the spec details how
		       functions not marked should be considered "implicitly
		       safe" based on having examined the function body.  */
		    is_safe = true;
		  }
	      }
	    else
	      {
		/* An unmarked indirect call.  Consider it unsafe even
		   though optimization may yet figure out how to inline.  */
		is_safe = false;
	      }

	    if (!is_safe)
	      {
		if (TREE_CODE (fn) == ADDR_EXPR)
		  fn = TREE_OPERAND (fn, 0);
		if (d->block_flags & DIAG_TM_SAFE)
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"atomic transaction", fn);
		    else
		      {
			if ((!DECL_P (fn) || DECL_NAME (fn))
			    && TREE_CODE (fn) != SSA_NAME)
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "atomic transaction", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "atomic transaction");
		      }
		  }
		else
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"%<transaction_safe%> function", fn);
		    else
		      {
			if ((!DECL_P (fn) || DECL_NAME (fn))
			    && TREE_CODE (fn) != SSA_NAME)
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "%<transaction_safe%> function", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "%<transaction_safe%> function");
		      }
		  }
	      }
	  }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
	 asm statements, and then add "transaction_safe" to it.
	 Either that or get the language spec to resurrect __tm_waiver.  */
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in atomic transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
	gtransaction *trans_stmt = as_a <gtransaction *> (stmt);
	unsigned char inner_flags = DIAG_TM_SAFE;

	if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_RELAXED)
	  {
	    if (d->block_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in atomic transaction");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in %<transaction_safe%> function");
	    inner_flags = DIAG_TM_RELAXED;
	  }
	else if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_OUTER)
	  {
	    if (d->block_flags)
	      error_at (gimple_location (stmt),
			"outer transaction in transaction");
	    else if (d->func_flags & DIAG_TM_OUTER)
	      error_at (gimple_location (stmt),
			"outer transaction in "
			"%<transaction_may_cancel_outer%> function");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"outer transaction in %<transaction_safe%> function");
	    inner_flags |= DIAG_TM_OUTER;
	  }

	*handled_ops_p = true;
	if (gimple_transaction_body (trans_stmt))
	  {
	    struct walk_stmt_info wi_inner;
	    struct diagnose_tm d_inner;

	    memset (&d_inner, 0, sizeof (d_inner));
	    d_inner.func_flags = d->func_flags;
	    d_inner.block_flags = d->block_flags | inner_flags;
	    d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;

	    memset (&wi_inner, 0, sizeof (wi_inner));
	    wi_inner.info = &d_inner;

	    walk_gimple_seq (gimple_transaction_body (trans_stmt),
			     diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
	  }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}

static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
		   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}

namespace {

const pass_data pass_data_diagnose_tm_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_tm_blocks", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_tm_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_tm_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }

}; // class pass_diagnose_tm_blocks

} // anon namespace

gimple_opt_pass *
make_pass_diagnose_tm_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_tm_blocks (ctxt);
}

/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

	struct large { int x[1000]; };
	struct large lala = { 0 };
	__transaction {
	  lala.x[i] = 123;
	}

   We can either save/restore:

	lala = { 0 };
	trxn = _ITM_startTransaction ();
	if (trxn & a_saveLiveVariables)
	  tmp_lala1 = lala.x[i];
	else if (a & a_restoreLiveVariables)
	  lala.x[i] = tmp_lala1;

   or use the logging functions:

	lala = { 0 };
	trxn = _ITM_startTransaction ();
	_ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree as will shadow all of the writes to a
   given location (thus reducing the total number of logging calls),
   but not so high as to be called on a path that does not perform a
   write.  */

/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  vec<gimple *> stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
};

/* Log entry hashtable helpers.  */

struct log_entry_hasher : pointer_hash <tm_log_entry>
{
  static inline hashval_t hash (const tm_log_entry *);
  static inline bool equal (const tm_log_entry *, const tm_log_entry *);
  static inline void remove (tm_log_entry *);
};

/* Htab support.  Return hash value for a `tm_log_entry'.  */
inline hashval_t
log_entry_hasher::hash (const tm_log_entry *log)
{
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
inline bool
log_entry_hasher::equal (const tm_log_entry *log1, const tm_log_entry *log2)
{
  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
inline void
log_entry_hasher::remove (tm_log_entry *lp)
{
  lp->stmts.release ();
  free (lp);
}

/* The actual log.  */
static hash_table<log_entry_hasher> *tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static vec<tree> tm_log_save_addresses;

enum thread_memory_type
{
  mem_non_local = 0,
  mem_thread_local,
  mem_transaction_local,
  mem_max
};

struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
};

/* Hashtable helpers.  */

struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map>
{
  static inline hashval_t hash (const tm_new_mem_map *);
  static inline bool equal (const tm_new_mem_map *, const tm_new_mem_map *);
};

inline hashval_t
tm_mem_map_hasher::hash (const tm_new_mem_map *v)
{
  return (intptr_t)v->val >> 4;
}

inline bool
tm_mem_map_hasher::equal (const tm_new_mem_map *v, const tm_new_mem_map *c)
{
  return v->val == c->val;
}

/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc).  */
static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;

/* Initialize logging data structures.  */
static void
tm_log_init (void)
{
  tm_log = new hash_table<log_entry_hasher> (10);
  tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
  tm_log_save_addresses.create (5);
}

/* Free logging data structures.  */
static void
tm_log_delete (void)
{
  delete tm_log;
  tm_log = NULL;
  delete tm_new_mem_hash;
  tm_new_mem_hash = NULL;
  tm_log_save_addresses.release ();
}

/* Return true if MEM is a transaction invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
      basic_block def_bb;

      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return def_bb != region_entry_block
	     && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
    }

  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}

/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry_block for the transaction.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.

   If known, ENTRY_BLOCK is the entry block for the region, otherwise
   NULL.  */
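/* For example (illustrative), given stores on two mutually exclusive
   paths within a transaction:

	__transaction {
	  if (c)
	    x.f = 1;
	  else
	    x.f = 2;
	}

   neither store dominates the other, so the entry for the address of
   x.f keeps both statements in its STMTS vector.  If instead the
   second store were in a block dominated by the first, only the first
   (the one highest in the dominator tree) would be kept.  */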
static void
tm_log_add (basic_block entry_block, tree addr, gimple *stmt)
{
  tm_log_entry **slot;
  struct tm_log_entry l, *lp;

  l.addr = addr;
  slot = tm_log->find_slot (&l, INSERT);
  if (!*slot)
    {
      tree type = TREE_TYPE (addr);

      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      *slot = lp;

      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
	  && transaction_invariant_address_p (lp->addr, entry_block)
	  && TYPE_SIZE_UNIT (type) != NULL
	  && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
	  && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
	      < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
	  /* We must be able to copy this type normally.  I.e., no
	     special constructors and the like.  */
	  && !TREE_ADDRESSABLE (type))
	{
	  lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
	  lp->stmts.create (0);
	  lp->entry_block = entry_block;
	  /* Save addresses separately in dominator order so we don't
	     get confused by overlapping addresses in the save/restore
	     sequence.  */
	  tm_log_save_addresses.safe_push (lp->addr);
	}
      else
	{
	  /* Use the logging functions.  */
	  lp->stmts.create (5);
	  lp->stmts.quick_push (stmt);
	  lp->save_var = NULL;
	}
    }
  else
    {
      size_t i;
      gimple *oldstmt;

      lp = *slot;

      /* If we're generating a save/restore sequence, we don't care
	 about statements.  */
      if (lp->save_var)
	return;

      for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
	{
	  if (stmt == oldstmt)
	    return;
	  /* We already have a store to the same address, higher up the
	     dominator tree.  Nothing to do.  */
	  if (dominated_by_p (CDI_DOMINATORS,
			      gimple_bb (stmt), gimple_bb (oldstmt)))
	    return;
	  /* We should be processing blocks in dominator tree order.  */
	  gcc_assert (!dominated_by_p (CDI_DOMINATORS,
				       gimple_bb (oldstmt), gimple_bb (stmt)));
	}
      /* Store is on a different code path.  */
      lp->stmts.safe_push (stmt);
    }
}

/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */

static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}

/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
static void
tm_log_emit_stmt (tree addr, gimple *stmt)
{
  tree type = TREE_TYPE (addr);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple *log;
  enum built_in_function code = BUILT_IN_TM_LOG;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (TYPE_SIZE (type) != NULL
	   && tree_fits_uhwi_p (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));

      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  switch (type_size)
	    {
	    case 64:
	      code = BUILT_IN_TM_LOG_M64;
	      break;
	    case 128:
	      code = BUILT_IN_TM_LOG_M128;
	      break;
	    case 256:
	      code = BUILT_IN_TM_LOG_M256;
	      break;
	    default:
	      goto unhandled_vec;
	    }
	  if (!builtin_decl_explicit_p (code))
	    goto unhandled_vec;
	}
      else
	{
	unhandled_vec:
	  switch (type_size)
	    {
	    case 8:
	      code = BUILT_IN_TM_LOG_1;
	      break;
	    case 16:
	      code = BUILT_IN_TM_LOG_2;
	      break;
	    case 32:
	      code = BUILT_IN_TM_LOG_4;
	      break;
	    case 64:
	      code = BUILT_IN_TM_LOG_8;
	      break;
	    }
	}
    }

  if (code != BUILT_IN_TM_LOG && !builtin_decl_explicit_p (code))
    code = BUILT_IN_TM_LOG;
  tree decl = builtin_decl_explicit (code);

  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (decl, 2, addr, TYPE_SIZE_UNIT (type));
  else
    log = gimple_build_call (decl, 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}

/* Go through the log and instrument addresses that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   later.  */
static void
tm_log_emit (void)
{
  hash_table<log_entry_hasher>::iterator hi;
  struct tm_log_entry *lp;

  FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
    {
      size_t i;
      gimple *stmt;

      if (dump_file)
	{
	  fprintf (dump_file, "TM thread private mem logging: ");
	  print_generic_expr (dump_file, lp->addr);
	  fprintf (dump_file, "\n");
	}

      if (lp->save_var)
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING to variable\n");
	  continue;
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING with logging functions\n");
	  for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
	    tm_log_emit_stmt (lp->addr, stmt);
	}
    }
}

/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple *stmt;
  struct tm_log_entry l, *lp;

  for (i = 0; i < tm_log_save_addresses.length (); ++i)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));

      /* Make sure we can create an SSA_NAME for this type.  For
	 instance, aggregates aren't allowed, in which case the system
	 will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
	{
	  lp->save_var = make_ssa_name (lp->save_var, stmt);
	  gimple_assign_set_lhs (stmt, lp->save_var);
	}

      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
}

/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
  int i;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple *stmt;

  for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
      gsi = gsi_start_bb (bb);

      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
}

static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
			       struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
				  struct walk_stmt_info *);

/* Evaluate an address X being dereferenced and determine if it
   originally points to a non-aliased new chunk of memory (malloc,
   alloca, etc).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
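/* For example (illustrative):

	__transaction {
	  p = malloc (n);
	  *p = 1;
	}

   Here *p is transaction-local: a restart frees and reallocates the
   memory, so the store needs neither a barrier nor logging.  If the
   malloc were hoisted above the transaction, *p would be merely
   thread-local (it survives a restart, so it must be logged), and if
   P escaped to another thread the dereference would be mem_non_local
   and require barriers.  */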
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
  gimple *stmt = NULL;
  enum tree_code code;
  tm_new_mem_map **slot;
  tm_new_mem_map elt, *elt_p;
  tree val = x;
  enum thread_memory_type retval = mem_transaction_local;

  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
	 either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;

  /* Look in cache first.  */
  elt.val = x;
  slot = tm_new_mem_hash->find_slot (&elt, INSERT);
  elt_p = *slot;
  if (elt_p)
    return elt_p->local_new_memory;

  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map);
  elt_p->val = val;
  elt_p->local_new_memory = mem_transaction_local;

  /* Search DEF chain to find the original definition of this address.  */
  do
    {
      if (ptr_deref_may_alias_global_p (x))
	{
	  /* Address escapes.  This is not thread-private.  */
	  retval = mem_non_local;
	  goto new_memory_ret;
	}

      stmt = SSA_NAME_DEF_STMT (x);

      /* If the malloc call is outside the transaction, this is
	 thread-local.  */
      if (retval != mem_thread_local
	  && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
	retval = mem_thread_local;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  /* x = foo ==> foo */
	  if (code == SSA_NAME)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = foo + n ==> foo */
	  else if (code == POINTER_PLUS_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = (cast*) foo ==> foo */
	  else if (code == VIEW_CONVERT_EXPR || CONVERT_EXPR_CODE_P (code))
	    x = gimple_assign_rhs1 (stmt);
	  /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
	  else if (code == COND_EXPR)
	    {
	      tree op1 = gimple_assign_rhs2 (stmt);
	      tree op2 = gimple_assign_rhs3 (stmt);
	      enum thread_memory_type mem;
	      retval = thread_private_new_memory (entry_block, op1);
	      if (retval == mem_non_local)
		goto new_memory_ret;
	      mem = thread_private_new_memory (entry_block, op2);
	      retval = MIN (retval, mem);
	      goto new_memory_ret;
	    }
	  else
	    {
	      retval = mem_non_local;
	      goto new_memory_ret;
	    }
	}
      else
	{
	  if (gimple_code (stmt) == GIMPLE_PHI)
	    {
	      unsigned int i;
	      enum thread_memory_type mem;
	      tree phi_result = gimple_phi_result (stmt);

	      /* If any of the ancestors are non-local, we are sure to
		 be non-local.  Otherwise we can avoid doing anything
		 and inherit what has already been generated.  */
	      retval = mem_max;
	      for (i = 0; i < gimple_phi_num_args (stmt); ++i)
		{
		  tree op = PHI_ARG_DEF (stmt, i);

		  /* Exclude self-assignment.  */
		  if (phi_result == op)
		    continue;

		  mem = thread_private_new_memory (entry_block, op);
		  if (mem == mem_non_local)
		    {
		      retval = mem;
		      goto new_memory_ret;
		    }
		  retval = MIN (retval, mem);
		}
	      goto new_memory_ret;
	    }
	  break;
	}
    }
  while (TREE_CODE (x) == SSA_NAME);

  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    ;
  else
    retval = mem_non_local;

 new_memory_ret:
  elt_p->local_new_memory = retval;
  return retval;
}

/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region where stmt resides
   in.  NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
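/* A few illustrative cases:

	extern int g;		=> true (writable global)
	static const int c;	=> false (read-only global)
	int *p = malloc (..);	*p in the same transaction
				=> false (transaction-local)
	int l; foo (&l);	=> true (local that lives in memory)

   assuming in each case that X is the reference shown and that no TM
   attributes alter the picture.  */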
static bool
requires_barrier (basic_block entry_block, tree x, gimple *stmt)
{
  tree orig = x;
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);

  switch (TREE_CODE (x))
    {
    case INDIRECT_REF:
    case MEM_REF:
      {
	enum thread_memory_type ret;

	ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
	if (ret == mem_non_local)
	  return true;
	if (stmt && ret == mem_thread_local)
	  /* ?? Should we pass `orig', or the INDIRECT_REF X.  ?? */
	  tm_log_add (entry_block, orig, stmt);

	/* Transaction-locals require nothing at all.  For malloc, a
	   transaction restart frees the memory and we reallocate.
	   For alloca, the stack pointer gets reset by the retry and
	   we reallocate.  */
	return false;
      }

    case TARGET_MEM_REF:
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
	return true;
      x = TREE_OPERAND (TMR_BASE (x), 0);
      if (TREE_CODE (x) == PARM_DECL)
	return false;
      gcc_assert (VAR_P (x));
      /* FALLTHRU */

    case PARM_DECL:
    case RESULT_DECL:
    case VAR_DECL:
      if (DECL_BY_REFERENCE (x))
	{
	  /* ??? This value is a pointer, but aggregate_value_p has been
	     jigged to return true which confuses needs_to_live_in_memory.
	     This ought to be cleaned up generically.

	     FIXME: Verify this still happens after the next mainline
	     merge.  Testcase: g++.dg/tm/pr47554.C.  */
	  return false;
	}

      if (is_global_var (x))
	return !TREE_READONLY (x);
      if (/* FIXME: This condition should actually go below in the
	     tm_log_add() call, however is_call_clobbered() depends on
	     aliasing info which is not available during
	     gimplification.  Since requires_barrier() gets called
	     during lower_sequence_tm/gimplification, leave the call
	     to needs_to_live_in_memory until we eliminate
	     lower_sequence_tm altogether.  */
	  needs_to_live_in_memory (x))
	return true;
      else
	{
	  /* For local memory that doesn't escape (aka thread private
	     memory), we can either save the value at the beginning of
	     the transaction and restore on restart, or call a tm
	     function to dynamically save and restore on restart
	     (ITM_L*).  */
	  if (stmt)
	    tm_log_add (entry_block, orig, stmt);
	  return false;
	}

    default:
      return false;
    }
}

/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */

static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
    *state |= GTMA_HAVE_LOAD;
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
    *state |= GTMA_HAVE_STORE;
}

/* Mark a GIMPLE_CALL as appropriate for being inside a transaction.  */

static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree fn;

  if (is_tm_pure_call (stmt))
    return;

  /* Check if this call is a transaction abort.  */
  fn = gimple_call_fndecl (stmt);
  if (is_tm_abort (fn))
    *state |= GTMA_HAVE_ABORT;

  /* Note that something may happen.  */
  *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}

/* Iterate through the statements in the sequence, moving labels
   (and thus edges) of transactions from "label_norm" to "label_uninst".  */

static tree
make_tm_uninst (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		struct walk_stmt_info *)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (gtransaction *txn = dyn_cast <gtransaction *> (stmt))
    {
      *handled_ops_p = true;
      txn->label_uninst = txn->label_norm;
      txn->label_norm = NULL;
    }
  else
    *handled_ops_p = !gimple_has_substatements (stmt);

  return NULL_TREE;
}

/* Lower a GIMPLE_TRANSACTION statement.  */

static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
  gimple *g;
  gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi));
  unsigned int *outer_state = (unsigned int *) wi->info;
  unsigned int this_state = 0;
  struct walk_stmt_info this_wi;

  /* First, lower the body.  The scanning that we do inside gives
     us some idea of what we're dealing with.  */
  memset (&this_wi, 0, sizeof (this_wi));
  this_wi.info = (void *) &this_state;
  walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
		       lower_sequence_tm, NULL, &this_wi);

  /* If there was absolutely nothing transaction related inside the
     transaction, we may elide it.  Likewise if this is a nested
     transaction and does not contain an abort.  */
  if (this_state == 0
      || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
    {
      if (outer_state)
	*outer_state |= this_state;

      gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
			     GSI_SAME_STMT);
      gimple_transaction_set_body (stmt, NULL);

      gsi_remove (gsi, true);
      wi->removed_stmt = true;
      return;
    }

  /* Wrap the body of the transaction in a try-finally node so that
     the commit call is always properly called.  */
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
  if (flag_exceptions)
    {
      tree ptr;
      gimple_seq n_seq, e_seq;

      n_seq = gimple_seq_alloc_with_stmt (g);
      e_seq = NULL;

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
			     1, integer_zero_node);
      ptr = create_tmp_var (ptr_type_node);
      gimple_call_set_lhs (g, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
			     1, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_eh_else (n_seq, e_seq);
    }

  g = gimple_build_try (gimple_transaction_body (stmt),
			gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);

  /* For a (potentially) outer transaction, create two paths.  */
  gimple_seq uninst = NULL;
  if (outer_state == NULL)
    {
      uninst = copy_gimple_seq_and_replace_locals (g);
      /* In the uninstrumented copy, reset inner transactions to have only
	 an uninstrumented code path.  */
      memset (&this_wi, 0, sizeof (this_wi));
      walk_gimple_seq (uninst, make_tm_uninst, NULL, &this_wi);
    }

  tree label1 = create_artificial_label (UNKNOWN_LOCATION);
  gsi_insert_after (gsi, gimple_build_label (label1), GSI_CONTINUE_LINKING);
  gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
  gimple_transaction_set_label_norm (stmt, label1);

  /* If the transaction calls abort or if this is an outer transaction,
     add an "over" label afterwards.  */
  tree label3 = NULL;
  if ((this_state & GTMA_HAVE_ABORT)
      || outer_state == NULL
      || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
    {
      label3 = create_artificial_label (UNKNOWN_LOCATION);
      gimple_transaction_set_label_over (stmt, label3);
    }

  if (uninst != NULL)
    {
      gsi_insert_after (gsi, gimple_build_goto (label3), GSI_CONTINUE_LINKING);

      tree label2 = create_artificial_label (UNKNOWN_LOCATION);
      gsi_insert_after (gsi, gimple_build_label (label2), GSI_CONTINUE_LINKING);
      gsi_insert_seq_after (gsi, uninst, GSI_CONTINUE_LINKING);
      gimple_transaction_set_label_uninst (stmt, label2);
    }

  if (label3 != NULL)
    gsi_insert_after (gsi, gimple_build_label (label3), GSI_CONTINUE_LINKING);

  gimple_transaction_set_body (stmt, NULL);

  /* Record the set of operations found for use later.  */
  this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
  gimple_transaction_set_subcode (stmt, this_state);
}

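/* For illustration, for a toplevel (potentially outer) transaction the
   code laid out above has roughly this shape (a sketch, ignoring the
   eh_else path; the labels are the artificial labels created above):

	GIMPLE_TRANSACTION [label_norm, label_uninst, label_over]
	label_norm:
	  try {
	    ... instrumented body ...
	  } finally {
	    __builtin___tm_commit ();
	  }
	  goto label_over;
	label_uninst:
	  try {
	    ... uninstrumented copy of the body ...
	  } finally {
	    __builtin___tm_commit ();
	  }
	label_over:
*/
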
/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being in a transaction.  */

static tree
lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		   struct walk_stmt_info *wi)
{
  unsigned int *state = (unsigned int *) wi->info;
  gimple *stmt = gsi_stmt (*gsi);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* Only memory reads/writes need to be instrumented.  */
      if (gimple_assign_single_p (stmt))
	examine_assign_tm (state, gsi);
      break;

    case GIMPLE_CALL:
      examine_call_tm (state, gsi);
      break;

    case GIMPLE_ASM:
      *state |= GTMA_MAY_ENTER_IRREVOCABLE;
      break;

    case GIMPLE_TRANSACTION:
      lower_transaction (gsi, wi);
      break;

    default:
      *handled_ops_p = !gimple_has_substatements (stmt);
      break;
    }

  return NULL_TREE;
}

/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being outside of a transaction.  */

static tree
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		      struct walk_stmt_info * wi)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_TRANSACTION)
    {
      *handled_ops_p = true;
      lower_transaction (gsi, wi);
    }
  else
    *handled_ops_p = !gimple_has_substatements (stmt);

  return NULL_TREE;
}

/* Main entry point for flattening GIMPLE_TRANSACTION constructs.  After
   this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
   been moved out, and all the data required for constructing a proper
   CFG has been recorded.  */

static unsigned int
execute_lower_tm (void)
{
  struct walk_stmt_info wi;
  gimple_seq body;

  /* Transactional clones aren't created until a later pass.  */
  gcc_assert (!decl_is_tm_clone (current_function_decl));

  body = gimple_body (current_function_decl);
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
  gimple_set_body (current_function_decl, body);

  return 0;
}

namespace {

const pass_data pass_data_lower_tm =
{
  GIMPLE_PASS, /* type */
  "tmlower", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_tm : public gimple_opt_pass
{
public:
  pass_lower_tm (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return execute_lower_tm (); }

}; // class pass_lower_tm

} // anon namespace

gimple_opt_pass *
make_pass_lower_tm (gcc::context *ctxt)
{
  return new pass_lower_tm (ctxt);
}

/* Collect region information for each transaction.  */

struct tm_region
{
public:

  /* The field "transaction_stmt" is initially a gtransaction *,
     but eventually gets lowered to a gcall * (to BUILT_IN_TM_START).

     Helper method to get it as a gtransaction *, with code-checking
     in a checked-build.  */

  gtransaction *
  get_transaction_stmt () const
  {
    return as_a <gtransaction *> (transaction_stmt);
  }

public:

  /* Link to the next unnested transaction.  */
  struct tm_region *next;

  /* Link to the next inner transaction.  */
  struct tm_region *inner;

  /* Link to the next outer transaction.  */
  struct tm_region *outer;

  /* The GIMPLE_TRANSACTION statement beginning this transaction.
     After TM_MARK, this gets replaced by a call to
     BUILT_IN_TM_START.
     Hence this will be either a gtransaction * or a gcall *.  */
  gimple *transaction_stmt;

  /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
     BUILT_IN_TM_START, this field is true if the transaction is an
     outer transaction.  */
  bool original_transaction_was_outer;

  /* Return value from BUILT_IN_TM_START.  */
  tree tm_state;

  /* The entry block to this region.  This will always be the first
     block of the body of the transaction.  */
  basic_block entry_block;

  /* The first block after an expanded call to _ITM_beginTransaction.  */
  basic_block restart_block;

  /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
     These blocks are still a part of the region (i.e., the border is
     inclusive).  Note that this set is only complete for paths in the CFG
     starting at ENTRY_BLOCK, and that there is no exit block recorded for
     the edge to the "over" label.  */
  bitmap exit_blocks;

  /* The set of all blocks that have a TM_IRREVOCABLE call.  */
  bitmap irr_blocks;
};

/* True if there are pending edge statements to be committed for the
   current function being scanned in the tmmark pass.  */
bool pending_edge_inserts_p;

static struct tm_region *all_tm_regions;
static bitmap_obstack tm_obstack;

/* A subroutine of tm_region_init.  Record the existence of the
   GIMPLE_TRANSACTION statement in a tree of tm_region elements.  */

static struct tm_region *
tm_region_init_0 (struct tm_region *outer, basic_block bb,
		  gtransaction *stmt)
{
  struct tm_region *region;

  region = (struct tm_region *)
    obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));

  if (outer)
    {
      region->next = outer->inner;
      outer->inner = region;
    }
  else
    {
      region->next = all_tm_regions;
      all_tm_regions = region;
    }
  region->inner = NULL;
  region->outer = outer;

  region->transaction_stmt = stmt;
  region->original_transaction_was_outer = false;
  region->tm_state = NULL;

  /* There are either one or two edges out of the block containing
     the GIMPLE_TRANSACTION, one to the actual region and one to the
     "over" label if the region contains an abort.  The former will
     always be the one marked FALLTHRU.  */
  region->entry_block = FALLTHRU_EDGE (bb)->dest;

  region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
  region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

  return region;
}

/* A subroutine of tm_region_init.  Record all the exit and
   irrevocable blocks in BB into the region's exit_blocks and
   irr_blocks bitmaps.  Returns the new region being scanned.  */

static struct tm_region *
tm_region_init_1 (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple *g;

  if (!region
      || (!region->irr_blocks && !region->exit_blocks))
    return region;

  /* Check to see if this is the end of a region by seeing if it
     contains a call to __builtin_tm_commit{,_eh}.  Note that the
     outermost region for DECL_IS_TM_CLONE need not collect this.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_CALL)
	{
	  tree fn = gimple_call_fndecl (g);
	  if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
	    {
	      if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
		   || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
		  && region->exit_blocks)
		{
		  bitmap_set_bit (region->exit_blocks, bb->index);
		  region = region->outer;
		  break;
		}
	      if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
		bitmap_set_bit (region->irr_blocks, bb->index);
	    }
	}
    }

  return region;
}

/* Collect all of the transaction regions within the current function
   and record them in ALL_TM_REGIONS.  The REGION parameter may specify
   an "outermost" region for use by tm clones.  */

static void
tm_region_init (struct tm_region *region)
{
  gimple *g;
  edge_iterator ei;
  edge e;
  basic_block bb;
  auto_vec<basic_block> queue;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
  struct tm_region *old_region;
  auto_vec<tm_region *> bb_regions;

  /* We could store this information in bb->aux, but we may get called
     through get_all_tm_blocks() from another pass that may be already
     using bb->aux.  */
  bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun));

  all_tm_regions = region;
  bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  queue.safe_push (bb);
  bitmap_set_bit (visited_blocks, bb->index);
  bb_regions[bb->index] = region;
  do
    {
      bb = queue.pop ();
      region = bb_regions[bb->index];
      bb_regions[bb->index] = NULL;

      /* Record exit and irrevocable blocks.  */
      region = tm_region_init_1 (region, bb);

      /* Check for the last statement in the block beginning a new region.  */
      g = last_stmt (bb);
      old_region = region;
      if (g)
	if (gtransaction *trans_stmt = dyn_cast <gtransaction *> (g))
	  region = tm_region_init_0 (region, bb, trans_stmt);

      /* Process subsequent blocks.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    queue.safe_push (e->dest);

	    /* If the current block started a new region, make sure that only
	       the entry block of the new region is associated with this
	       region.  Other successors are still part of the old region.  */
	    if (old_region != region && e->dest != region->entry_block)
	      bb_regions[e->dest->index] = old_region;
	    else
	      bb_regions[e->dest->index] = region;
	  }
    }
  while (!queue.is_empty ());
  BITMAP_FREE (visited_blocks);
}

2091 /* The "gate" function for all transactional memory expansion and optimization
2092 passes. We collect region information for each top-level transaction, and
2093 if we don't find any, we skip all of the TM passes. Each region will have
2094 all of the exit blocks recorded, and the originating statement. */
2096 static bool
2097 gate_tm_init (void)
2099 if (!flag_tm)
2100 return false;
2102 calculate_dominance_info (CDI_DOMINATORS);
2103 bitmap_obstack_initialize (&tm_obstack);
2105 /* If the function is a TM_CLONE, then the entire function is the region. */
2106 if (decl_is_tm_clone (current_function_decl))
2108 struct tm_region *region = (struct tm_region *)
2109 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
2110 memset (region, 0, sizeof (*region));
2111 region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2112 /* For a clone, the entire function is the region. But even if
2113 we don't need to record any exit blocks, we may need to
2114 record irrevocable blocks. */
2115 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
2117 tm_region_init (region);
2119 else
2121 tm_region_init (NULL);
2123 /* If we didn't find any regions, cleanup and skip the whole tree
2124 of tm-related optimizations. */
2125 if (all_tm_regions == NULL)
2127 bitmap_obstack_release (&tm_obstack);
2128 return false;
2132 return true;
2135 namespace {
2137 const pass_data pass_data_tm_init =
2139 GIMPLE_PASS, /* type */
2140 "*tminit", /* name */
2141 OPTGROUP_NONE, /* optinfo_flags */
2142 TV_TRANS_MEM, /* tv_id */
2143 ( PROP_ssa | PROP_cfg ), /* properties_required */
2144 0, /* properties_provided */
2145 0, /* properties_destroyed */
2146 0, /* todo_flags_start */
2147 0, /* todo_flags_finish */
2150 class pass_tm_init : public gimple_opt_pass
2152 public:
2153 pass_tm_init (gcc::context *ctxt)
2154 : gimple_opt_pass (pass_data_tm_init, ctxt)
2157 /* opt_pass methods: */
2158 virtual bool gate (function *) { return gate_tm_init (); }
2160 }; // class pass_tm_init
2162 } // anon namespace
2164 gimple_opt_pass *
2165 make_pass_tm_init (gcc::context *ctxt)
2167 return new pass_tm_init (ctxt);
2170 /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
2171 represented by STATE. */
2173 static inline void
2174 transaction_subcode_ior (struct tm_region *region, unsigned flags)
2176 if (region && region->transaction_stmt)
2178 gtransaction *transaction_stmt = region->get_transaction_stmt ();
2179 flags |= gimple_transaction_subcode (transaction_stmt);
2180 gimple_transaction_set_subcode (transaction_stmt, flags);
2184 /* Construct a memory load in a transactional context. Return the
2185 gimple statement performing the load, or NULL if there is no
2186 TM_LOAD builtin of the appropriate size to do the load.
2188 LOC is the location to use for the new statement(s). */
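/* As an illustrative example (the actual runtime symbols come from
   gtm-builtins.def): a transactional load of a 4-byte scalar

     x = *p;

   becomes a call to the BUILT_IN_TM_LOAD_4 decl, roughly

     x = _ITM_RU4 (p);

   whereas a type with no matching builtin makes this function return
   NULL, and the caller falls back to a memmove-style copy.  */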
2190 static gcall *
2191 build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2193 tree t, type = TREE_TYPE (rhs);
2194 gcall *gcall;
2196 built_in_function code;
2197 if (type == float_type_node)
2198 code = BUILT_IN_TM_LOAD_FLOAT;
2199 else if (type == double_type_node)
2200 code = BUILT_IN_TM_LOAD_DOUBLE;
2201 else if (type == long_double_type_node)
2202 code = BUILT_IN_TM_LOAD_LDOUBLE;
2203 else
2205 if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
2206 return NULL;
2207 unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
2209 if (TREE_CODE (type) == VECTOR_TYPE)
2211 switch (type_size)
2213 case 64:
2214 code = BUILT_IN_TM_LOAD_M64;
2215 break;
2216 case 128:
2217 code = BUILT_IN_TM_LOAD_M128;
2218 break;
2219 case 256:
2220 code = BUILT_IN_TM_LOAD_M256;
2221 break;
2222 default:
2223 goto unhandled_vec;
2225 if (!builtin_decl_explicit_p (code))
2226 goto unhandled_vec;
2228 else
2230 unhandled_vec:
2231 switch (type_size)
2233 case 8:
2234 code = BUILT_IN_TM_LOAD_1;
2235 break;
2236 case 16:
2237 code = BUILT_IN_TM_LOAD_2;
2238 break;
2239 case 32:
2240 code = BUILT_IN_TM_LOAD_4;
2241 break;
2242 case 64:
2243 code = BUILT_IN_TM_LOAD_8;
2244 break;
2245 default:
2246 return NULL;
2251 tree decl = builtin_decl_explicit (code);
2252 gcc_assert (decl);
2254 t = gimplify_addr (gsi, rhs);
2255 gcall = gimple_build_call (decl, 1, t);
2256 gimple_set_location (gcall, loc);
2258 t = TREE_TYPE (TREE_TYPE (decl));
2259 if (useless_type_conversion_p (type, t))
2261 gimple_call_set_lhs (gcall, lhs);
2262 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2264 else
2266 gimple *g;
2267 tree temp;
2269 temp = create_tmp_reg (t);
2270 gimple_call_set_lhs (gcall, temp);
2271 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2273 t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
2274 g = gimple_build_assign (lhs, t);
2275 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2278 return gcall;
2282 /* Similarly for storing TYPE in a transactional context. */
2284 static gcall *
2285 build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2287 tree t, fn, type = TREE_TYPE (rhs), simple_type;
2288 gcall *gcall;
2290 built_in_function code;
2291 if (type == float_type_node)
2292 code = BUILT_IN_TM_STORE_FLOAT;
2293 else if (type == double_type_node)
2294 code = BUILT_IN_TM_STORE_DOUBLE;
2295 else if (type == long_double_type_node)
2296 code = BUILT_IN_TM_STORE_LDOUBLE;
2297 else
2299 if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
2300 return NULL;
2301 unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
2303 if (TREE_CODE (type) == VECTOR_TYPE)
2305 switch (type_size)
2307 case 64:
2308 code = BUILT_IN_TM_STORE_M64;
2309 break;
2310 case 128:
2311 code = BUILT_IN_TM_STORE_M128;
2312 break;
2313 case 256:
2314 code = BUILT_IN_TM_STORE_M256;
2315 break;
2316 default:
2317 goto unhandled_vec;
2319 if (!builtin_decl_explicit_p (code))
2320 goto unhandled_vec;
2322 else
2324 unhandled_vec:
2325 switch (type_size)
2327 case 8:
2328 code = BUILT_IN_TM_STORE_1;
2329 break;
2330 case 16:
2331 code = BUILT_IN_TM_STORE_2;
2332 break;
2333 case 32:
2334 code = BUILT_IN_TM_STORE_4;
2335 break;
2336 case 64:
2337 code = BUILT_IN_TM_STORE_8;
2338 break;
2339 default:
2340 return NULL;
2345 fn = builtin_decl_explicit (code);
2346 gcc_assert (fn);
2348 simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
2350 if (TREE_CODE (rhs) == CONSTRUCTOR)
2352 /* Handle the easy initialization to zero. */
2353 if (!CONSTRUCTOR_ELTS (rhs))
2354 rhs = build_int_cst (simple_type, 0);
2355 else
2357 /* ...otherwise punt to the caller and probably use
2358 BUILT_IN_TM_MEMMOVE, because we can't wrap a
2359 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
2360 valid gimple. */
2361 return NULL;
2364 else if (!useless_type_conversion_p (simple_type, type))
2366 gimple *g;
2367 tree temp;
2369 temp = create_tmp_reg (simple_type);
2370 t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
2371 g = gimple_build_assign (temp, t);
2372 gimple_set_location (g, loc);
2373 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2375 rhs = temp;
2378 t = gimplify_addr (gsi, lhs);
2379 gcall = gimple_build_call (fn, 2, t, rhs);
2380 gimple_set_location (gcall, loc);
2381 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2383 return gcall;
2387 /* Expand an assignment statement into transactional builtins. */
2389 static void
2390 expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
2392 gimple *stmt = gsi_stmt (*gsi);
2393 location_t loc = gimple_location (stmt);
2394 tree lhs = gimple_assign_lhs (stmt);
2395 tree rhs = gimple_assign_rhs1 (stmt);
2396 bool store_p = requires_barrier (region->entry_block, lhs, NULL);
2397 bool load_p = requires_barrier (region->entry_block, rhs, NULL);
2398 gimple *gcall = NULL;
2400 if (!load_p && !store_p)
2402 /* Add thread private addresses to log if applicable. */
2403 requires_barrier (region->entry_block, lhs, stmt);
2404 gsi_next (gsi);
2405 return;
2408 if (load_p)
2409 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2410 if (store_p)
2411 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2413 // Remove original load/store statement.
2414 gsi_remove (gsi, true);
2416 // Attempt to use a simple load/store helper function.
2417 if (load_p && !store_p)
2418 gcall = build_tm_load (loc, lhs, rhs, gsi);
2419 else if (store_p && !load_p)
2420 gcall = build_tm_store (loc, lhs, rhs, gsi);
2422 // If gcall has not been set, then we do not have a simple helper
2423 // function available for the type. This may be true of larger
2424 // structures, vectors, and non-standard float types.
2425 if (!gcall)
2427 tree lhs_addr, rhs_addr, ltmp = NULL, copy_fn;
2429 // If this is a type that we couldn't handle above, but it's
2430 // in a register, we must spill it to memory for the copy.
2431 if (is_gimple_reg (lhs))
2433 ltmp = create_tmp_var (TREE_TYPE (lhs));
2434 lhs_addr = build_fold_addr_expr (ltmp);
2436 else
2437 lhs_addr = gimplify_addr (gsi, lhs);
2438 if (is_gimple_reg (rhs))
2440 tree rtmp = create_tmp_var (TREE_TYPE (rhs));
2441 rhs_addr = build_fold_addr_expr (rtmp);
2442 gcall = gimple_build_assign (rtmp, rhs);
2443 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2445 else
2446 rhs_addr = gimplify_addr (gsi, rhs);
2448 // Choose the appropriate memory transfer function.
2449 if (load_p && store_p)
2451 // ??? Figure out if there's any possible overlap between
2452 // the LHS and the RHS and if not, use MEMCPY.
2453 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
2455 else if (load_p)
2457 // Note that the store is non-transactional and cannot overlap.
2458 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RTWN);
2460 else
2462 // Note that the load is non-transactional and cannot overlap.
2463 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RNWT);
2466 gcall = gimple_build_call (copy_fn, 3, lhs_addr, rhs_addr,
2467 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
2468 gimple_set_location (gcall, loc);
2469 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2471 if (ltmp)
2473 gcall = gimple_build_assign (lhs, ltmp);
2474 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2478 // Now that we have the load/store in its instrumented form, add
2479 // thread private addresses to the log if applicable.
2480 if (!store_p)
2481 requires_barrier (region->entry_block, lhs, gcall);
2485 /* Expand a call statement as appropriate for a transaction. That is,
2486 either verify that the call does not affect the transaction, or
2487 redirect the call to a clone that handles transactions, or change
2488 the transaction state to IRREVOCABLE. Return true if the call is
2489 one of the builtins that end a transaction. */
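/* For example (illustrative): a direct call to a transaction_safe
   function needs nothing beyond the GTMA_HAVE_STORE bookkeeping; an
   indirect call through an unknown function pointer sets
   GTMA_MAY_ENTER_IRREVOCABLE; and a call to BUILT_IN_TM_ABORT sets
   GTMA_HAVE_ABORT and makes this function return true.  */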
2491 static bool
2492 expand_call_tm (struct tm_region *region,
2493 gimple_stmt_iterator *gsi)
2495 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
2496 tree lhs = gimple_call_lhs (stmt);
2497 tree fn_decl;
2498 struct cgraph_node *node;
2499 bool retval = false;
2501 fn_decl = gimple_call_fndecl (stmt);
2503 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
2504 || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
2505 transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
2506 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
2507 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2509 if (is_tm_pure_call (stmt))
2510 return false;
2512 if (fn_decl)
2513 retval = is_tm_ending_fndecl (fn_decl);
2514 if (!retval)
2516 /* Assume all non-const/pure calls write to memory, except
2517 transaction ending builtins. */
2518 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2521 /* For indirect calls, we already generated a call into the runtime. */
2522 if (!fn_decl)
2524 tree fn = gimple_call_fn (stmt);
2526 /* We are guaranteed never to go irrevocable on a safe or pure
2527 call, and the pure call was handled above. */
2528 if (is_tm_safe (fn))
2529 return false;
2530 else
2531 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2533 return false;
2536 node = cgraph_node::get (fn_decl);
2537 /* All calls should have cgraph here. */
2538 if (!node)
2540 /* We can have a nodeless call here if some pass after IPA-tm
2541 added uninstrumented calls. For example, loop distribution
2542 can transform certain loop constructs into __builtin_mem*
2543 calls. In this case, see if we have a suitable TM
2544 replacement and fill in the gaps. */
2545 gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
2546 enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
2547 gcc_assert (code == BUILT_IN_MEMCPY
2548 || code == BUILT_IN_MEMMOVE
2549 || code == BUILT_IN_MEMSET);
2551 tree repl = find_tm_replacement_function (fn_decl);
2552 if (repl)
2554 gimple_call_set_fndecl (stmt, repl);
2555 update_stmt (stmt);
2556 node = cgraph_node::create (repl);
2557 node->local.tm_may_enter_irr = false;
2558 return expand_call_tm (region, gsi);
2560 gcc_unreachable ();
2562 if (node->local.tm_may_enter_irr)
2563 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2565 if (is_tm_abort (fn_decl))
2567 transaction_subcode_ior (region, GTMA_HAVE_ABORT);
2568 return true;
2571 /* Instrument the store if needed.
2573 If the assignment happens inside the function call (return slot
2574 optimization), there is no instrumentation to be done, since
2575 the callee should have done the right thing. */
2576 if (lhs && requires_barrier (region->entry_block, lhs, stmt)
2577 && !gimple_call_return_slot_opt_p (stmt))
2579 tree tmp = create_tmp_reg (TREE_TYPE (lhs));
2580 location_t loc = gimple_location (stmt);
2581 edge fallthru_edge = NULL;
2582 gassign *assign_stmt;
2584 /* Remember if the call was going to throw. */
2585 if (stmt_can_throw_internal (stmt))
2587 edge_iterator ei;
2588 edge e;
2589 basic_block bb = gimple_bb (stmt);
2591 FOR_EACH_EDGE (e, ei, bb->succs)
2592 if (e->flags & EDGE_FALLTHRU)
2594 fallthru_edge = e;
2595 break;
2599 gimple_call_set_lhs (stmt, tmp);
2600 update_stmt (stmt);
2601 assign_stmt = gimple_build_assign (lhs, tmp);
2602 gimple_set_location (assign_stmt, loc);
2604 /* We cannot throw in the middle of a BB. If the call was going
2605 to throw, place the instrumentation on the fallthru edge, so
2606 the call remains the last statement in the block. */
2607 if (fallthru_edge)
2609 gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt);
2610 gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
2611 expand_assign_tm (region, &fallthru_gsi);
2612 gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
2613 pending_edge_inserts_p = true;
2615 else
2617 gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING);
2618 expand_assign_tm (region, gsi);
2621 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2624 return retval;
2628 /* Expand all statements in BB as appropriate for being inside
2629 a transaction. */
2631 static void
2632 expand_block_tm (struct tm_region *region, basic_block bb)
2634 gimple_stmt_iterator gsi;
2636 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2638 gimple *stmt = gsi_stmt (gsi);
2639 switch (gimple_code (stmt))
2641 case GIMPLE_ASSIGN:
2642 /* Only memory reads/writes need to be instrumented. */
2643 if (gimple_assign_single_p (stmt)
2644 && !gimple_clobber_p (stmt))
2646 expand_assign_tm (region, &gsi);
2647 continue;
2649 break;
2651 case GIMPLE_CALL:
2652 if (expand_call_tm (region, &gsi))
2653 return;
2654 break;
2656 case GIMPLE_ASM:
2657 gcc_unreachable ();
2659 default:
2660 break;
2662 if (!gsi_end_p (gsi))
2663 gsi_next (&gsi);
2667 /* Return the list of basic-blocks in REGION.
2669 STOP_AT_IRREVOCABLE_P is true if caller is uninterested in blocks
2670 following a TM_IRREVOCABLE call.
2672 INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the
2673 uninstrumented code path blocks in the list of basic blocks
2674 returned, false otherwise. */
2676 static vec<basic_block>
2677 get_tm_region_blocks (basic_block entry_block,
2678 bitmap exit_blocks,
2679 bitmap irr_blocks,
2680 bitmap all_region_blocks,
2681 bool stop_at_irrevocable_p,
2682 bool include_uninstrumented_p = true)
2684 vec<basic_block> bbs = vNULL;
2685 unsigned i;
2686 edge e;
2687 edge_iterator ei;
2688 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2690 i = 0;
2691 bbs.safe_push (entry_block);
2692 bitmap_set_bit (visited_blocks, entry_block->index);
2696 basic_block bb = bbs[i++];
2698 if (exit_blocks &&
2699 bitmap_bit_p (exit_blocks, bb->index))
2700 continue;
2702 if (stop_at_irrevocable_p
2703 && irr_blocks
2704 && bitmap_bit_p (irr_blocks, bb->index))
2705 continue;
2707 FOR_EACH_EDGE (e, ei, bb->succs)
2708 if ((include_uninstrumented_p
2709 || !(e->flags & EDGE_TM_UNINSTRUMENTED))
2710 && !bitmap_bit_p (visited_blocks, e->dest->index))
2712 bitmap_set_bit (visited_blocks, e->dest->index);
2713 bbs.safe_push (e->dest);
2716 while (i < bbs.length ());
2718 if (all_region_blocks)
2719 bitmap_ior_into (all_region_blocks, visited_blocks);
2721 BITMAP_FREE (visited_blocks);
2722 return bbs;
2725 // Callback data for collect_bb2reg.
2726 struct bb2reg_stuff
2728 vec<tm_region *> *bb2reg;
2729 bool include_uninstrumented_p;
2732 // Callback for expand_regions, collect innermost region data for each bb.
2733 static void *
2734 collect_bb2reg (struct tm_region *region, void *data)
2736 struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
2737 vec<tm_region *> *bb2reg = stuff->bb2reg;
2738 vec<basic_block> queue;
2739 unsigned int i;
2740 basic_block bb;
2742 queue = get_tm_region_blocks (region->entry_block,
2743 region->exit_blocks,
2744 region->irr_blocks,
2745 NULL,
2746 /*stop_at_irr_p=*/true,
2747 stuff->include_uninstrumented_p);
2749 // We expect expand_region to perform a post-order traversal of the region
2750 // tree. Therefore the last region seen for any bb is the innermost.
2751 FOR_EACH_VEC_ELT (queue, i, bb)
2752 (*bb2reg)[bb->index] = region;
2754 queue.release ();
2755 return NULL;
2758 // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
2759 // which a basic block belongs. Note that we only consider the instrumented
2760 // code paths for the region; the uninstrumented code paths are ignored if
2761 // INCLUDE_UNINSTRUMENTED_P is false.
2763 // ??? This data is very similar to the bb_regions array that is collected
2764 // during tm_region_init. Or, rather, this data is similar to what could
2765 // be used within tm_region_init. The actual computation in tm_region_init
2766 // begins and ends with bb_regions entirely full of NULL pointers, due to
2767 // the way in which pointers are swapped in and out of the array.
2769 // ??? Our callers expect that blocks are not shared between transactions.
2770 // When the optimizers get too smart, and blocks are shared, then during
2771 // the tm_mark phase we'll add log entries to only one of the two transactions,
2772 // and in the tm_edge phase we'll add edges to the CFG that create invalid
2773 // cycles. The symptom being SSA defs that do not dominate their uses.
2774 // Note that the optimizers were locally correct with their transformation,
2775 // as we have no info within the program that suggests that the blocks cannot
2776 // be shared.
2778 // ??? There is currently a hack inside tree-ssa-pre.c to work around the
2779 // only known instance of this block sharing.
2781 static vec<tm_region *>
2782 get_bb_regions_instrumented (bool traverse_clones,
2783 bool include_uninstrumented_p)
2785 unsigned n = last_basic_block_for_fn (cfun);
2786 struct bb2reg_stuff stuff;
2787 vec<tm_region *> ret;
2789 ret.create (n);
2790 ret.safe_grow_cleared (n);
2791 stuff.bb2reg = &ret;
2792 stuff.include_uninstrumented_p = include_uninstrumented_p;
2793 expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);
2795 return ret;
2798 /* Set the IN_TRANSACTION for all gimple statements that appear in a
2799 transaction. */
2801 void
2802 compute_transaction_bits (void)
2804 struct tm_region *region;
2805 vec<basic_block> queue;
2806 unsigned int i;
2807 basic_block bb;
2809 /* ?? Perhaps we need to abstract gate_tm_init further, because we
2810 certainly don't need it to calculate CDI_DOMINATOR info. */
2811 gate_tm_init ();
2813 FOR_EACH_BB_FN (bb, cfun)
2814 bb->flags &= ~BB_IN_TRANSACTION;
2816 for (region = all_tm_regions; region; region = region->next)
2818 queue = get_tm_region_blocks (region->entry_block,
2819 region->exit_blocks,
2820 region->irr_blocks,
2821 NULL,
2822 /*stop_at_irr_p=*/true);
2823 for (i = 0; queue.iterate (i, &bb); ++i)
2824 bb->flags |= BB_IN_TRANSACTION;
2825 queue.release ();
2828 if (all_tm_regions)
2829 bitmap_obstack_release (&tm_obstack);
2832 /* Replace the GIMPLE_TRANSACTION in this region with the corresponding
2833 call to BUILT_IN_TM_START. */
2835 static void *
2836 expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2838 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2839 basic_block transaction_bb = gimple_bb (region->transaction_stmt);
2840 tree tm_state = region->tm_state;
2841 tree tm_state_type = TREE_TYPE (tm_state);
2842 edge abort_edge = NULL;
2843 edge inst_edge = NULL;
2844 edge uninst_edge = NULL;
2845 edge fallthru_edge = NULL;
2847 // Identify the various successors of the transaction start.
2849 edge_iterator i;
2850 edge e;
2851 FOR_EACH_EDGE (e, i, transaction_bb->succs)
2853 if (e->flags & EDGE_TM_ABORT)
2854 abort_edge = e;
2855 else if (e->flags & EDGE_TM_UNINSTRUMENTED)
2856 uninst_edge = e;
2857 else
2858 inst_edge = e;
2859 if (e->flags & EDGE_FALLTHRU)
2860 fallthru_edge = e;
2864 /* ??? There are plenty of bits here we're not computing. */
2866 int subcode = gimple_transaction_subcode (region->get_transaction_stmt ());
2867 int flags = 0;
2868 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2869 flags |= PR_DOESGOIRREVOCABLE;
2870 if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
2871 flags |= PR_HASNOIRREVOCABLE;
2872 /* If the transaction does not have an abort in lexical scope and is not
2873 marked as an outer transaction, then it will never abort. */
2874 if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
2875 flags |= PR_HASNOABORT;
2876 if ((subcode & GTMA_HAVE_STORE) == 0)
2877 flags |= PR_READONLY;
2878 if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
2879 flags |= PR_INSTRUMENTEDCODE;
2880 if (uninst_edge)
2881 flags |= PR_UNINSTRUMENTEDCODE;
2882 if (subcode & GTMA_IS_OUTER)
2883 region->original_transaction_was_outer = true;
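// For instance, a transaction with no stores, no lexical abort, and
// only an instrumented code path would pass PR_READONLY
// | PR_HASNOABORT | PR_INSTRUMENTEDCODE (illustrative; the exact
// mask depends on the subcode bits tested above).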
2884 tree t = build_int_cst (tm_state_type, flags);
2885 gcall *call = gimple_build_call (tm_start, 1, t);
2886 gimple_call_set_lhs (call, tm_state);
2887 gimple_set_location (call, gimple_location (region->transaction_stmt));
2889 // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
2890 gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
2891 gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
2892 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2893 gsi_remove (&gsi, true);
2894 region->transaction_stmt = call;
2897 // Generate log saves.
2898 if (!tm_log_save_addresses.is_empty ())
2899 tm_log_emit_saves (region->entry_block, transaction_bb);
2901 // In the beginning, we've no tests to perform on transaction restart.
2902 // Note that after this point, transaction_bb becomes the "most recent
2903 // block containing tests for the transaction".
2904 region->restart_block = region->entry_block;
2906 // Generate log restores.
2907 if (!tm_log_save_addresses.is_empty ())
2909 basic_block test_bb = create_empty_bb (transaction_bb);
2910 basic_block code_bb = create_empty_bb (test_bb);
2911 basic_block join_bb = create_empty_bb (code_bb);
2912 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2913 add_bb_to_loop (code_bb, transaction_bb->loop_father);
2914 add_bb_to_loop (join_bb, transaction_bb->loop_father);
2915 if (region->restart_block == region->entry_block)
2916 region->restart_block = test_bb;
2918 tree t1 = create_tmp_reg (tm_state_type);
2919 tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
2920 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2921 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2922 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2924 t2 = build_int_cst (tm_state_type, 0);
2925 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2926 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2928 tm_log_emit_restores (region->entry_block, code_bb);
2930 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2931 edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
2932 edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
2933 redirect_edge_pred (fallthru_edge, join_bb);
2935 join_bb->frequency = test_bb->frequency = transaction_bb->frequency;
2936 join_bb->count = test_bb->count = transaction_bb->count;
2938 ei->probability = profile_probability::always ();
2939 et->probability = profile_probability::likely ();
2940 ef->probability = profile_probability::unlikely ();
2941 et->count = test_bb->count.apply_probability (et->probability);
2942 ef->count = test_bb->count.apply_probability (ef->probability);
2944 code_bb->count = et->count;
2945 code_bb->frequency = EDGE_FREQUENCY (et);
2947 transaction_bb = join_bb;
2950 // If we have an ABORT edge, create a test to perform the abort.
2951 if (abort_edge)
2953 basic_block test_bb = create_empty_bb (transaction_bb);
2954 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2955 if (region->restart_block == region->entry_block)
2956 region->restart_block = test_bb;
2958 tree t1 = create_tmp_reg (tm_state_type);
2959 tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
2960 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2961 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2962 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2964 t2 = build_int_cst (tm_state_type, 0);
2965 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2966 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2968 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2969 test_bb->frequency = transaction_bb->frequency;
2970 test_bb->count = transaction_bb->count;
2971 ei->probability = profile_probability::always ();
2973 // Not the abort edge.  If both are live, choose one at random, as
2974 // we'll be fixing that up below.
2975 redirect_edge_pred (fallthru_edge, test_bb);
2976 fallthru_edge->flags = EDGE_FALSE_VALUE;
2977 fallthru_edge->probability = profile_probability::very_likely ();
2978 fallthru_edge->count = test_bb->count.apply_probability
2979 (fallthru_edge->probability);
2981 // Abort/over edge.
2982 redirect_edge_pred (abort_edge, test_bb);
2983 abort_edge->flags = EDGE_TRUE_VALUE;
2984 abort_edge->probability = profile_probability::unlikely ();
2985 abort_edge->count = test_bb->count.apply_probability
2986 (abort_edge->probability);
2988 transaction_bb = test_bb;
2991 // If we have both instrumented and uninstrumented code paths, select one.
2992 if (inst_edge && uninst_edge)
2994 basic_block test_bb = create_empty_bb (transaction_bb);
2995 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2996 if (region->restart_block == region->entry_block)
2997 region->restart_block = test_bb;
2999 tree t1 = create_tmp_reg (tm_state_type);
3000 tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);
3002 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
3003 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
3004 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3006 t2 = build_int_cst (tm_state_type, 0);
3007 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
3008 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3010 // Create the edge into test_bb first, as we want to copy values
3011 // out of the fallthru edge.
3012 edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
3013 e->probability = fallthru_edge->probability;
3014 test_bb->count = e->count = fallthru_edge->count;
3015 test_bb->frequency = EDGE_FREQUENCY (e);
3017 // Now update the edges to the inst/uninst implementations.
3018 // For now assume that the paths are equally likely.  When using HTM,
3019 // we'll try the uninst path first and fall back to the inst path if HTM
3020 // buffers are exceeded. Without HTM we start with the inst path and
3021 // use the uninst path when falling back to serial mode.
3022 redirect_edge_pred (inst_edge, test_bb);
3023 inst_edge->flags = EDGE_FALSE_VALUE;
3024 inst_edge->probability = profile_probability::even ();
3025 inst_edge->count
3026 = test_bb->count.apply_probability (inst_edge->probability);
3028 redirect_edge_pred (uninst_edge, test_bb);
3029 uninst_edge->flags = EDGE_TRUE_VALUE;
3030 uninst_edge->probability = profile_probability::even ();
3031 uninst_edge->count
3032 = test_bb->count.apply_probability (uninst_edge->probability);
3035 // If we have no previous special cases, and we have PHIs at the beginning
3036 // of the atomic region, this means we have a loop at the beginning of the
3037 // atomic region that shares the first block.  This can cause problems when
3038 // the transaction-restart abnormal edges are added in the tm_edges pass.
3039 // Solve this by adding a new empty block to receive the abnormal edges.
3040 if (region->restart_block == region->entry_block
3041 && phi_nodes (region->entry_block))
3043 basic_block empty_bb = create_empty_bb (transaction_bb);
3044 region->restart_block = empty_bb;
3045 add_bb_to_loop (empty_bb, transaction_bb->loop_father);
3047 redirect_edge_pred (fallthru_edge, empty_bb);
3048 make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
3051 return NULL;
3054 /* Generate the temporary to be used for the return value of
3055 BUILT_IN_TM_START. */
3057 static void *
3058 generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
3060 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
3061 region->tm_state =
3062 create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
3064 // Reset the subcode, post optimizations. We'll fill this in
3065 // again as we process blocks.
3066 if (region->exit_blocks)
3068 gtransaction *transaction_stmt = region->get_transaction_stmt ();
3069 unsigned int subcode = gimple_transaction_subcode (transaction_stmt);
3071 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
3072 subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
3073 | GTMA_MAY_ENTER_IRREVOCABLE
3074 | GTMA_HAS_NO_INSTRUMENTATION);
3075 else
3076 subcode &= GTMA_DECLARATION_MASK;
3077 gimple_transaction_set_subcode (transaction_stmt, subcode);
3080 return NULL;
3083 // Propagate flags from inner transactions outwards.
3084 static void
3085 propagate_tm_flags_out (struct tm_region *region)
3087 if (region == NULL)
3088 return;
3089 propagate_tm_flags_out (region->inner);
3091 if (region->outer && region->outer->transaction_stmt)
3093 unsigned s
3094 = gimple_transaction_subcode (region->get_transaction_stmt ());
3095 s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
3096 | GTMA_MAY_ENTER_IRREVOCABLE);
3097 s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ());
3098 gimple_transaction_set_subcode (region->outer->get_transaction_stmt (),
3102 propagate_tm_flags_out (region->next);
3105 /* Entry point to the MARK phase of TM expansion. Here we replace
3106 transactional memory statements with calls to builtins, and function
3107 calls with their transactional clones (if available). But we don't
3108 yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */
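/* A minimal sketch of the effect, assuming 4-byte accesses (the
   actual builtins chosen depend on the access size and type):

     __transaction_atomic { x = *p; *q = y; }

   has its body rewritten along the lines of

     x = BUILT_IN_TM_LOAD_4 (p);
     BUILT_IN_TM_STORE_4 (q, y);

   with the GIMPLE_TRANSACTION itself later replaced by a call to
   BUILT_IN_TM_START in expand_transaction.  */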
3110 static unsigned int
3111 execute_tm_mark (void)
3113 pending_edge_inserts_p = false;
3115 expand_regions (all_tm_regions, generate_tm_state, NULL,
3116 /*traverse_clones=*/true);
3118 tm_log_init ();
3120 vec<tm_region *> bb_regions
3121 = get_bb_regions_instrumented (/*traverse_clones=*/true,
3122 /*include_uninstrumented_p=*/false);
3123 struct tm_region *r;
3124 unsigned i;
3126 // Expand memory operations into calls into the runtime.
3127 // This collects log entries as well.
3128 FOR_EACH_VEC_ELT (bb_regions, i, r)
3130 if (r != NULL)
3132 if (r->transaction_stmt)
3134 unsigned sub
3135 = gimple_transaction_subcode (r->get_transaction_stmt ());
3137 /* If we're sure to go irrevocable, there won't be
3138 anything to expand, since the run-time will go
3139 irrevocable right away. */
3140 if (sub & GTMA_DOES_GO_IRREVOCABLE
3141 && sub & GTMA_MAY_ENTER_IRREVOCABLE)
3142 continue;
3144 expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i));
3148 bb_regions.release ();
3150 // Propagate flags from inner transactions outwards.
3151 propagate_tm_flags_out (all_tm_regions);
3153 // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
3154 expand_regions (all_tm_regions, expand_transaction, NULL,
3155 /*traverse_clones=*/false);
3157 tm_log_emit ();
3158 tm_log_delete ();
3160 if (pending_edge_inserts_p)
3161 gsi_commit_edge_inserts ();
3162 free_dominance_info (CDI_DOMINATORS);
3163 return 0;
3166 namespace {
3168 const pass_data pass_data_tm_mark =
3170 GIMPLE_PASS, /* type */
3171 "tmmark", /* name */
3172 OPTGROUP_NONE, /* optinfo_flags */
3173 TV_TRANS_MEM, /* tv_id */
3174 ( PROP_ssa | PROP_cfg ), /* properties_required */
3175 0, /* properties_provided */
3176 0, /* properties_destroyed */
3177 0, /* todo_flags_start */
3178 TODO_update_ssa, /* todo_flags_finish */
3181 class pass_tm_mark : public gimple_opt_pass
3183 public:
3184 pass_tm_mark (gcc::context *ctxt)
3185 : gimple_opt_pass (pass_data_tm_mark, ctxt)
3188 /* opt_pass methods: */
3189 virtual unsigned int execute (function *) { return execute_tm_mark (); }
3191 }; // class pass_tm_mark
3193 } // anon namespace
3195 gimple_opt_pass *
3196 make_pass_tm_mark (gcc::context *ctxt)
3198 return new pass_tm_mark (ctxt);
3202 /* Create an abnormal edge from STMT at ITER, splitting the block
3203 as necessary. Adjust *PNEXT as needed for the split block. */
3205 static inline void
3206 split_bb_make_tm_edge (gimple *stmt, basic_block dest_bb,
3207 gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
3209 basic_block bb = gimple_bb (stmt);
3210 if (!gsi_one_before_end_p (iter))
3212 edge e = split_block (bb, stmt);
3213 *pnext = gsi_start_bb (e->dest);
3215 edge e = make_edge (bb, dest_bb, EDGE_ABNORMAL);
3216 if (e)
3218 e->probability = profile_probability::guessed_never ();
3219 e->count = profile_count::guessed_zero ();
3222 // Record the need for the edge for the benefit of the rtl passes.
3223 if (cfun->gimple_df->tm_restart == NULL)
3224 cfun->gimple_df->tm_restart
3225 = hash_table<tm_restart_hasher>::create_ggc (31);
3227 struct tm_restart_node dummy;
3228 dummy.stmt = stmt;
3229 dummy.label_or_list = gimple_block_label (dest_bb);
3231 tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy,
3232 INSERT);
3233 struct tm_restart_node *n = *slot;
3234 if (n == NULL)
3236 n = ggc_alloc<tm_restart_node> ();
3237 *n = dummy;
3239 else
3241 tree old = n->label_or_list;
3242 if (TREE_CODE (old) == LABEL_DECL)
3243 old = tree_cons (NULL, old, NULL);
3244 n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
3248 /* Split block BB as necessary for every builtin function we added, and
3249 wire up the abnormal back edges implied by the transaction restart. */
3251 static void
3252 expand_block_edges (struct tm_region *const region, basic_block bb)
3254 gimple_stmt_iterator gsi, next_gsi;
3256 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
3258 gimple *stmt = gsi_stmt (gsi);
3259 gcall *call_stmt;
3261 next_gsi = gsi;
3262 gsi_next (&next_gsi);
3264 // ??? Shouldn't we split for any non-pure, non-irrevocable function?
3265 call_stmt = dyn_cast <gcall *> (stmt);
3266 if ((!call_stmt)
3267 || (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0)
3268 continue;
3270 if (DECL_FUNCTION_CODE (gimple_call_fndecl (call_stmt))
3271 == BUILT_IN_TM_ABORT)
3273 // If we have a ``__transaction_cancel [[outer]]'', there is only
3274 // one abnormal edge: to the transaction marked OUTER.
3275 // All compiler-generated instances of BUILT_IN_TM_ABORT have a
3276 // constant argument, which we can examine here. Users invoking
3277 // TM_ABORT directly get what they deserve.
3278 tree arg = gimple_call_arg (call_stmt, 0);
3279 if (TREE_CODE (arg) == INTEGER_CST
3280 && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
3281 && !decl_is_tm_clone (current_function_decl))
3283 // Find the GTMA_IS_OUTER transaction.
3284 for (struct tm_region *o = region; o; o = o->outer)
3285 if (o->original_transaction_was_outer)
3287 split_bb_make_tm_edge (call_stmt, o->restart_block,
3288 gsi, &next_gsi);
3289 break;
3292 // Otherwise, the front-end should have semantically checked
3293 // outer aborts, but in either case the target region is not
3294 // within this function.
3295 continue;
3298 // Non-outer TM aborts have an abnormal edge to the inner-most
3299 // transaction, the one being aborted.
3300 split_bb_make_tm_edge (call_stmt, region->restart_block, gsi,
3301 &next_gsi);
3304 // All TM builtins have an abnormal edge to the outer-most transaction.
3305 // We never restart inner transactions.  For tm clones, we know a priori
3306 // that the outer-most transaction is outside the function.
3307 if (decl_is_tm_clone (current_function_decl))
3308 continue;
3310 if (cfun->gimple_df->tm_restart == NULL)
3311 cfun->gimple_df->tm_restart
3312 = hash_table<tm_restart_hasher>::create_ggc (31);
3314 // All TM builtins have an abnormal edge to the outer-most transaction.
3315 // We never restart inner transactions.
3316 for (struct tm_region *o = region; o; o = o->outer)
3317 if (!o->outer)
3319 split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi);
3320 break;
3323 // Delete any tail-call annotation that may have been added.
3324 // The tail-call pass may have misidentified the commit as
3325 // a candidate because we had not yet added this restart edge.
3326 gimple_call_set_tail (call_stmt, false);
3330 /* Entry point to the final expansion of transactional nodes. */
3332 namespace {
3334 const pass_data pass_data_tm_edges =
3336 GIMPLE_PASS, /* type */
3337 "tmedge", /* name */
3338 OPTGROUP_NONE, /* optinfo_flags */
3339 TV_TRANS_MEM, /* tv_id */
3340 ( PROP_ssa | PROP_cfg ), /* properties_required */
3341 0, /* properties_provided */
3342 0, /* properties_destroyed */
3343 0, /* todo_flags_start */
3344 TODO_update_ssa, /* todo_flags_finish */
3347 class pass_tm_edges : public gimple_opt_pass
3349 public:
3350 pass_tm_edges (gcc::context *ctxt)
3351 : gimple_opt_pass (pass_data_tm_edges, ctxt)
3354 /* opt_pass methods: */
3355 virtual unsigned int execute (function *);
3357 }; // class pass_tm_edges
3359 unsigned int
3360 pass_tm_edges::execute (function *fun)
3362 vec<tm_region *> bb_regions
3363 = get_bb_regions_instrumented (/*traverse_clones=*/false,
3364 /*include_uninstrumented_p=*/true);
3365 struct tm_region *r;
3366 unsigned i;
3368 FOR_EACH_VEC_ELT (bb_regions, i, r)
3369 if (r != NULL)
3370 expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i));
3372 bb_regions.release ();
3374 /* We've got to release the dominance info now, to indicate that it
3375 must be rebuilt completely. Otherwise we'll crash trying to update
3376 the SSA web in the TODO section following this pass. */
3377 free_dominance_info (CDI_DOMINATORS);
3378 /* We've also wrecked loops badly by inserting abnormal edges. */
3379 loops_state_set (LOOPS_NEED_FIXUP);
3380 bitmap_obstack_release (&tm_obstack);
3381 all_tm_regions = NULL;
3383 return 0;
3386 } // anon namespace
3388 gimple_opt_pass *
3389 make_pass_tm_edges (gcc::context *ctxt)
3391 return new pass_tm_edges (ctxt);
3394 /* Helper function for expand_regions. Expand REGION and recurse to
3395 the inner region. Call CALLBACK on each region. CALLBACK returns
3396 NULL to continue the traversal, otherwise a non-null value which
3397 this function will return as well. TRAVERSE_CLONES is true if we
3398 should traverse transactional clones. */
3400 static void *
3401 expand_regions_1 (struct tm_region *region,
3402 void *(*callback)(struct tm_region *, void *),
3403 void *data,
3404 bool traverse_clones)
3406 void *retval = NULL;
3407 if (region->exit_blocks
3408 || (traverse_clones && decl_is_tm_clone (current_function_decl)))
3410 retval = callback (region, data);
3411 if (retval)
3412 return retval;
3414 if (region->inner)
3416 retval = expand_regions (region->inner, callback, data, traverse_clones);
3417 if (retval)
3418 return retval;
3420 return retval;
3423 /* Traverse the regions enclosed and including REGION. Execute
3424 CALLBACK for each region, passing DATA. CALLBACK returns NULL to
3425 continue the traversal, otherwise a non-null value which this
3426 function will return as well. TRAVERSE_CLONES is true if we should
3427 traverse transactional clones. */
3429 static void *
3430 expand_regions (struct tm_region *region,
3431 void *(*callback)(struct tm_region *, void *),
3432 void *data,
3433 bool traverse_clones)
3435 void *retval = NULL;
3436 while (region)
3438 retval = expand_regions_1 (region, callback, data, traverse_clones);
3439 if (retval)
3440 return retval;
3441 region = region->next;
3443 return retval;
3447 /* A unique TM memory operation. */
3448 struct tm_memop
3450 /* Unique ID that all memory operations to the same location have. */
3451 unsigned int value_id;
3452 /* Address of load/store. */
3453 tree addr;
3456 /* TM memory operation hashtable helpers. */
3458 struct tm_memop_hasher : free_ptr_hash <tm_memop>
3460 static inline hashval_t hash (const tm_memop *);
3461 static inline bool equal (const tm_memop *, const tm_memop *);
3464 /* Htab support. Return a hash value for a `tm_memop'. */
3465 inline hashval_t
3466 tm_memop_hasher::hash (const tm_memop *mem)
3468 tree addr = mem->addr;
3469 /* We drill down to the SSA_NAME/DECL for the hash, but equality is
3470 actually done with operand_equal_p (see tm_memop_hasher::equal). */
3471 if (TREE_CODE (addr) == ADDR_EXPR)
3472 addr = TREE_OPERAND (addr, 0);
3473 return iterative_hash_expr (addr, 0);
3476 /* Htab support. Return true if two tm_memop's are the same. */
3477 inline bool
3478 tm_memop_hasher::equal (const tm_memop *mem1, const tm_memop *mem2)
3480 return operand_equal_p (mem1->addr, mem2->addr, 0);
3483 /* Sets for solving data flow equations in the memory optimization pass. */
3484 struct tm_memopt_bitmaps
3486 /* Stores available to this BB upon entry. Basically, stores that
3487 dominate this BB. */
3488 bitmap store_avail_in;
3489 /* Stores available at the end of this BB. */
3490 bitmap store_avail_out;
3491 bitmap store_antic_in;
3492 bitmap store_antic_out;
3493 /* Reads available to this BB upon entry. Basically, reads that
3494 dominate this BB. */
3495 bitmap read_avail_in;
3496 /* Reads available at the end of this BB. */
3497 bitmap read_avail_out;
3498 /* Reads performed in this BB. */
3499 bitmap read_local;
3500 /* Writes performed in this BB. */
3501 bitmap store_local;
3503 /* Temporary storage for pass. */
3504 /* Is the current BB in the worklist? */
3505 bool avail_in_worklist_p;
3506 /* Have we visited this BB? */
3507 bool visited_p;
3510 static bitmap_obstack tm_memopt_obstack;
3512 /* Unique counter for TM loads and stores. Loads and stores of the
3513 same address get the same ID. */
3514 static unsigned int tm_memopt_value_id;
3515 static hash_table<tm_memop_hasher> *tm_memopt_value_numbers;
3517 #define STORE_AVAIL_IN(BB) \
3518 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
3519 #define STORE_AVAIL_OUT(BB) \
3520 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
3521 #define STORE_ANTIC_IN(BB) \
3522 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
3523 #define STORE_ANTIC_OUT(BB) \
3524 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
3525 #define READ_AVAIL_IN(BB) \
3526 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
3527 #define READ_AVAIL_OUT(BB) \
3528 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
3529 #define READ_LOCAL(BB) \
3530 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
3531 #define STORE_LOCAL(BB) \
3532 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
3533 #define AVAIL_IN_WORKLIST_P(BB) \
3534 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
3535 #define BB_VISITED_P(BB) \
3536 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
3538 /* Given a TM load/store in STMT, return the value number for the address
3539 it accesses. */
3541 static unsigned int
3542 tm_memopt_value_number (gimple *stmt, enum insert_option op)
3544 struct tm_memop tmpmem, *mem;
3545 tm_memop **slot;
3547 gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
3548 tmpmem.addr = gimple_call_arg (stmt, 0);
3549 slot = tm_memopt_value_numbers->find_slot (&tmpmem, op);
3550 if (*slot)
3551 mem = *slot;
3552 else if (op == INSERT)
3554 mem = XNEW (struct tm_memop);
3555 *slot = mem;
3556 mem->value_id = tm_memopt_value_id++;
3557 mem->addr = tmpmem.addr;
3559 else
3560 gcc_unreachable ();
3561 return mem->value_id;
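/* Consequently, a TM load and a TM store of the same address share
   one value id, so a single bit position in the dataflow bitmaps
   below identifies the memory location rather than any particular
   statement.  */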
3564 /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
3566 static void
3567 tm_memopt_accumulate_memops (basic_block bb)
3569 gimple_stmt_iterator gsi;
3571 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3573 gimple *stmt = gsi_stmt (gsi);
3574 bitmap bits;
3575 unsigned int loc;
3577 if (is_tm_store (stmt))
3578 bits = STORE_LOCAL (bb);
3579 else if (is_tm_load (stmt))
3580 bits = READ_LOCAL (bb);
3581 else
3582 continue;
3584 loc = tm_memopt_value_number (stmt, INSERT);
3585 bitmap_set_bit (bits, loc);
3586 if (dump_file)
3588 fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
3589 is_tm_load (stmt) ? "LOAD" : "STORE", loc,
3590 gimple_bb (stmt)->index);
3591 print_generic_expr (dump_file, gimple_call_arg (stmt, 0));
3592 fprintf (dump_file, "\n");
3597 /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */
3599 static void
3600 dump_tm_memopt_set (const char *set_name, bitmap bits)
3602 unsigned i;
3603 bitmap_iterator bi;
3604 const char *comma = "";
3606 fprintf (dump_file, "TM memopt: %s: [", set_name);
3607 EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
3609 hash_table<tm_memop_hasher>::iterator hi;
3610 struct tm_memop *mem = NULL;
3612 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
3613 FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi)
3614 if (mem->value_id == i)
3615 break;
3616 gcc_assert (mem->value_id == i);
3617 fprintf (dump_file, "%s", comma);
3618 comma = ", ";
3619 print_generic_expr (dump_file, mem->addr);
3621 fprintf (dump_file, "]\n");
3624 /* Prettily dump all of the memopt sets in BLOCKS. */
3626 static void
3627 dump_tm_memopt_sets (vec<basic_block> blocks)
3629 size_t i;
3630 basic_block bb;
3632 for (i = 0; blocks.iterate (i, &bb); ++i)
3634 fprintf (dump_file, "------------BB %d---------\n", bb->index);
3635 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
3636 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
3637 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
3638 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
3639 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
3640 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
3644 /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
3646 static void
3647 tm_memopt_compute_avin (basic_block bb)
3649 edge e;
3650 unsigned ix;
3652 /* Seed with the AVOUT of any predecessor. */
3653 for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
3655 e = EDGE_PRED (bb, ix);
3656 /* Make sure we have already visited this BB, so that it has been
3657 initialized.
3659 If e->src->aux is NULL, this predecessor is actually on an
3660 enclosing transaction. We only care about the current
3661 transaction, so ignore it. */
3662 if (e->src->aux && BB_VISITED_P (e->src))
3664 bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3665 bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3666 break;
3670 for (; ix < EDGE_COUNT (bb->preds); ix++)
3672 e = EDGE_PRED (bb, ix);
3673 if (e->src->aux && BB_VISITED_P (e->src))
3675 bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3676 bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3680 BB_VISITED_P (bb) = true;
3683 /* Compute the STORE_ANTIC_IN for the basic block BB. */
3685 static void
3686 tm_memopt_compute_antin (basic_block bb)
3688 edge e;
3689 unsigned ix;
3691 /* Seed with the ANTIC_OUT of any successor. */
3692 for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
3694 e = EDGE_SUCC (bb, ix);
3695 /* Make sure we have already visited this BB, so that it has been
3696 initialized. */
3697 if (BB_VISITED_P (e->dest))
3699 bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3700 break;
3704 for (; ix < EDGE_COUNT (bb->succs); ix++)
3706 e = EDGE_SUCC (bb, ix);
3707 if (BB_VISITED_P (e->dest))
3708 bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3711 BB_VISITED_P (bb) = true;
3714 /* Compute the AVAIL sets for every basic block in BLOCKS.
3716 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3718 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3719 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
3721 This is basically what we do in lcm's compute_available(), but here
3722 we calculate two sets of sets (one for STOREs and one for READs),
3723 and we work on a region instead of the entire CFG.
3725 REGION is the TM region.
3726 BLOCKS are the basic blocks in the region. */
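/* For example, in a diamond ENTRY -> { A, B } -> JOIN where only A
   performs a TM store to *p, the store appears in STORE_AVAIL_OUT (A)
   but not in STORE_AVAIL_IN (JOIN), because the intersection with B's
   AVAIL_OUT removes it.  */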
3728 static void
3729 tm_memopt_compute_available (struct tm_region *region,
3730 vec<basic_block> blocks)
3732 edge e;
3733 basic_block *worklist, *qin, *qout, *qend, bb;
3734 unsigned int qlen, i;
3735 edge_iterator ei;
3736 bool changed;
3738 /* Allocate a worklist array/queue. Entries are only added to the
3739 list if they were not already on the list. So the size is
3740 bounded by the number of basic blocks in the region. */
3741 qlen = blocks.length () - 1;
3742 qin = qout = worklist =
3743 XNEWVEC (basic_block, qlen);
3745 /* Put every block in the region on the worklist. */
3746 for (i = 0; blocks.iterate (i, &bb); ++i)
3748 /* Seed AVAIL_OUT with the LOCAL set. */
3749 bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
3750 bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
3752 AVAIL_IN_WORKLIST_P (bb) = true;
3753 /* No need to insert the entry block, since it has an AVIN of
3754 null, and an AVOUT that has already been seeded in. */
3755 if (bb != region->entry_block)
3756 *qin++ = bb;
3759 /* The entry block has been initialized with the local sets. */
3760 BB_VISITED_P (region->entry_block) = true;
3762 qin = worklist;
3763 qend = &worklist[qlen];
3765 /* Iterate until the worklist is empty. */
3766 while (qlen)
3768 /* Take the first entry off the worklist. */
3769 bb = *qout++;
3770 qlen--;
3772 if (qout >= qend)
3773 qout = worklist;
3775 /* This block can be added to the worklist again if necessary. */
3776 AVAIL_IN_WORKLIST_P (bb) = false;
3777 tm_memopt_compute_avin (bb);
3779 /* Note: We do not add the LOCAL sets here because we already
3780 seeded the AVAIL_OUT sets with them. */
3781 changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
3782 changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
3783 if (changed
3784 && (region->exit_blocks == NULL
3785 || !bitmap_bit_p (region->exit_blocks, bb->index)))
3786 /* If the out state of this block changed, then we need to add
3787 its successors to the worklist if they are not already in. */
3788 FOR_EACH_EDGE (e, ei, bb->succs)
3789 if (!AVAIL_IN_WORKLIST_P (e->dest)
3790 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3792 *qin++ = e->dest;
3793 AVAIL_IN_WORKLIST_P (e->dest) = true;
3794 qlen++;
3796 if (qin >= qend)
3797 qin = worklist;
3801 free (worklist);
3803 if (dump_file)
3804 dump_tm_memopt_sets (blocks);
3807 /* Compute ANTIC sets for every basic block in BLOCKS.
3809 We compute STORE_ANTIC_OUT as follows:
3811 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
3812 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
3814 REGION is the TM region.
3815 BLOCKS are the basic blocks in the region. */
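/* This is the mirror image of the AVAIL problem: a store is
   anticipated at a point only if it occurs on every path from that
   point onward, hence the intersection is taken over successors
   rather than predecessors.  */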
3817 static void
3818 tm_memopt_compute_antic (struct tm_region *region,
3819 vec<basic_block> blocks)
3821 edge e;
3822 basic_block *worklist, *qin, *qout, *qend, bb;
3823 unsigned int qlen;
3824 int i;
3825 edge_iterator ei;
3827 /* Allocate a worklist array/queue. Entries are only added to the
3828 list if they were not already on the list. So the size is
3829 bounded by the number of basic blocks in the region. */
3830 qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());
3832 for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
3834 bb = blocks[i];
3836 /* Seed ANTIC_OUT with the LOCAL set. */
3837 bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
3839 /* Put every block in the region on the worklist. */
3840 AVAIL_IN_WORKLIST_P (bb) = true;
3841 /* No need to insert exit blocks, since their ANTIC_IN is NULL,
3842 and their ANTIC_OUT has already been seeded in. */
3843 if (region->exit_blocks
3844 && !bitmap_bit_p (region->exit_blocks, bb->index))
3846 qlen++;
3847 *qin++ = bb;
3851 /* The exit blocks have been initialized with the local sets. */
3852 if (region->exit_blocks)
3854 unsigned int i;
3855 bitmap_iterator bi;
3856 EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
3857 BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true;
3860 qin = worklist;
3861 qend = &worklist[qlen];
3863 /* Iterate until the worklist is empty. */
3864 while (qlen)
3866 /* Take the first entry off the worklist. */
3867 bb = *qout++;
3868 qlen--;
3870 if (qout >= qend)
3871 qout = worklist;
3873 /* This block can be added to the worklist again if necessary. */
3874 AVAIL_IN_WORKLIST_P (bb) = false;
3875 tm_memopt_compute_antin (bb);
3877 /* Note: We do not add the LOCAL sets here because we already
3878 seeded the ANTIC_OUT sets with them. */
3879 if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
3880 && bb != region->entry_block)
3881 /* If the out state of this block changed, then we need to add
3882 its predecessors to the worklist if they are not already in. */
3883 FOR_EACH_EDGE (e, ei, bb->preds)
3884 if (!AVAIL_IN_WORKLIST_P (e->src))
3886 *qin++ = e->src;
3887 AVAIL_IN_WORKLIST_P (e->src) = true;
3888 qlen++;
3890 if (qin >= qend)
3891 qin = worklist;
3895 free (worklist);
3897 if (dump_file)
3898 dump_tm_memopt_sets (blocks);
3901 /* Offsets of load variants from TM_LOAD. For example,
3902 BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
3903 See gtm-builtins.def. */
3904 #define TRANSFORM_RAR 1
3905 #define TRANSFORM_RAW 2
3906 #define TRANSFORM_RFW 3
3907 /* Offsets of store variants from TM_STORE. */
3908 #define TRANSFORM_WAR 1
3909 #define TRANSFORM_WAW 2
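/* So, e.g., applying TRANSFORM_RAW to BUILT_IN_TM_LOAD_4 selects
   BUILT_IN_TM_LOAD_RAW_4, the read-after-write variant of the same
   4-byte load, given the gtm-builtins.def ordering described above.  */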
3911 /* Inform about a load/store optimization. */
3913 static void
3914 dump_tm_memopt_transform (gimple *stmt)
3916 if (dump_file)
3918 fprintf (dump_file, "TM memopt: transforming: ");
3919 print_gimple_stmt (dump_file, stmt, 0);
3920 fprintf (dump_file, "\n");
3924 /* Perform a read/write optimization. Replaces the TM builtin in STMT
3925 by a builtin that is OFFSET entries down in the builtins table in
3926 gtm-builtins.def. */
3928 static void
3929 tm_memopt_transform_stmt (unsigned int offset,
3930 gcall *stmt,
3931 gimple_stmt_iterator *gsi)
3933 tree fn = gimple_call_fn (stmt);
3934 gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
3935 TREE_OPERAND (fn, 0)
3936 = builtin_decl_explicit ((enum built_in_function)
3937 (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
3938 + offset));
3939 gimple_call_set_fn (stmt, fn);
3940 gsi_replace (gsi, stmt, true);
3941 dump_tm_memopt_transform (stmt);
3944 /* Perform the actual TM memory optimization transformations in the
3945 basic blocks in BLOCKS. */
3947 static void
3948 tm_memopt_transform_blocks (vec<basic_block> blocks)
3950 size_t i;
3951 basic_block bb;
3952 gimple_stmt_iterator gsi;
3954 for (i = 0; blocks.iterate (i, &bb); ++i)
3956 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3958 gimple *stmt = gsi_stmt (gsi);
3959 bitmap read_avail = READ_AVAIL_IN (bb);
3960 bitmap store_avail = STORE_AVAIL_IN (bb);
3961 bitmap store_antic = STORE_ANTIC_OUT (bb);
3962 unsigned int loc;
3964 if (is_tm_simple_load (stmt))
3966 gcall *call_stmt = as_a <gcall *> (stmt);
3967 loc = tm_memopt_value_number (stmt, NO_INSERT);
3968 if (store_avail && bitmap_bit_p (store_avail, loc))
3969 tm_memopt_transform_stmt (TRANSFORM_RAW, call_stmt, &gsi);
3970 else if (store_antic && bitmap_bit_p (store_antic, loc))
3972 tm_memopt_transform_stmt (TRANSFORM_RFW, call_stmt, &gsi);
3973 bitmap_set_bit (store_avail, loc);
3975 else if (read_avail && bitmap_bit_p (read_avail, loc))
3976 tm_memopt_transform_stmt (TRANSFORM_RAR, call_stmt, &gsi);
3977 else
3978 bitmap_set_bit (read_avail, loc);
3980 else if (is_tm_simple_store (stmt))
3982 gcall *call_stmt = as_a <gcall *> (stmt);
3983 loc = tm_memopt_value_number (stmt, NO_INSERT);
3984 if (store_avail && bitmap_bit_p (store_avail, loc))
3985 tm_memopt_transform_stmt (TRANSFORM_WAW, call_stmt, &gsi);
3986 else
3988 if (read_avail && bitmap_bit_p (read_avail, loc))
3989 tm_memopt_transform_stmt (TRANSFORM_WAR, call_stmt, &gsi);
3990 bitmap_set_bit (store_avail, loc);
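/* As an illustration of the transformation above (a sketch; the plain
   accesses below stand for the instrumented TM builtin calls):

     __transaction_atomic {
       x = 1;      // instrumented store; x enters STORE_AVAIL
       tmp = x;    // load of x hits STORE_AVAIL, so it becomes a
                   //   read-after-write (RAW) load
     }

   The RAW hint can let the runtime use a cheaper read barrier for the
   second access.  */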
3997 /* Return a new set of bitmaps for a BB. */
static struct tm_memopt_bitmaps *
tm_memopt_init_sets (void)
{
  struct tm_memopt_bitmaps *b
    = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
  b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
  return b;
}
/* Disassociate the sets computed for each BB; the bitmaps themselves
   live on tm_memopt_obstack and are freed when it is released.  */
4018 static void
4019 tm_memopt_free_sets (vec<basic_block> blocks)
4021 size_t i;
4022 basic_block bb;
4024 for (i = 0; blocks.iterate (i, &bb); ++i)
4025 bb->aux = NULL;
4028 /* Clear the visited bit for every basic block in BLOCKS. */
4030 static void
4031 tm_memopt_clear_visited (vec<basic_block> blocks)
4033 size_t i;
4034 basic_block bb;
4036 for (i = 0; blocks.iterate (i, &bb); ++i)
4037 BB_VISITED_P (bb) = false;
/* Replace TM loads/stores with hints for the runtime.  We handle
4041 things like read-after-write, write-after-read, read-after-read,
4042 read-for-write, etc. */
4044 static unsigned int
4045 execute_tm_memopt (void)
4047 struct tm_region *region;
4048 vec<basic_block> bbs;
4050 tm_memopt_value_id = 0;
4051 tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10);
4053 for (region = all_tm_regions; region; region = region->next)
4055 /* All the TM stores/loads in the current region. */
4056 size_t i;
4057 basic_block bb;
4059 bitmap_obstack_initialize (&tm_memopt_obstack);
4061 /* Save all BBs for the current region. */
4062 bbs = get_tm_region_blocks (region->entry_block,
4063 region->exit_blocks,
4064 region->irr_blocks,
4065 NULL,
4066 false);
4068 /* Collect all the memory operations. */
4069 for (i = 0; bbs.iterate (i, &bb); ++i)
4071 bb->aux = tm_memopt_init_sets ();
4072 tm_memopt_accumulate_memops (bb);
4075 /* Solve data flow equations and transform each block accordingly. */
4076 tm_memopt_clear_visited (bbs);
4077 tm_memopt_compute_available (region, bbs);
4078 tm_memopt_clear_visited (bbs);
4079 tm_memopt_compute_antic (region, bbs);
4080 tm_memopt_transform_blocks (bbs);
4082 tm_memopt_free_sets (bbs);
4083 bbs.release ();
4084 bitmap_obstack_release (&tm_memopt_obstack);
4085 tm_memopt_value_numbers->empty ();
4088 delete tm_memopt_value_numbers;
4089 tm_memopt_value_numbers = NULL;
4090 return 0;
4093 namespace {
4095 const pass_data pass_data_tm_memopt =
4097 GIMPLE_PASS, /* type */
4098 "tmmemopt", /* name */
4099 OPTGROUP_NONE, /* optinfo_flags */
4100 TV_TRANS_MEM, /* tv_id */
4101 ( PROP_ssa | PROP_cfg ), /* properties_required */
4102 0, /* properties_provided */
4103 0, /* properties_destroyed */
4104 0, /* todo_flags_start */
4105 0, /* todo_flags_finish */
4108 class pass_tm_memopt : public gimple_opt_pass
4110 public:
4111 pass_tm_memopt (gcc::context *ctxt)
4112 : gimple_opt_pass (pass_data_tm_memopt, ctxt)
4115 /* opt_pass methods: */
4116 virtual bool gate (function *) { return flag_tm && optimize > 0; }
4117 virtual unsigned int execute (function *) { return execute_tm_memopt (); }
4119 }; // class pass_tm_memopt
4121 } // anon namespace
4123 gimple_opt_pass *
4124 make_pass_tm_memopt (gcc::context *ctxt)
4126 return new pass_tm_memopt (ctxt);
/* Interprocedural analysis for the creation of transactional clones.
4131 The aim of this pass is to find which functions are referenced in
4132 a non-irrevocable transaction context, and for those over which
4133 we have control (or user directive), create a version of the
4134 function which uses only the transactional interface to reference
4135 protected memories. This analysis proceeds in several steps:
4137 (1) Collect the set of all possible transactional clones:
   (a) For all local public functions marked tm_callable, push
       them onto the tm_callee queue.
4142 (b) For all local functions, scan for calls in transaction blocks.
4143 Push the caller and callee onto the tm_caller and tm_callee
4144 queues. Count the number of callers for each callee.
4146 (c) For each local function on the callee list, assume we will
4147 create a transactional clone. Push *all* calls onto the
       callee queues; count the number of clone callers separately
       from the number of original callers.
4151 (2) Propagate irrevocable status up the dominator tree:
4153 (a) Any external function on the callee list that is not marked
4154 tm_callable is irrevocable. Push all callers of such onto
4155 a worklist.
4157 (b) For each function on the worklist, mark each block that
4158 contains an irrevocable call. Use the AND operator to
4159 propagate that mark up the dominator tree.
4161 (c) If we reach the entry block for a possible transactional
4162 clone, then the transactional clone is irrevocable, and
4163 we should not create the clone after all. Push all
4164 callers onto the worklist.
4166 (d) Place tm_irrevocable calls at the beginning of the relevant
4167 blocks. Special case here is the entry block for the entire
4168 transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
4169 the library to begin the region in serial mode. Decrement
4170 the call count for all callees in the irrevocable region.
4172 (3) Create the transactional clones:
4174 Any tm_callee that still has a non-zero call count is cloned.
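/* For illustration (a sketch, not part of the pass): given

     int g;
     __attribute__((transaction_callable))
     void inc (void) { g++; }

     void f (void) { __transaction_relaxed { inc (); } }

   step (1) queues INC as a possible transactional clone, step (2)
   finds nothing irrevocable in its body, and step (3) creates the
   clone, to which the call inside the transaction is redirected.  */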
4177 /* This structure is stored in the AUX field of each cgraph_node. */
4178 struct tm_ipa_cg_data
4180 /* The clone of the function that got created. */
4181 struct cgraph_node *clone;
4183 /* The tm regions in the normal function. */
4184 struct tm_region *all_tm_regions;
4186 /* The blocks of the normal/clone functions that contain irrevocable
4187 calls, or blocks that are post-dominated by irrevocable calls. */
4188 bitmap irrevocable_blocks_normal;
4189 bitmap irrevocable_blocks_clone;
4191 /* The blocks of the normal function that are involved in transactions. */
4192 bitmap transaction_blocks_normal;
4194 /* The number of callers to the transactional clone of this function
4195 from normal and transactional clones respectively. */
4196 unsigned tm_callers_normal;
4197 unsigned tm_callers_clone;
4199 /* True if all calls to this function's transactional clone
4200 are irrevocable. Also automatically true if the function
4201 has no transactional clone. */
4202 bool is_irrevocable;
4204 /* Flags indicating the presence of this function in various queues. */
4205 bool in_callee_queue;
4206 bool in_worklist;
4208 /* Flags indicating the kind of scan desired while in the worklist. */
4209 bool want_irr_scan_normal;
4212 typedef vec<cgraph_node *> cgraph_node_queue;
4214 /* Return the ipa data associated with NODE, allocating zeroed memory
4215 if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
4216 and set *NODE accordingly. */
4218 static struct tm_ipa_cg_data *
4219 get_cg_data (struct cgraph_node **node, bool traverse_aliases)
4221 struct tm_ipa_cg_data *d;
4223 if (traverse_aliases && (*node)->alias)
4224 *node = (*node)->get_alias_target ();
4226 d = (struct tm_ipa_cg_data *) (*node)->aux;
4228 if (d == NULL)
4230 d = (struct tm_ipa_cg_data *)
4231 obstack_alloc (&tm_obstack.obstack, sizeof (*d));
4232 (*node)->aux = (void *) d;
4233 memset (d, 0, sizeof (*d));
4236 return d;
4239 /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
4240 it is already present. */
4242 static void
4243 maybe_push_queue (struct cgraph_node *node,
4244 cgraph_node_queue *queue_p, bool *in_queue_p)
4246 if (!*in_queue_p)
4248 *in_queue_p = true;
4249 queue_p->safe_push (node);
4253 /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
4254 Queue all callees within block BB. */
4256 static void
4257 ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
4258 basic_block bb, bool for_clone)
4260 gimple_stmt_iterator gsi;
4262 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4264 gimple *stmt = gsi_stmt (gsi);
4265 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4267 tree fndecl = gimple_call_fndecl (stmt);
4268 if (fndecl)
4270 struct tm_ipa_cg_data *d;
4271 unsigned *pcallers;
4272 struct cgraph_node *node;
4274 if (is_tm_ending_fndecl (fndecl))
4275 continue;
4276 if (find_tm_replacement_function (fndecl))
4277 continue;
4279 node = cgraph_node::get (fndecl);
4280 gcc_assert (node != NULL);
4281 d = get_cg_data (&node, true);
4283 pcallers = (for_clone ? &d->tm_callers_clone
4284 : &d->tm_callers_normal);
4285 *pcallers += 1;
4287 maybe_push_queue (node, callees_p, &d->in_callee_queue);
4293 /* Scan all calls in NODE that are within a transaction region,
4294 and push the resulting nodes into the callee queue. */
4296 static void
4297 ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
4298 cgraph_node_queue *callees_p)
4300 d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
4301 d->all_tm_regions = all_tm_regions;
4303 for (tm_region *r = all_tm_regions; r; r = r->next)
4305 vec<basic_block> bbs;
4306 basic_block bb;
4307 unsigned i;
4309 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
4310 d->transaction_blocks_normal, false, false);
4312 FOR_EACH_VEC_ELT (bbs, i, bb)
4313 ipa_tm_scan_calls_block (callees_p, bb, false);
4315 bbs.release ();
4319 /* Scan all calls in NODE as if this is the transactional clone,
4320 and push the destinations into the callee queue. */
4322 static void
4323 ipa_tm_scan_calls_clone (struct cgraph_node *node,
4324 cgraph_node_queue *callees_p)
4326 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
4327 basic_block bb;
4329 FOR_EACH_BB_FN (bb, fn)
4330 ipa_tm_scan_calls_block (callees_p, bb, true);
4333 /* The function NODE has been detected to be irrevocable. Push all
4334 of its callers onto WORKLIST for the purpose of re-scanning them. */
4336 static void
4337 ipa_tm_note_irrevocable (struct cgraph_node *node,
4338 cgraph_node_queue *worklist_p)
4340 struct tm_ipa_cg_data *d = get_cg_data (&node, true);
4341 struct cgraph_edge *e;
4343 d->is_irrevocable = true;
4345 for (e = node->callers; e ; e = e->next_caller)
4347 basic_block bb;
4348 struct cgraph_node *caller;
4350 /* Don't examine recursive calls. */
4351 if (e->caller == node)
4352 continue;
4353 /* Even if we think we can go irrevocable, believe the user
4354 above all. */
4355 if (is_tm_safe_or_pure (e->caller->decl))
4356 continue;
4358 caller = e->caller;
4359 d = get_cg_data (&caller, true);
/* Check if the call statement is within a transaction in the
   caller.  If so, schedule the caller for a normal re-scan as well.  */
4363 bb = gimple_bb (e->call_stmt);
4364 gcc_assert (bb != NULL);
4365 if (d->transaction_blocks_normal
4366 && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
4367 d->want_irr_scan_normal = true;
4369 maybe_push_queue (caller, worklist_p, &d->in_worklist);
4373 /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
4374 within the block is irrevocable. */
4376 static bool
4377 ipa_tm_scan_irr_block (basic_block bb)
4379 gimple_stmt_iterator gsi;
4380 tree fn;
4382 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4384 gimple *stmt = gsi_stmt (gsi);
4385 switch (gimple_code (stmt))
4387 case GIMPLE_ASSIGN:
4388 if (gimple_assign_single_p (stmt))
4390 tree lhs = gimple_assign_lhs (stmt);
4391 tree rhs = gimple_assign_rhs1 (stmt);
4392 if (volatile_lvalue_p (lhs) || volatile_lvalue_p (rhs))
4393 return true;
4395 break;
4397 case GIMPLE_CALL:
4399 tree lhs = gimple_call_lhs (stmt);
4400 if (lhs && volatile_lvalue_p (lhs))
4401 return true;
4403 if (is_tm_pure_call (stmt))
4404 break;
4406 fn = gimple_call_fn (stmt);
4408 /* Functions with the attribute are by definition irrevocable. */
4409 if (is_tm_irrevocable (fn))
4410 return true;
4412 /* For direct function calls, go ahead and check for replacement
4413 functions, or transitive irrevocable functions. For indirect
4414 functions, we'll ask the runtime. */
4415 if (TREE_CODE (fn) == ADDR_EXPR)
4417 struct tm_ipa_cg_data *d;
4418 struct cgraph_node *node;
4420 fn = TREE_OPERAND (fn, 0);
4421 if (is_tm_ending_fndecl (fn))
4422 break;
4423 if (find_tm_replacement_function (fn))
4424 break;
4426 node = cgraph_node::get (fn);
4427 d = get_cg_data (&node, true);
4429 /* Return true if irrevocable, but above all, believe
4430 the user. */
4431 if (d->is_irrevocable
4432 && !is_tm_safe_or_pure (fn))
4433 return true;
4435 break;
4438 case GIMPLE_ASM:
4439 /* ??? The Approved Method of indicating that an inline
4440 assembly statement is not relevant to the transaction
4441 is to wrap it in a __tm_waiver block. This is not
4442 yet implemented, so we can't check for it. */
4443 if (is_tm_safe (current_function_decl))
4445 tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
4446 SET_EXPR_LOCATION (t, gimple_location (stmt));
4447 error ("%Kasm not allowed in %<transaction_safe%> function", t);
4449 return true;
4451 default:
4452 break;
4456 return false;
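/* Examples of statements the scan above flags as irrevocable
   (a sketch; unsafe_fn is a hypothetical function whose cgraph data
   already has is_irrevocable set):

     *(volatile int *) p = 1;   // volatile lvalue on either side
     unsafe_fn ();              // transitively irrevocable call
     asm ("");                  // no __tm_waiver support yet  */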
/* For each of the blocks seeded within PQUEUE, walk the CFG looking
4460 for new irrevocable blocks, marking them in NEW_IRR. Don't bother
4461 scanning past OLD_IRR or EXIT_BLOCKS. */
4463 static bool
4464 ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
4465 bitmap old_irr, bitmap exit_blocks)
4467 bool any_new_irr = false;
4468 edge e;
4469 edge_iterator ei;
4470 bitmap visited_blocks = BITMAP_ALLOC (NULL);
4474 basic_block bb = pqueue->pop ();
4476 /* Don't re-scan blocks we know already are irrevocable. */
4477 if (old_irr && bitmap_bit_p (old_irr, bb->index))
4478 continue;
4480 if (ipa_tm_scan_irr_block (bb))
4482 bitmap_set_bit (new_irr, bb->index);
4483 any_new_irr = true;
4485 else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
4487 FOR_EACH_EDGE (e, ei, bb->succs)
4488 if (!bitmap_bit_p (visited_blocks, e->dest->index))
4490 bitmap_set_bit (visited_blocks, e->dest->index);
4491 pqueue->safe_push (e->dest);
4495 while (!pqueue->is_empty ());
4497 BITMAP_FREE (visited_blocks);
4499 return any_new_irr;
/* Propagate the irrevocable property both up and down the dominator tree.
   ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
   is a bitmap of the blocks that exit the TM regions; OLD_IRR holds the
   results of a previous scan of the dominator tree which have been fully
   propagated; NEW_IRR is the set of new blocks which are gaining the
   irrevocable property during the current scan.  */
4508 static void
4509 ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
4510 bitmap old_irr, bitmap exit_blocks)
4512 vec<basic_block> bbs;
4513 bitmap all_region_blocks;
4515 /* If this block is in the old set, no need to rescan. */
4516 if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
4517 return;
4519 all_region_blocks = BITMAP_ALLOC (&tm_obstack);
4520 bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
4521 all_region_blocks, false);
4524 basic_block bb = bbs.pop ();
4525 bool this_irr = bitmap_bit_p (new_irr, bb->index);
4526 bool all_son_irr = false;
4527 edge_iterator ei;
4528 edge e;
/* Propagate up.  If all of my immediate successors are irrevocable,
   then so am I; but there must be at least one such successor.  */
4532 if (!this_irr)
4534 FOR_EACH_EDGE (e, ei, bb->succs)
4536 if (!bitmap_bit_p (new_irr, e->dest->index))
4538 all_son_irr = false;
4539 break;
4541 else
4542 all_son_irr = true;
4544 if (all_son_irr)
4546 /* Add block to new_irr if it hasn't already been processed. */
4547 if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
4549 bitmap_set_bit (new_irr, bb->index);
4550 this_irr = true;
4555 /* Propagate down to everyone we immediately dominate. */
4556 if (this_irr)
4558 basic_block son;
4559 for (son = first_dom_son (CDI_DOMINATORS, bb);
4560 son;
4561 son = next_dom_son (CDI_DOMINATORS, son))
4563 /* Make sure block is actually in a TM region, and it
4564 isn't already in old_irr. */
4565 if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
4566 && bitmap_bit_p (all_region_blocks, son->index))
4567 bitmap_set_bit (new_irr, son->index);
4571 while (!bbs.is_empty ());
4573 BITMAP_FREE (all_region_blocks);
4574 bbs.release ();
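/* Subtract one from the number of recorded callers of the
   transactional clone for every function called from BB.  FOR_CLONE
   selects whether the clone or the normal caller counts are
   adjusted.  */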
4577 static void
4578 ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
4580 gimple_stmt_iterator gsi;
4582 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4584 gimple *stmt = gsi_stmt (gsi);
4585 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4587 tree fndecl = gimple_call_fndecl (stmt);
4588 if (fndecl)
4590 struct tm_ipa_cg_data *d;
4591 unsigned *pcallers;
4592 struct cgraph_node *tnode;
4594 if (is_tm_ending_fndecl (fndecl))
4595 continue;
4596 if (find_tm_replacement_function (fndecl))
4597 continue;
4599 tnode = cgraph_node::get (fndecl);
4600 d = get_cg_data (&tnode, true);
4602 pcallers = (for_clone ? &d->tm_callers_clone
4603 : &d->tm_callers_normal);
4605 gcc_assert (*pcallers > 0);
4606 *pcallers -= 1;
4612 /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
4613 as well as other irrevocable actions such as inline assembly. Mark all
4614 such blocks as irrevocable and decrement the number of calls to
4615 transactional clones. Return true if, for the transactional clone, the
4616 entire function is irrevocable. */
4618 static bool
4619 ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
4621 struct tm_ipa_cg_data *d;
4622 bitmap new_irr, old_irr;
4623 bool ret = false;
4625 /* Builtin operators (operator new, and such). */
4626 if (DECL_STRUCT_FUNCTION (node->decl) == NULL
4627 || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
4628 return false;
4630 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4631 calculate_dominance_info (CDI_DOMINATORS);
4633 d = get_cg_data (&node, true);
4634 auto_vec<basic_block, 10> queue;
4635 new_irr = BITMAP_ALLOC (&tm_obstack);
4637 /* Scan each tm region, propagating irrevocable status through the tree. */
4638 if (for_clone)
4640 old_irr = d->irrevocable_blocks_clone;
4641 queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4642 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
4644 ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
4645 new_irr,
4646 old_irr, NULL);
4647 ret = bitmap_bit_p (new_irr,
4648 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
4651 else
4653 struct tm_region *region;
4655 old_irr = d->irrevocable_blocks_normal;
4656 for (region = d->all_tm_regions; region; region = region->next)
4658 queue.quick_push (region->entry_block);
4659 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
4660 region->exit_blocks))
4661 ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
4662 region->exit_blocks);
4666 /* If we found any new irrevocable blocks, reduce the call count for
4667 transactional clones within the irrevocable blocks. Save the new
4668 set of irrevocable blocks for next time. */
4669 if (!bitmap_empty_p (new_irr))
4671 bitmap_iterator bmi;
4672 unsigned i;
4674 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4675 ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
4676 for_clone);
4678 if (old_irr)
4680 bitmap_ior_into (old_irr, new_irr);
4681 BITMAP_FREE (new_irr);
4683 else if (for_clone)
4684 d->irrevocable_blocks_clone = new_irr;
4685 else
4686 d->irrevocable_blocks_normal = new_irr;
4688 if (dump_file && new_irr)
4690 const char *dname;
4691 bitmap_iterator bmi;
4692 unsigned i;
4694 dname = lang_hooks.decl_printable_name (current_function_decl, 2);
4695 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4696 fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
4699 else
4700 BITMAP_FREE (new_irr);
4702 pop_cfun ();
4704 return ret;
4707 /* Return true if, for the transactional clone of NODE, any call
4708 may enter irrevocable mode. */
4710 static bool
4711 ipa_tm_mayenterirr_function (struct cgraph_node *node)
4713 struct tm_ipa_cg_data *d;
4714 tree decl;
4715 unsigned flags;
4717 d = get_cg_data (&node, true);
4718 decl = node->decl;
4719 flags = flags_from_decl_or_type (decl);
4721 /* Handle some TM builtins. Ordinarily these aren't actually generated
4722 at this point, but handling these functions when written in by the
4723 user makes it easier to build unit tests. */
4724 if (flags & ECF_TM_BUILTIN)
4725 return false;
/* Filter out all functions that are explicitly marked; the TM
   attribute alone decides the answer for them.  */
4728 if (flags & ECF_TM_PURE)
4729 return false;
4730 if (is_tm_safe (decl))
4731 return false;
4732 if (is_tm_irrevocable (decl))
4733 return true;
4734 if (is_tm_callable (decl))
4735 return true;
4736 if (find_tm_replacement_function (decl))
4737 return true;
4739 /* If we aren't seeing the final version of the function we don't
4740 know what it will contain at runtime. */
4741 if (node->get_availability () < AVAIL_AVAILABLE)
4742 return true;
4744 /* If the function must go irrevocable, then of course true. */
4745 if (d->is_irrevocable)
4746 return true;
4748 /* If there are any blocks marked irrevocable, then the function
4749 as a whole may enter irrevocable. */
4750 if (d->irrevocable_blocks_clone)
4751 return true;
4753 /* We may have previously marked this function as tm_may_enter_irr;
4754 see pass_diagnose_tm_blocks. */
4755 if (node->local.tm_may_enter_irr)
4756 return true;
4758 /* Recurse on the main body for aliases. In general, this will
4759 result in one of the bits above being set so that we will not
4760 have to recurse next time. */
4761 if (node->alias)
4762 return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));
/* What remains is unmarked local functions with nothing that forces
   them to go irrevocable.  */
4766 return false;
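/* For example (a sketch of the checks above): a transaction_pure
   function answers false immediately; a function whose body we cannot
   see in its final form answers true; a fully available local
   function with no irrevocable blocks and no tm_may_enter_irr mark
   answers false.  */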
/* Diagnose calls from transaction_safe functions to unmarked
   functions that are determined not to be safe.  */
4772 static void
4773 ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
4775 struct cgraph_edge *e;
4777 for (e = node->callees; e ; e = e->next_callee)
4778 if (!is_tm_callable (e->callee->decl)
4779 && e->callee->local.tm_may_enter_irr)
4780 error_at (gimple_location (e->call_stmt),
4781 "unsafe function call %qD within "
4782 "%<transaction_safe%> function", e->callee->decl);
/* Diagnose calls from atomic transactions to unmarked functions
   that are determined not to be safe.  */
4788 static void
4789 ipa_tm_diagnose_transaction (struct cgraph_node *node,
4790 struct tm_region *all_tm_regions)
4792 struct tm_region *r;
4794 for (r = all_tm_regions; r ; r = r->next)
4795 if (gimple_transaction_subcode (r->get_transaction_stmt ())
4796 & GTMA_IS_RELAXED)
4798 /* Atomic transactions can be nested inside relaxed. */
4799 if (r->inner)
4800 ipa_tm_diagnose_transaction (node, r->inner);
4802 else
4804 vec<basic_block> bbs;
4805 gimple_stmt_iterator gsi;
4806 basic_block bb;
4807 size_t i;
4809 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
4810 r->irr_blocks, NULL, false);
4812 for (i = 0; bbs.iterate (i, &bb); ++i)
4813 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4815 gimple *stmt = gsi_stmt (gsi);
4816 tree fndecl;
4818 if (gimple_code (stmt) == GIMPLE_ASM)
4820 error_at (gimple_location (stmt),
4821 "asm not allowed in atomic transaction");
4822 continue;
4825 if (!is_gimple_call (stmt))
4826 continue;
4827 fndecl = gimple_call_fndecl (stmt);
4829 /* Indirect function calls have been diagnosed already. */
4830 if (!fndecl)
4831 continue;
4833 /* Stop at the end of the transaction. */
4834 if (is_tm_ending_fndecl (fndecl))
4836 if (bitmap_bit_p (r->exit_blocks, bb->index))
4837 break;
4838 continue;
4841 /* Marked functions have been diagnosed already. */
4842 if (is_tm_pure_call (stmt))
4843 continue;
4844 if (is_tm_callable (fndecl))
4845 continue;
4847 if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
4848 error_at (gimple_location (stmt),
4849 "unsafe function call %qD within "
4850 "atomic transaction", fndecl);
4853 bbs.release ();
/* Return a transactional mangled name for the assembler name
   OLD_ASM_ID.  The result is an IDENTIFIER_NODE; the scratch buffer
   used to build it is freed before returning.  */
4861 static tree
4862 tm_mangle (tree old_asm_id)
4864 const char *old_asm_name;
4865 char *tm_name;
4866 void *alloc = NULL;
4867 struct demangle_component *dc;
4868 tree new_asm_id;
4870 /* Determine if the symbol is already a valid C++ mangled name. Do this
4871 even for C, which might be interfacing with C++ code via appropriately
4872 ugly identifiers. */
4873 /* ??? We could probably do just as well checking for "_Z" and be done. */
4874 old_asm_name = IDENTIFIER_POINTER (old_asm_id);
4875 dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
4877 if (dc == NULL)
4879 char length[8];
4881 do_unencoded:
4882 sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
4883 tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
4885 else
4887 old_asm_name += 2; /* Skip _Z */
4889 switch (dc->type)
4891 case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
4892 case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
4893 /* Don't play silly games, you! */
4894 goto do_unencoded;
4896 case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
4897 /* I'd really like to know if we can ever be passed one of
4898 these from the C++ front end. The Logical Thing would
4899 seem that hidden-alias should be outer-most, so that we
4900 get hidden-alias of a transaction-clone and not vice-versa. */
4901 old_asm_name += 2;
4902 break;
4904 default:
4905 break;
4908 tm_name = concat ("_ZGTt", old_asm_name, NULL);
4910 free (alloc);
4912 new_asm_id = get_identifier (tm_name);
4913 free (tm_name);
4915 return new_asm_id;
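/* A couple of examples of the mangling above (a sketch):

     foo      ==>  _ZGTt3foo    (not a valid C++ mangled name)
     _Z3foov  ==>  _ZGTt3foov   (C++ "foo()")

   i.e. the transaction-clone marker is spliced in after _Z, or the
   unmangled name is wrapped in a <length><name> encoding.  */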
4918 static inline void
4919 ipa_tm_mark_force_output_node (struct cgraph_node *node)
4921 node->mark_force_output ();
4922 node->analyzed = true;
4925 static inline void
4926 ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
4928 node->forced_by_abi = true;
4929 node->analyzed = true;
4932 /* Callback data for ipa_tm_create_version_alias. */
4933 struct create_version_alias_info
4935 struct cgraph_node *old_node;
4936 tree new_decl;
4939 /* A subroutine of ipa_tm_create_version, called via
4940 cgraph_for_node_and_aliases. Create new tm clones for each of
4941 the existing aliases. */
4942 static bool
4943 ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
4945 struct create_version_alias_info *info
4946 = (struct create_version_alias_info *)data;
4947 tree old_decl, new_decl, tm_name;
4948 struct cgraph_node *new_node;
4950 if (!node->cpp_implicit_alias)
4951 return false;
4953 old_decl = node->decl;
4954 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4955 new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
4956 TREE_CODE (old_decl), tm_name,
4957 TREE_TYPE (old_decl));
4959 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4960 SET_DECL_RTL (new_decl, NULL);
4962 /* Based loosely on C++'s make_alias_for(). */
4963 TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
4964 DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
4965 DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
4966 TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
4967 DECL_EXTERNAL (new_decl) = 0;
4968 DECL_ARTIFICIAL (new_decl) = 1;
4969 TREE_ADDRESSABLE (new_decl) = 1;
4970 TREE_USED (new_decl) = 1;
4971 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4973 /* Perform the same remapping to the comdat group. */
4974 if (DECL_ONE_ONLY (new_decl))
4975 varpool_node::get (new_decl)->set_comdat_group
4976 (tm_mangle (decl_comdat_group_id (old_decl)));
4978 new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
4979 new_node->tm_clone = true;
4980 new_node->externally_visible = info->old_node->externally_visible;
4981 new_node->no_reorder = info->old_node->no_reorder;
4982 /* ?? Do not traverse aliases here. */
4983 get_cg_data (&node, false)->clone = new_node;
4985 record_tm_clone_pair (old_decl, new_decl);
4987 if (info->old_node->force_output
4988 || info->old_node->ref_list.first_referring ())
4989 ipa_tm_mark_force_output_node (new_node);
4990 if (info->old_node->forced_by_abi)
4991 ipa_tm_mark_forced_by_abi_node (new_node);
4992 return false;
4995 /* Create a copy of the function (possibly declaration only) of OLD_NODE,
4996 appropriate for the transactional clone. */
4998 static void
4999 ipa_tm_create_version (struct cgraph_node *old_node)
5001 tree new_decl, old_decl, tm_name;
5002 struct cgraph_node *new_node;
5004 old_decl = old_node->decl;
5005 new_decl = copy_node (old_decl);
5007 /* DECL_ASSEMBLER_NAME needs to be set before we call
5008 cgraph_copy_node_for_versioning below, because cgraph_node will
5009 fill the assembler_name_hash. */
5010 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
5011 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
5012 SET_DECL_RTL (new_decl, NULL);
5013 TREE_SYMBOL_REFERENCED (tm_name) = 1;
5015 /* Perform the same remapping to the comdat group. */
5016 if (DECL_ONE_ONLY (new_decl))
5017 varpool_node::get (new_decl)->set_comdat_group
5018 (tm_mangle (DECL_COMDAT_GROUP (old_decl)));
5020 gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
5021 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
5022 new_node->local.local = false;
5023 new_node->externally_visible = old_node->externally_visible;
5024 new_node->lowered = true;
5025 new_node->tm_clone = 1;
5026 if (!old_node->implicit_section)
5027 new_node->set_section (old_node->get_section ());
5028 get_cg_data (&old_node, true)->clone = new_node;
5030 if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
5032 /* Remap extern inline to static inline. */
5033 /* ??? Is it worth trying to use make_decl_one_only? */
5034 if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
5036 DECL_EXTERNAL (new_decl) = 0;
5037 TREE_PUBLIC (new_decl) = 0;
5038 DECL_WEAK (new_decl) = 0;
5041 tree_function_versioning (old_decl, new_decl,
5042 NULL, false, NULL,
5043 false, NULL, NULL);
5046 record_tm_clone_pair (old_decl, new_decl);
5048 symtab->call_cgraph_insertion_hooks (new_node);
5049 if (old_node->force_output
5050 || old_node->ref_list.first_referring ())
5051 ipa_tm_mark_force_output_node (new_node);
5052 if (old_node->forced_by_abi)
5053 ipa_tm_mark_forced_by_abi_node (new_node);
5055 /* Do the same thing, but for any aliases of the original node. */
5057 struct create_version_alias_info data;
5058 data.old_node = old_node;
5059 data.new_decl = new_decl;
5060 old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
5061 &data, true);
5065 /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */
5067 static void
5068 ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
5069 basic_block bb)
5071 gimple_stmt_iterator gsi;
5072 gcall *g;
5074 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
5076 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
5077 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
5079 split_block_after_labels (bb);
5080 gsi = gsi_after_labels (bb);
5081 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
5083 node->create_edge (cgraph_node::get_create
5084 (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
5085 g, gimple_bb (g)->count,
5086 compute_call_stmt_bb_frequency (node->decl,
5087 gimple_bb (g)));
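/* The rewritten block then begins with a call such as (a sketch of
   what BUILT_IN_TM_IRREVOCABLE expands to):

     _ITM_changeTransactionMode (MODE_SERIALIRREVOCABLE);

   forcing the transaction into serial-irrevocable mode before any of
   the irrevocable statements run.  */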
5090 /* Construct a call to TM_GETTMCLONE and insert it before GSI. */
5092 static bool
5093 ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
5094 struct tm_region *region,
5095 gimple_stmt_iterator *gsi, gcall *stmt)
5097 tree gettm_fn, ret, old_fn, callfn;
5098 gcall *g;
5099 gassign *g2;
5100 bool safe;
5102 old_fn = gimple_call_fn (stmt);
5104 if (TREE_CODE (old_fn) == ADDR_EXPR)
5106 tree fndecl = TREE_OPERAND (old_fn, 0);
5107 tree clone = get_tm_clone_pair (fndecl);
/* By transforming the call into a TM_GETTMCLONE, we are
   technically taking the address of the original function and
   its clone.  Record this so inlining will know these
   functions are needed.  */
cgraph_node::get (fndecl)->mark_address_taken ();
5114 if (clone)
5115 cgraph_node::get (clone)->mark_address_taken ();
5118 safe = is_tm_safe (TREE_TYPE (old_fn));
5119 gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
5120 : BUILT_IN_TM_GETTMCLONE_IRR);
5121 ret = create_tmp_var (ptr_type_node);
5123 if (!safe)
5124 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
5126 /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */
5127 if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
5128 old_fn = OBJ_TYPE_REF_EXPR (old_fn);
5130 g = gimple_build_call (gettm_fn, 1, old_fn);
5131 ret = make_ssa_name (ret, g);
5132 gimple_call_set_lhs (g, ret);
5134 gsi_insert_before (gsi, g, GSI_SAME_STMT);
5136 node->create_edge (cgraph_node::get_create (gettm_fn), g, gimple_bb (g)->count,
5137 compute_call_stmt_bb_frequency (node->decl,
5138 gimple_bb (g)));
5140 /* Cast return value from tm_gettmclone* into appropriate function
5141 pointer. */
5142 callfn = create_tmp_var (TREE_TYPE (old_fn));
5143 g2 = gimple_build_assign (callfn,
5144 fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
5145 callfn = make_ssa_name (callfn, g2);
5146 gimple_assign_set_lhs (g2, callfn);
5147 gsi_insert_before (gsi, g2, GSI_SAME_STMT);
5149 /* ??? This is a hack to preserve the NOTHROW bit on the call,
5150 which we would have derived from the decl. Failure to save
5151 this bit means we might have to split the basic block. */
5152 if (gimple_call_nothrow_p (stmt))
5153 gimple_call_set_nothrow (stmt, true);
5155 gimple_call_set_fn (stmt, callfn);
5157 /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
5158 for a call statement. Fix it. */
5160 tree lhs = gimple_call_lhs (stmt);
5161 tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
5162 if (lhs
5163 && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
5165 tree temp;
5167 temp = create_tmp_reg (rettype);
5168 gimple_call_set_lhs (stmt, temp);
5170 g2 = gimple_build_assign (lhs,
5171 fold_build1 (VIEW_CONVERT_EXPR,
5172 TREE_TYPE (lhs), temp));
5173 gsi_insert_after (gsi, g2, GSI_SAME_STMT);
5177 update_stmt (stmt);
5178 cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
5179 if (e && e->indirect_info)
5180 e->indirect_info->polymorphic = false;
5182 return true;
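/* The net effect on an indirect call is roughly (a sketch, naming
   the libitm entry point the builtin resolves to):

     fnptr ();
   ==>
     callfn = _ITM_getTMCloneOrIrrevocable (fnptr);
     callfn ();

   with the _SAFE variant of the builtin used when the pointed-to
   function type is transaction_safe.  */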
5185 /* Helper function for ipa_tm_transform_calls*. Given a call
5186 statement in GSI which resides inside transaction REGION, redirect
5187 the call to either its wrapper function, or its clone. */
5189 static void
5190 ipa_tm_transform_calls_redirect (struct cgraph_node *node,
5191 struct tm_region *region,
5192 gimple_stmt_iterator *gsi,
5193 bool *need_ssa_rename_p)
5195 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
5196 struct cgraph_node *new_node;
5197 struct cgraph_edge *e = node->get_edge (stmt);
5198 tree fndecl = gimple_call_fndecl (stmt);
5200 /* For indirect calls, pass the address through the runtime. */
5201 if (fndecl == NULL)
5203 *need_ssa_rename_p |=
5204 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5205 return;
5208 /* Handle some TM builtins. Ordinarily these aren't actually generated
5209 at this point, but handling these functions when written in by the
5210 user makes it easier to build unit tests. */
5211 if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
5212 return;
5214 /* Fixup recursive calls inside clones. */
5215 /* ??? Why did cgraph_copy_node_for_versioning update the call edges
5216 for recursion but not update the call statements themselves? */
5217 if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
5219 gimple_call_set_fndecl (stmt, current_function_decl);
5220 return;
5223 /* If there is a replacement, use it. */
5224 fndecl = find_tm_replacement_function (fndecl);
5225 if (fndecl)
5227 new_node = cgraph_node::get_create (fndecl);
5229 /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
5231 We can't do this earlier in record_tm_replacement because
5232 cgraph_remove_unreachable_nodes is called before we inject
5233 references to the node. Further, we can't do this in some
5234 nice central place in ipa_tm_execute because we don't have
5235 the exact list of wrapper functions that would be used.
5236 Marking more wrappers than necessary results in the creation
5237 of unnecessary cgraph_nodes, which can cause some of the
5238 other IPA passes to crash.
5240 We do need to mark these nodes so that we get the proper
5241 result in expand_call_tm. */
5242 /* ??? This seems broken. How is it that we're marking the
5243 CALLEE as may_enter_irr? Surely we should be marking the
5244 CALLER. Also note that find_tm_replacement_function also
5245 contains mappings into the TM runtime, e.g. memcpy. These
5246 we know won't go irrevocable. */
5247 new_node->local.tm_may_enter_irr = 1;
5249 else
5251 struct tm_ipa_cg_data *d;
5252 struct cgraph_node *tnode = e->callee;
5254 d = get_cg_data (&tnode, true);
5255 new_node = d->clone;
5257 /* As we've already skipped pure calls and appropriate builtins,
5258 and we've already marked irrevocable blocks, if we can't come
5259 up with a static replacement, then ask the runtime. */
5260 if (new_node == NULL)
5262 *need_ssa_rename_p |=
5263 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5264 return;
5267 fndecl = new_node->decl;
5270 e->redirect_callee (new_node);
5271 gimple_call_set_fndecl (stmt, fndecl);
5274 /* Helper function for ipa_tm_transform_calls. For a given BB,
5275 install calls to tm_irrevocable when IRR_BLOCKS are reached,
5276 redirect other calls to the generated transactional clone. */
5278 static bool
5279 ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
5280 basic_block bb, bitmap irr_blocks)
5282 gimple_stmt_iterator gsi;
5283 bool need_ssa_rename = false;
5285 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5287 ipa_tm_insert_irr_call (node, region, bb);
5288 return true;
5291 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5293 gimple *stmt = gsi_stmt (gsi);
5295 if (!is_gimple_call (stmt))
5296 continue;
5297 if (is_tm_pure_call (stmt))
5298 continue;
5300 /* Redirect edges to the appropriate replacement or clone. */
5301 ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
5304 return need_ssa_rename;
5307 /* Walk the CFG for REGION, beginning at BB. Install calls to
5308 tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
5309 the generated transactional clone. */
5311 static bool
5312 ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
5313 basic_block bb, bitmap irr_blocks)
5315 bool need_ssa_rename = false;
5316 edge e;
5317 edge_iterator ei;
5318 auto_vec<basic_block> queue;
5319 bitmap visited_blocks = BITMAP_ALLOC (NULL);
5321 queue.safe_push (bb);
5324 bb = queue.pop ();
5326 need_ssa_rename |=
5327 ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
5329 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5330 continue;
5332 if (region && bitmap_bit_p (region->exit_blocks, bb->index))
5333 continue;
5335 FOR_EACH_EDGE (e, ei, bb->succs)
5336 if (!bitmap_bit_p (visited_blocks, e->dest->index))
5338 bitmap_set_bit (visited_blocks, e->dest->index);
5339 queue.safe_push (e->dest);
5342 while (!queue.is_empty ());
5344 BITMAP_FREE (visited_blocks);
5346 return need_ssa_rename;
5349 /* Transform the calls within the TM regions within NODE. */
5351 static void
5352 ipa_tm_transform_transaction (struct cgraph_node *node)
5354 struct tm_ipa_cg_data *d;
5355 struct tm_region *region;
5356 bool need_ssa_rename = false;
5358 d = get_cg_data (&node, true);
5360 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5361 calculate_dominance_info (CDI_DOMINATORS);
5363 for (region = d->all_tm_regions; region; region = region->next)
5365 /* If we're sure to go irrevocable, don't transform anything. */
5366 if (d->irrevocable_blocks_normal
5367 && bitmap_bit_p (d->irrevocable_blocks_normal,
5368 region->entry_block->index))
5370 transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
5371 | GTMA_MAY_ENTER_IRREVOCABLE
5372 | GTMA_HAS_NO_INSTRUMENTATION);
5373 continue;
5376 need_ssa_rename |=
5377 ipa_tm_transform_calls (node, region, region->entry_block,
5378 d->irrevocable_blocks_normal);
5381 if (need_ssa_rename)
5382 update_ssa (TODO_update_ssa_only_virtuals);
5384 pop_cfun ();
5387 /* Transform the calls within the transactional clone of NODE. */
5389 static void
5390 ipa_tm_transform_clone (struct cgraph_node *node)
5392 struct tm_ipa_cg_data *d;
5393 bool need_ssa_rename;
5395 d = get_cg_data (&node, true);
5397 /* If this function makes no calls and has no irrevocable blocks,
5398 then there's nothing to do. */
5399 /* ??? Remove non-aborting top-level transactions. */
5400 if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
5401 return;
5403 push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
5404 calculate_dominance_info (CDI_DOMINATORS);
5406 need_ssa_rename =
5407 ipa_tm_transform_calls (d->clone, NULL,
5408 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
5409 d->irrevocable_blocks_clone);
5411 if (need_ssa_rename)
5412 update_ssa (TODO_update_ssa_only_virtuals);
5414 pop_cfun ();
5417 /* Main entry point for the transactional memory IPA pass. */
5419 static unsigned int
5420 ipa_tm_execute (void)
5422 cgraph_node_queue tm_callees = cgraph_node_queue ();
5423 /* List of functions that will go irrevocable. */
5424 cgraph_node_queue irr_worklist = cgraph_node_queue ();
5426 struct cgraph_node *node;
5427 struct tm_ipa_cg_data *d;
5428 enum availability a;
5429 unsigned int i;
5431 cgraph_node::checking_verify_cgraph_nodes ();
5433 bitmap_obstack_initialize (&tm_obstack);
5434 initialize_original_copy_tables ();
5436 /* For all local functions marked tm_callable, queue them. */
5437 FOR_EACH_DEFINED_FUNCTION (node)
5438 if (is_tm_callable (node->decl)
5439 && node->get_availability () >= AVAIL_INTERPOSABLE)
5441 d = get_cg_data (&node, true);
5442 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5445 /* For all local reachable functions... */
5446 FOR_EACH_DEFINED_FUNCTION (node)
5447 if (node->lowered
5448 && node->get_availability () >= AVAIL_INTERPOSABLE)
5450 /* ... marked tm_pure, record that fact for the runtime by
5451 indicating that the pure function is its own tm_callable.
5452 No need to do this if the function's address can't be taken. */
5453 if (is_tm_pure (node->decl))
5455 if (!node->local.local)
5456 record_tm_clone_pair (node->decl, node->decl);
5457 continue;
5460 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5461 calculate_dominance_info (CDI_DOMINATORS);
5463 tm_region_init (NULL);
5464 if (all_tm_regions)
5466 d = get_cg_data (&node, true);
5468 /* Scan for calls that are in each transaction, and
5469 generate the uninstrumented code path. */
5470 ipa_tm_scan_calls_transaction (d, &tm_callees);
5472 /* Put it in the worklist so we can scan the function
5473 later (ipa_tm_scan_irr_function) and mark the
5474 irrevocable blocks. */
5475 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5476 d->want_irr_scan_normal = true;
5479 pop_cfun ();
5482 /* For every local function on the callee list, scan as if we will be
5483 creating a transactional clone, queueing all new functions we find
5484 along the way. */
5485 for (i = 0; i < tm_callees.length (); ++i)
5487 node = tm_callees[i];
5488 a = node->get_availability ();
5489 d = get_cg_data (&node, true);
5491 /* Put it in the worklist so we can scan the function later
5492 (ipa_tm_scan_irr_function) and mark the irrevocable
5493 blocks. */
5494 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5496 /* Some callees cannot be arbitrarily cloned. These will always be
5497 irrevocable. Mark these now, so that we need not scan them. */
5498 if (is_tm_irrevocable (node->decl))
5499 ipa_tm_note_irrevocable (node, &irr_worklist);
5500 else if (a <= AVAIL_NOT_AVAILABLE
5501 && !is_tm_safe_or_pure (node->decl))
5502 ipa_tm_note_irrevocable (node, &irr_worklist);
5503 else if (a >= AVAIL_INTERPOSABLE)
5505 if (!tree_versionable_function_p (node->decl))
5506 ipa_tm_note_irrevocable (node, &irr_worklist);
5507 else if (!d->is_irrevocable)
5509 /* If this is an alias, make sure its base is queued as well.
5510 we need not scan the callees now, as the base will do. */
5511 if (node->alias)
5513 node = cgraph_node::get (node->thunk.alias);
5514 d = get_cg_data (&node, true);
5515 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5516 continue;
5519 /* Add all nodes called by this function into
5520 tm_callees as well. */
5521 ipa_tm_scan_calls_clone (node, &tm_callees);
5526 /* Iterate scans until no more work to be done. Prefer not to use
5527 vec::pop because the worklist tends to follow a breadth-first
search of the callgraph, which should allow convergence with a
5529 minimum number of scans. But we also don't want the worklist
5530 array to grow without bound, so we shift the array up periodically. */
5531 for (i = 0; i < irr_worklist.length (); ++i)
5533 if (i > 256 && i == irr_worklist.length () / 8)
5535 irr_worklist.block_remove (0, i);
5536 i = 0;
5539 node = irr_worklist[i];
5540 d = get_cg_data (&node, true);
5541 d->in_worklist = false;
5543 if (d->want_irr_scan_normal)
5545 d->want_irr_scan_normal = false;
5546 ipa_tm_scan_irr_function (node, false);
5548 if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
5549 ipa_tm_note_irrevocable (node, &irr_worklist);
5552 /* For every function on the callee list, collect the tm_may_enter_irr
5553 bit on the node. */
5554 irr_worklist.truncate (0);
5555 for (i = 0; i < tm_callees.length (); ++i)
5557 node = tm_callees[i];
5558 if (ipa_tm_mayenterirr_function (node))
5560 d = get_cg_data (&node, true);
5561 gcc_assert (d->in_worklist == false);
5562 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5566 /* Propagate the tm_may_enter_irr bit to callers until stable. */
5567 for (i = 0; i < irr_worklist.length (); ++i)
5569 struct cgraph_node *caller;
5570 struct cgraph_edge *e;
5571 struct ipa_ref *ref;
5573 if (i > 256 && i == irr_worklist.length () / 8)
5575 irr_worklist.block_remove (0, i);
5576 i = 0;
5579 node = irr_worklist[i];
5580 d = get_cg_data (&node, true);
5581 d->in_worklist = false;
5582 node->local.tm_may_enter_irr = true;
5584 /* Propagate back to normal callers. */
5585 for (e = node->callers; e ; e = e->next_caller)
5587 caller = e->caller;
5588 if (!is_tm_safe_or_pure (caller->decl)
5589 && !caller->local.tm_may_enter_irr)
5591 d = get_cg_data (&caller, true);
5592 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5596 /* Propagate back to referring aliases as well. */
5597 FOR_EACH_ALIAS (node, ref)
5599 caller = dyn_cast<cgraph_node *> (ref->referring);
5600 if (!caller->local.tm_may_enter_irr)
5602 /* ?? Do not traverse aliases here. */
5603 d = get_cg_data (&caller, false);
5604 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5609 /* Now validate all tm_safe functions, and all atomic regions in
5610 other functions. */
5611 FOR_EACH_DEFINED_FUNCTION (node)
5612 if (node->lowered
5613 && node->get_availability () >= AVAIL_INTERPOSABLE)
5615 d = get_cg_data (&node, true);
5616 if (is_tm_safe (node->decl))
5617 ipa_tm_diagnose_tm_safe (node);
5618 else if (d->all_tm_regions)
5619 ipa_tm_diagnose_transaction (node, d->all_tm_regions);
5622 /* Create clones. Do those that are not irrevocable and have a
5623 positive call count. Do those publicly visible functions that
5624 the user directed us to clone. */
5625 for (i = 0; i < tm_callees.length (); ++i)
5627 bool doit = false;
5629 node = tm_callees[i];
5630 if (node->cpp_implicit_alias)
5631 continue;
5633 a = node->get_availability ();
5634 d = get_cg_data (&node, true);
5636 if (a <= AVAIL_NOT_AVAILABLE)
5637 doit = is_tm_callable (node->decl);
5638 else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
5639 doit = true;
5640 else if (!d->is_irrevocable
5641 && d->tm_callers_normal + d->tm_callers_clone > 0)
5642 doit = true;
5644 if (doit)
5645 ipa_tm_create_version (node);
5648 /* Redirect calls to the new clones, and insert irrevocable marks. */
5649 for (i = 0; i < tm_callees.length (); ++i)
5651 node = tm_callees[i];
5652 if (node->analyzed)
5654 d = get_cg_data (&node, true);
5655 if (d->clone)
5656 ipa_tm_transform_clone (node);
5659 FOR_EACH_DEFINED_FUNCTION (node)
5660 if (node->lowered
5661 && node->get_availability () >= AVAIL_INTERPOSABLE)
5663 d = get_cg_data (&node, true);
5664 if (d->all_tm_regions)
5665 ipa_tm_transform_transaction (node);
5668 /* Free and clear all data structures. */
5669 tm_callees.release ();
5670 irr_worklist.release ();
5671 bitmap_obstack_release (&tm_obstack);
5672 free_original_copy_tables ();
5674 FOR_EACH_FUNCTION (node)
5675 node->aux = NULL;
5677 cgraph_node::checking_verify_cgraph_nodes ();
5679 return 0;
5682 namespace {
5684 const pass_data pass_data_ipa_tm =
5686 SIMPLE_IPA_PASS, /* type */
5687 "tmipa", /* name */
5688 OPTGROUP_NONE, /* optinfo_flags */
5689 TV_TRANS_MEM, /* tv_id */
5690 ( PROP_ssa | PROP_cfg ), /* properties_required */
5691 0, /* properties_provided */
5692 0, /* properties_destroyed */
5693 0, /* todo_flags_start */
5694 0, /* todo_flags_finish */
5697 class pass_ipa_tm : public simple_ipa_opt_pass
5699 public:
5700 pass_ipa_tm (gcc::context *ctxt)
5701 : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
5704 /* opt_pass methods: */
5705 virtual bool gate (function *) { return flag_tm; }
5706 virtual unsigned int execute (function *) { return ipa_tm_execute (); }
5708 }; // class pass_ipa_tm
5710 } // anon namespace
5712 simple_ipa_opt_pass *
5713 make_pass_ipa_tm (gcc::context *ctxt)
5715 return new pass_ipa_tm (ctxt);
5718 #include "gt-trans-mem.h"