1 /* Passes for transactional memory support.
2 Copyright (C) 2008-2016 Free Software Foundation, Inc.
3 Contributed by Richard Henderson <rth@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "cgraph.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
36 #include "fold-const.h"
37 #include "tree-eh.h"
38 #include "calls.h"
39 #include "gimplify.h"
40 #include "gimple-iterator.h"
41 #include "gimplify-me.h"
42 #include "gimple-walk.h"
43 #include "tree-cfg.h"
44 #include "tree-into-ssa.h"
45 #include "tree-inline.h"
46 #include "demangle.h"
47 #include "output.h"
48 #include "trans-mem.h"
49 #include "params.h"
50 #include "langhooks.h"
51 #include "cfgloop.h"
52 #include "tree-ssa-address.h"
55 #define A_RUNINSTRUMENTEDCODE 0x0001
56 #define A_RUNUNINSTRUMENTEDCODE 0x0002
57 #define A_SAVELIVEVARIABLES 0x0004
58 #define A_RESTORELIVEVARIABLES 0x0008
59 #define A_ABORTTRANSACTION 0x0010
61 #define AR_USERABORT 0x0001
62 #define AR_USERRETRY 0x0002
63 #define AR_TMCONFLICT 0x0004
64 #define AR_EXCEPTIONBLOCKABORT 0x0008
65 #define AR_OUTERABORT 0x0010
67 #define MODE_SERIALIRREVOCABLE 0x0000
70 /* The representation of a transaction changes several times during the
71 lowering process. In the beginning, in the front-end we have the
72 GENERIC tree TRANSACTION_EXPR. For example,
74 __transaction {
75 local++;
76 if (++global == 10)
77 __tm_abort;
80 During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
81 trivially replaced with a GIMPLE_TRANSACTION node.
83 During pass_lower_tm, we examine the body of transactions looking
84 for aborts. Transactions that do not contain an abort may be
85 merged into an outer transaction. We also add a TRY-FINALLY node
86 to arrange for the transaction to be committed on any exit.
88 [??? Think about how this arrangement affects throw-with-commit
89 and throw-with-abort operations. In this case we want the TRY to
90 handle gotos, but not to catch any exceptions because the transaction
91 will already be closed.]
93 GIMPLE_TRANSACTION [label=NULL] {
94 try {
95 local = local + 1;
96 t0 = global;
97 t1 = t0 + 1;
98 global = t1;
99 if (t1 == 10)
100 __builtin___tm_abort ();
101 } finally {
102 __builtin___tm_commit ();
106 During pass_lower_eh, we create EH regions for the transactions,
107 intermixed with the regular EH stuff. This gives us a nice persistent
108 mapping (all the way through rtl) from transactional memory operation
109 back to the transaction, which allows us to get the abnormal edges
110 correct to model transaction aborts and restarts:
112 GIMPLE_TRANSACTION [label=over]
113 local = local + 1;
114 t0 = global;
115 t1 = t0 + 1;
116 global = t1;
117 if (t1 == 10)
118 __builtin___tm_abort ();
119 __builtin___tm_commit ();
120 over:
122 This is the end of all_lowering_passes, and so is what is present
123 during the IPA passes, and through all of the optimization passes.
125 During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
126 functions and mark functions for cloning.
128 At the end of gimple optimization, before exiting SSA form,
129 pass_tm_edges replaces statements that perform transactional
130     memory operations with the appropriate TM builtins, and swaps
131 out function calls with their transactional clones. At this
132 point we introduce the abnormal transaction restart edges and
133 complete lowering of the GIMPLE_TRANSACTION node.
135 x = __builtin___tm_start (MAY_ABORT);
136 eh_label:
137 if (x & abort_transaction)
138 goto over;
139 local = local + 1;
140 t0 = __builtin___tm_load (global);
141 t1 = t0 + 1;
142 __builtin___tm_store (&global, t1);
143 if (t1 == 10)
144 __builtin___tm_abort ();
145 __builtin___tm_commit ();
146 over:
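/* As a user-level illustration (a minimal sketch assuming the GNU TM
   syntax accepted under -fgnu-tm; not code from this file), the source
   construct that feeds this pipeline looks like:

	extern int global;

	void
	f (void)
	{
	  int local = 0;
	  __transaction_atomic
	    {
	      local++;
	      if (++global == 10)
		__transaction_cancel;
	    }
	}

   __transaction_atomic produces an atomic GIMPLE_TRANSACTION,
   __transaction_relaxed one with GTMA_IS_RELAXED set, and
   __transaction_cancel the BUILT_IN_TM_ABORT call shown above.  */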
149 static void *expand_regions (struct tm_region *,
150 void *(*callback)(struct tm_region *, void *),
151 void *, bool);
154 /* Return the attributes we want to examine for X, or NULL if it's not
155    something we examine.  We look at function types, but also accept
156    function decls and pointers to function types, peeking through to the type. */
158 static tree
159 get_attrs_for (const_tree x)
161 if (x == NULL_TREE)
162 return NULL_TREE;
164 switch (TREE_CODE (x))
166 case FUNCTION_DECL:
167 return TYPE_ATTRIBUTES (TREE_TYPE (x));
168 break;
170 default:
171 if (TYPE_P (x))
172 return NULL_TREE;
173 x = TREE_TYPE (x);
174 if (TREE_CODE (x) != POINTER_TYPE)
175 return NULL_TREE;
176 /* FALLTHRU */
178 case POINTER_TYPE:
179 x = TREE_TYPE (x);
180 if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
181 return NULL_TREE;
182 /* FALLTHRU */
184 case FUNCTION_TYPE:
185 case METHOD_TYPE:
186 return TYPE_ATTRIBUTES (x);
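/* Illustration only (hypothetical declarations, not part of this file):
   given

	void f (void) __attribute__((transaction_safe));
	typedef void safe_fn (void) __attribute__((transaction_safe));
	safe_fn *pf = &f;

   get_attrs_for returns the same attribute list for the FUNCTION_DECL
   `f', for the FUNCTION_TYPE `safe_fn', and for the pointer `pf' (by
   peeking through its POINTER_TYPE), so the is_tm_* predicates below
   treat direct and indirect calls uniformly.  */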
190 /* Return true if X has been marked TM_PURE. */
192 bool
193 is_tm_pure (const_tree x)
195 unsigned flags;
197 switch (TREE_CODE (x))
199 case FUNCTION_DECL:
200 case FUNCTION_TYPE:
201 case METHOD_TYPE:
202 break;
204 default:
205 if (TYPE_P (x))
206 return false;
207 x = TREE_TYPE (x);
208 if (TREE_CODE (x) != POINTER_TYPE)
209 return false;
210 /* FALLTHRU */
212 case POINTER_TYPE:
213 x = TREE_TYPE (x);
214 if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
215 return false;
216 break;
219 flags = flags_from_decl_or_type (x);
220 return (flags & ECF_TM_PURE) != 0;
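/* A hedged sketch of what sets ECF_TM_PURE (assuming the GNU TM
   attribute spelling; hypothetical function name):

	int pure_hash (int) __attribute__((transaction_pure));

   flags_from_decl_or_type translates the transaction_pure attribute
   into ECF_TM_PURE, so calls to such a function need neither
   instrumentation nor a transactional clone.  */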
223 /* Return true if X has been marked TM_IRREVOCABLE. */
225 static bool
226 is_tm_irrevocable (tree x)
228 tree attrs = get_attrs_for (x);
230 if (attrs && lookup_attribute ("transaction_unsafe", attrs))
231 return true;
233   /* A call to the irrevocable builtin is, by definition,
234 irrevocable. */
235 if (TREE_CODE (x) == ADDR_EXPR)
236 x = TREE_OPERAND (x, 0);
237 if (TREE_CODE (x) == FUNCTION_DECL
238 && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
239 && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
240 return true;
242 return false;
245 /* Return true if X has been marked TM_SAFE. */
247 bool
248 is_tm_safe (const_tree x)
250 if (flag_tm)
252 tree attrs = get_attrs_for (x);
253 if (attrs)
255 if (lookup_attribute ("transaction_safe", attrs))
256 return true;
257 if (lookup_attribute ("transaction_may_cancel_outer", attrs))
258 return true;
261 return false;
264 /* Return true if CALL is const or tm_pure.  */
266 static bool
267 is_tm_pure_call (gimple *call)
269 if (gimple_call_internal_p (call))
270 return (gimple_call_flags (call) & (ECF_CONST | ECF_TM_PURE)) != 0;
272 tree fn = gimple_call_fn (call);
274 if (TREE_CODE (fn) == ADDR_EXPR)
276 fn = TREE_OPERAND (fn, 0);
277 gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
279 else
280 fn = TREE_TYPE (fn);
282 return is_tm_pure (fn);
285 /* Return true if X has been marked TM_CALLABLE. */
287 static bool
288 is_tm_callable (tree x)
290 tree attrs = get_attrs_for (x);
291 if (attrs)
293 if (lookup_attribute ("transaction_callable", attrs))
294 return true;
295 if (lookup_attribute ("transaction_safe", attrs))
296 return true;
297 if (lookup_attribute ("transaction_may_cancel_outer", attrs))
298 return true;
300 return false;
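/* Summarizing the predicates above with a sketch of declarations
   (hypothetical names):

	void a (void) __attribute__((transaction_safe));
	void b (void) __attribute__((transaction_may_cancel_outer));
	void c (void) __attribute__((transaction_callable));
	void d (void) __attribute__((transaction_unsafe));

   is_tm_safe is true for a and b; is_tm_callable for a, b and c;
   is_tm_irrevocable for d.  A merely callable function like c may
   still force the transaction to go irrevocable.  */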
303 /* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER. */
305 bool
306 is_tm_may_cancel_outer (tree x)
308 tree attrs = get_attrs_for (x);
309 if (attrs)
310 return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
311 return false;
314 /* Return true for built-in functions that "end" a transaction.  */
316 bool
317 is_tm_ending_fndecl (tree fndecl)
319 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
320 switch (DECL_FUNCTION_CODE (fndecl))
322 case BUILT_IN_TM_COMMIT:
323 case BUILT_IN_TM_COMMIT_EH:
324 case BUILT_IN_TM_ABORT:
325 case BUILT_IN_TM_IRREVOCABLE:
326 return true;
327 default:
328 break;
331 return false;
334 /* Return true if STMT is a built-in function call that "ends" a
335 transaction. */
337 bool
338 is_tm_ending (gimple *stmt)
340 tree fndecl;
342 if (gimple_code (stmt) != GIMPLE_CALL)
343 return false;
345 fndecl = gimple_call_fndecl (stmt);
346 return (fndecl != NULL_TREE
347 && is_tm_ending_fndecl (fndecl));
350 /* Return true if STMT is a TM load. */
352 static bool
353 is_tm_load (gimple *stmt)
355 tree fndecl;
357 if (gimple_code (stmt) != GIMPLE_CALL)
358 return false;
360 fndecl = gimple_call_fndecl (stmt);
361 return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
362 && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
365 /* Same as above, but for simple TM loads, that is, not the
366 after-write, after-read, etc optimized variants. */
368 static bool
369 is_tm_simple_load (gimple *stmt)
371 tree fndecl;
373 if (gimple_code (stmt) != GIMPLE_CALL)
374 return false;
376 fndecl = gimple_call_fndecl (stmt);
377 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
379 enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
380 return (fcode == BUILT_IN_TM_LOAD_1
381 || fcode == BUILT_IN_TM_LOAD_2
382 || fcode == BUILT_IN_TM_LOAD_4
383 || fcode == BUILT_IN_TM_LOAD_8
384 || fcode == BUILT_IN_TM_LOAD_FLOAT
385 || fcode == BUILT_IN_TM_LOAD_DOUBLE
386 || fcode == BUILT_IN_TM_LOAD_LDOUBLE
387 || fcode == BUILT_IN_TM_LOAD_M64
388 || fcode == BUILT_IN_TM_LOAD_M128
389 || fcode == BUILT_IN_TM_LOAD_M256);
391 return false;
394 /* Return true if STMT is a TM store. */
396 static bool
397 is_tm_store (gimple *stmt)
399 tree fndecl;
401 if (gimple_code (stmt) != GIMPLE_CALL)
402 return false;
404 fndecl = gimple_call_fndecl (stmt);
405 return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
406 && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
409 /* Same as above, but for simple TM stores, that is, not the
410 after-write, after-read, etc optimized variants. */
412 static bool
413 is_tm_simple_store (gimple *stmt)
415 tree fndecl;
417 if (gimple_code (stmt) != GIMPLE_CALL)
418 return false;
420 fndecl = gimple_call_fndecl (stmt);
421 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
423 enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
424 return (fcode == BUILT_IN_TM_STORE_1
425 || fcode == BUILT_IN_TM_STORE_2
426 || fcode == BUILT_IN_TM_STORE_4
427 || fcode == BUILT_IN_TM_STORE_8
428 || fcode == BUILT_IN_TM_STORE_FLOAT
429 || fcode == BUILT_IN_TM_STORE_DOUBLE
430 || fcode == BUILT_IN_TM_STORE_LDOUBLE
431 || fcode == BUILT_IN_TM_STORE_M64
432 || fcode == BUILT_IN_TM_STORE_M128
433 || fcode == BUILT_IN_TM_STORE_M256);
435 return false;
438 /* Return true if FNDECL is BUILT_IN_TM_ABORT. */
440 static bool
441 is_tm_abort (tree fndecl)
443 return (fndecl
444 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
445 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
448 /* Build a GENERIC tree for a user abort. This is called by front ends
449 while transforming the __tm_abort statement. */
451 tree
452 build_tm_abort_call (location_t loc, bool is_outer)
454 return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
455 build_int_cst (integer_type_node,
456 AR_USERABORT
457 | (is_outer ? AR_OUTERABORT : 0)));
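/* For example (user-level sketch, assuming the GNU TM cancel syntax),
   the front ends reach this function when lowering

	__transaction_atomic { if (err) __transaction_cancel; }

   with IS_OUTER false, and the outer-cancel form

	__transaction_cancel [[outer]];

   inside a transaction_may_cancel_outer context with IS_OUTER true,
   yielding a BUILT_IN_TM_ABORT call carrying AR_USERABORT and, in the
   latter case, AR_OUTERABORT.  */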
460 /* Map for arbitrary function replacement under TM, as created
461 by the tm_wrap attribute. */
463 struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map>
465 static inline hashval_t hash (tree_map *m) { return m->hash; }
466 static inline bool
467 equal (tree_map *a, tree_map *b)
469 return a->base.from == b->base.from;
472 static int
473 keep_cache_entry (tree_map *&m)
475 return ggc_marked_p (m->base.from);
479 static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;
481 void
482 record_tm_replacement (tree from, tree to)
484 struct tree_map **slot, *h;
486 /* Do not inline wrapper functions that will get replaced in the TM
487 pass.
489 Suppose you have foo() that will get replaced into tmfoo(). Make
490 sure the inliner doesn't try to outsmart us and inline foo()
491 before we get a chance to do the TM replacement. */
492 DECL_UNINLINABLE (from) = 1;
494 if (tm_wrap_map == NULL)
495 tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);
497 h = ggc_alloc<tree_map> ();
498 h->hash = htab_hash_pointer (from);
499 h->base.from = from;
500 h->to = to;
502 slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
503 *slot = h;
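/* A hedged usage sketch of the tm_wrap attribute (hypothetical names;
   the attribute argument names the function being wrapped):

	void slow_path (void);
	void tm_slow_path (void)
	  __attribute__((transaction_wrap (slow_path)));

   Once the front end records the pair here, calls to slow_path inside
   transactional code are redirected to tm_slow_path, and slow_path is
   made DECL_UNINLINABLE so the inliner cannot bypass the wrapper.  */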
506 /* Return a TM-aware replacement function for DECL. */
508 static tree
509 find_tm_replacement_function (tree fndecl)
511 if (tm_wrap_map)
513 struct tree_map *h, in;
515 in.base.from = fndecl;
516 in.hash = htab_hash_pointer (fndecl);
517 h = tm_wrap_map->find_with_hash (&in, in.hash);
518 if (h)
519 return h->to;
522 /* ??? We may well want TM versions of most of the common <string.h>
523    functions.  For now, we already have the few handled below.  */
524 /* Adjust expand_call_tm() attributes as necessary for the cases
525 handled here: */
526 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
527 switch (DECL_FUNCTION_CODE (fndecl))
529 case BUILT_IN_MEMCPY:
530 return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
531 case BUILT_IN_MEMMOVE:
532 return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
533 case BUILT_IN_MEMSET:
534 return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
535 default:
536 return NULL;
539 return NULL;
542 /* When appropriate, record TM replacement for memory allocation functions.
544 FROM is the FNDECL to wrap. */
545 void
546 tm_malloc_replacement (tree from)
548 const char *str;
549 tree to;
551 if (TREE_CODE (from) != FUNCTION_DECL)
552 return;
554 /* If we have a previous replacement, the user must be explicitly
555 wrapping malloc/calloc/free. They better know what they're
556 doing... */
557 if (find_tm_replacement_function (from))
558 return;
560 str = IDENTIFIER_POINTER (DECL_NAME (from));
562 if (!strcmp (str, "malloc"))
563 to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
564 else if (!strcmp (str, "calloc"))
565 to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
566 else if (!strcmp (str, "free"))
567 to = builtin_decl_explicit (BUILT_IN_TM_FREE);
568 else
569 return;
571 TREE_NOTHROW (to) = 0;
573 record_tm_replacement (from, to);
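/* Illustrative effect (sketch only): within a transaction,

	p = malloc (n);  ...  free (p);

   end up calling BUILT_IN_TM_MALLOC and BUILT_IN_TM_FREE, so that a
   transaction restart can release memory allocated during the aborted
   attempt before the body is re-executed.  */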
576 /* Diagnostics for tm_safe functions/regions. Called by the front end
577 once we've lowered the function to high-gimple. */
579 /* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
580 Process exactly one statement. WI->INFO is set to non-null when in
581 the context of a tm_safe function, and null for a __transaction block. */
583 #define DIAG_TM_OUTER 1
584 #define DIAG_TM_SAFE 2
585 #define DIAG_TM_RELAXED 4
587 struct diagnose_tm
589 unsigned int summary_flags : 8;
590 unsigned int block_flags : 8;
591 unsigned int func_flags : 8;
592 unsigned int saw_volatile : 1;
593 gimple *stmt;
596 /* Return true if T is a volatile lvalue of some kind. */
598 static bool
599 volatile_lvalue_p (tree t)
601 return ((SSA_VAR_P (t) || REFERENCE_CLASS_P (t))
602 && TREE_THIS_VOLATILE (TREE_TYPE (t)));
605 /* Tree callback function for diagnose_tm pass. */
607 static tree
608 diagnose_tm_1_op (tree *tp, int *walk_subtrees, void *data)
610 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
611 struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
613 if (TYPE_P (*tp))
614 *walk_subtrees = false;
615 else if (volatile_lvalue_p (*tp)
616 && !d->saw_volatile)
618 d->saw_volatile = 1;
619 if (d->block_flags & DIAG_TM_SAFE)
620 error_at (gimple_location (d->stmt),
621 "invalid use of volatile lvalue inside transaction");
622 else if (d->func_flags & DIAG_TM_SAFE)
623 error_at (gimple_location (d->stmt),
624 "invalid use of volatile lvalue inside %<transaction_safe%>"
625 "function");
628 return NULL_TREE;
631 static inline bool
632 is_tm_safe_or_pure (const_tree x)
634 return is_tm_safe (x) || is_tm_pure (x);
637 static tree
638 diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
639 struct walk_stmt_info *wi)
641 gimple *stmt = gsi_stmt (*gsi);
642 struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
644 /* Save stmt for use in leaf analysis. */
645 d->stmt = stmt;
647 switch (gimple_code (stmt))
649 case GIMPLE_CALL:
651 tree fn = gimple_call_fn (stmt);
653 if ((d->summary_flags & DIAG_TM_OUTER) == 0
654 && is_tm_may_cancel_outer (fn))
655 error_at (gimple_location (stmt),
656 "%<transaction_may_cancel_outer%> function call not within"
657 " outer transaction or %<transaction_may_cancel_outer%>");
659 if (d->summary_flags & DIAG_TM_SAFE)
661 bool is_safe, direct_call_p;
662 tree replacement;
664 if (TREE_CODE (fn) == ADDR_EXPR
665 && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
667 direct_call_p = true;
668 replacement = TREE_OPERAND (fn, 0);
669 replacement = find_tm_replacement_function (replacement);
670 if (replacement)
671 fn = replacement;
673 else
675 direct_call_p = false;
676 replacement = NULL_TREE;
679 if (is_tm_safe_or_pure (fn))
680 is_safe = true;
681 else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
683 /* A function explicitly marked transaction_callable as
684 opposed to transaction_safe is being defined to be
685 unsafe as part of its ABI, regardless of its contents. */
686 is_safe = false;
688 else if (direct_call_p)
690 if (IS_TYPE_OR_DECL_P (fn)
691 && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
692 is_safe = true;
693 else if (replacement)
695 /* ??? At present we've been considering replacements
696 merely transaction_callable, and therefore might
697 enter irrevocable. The tm_wrap attribute has not
698 yet made it into the new language spec. */
699 is_safe = false;
701 else
703 /* ??? Diagnostics for unmarked direct calls moved into
704 the IPA pass. Section 3.2 of the spec details how
705 functions not marked should be considered "implicitly
706 safe" based on having examined the function body. */
707 is_safe = true;
710 else
712 /* An unmarked indirect call. Consider it unsafe even
713 though optimization may yet figure out how to inline. */
714 is_safe = false;
717 if (!is_safe)
719 if (TREE_CODE (fn) == ADDR_EXPR)
720 fn = TREE_OPERAND (fn, 0);
721 if (d->block_flags & DIAG_TM_SAFE)
723 if (direct_call_p)
724 error_at (gimple_location (stmt),
725 "unsafe function call %qD within "
726 "atomic transaction", fn);
727 else
729 if (!DECL_P (fn) || DECL_NAME (fn))
730 error_at (gimple_location (stmt),
731 "unsafe function call %qE within "
732 "atomic transaction", fn);
733 else
734 error_at (gimple_location (stmt),
735 "unsafe indirect function call within "
736 "atomic transaction");
739 else
741 if (direct_call_p)
742 error_at (gimple_location (stmt),
743 "unsafe function call %qD within "
744 "%<transaction_safe%> function", fn);
745 else
747 if (!DECL_P (fn) || DECL_NAME (fn))
748 error_at (gimple_location (stmt),
749 "unsafe function call %qE within "
750 "%<transaction_safe%> function", fn);
751 else
752 error_at (gimple_location (stmt),
753 "unsafe indirect function call within "
754 "%<transaction_safe%> function");
760 break;
762 case GIMPLE_ASM:
763 /* ??? We ought to come up with a way to add attributes to
764 asm statements, and then add "transaction_safe" to it.
765 Either that or get the language spec to resurrect __tm_waiver. */
766 if (d->block_flags & DIAG_TM_SAFE)
767 error_at (gimple_location (stmt),
768 "asm not allowed in atomic transaction");
769 else if (d->func_flags & DIAG_TM_SAFE)
770 error_at (gimple_location (stmt),
771 "asm not allowed in %<transaction_safe%> function");
772 break;
774 case GIMPLE_TRANSACTION:
776 gtransaction *trans_stmt = as_a <gtransaction *> (stmt);
777 unsigned char inner_flags = DIAG_TM_SAFE;
779 if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_RELAXED)
781 if (d->block_flags & DIAG_TM_SAFE)
782 error_at (gimple_location (stmt),
783 "relaxed transaction in atomic transaction");
784 else if (d->func_flags & DIAG_TM_SAFE)
785 error_at (gimple_location (stmt),
786 "relaxed transaction in %<transaction_safe%> function");
787 inner_flags = DIAG_TM_RELAXED;
789 else if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_OUTER)
791 if (d->block_flags)
792 error_at (gimple_location (stmt),
793 "outer transaction in transaction");
794 else if (d->func_flags & DIAG_TM_OUTER)
795 error_at (gimple_location (stmt),
796 "outer transaction in "
797 "%<transaction_may_cancel_outer%> function");
798 else if (d->func_flags & DIAG_TM_SAFE)
799 error_at (gimple_location (stmt),
800 "outer transaction in %<transaction_safe%> function");
801 inner_flags |= DIAG_TM_OUTER;
804 *handled_ops_p = true;
805 if (gimple_transaction_body (trans_stmt))
807 struct walk_stmt_info wi_inner;
808 struct diagnose_tm d_inner;
810 memset (&d_inner, 0, sizeof (d_inner));
811 d_inner.func_flags = d->func_flags;
812 d_inner.block_flags = d->block_flags | inner_flags;
813 d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;
815 memset (&wi_inner, 0, sizeof (wi_inner));
816 wi_inner.info = &d_inner;
818 walk_gimple_seq (gimple_transaction_body (trans_stmt),
819 diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
822 break;
824 default:
825 break;
828 return NULL_TREE;
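/* Examples of what the checks above reject (illustrative user code,
   assuming -fgnu-tm; not part of this file):

	extern volatile int vflag;
	extern void (*fp) (void);	// unannotated function pointer

	void f (void) __attribute__((transaction_safe));
	void
	f (void)
	{
	  vflag = 1;		// error: invalid use of volatile lvalue ...
	  fp ();		// error: unsafe function call ...
	  __asm__ ("");		// error: asm not allowed ...
	}

   A relaxed transaction (__transaction_relaxed) accepts such statements
   and instead forces the transaction to become irrevocable.  */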
831 static unsigned int
832 diagnose_tm_blocks (void)
834 struct walk_stmt_info wi;
835 struct diagnose_tm d;
837 memset (&d, 0, sizeof (d));
838 if (is_tm_may_cancel_outer (current_function_decl))
839 d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
840 else if (is_tm_safe (current_function_decl))
841 d.func_flags = DIAG_TM_SAFE;
842 d.summary_flags = d.func_flags;
844 memset (&wi, 0, sizeof (wi));
845 wi.info = &d;
847 walk_gimple_seq (gimple_body (current_function_decl),
848 diagnose_tm_1, diagnose_tm_1_op, &wi);
850 return 0;
853 namespace {
855 const pass_data pass_data_diagnose_tm_blocks =
857 GIMPLE_PASS, /* type */
858 "*diagnose_tm_blocks", /* name */
859 OPTGROUP_NONE, /* optinfo_flags */
860 TV_TRANS_MEM, /* tv_id */
861 PROP_gimple_any, /* properties_required */
862 0, /* properties_provided */
863 0, /* properties_destroyed */
864 0, /* todo_flags_start */
865 0, /* todo_flags_finish */
868 class pass_diagnose_tm_blocks : public gimple_opt_pass
870 public:
871 pass_diagnose_tm_blocks (gcc::context *ctxt)
872 : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
875 /* opt_pass methods: */
876 virtual bool gate (function *) { return flag_tm; }
877 virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }
879 }; // class pass_diagnose_tm_blocks
881 } // anon namespace
883 gimple_opt_pass *
884 make_pass_diagnose_tm_blocks (gcc::context *ctxt)
886 return new pass_diagnose_tm_blocks (ctxt);
889 /* Instead of instrumenting thread private memory, we save the
890 addresses in a log which we later use to save/restore the addresses
891 upon transaction start/restart.
893 The log is keyed by address, where each element contains individual
894 statements among different code paths that perform the store.
896 This log is later used to generate either plain save/restore of the
897 addresses upon transaction start/restart, or calls to the ITM_L*
898 logging functions.
900 So for something like:
902 struct large { int x[1000]; };
903 struct large lala = { 0 };
904 __transaction {
905 lala.x[i] = 123;
909 We can either save/restore:
911 lala = { 0 };
912 trxn = _ITM_startTransaction ();
913 if (trxn & a_saveLiveVariables)
914 tmp_lala1 = lala.x[i];
915 else if (a & a_restoreLiveVariables)
916 lala.x[i] = tmp_lala1;
918 or use the logging functions:
920 lala = { 0 };
921 trxn = _ITM_startTransaction ();
922 _ITM_LU4 (&lala.x[i]);
924    Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as far
925    up the dominator tree as possible to shadow all of the writes to a given
926 location (thus reducing the total number of logging calls), but not
927 so high as to be called on a path that does not perform a
928 write. */
930 /* One individual log entry. We may have multiple statements for the
931    same location if neither dominates the other (on different
932 execution paths). */
933 struct tm_log_entry
935 /* Address to save. */
936 tree addr;
937 /* Entry block for the transaction this address occurs in. */
938 basic_block entry_block;
939 /* Dominating statements the store occurs in. */
940 vec<gimple *> stmts;
941 /* Initially, while we are building the log, we place a nonzero
942 value here to mean that this address *will* be saved with a
943 save/restore sequence. Later, when generating the save sequence
944 we place the SSA temp generated here. */
945 tree save_var;
949 /* Log entry hashtable helpers. */
951 struct log_entry_hasher : pointer_hash <tm_log_entry>
953 static inline hashval_t hash (const tm_log_entry *);
954 static inline bool equal (const tm_log_entry *, const tm_log_entry *);
955 static inline void remove (tm_log_entry *);
958 /* Htab support. Return hash value for a `tm_log_entry'. */
959 inline hashval_t
960 log_entry_hasher::hash (const tm_log_entry *log)
962 return iterative_hash_expr (log->addr, 0);
965 /* Htab support. Return true if two log entries are the same. */
966 inline bool
967 log_entry_hasher::equal (const tm_log_entry *log1, const tm_log_entry *log2)
969 /* FIXME:
971 rth: I suggest that we get rid of the component refs etc.
972 I.e. resolve the reference to base + offset.
974 We may need to actually finish a merge with mainline for this,
975 since we'd like to be presented with Richi's MEM_REF_EXPRs more
976 often than not. But in the meantime your tm_log_entry could save
977 the results of get_inner_reference.
979 See: g++.dg/tm/pr46653.C
982 /* Special case plain equality because operand_equal_p() below will
983 return FALSE if the addresses are equal but they have
984 side-effects (e.g. a volatile address). */
985 if (log1->addr == log2->addr)
986 return true;
988 return operand_equal_p (log1->addr, log2->addr, 0);
991 /* Htab support. Free one tm_log_entry. */
992 inline void
993 log_entry_hasher::remove (tm_log_entry *lp)
995 lp->stmts.release ();
996 free (lp);
1000 /* The actual log. */
1001 static hash_table<log_entry_hasher> *tm_log;
1003 /* Addresses to log with a save/restore sequence. These should be in
1004 dominator order. */
1005 static vec<tree> tm_log_save_addresses;
1007 enum thread_memory_type
1009 mem_non_local = 0,
1010 mem_thread_local,
1011 mem_transaction_local,
1012 mem_max
1015 struct tm_new_mem_map
1017 /* SSA_NAME being dereferenced. */
1018 tree val;
1019 enum thread_memory_type local_new_memory;
1022 /* Hashtable helpers. */
1024 struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map>
1026 static inline hashval_t hash (const tm_new_mem_map *);
1027 static inline bool equal (const tm_new_mem_map *, const tm_new_mem_map *);
1030 inline hashval_t
1031 tm_mem_map_hasher::hash (const tm_new_mem_map *v)
1033 return (intptr_t)v->val >> 4;
1036 inline bool
1037 tm_mem_map_hasher::equal (const tm_new_mem_map *v, const tm_new_mem_map *c)
1039 return v->val == c->val;
1042 /* Map for an SSA_NAME originally pointing to a non-aliased new piece
1043    of memory (malloc, alloca, etc.).  */
1044 static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;
1046 /* Initialize logging data structures. */
1047 static void
1048 tm_log_init (void)
1050 tm_log = new hash_table<log_entry_hasher> (10);
1051 tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
1052 tm_log_save_addresses.create (5);
1055 /* Free logging data structures. */
1056 static void
1057 tm_log_delete (void)
1059 delete tm_log;
1060 tm_log = NULL;
1061 delete tm_new_mem_hash;
1062 tm_new_mem_hash = NULL;
1063 tm_log_save_addresses.release ();
1066 /* Return true if MEM is transaction-invariant memory for the TM
1067 region starting at REGION_ENTRY_BLOCK. */
1068 static bool
1069 transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
1071 if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
1072 && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
1074 basic_block def_bb;
1076 def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
1077 return def_bb != region_entry_block
1078 && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
1081 mem = strip_invariant_refs (mem);
1082 return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
1085 /* Given an address ADDR in STMT, find it in the memory log or add it,
1086 making sure to keep only the addresses highest in the dominator
1087 tree.
1089 ENTRY_BLOCK is the entry_block for the transaction.
1091 If we find the address in the log, make sure it's either the same
1092 address, or an equivalent one that dominates ADDR.
1094 If we find the address, but neither ADDR dominates the found
1095 address, nor the found one dominates ADDR, we're on different
1096 execution paths. Add it.
1098 If known, ENTRY_BLOCK is the entry block for the region, otherwise
1099 NULL. */
1100 static void
1101 tm_log_add (basic_block entry_block, tree addr, gimple *stmt)
1103 tm_log_entry **slot;
1104 struct tm_log_entry l, *lp;
1106 l.addr = addr;
1107 slot = tm_log->find_slot (&l, INSERT);
1108 if (!*slot)
1110 tree type = TREE_TYPE (addr);
1112 lp = XNEW (struct tm_log_entry);
1113 lp->addr = addr;
1114 *slot = lp;
1116 /* Small invariant addresses can be handled as save/restores. */
1117 if (entry_block
1118 && transaction_invariant_address_p (lp->addr, entry_block)
1119 && TYPE_SIZE_UNIT (type) != NULL
1120 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
1121 && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
1122 < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
1123 /* We must be able to copy this type normally. I.e., no
1124 special constructors and the like. */
1125 && !TREE_ADDRESSABLE (type))
1127 lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
1128 lp->stmts.create (0);
1129 lp->entry_block = entry_block;
1130 /* Save addresses separately in dominator order so we don't
1131 get confused by overlapping addresses in the save/restore
1132 sequence. */
1133 tm_log_save_addresses.safe_push (lp->addr);
1135 else
1137 /* Use the logging functions. */
1138 lp->stmts.create (5);
1139 lp->stmts.quick_push (stmt);
1140 lp->save_var = NULL;
1143 else
1145 size_t i;
1146 gimple *oldstmt;
1148 lp = *slot;
1150 /* If we're generating a save/restore sequence, we don't care
1151 about statements. */
1152 if (lp->save_var)
1153 return;
1155 for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
1157 if (stmt == oldstmt)
1158 return;
1159 /* We already have a store to the same address, higher up the
1160 dominator tree. Nothing to do. */
1161 if (dominated_by_p (CDI_DOMINATORS,
1162 gimple_bb (stmt), gimple_bb (oldstmt)))
1163 return;
1164 /* We should be processing blocks in dominator tree order. */
1165 gcc_assert (!dominated_by_p (CDI_DOMINATORS,
1166 gimple_bb (oldstmt), gimple_bb (stmt)));
1168 /* Store is on a different code path. */
1169 lp->stmts.safe_push (stmt);
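/* A small sketch of the dominator filtering above, assuming the address
   is handled by the logging functions rather than save/restore:

	if (c)
	  x.f = 1;	// stmt A
	x.f = 2;	// stmt B: neither dominates nor is dominated by A
	x.f = 3;	// stmt C: dominated by B

   A and B both land in LP->STMTS because they sit on different paths,
   while C is skipped since the logging call emitted for B already
   covers it.  */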
1173 /* Gimplify the address of a TARGET_MEM_REF. Return the SSA_NAME
1174 result, insert the new statements before GSI. */
1176 static tree
1177 gimplify_addr (gimple_stmt_iterator *gsi, tree x)
1179 if (TREE_CODE (x) == TARGET_MEM_REF)
1180 x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
1181 else
1182 x = build_fold_addr_expr (x);
1183 return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
1186 /* Instrument one address with the logging functions.
1187 ADDR is the address to save.
1188 STMT is the statement before which to place it. */
1189 static void
1190 tm_log_emit_stmt (tree addr, gimple *stmt)
1192 tree type = TREE_TYPE (addr);
1193 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1194 gimple *log;
1195 enum built_in_function code = BUILT_IN_TM_LOG;
1197 if (type == float_type_node)
1198 code = BUILT_IN_TM_LOG_FLOAT;
1199 else if (type == double_type_node)
1200 code = BUILT_IN_TM_LOG_DOUBLE;
1201 else if (type == long_double_type_node)
1202 code = BUILT_IN_TM_LOG_LDOUBLE;
1203 else if (TYPE_SIZE (type) != NULL
1204 && tree_fits_uhwi_p (TYPE_SIZE (type)))
1206 unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
1208 if (TREE_CODE (type) == VECTOR_TYPE)
1210 switch (type_size)
1212 case 64:
1213 code = BUILT_IN_TM_LOG_M64;
1214 break;
1215 case 128:
1216 code = BUILT_IN_TM_LOG_M128;
1217 break;
1218 case 256:
1219 code = BUILT_IN_TM_LOG_M256;
1220 break;
1221 default:
1222 goto unhandled_vec;
1224 if (!builtin_decl_explicit_p (code))
1225 goto unhandled_vec;
1227 else
1229 unhandled_vec:
1230 switch (type_size)
1232 case 8:
1233 code = BUILT_IN_TM_LOG_1;
1234 break;
1235 case 16:
1236 code = BUILT_IN_TM_LOG_2;
1237 break;
1238 case 32:
1239 code = BUILT_IN_TM_LOG_4;
1240 break;
1241 case 64:
1242 code = BUILT_IN_TM_LOG_8;
1243 break;
1248 if (code != BUILT_IN_TM_LOG && !builtin_decl_explicit_p (code))
1249 code = BUILT_IN_TM_LOG;
1250 tree decl = builtin_decl_explicit (code);
1252 addr = gimplify_addr (&gsi, addr);
1253 if (code == BUILT_IN_TM_LOG)
1254 log = gimple_build_call (decl, 2, addr, TYPE_SIZE_UNIT (type));
1255 else
1256 log = gimple_build_call (decl, 1, addr);
1257 gsi_insert_before (&gsi, log, GSI_SAME_STMT);
1260 /* Go through the log and instrument the addresses that must be instrumented
1261 with the logging functions. Leave the save/restore addresses for
1262 later. */
1263 static void
1264 tm_log_emit (void)
1266 hash_table<log_entry_hasher>::iterator hi;
1267 struct tm_log_entry *lp;
1269 FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
1271 size_t i;
1272 gimple *stmt;
1274 if (dump_file)
1276 fprintf (dump_file, "TM thread private mem logging: ");
1277 print_generic_expr (dump_file, lp->addr, 0);
1278 fprintf (dump_file, "\n");
1281 if (lp->save_var)
1283 if (dump_file)
1284 fprintf (dump_file, "DUMPING to variable\n");
1285 continue;
1287 else
1289 if (dump_file)
1290 fprintf (dump_file, "DUMPING with logging functions\n");
1291 for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
1292 tm_log_emit_stmt (lp->addr, stmt);
1297 /* Emit the save sequence for the corresponding addresses in the log.
1298 ENTRY_BLOCK is the entry block for the transaction.
1299 BB is the basic block to insert the code in. */
1300 static void
1301 tm_log_emit_saves (basic_block entry_block, basic_block bb)
1303 size_t i;
1304 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1305 gimple *stmt;
1306 struct tm_log_entry l, *lp;
1308 for (i = 0; i < tm_log_save_addresses.length (); ++i)
1310 l.addr = tm_log_save_addresses[i];
1311 lp = *(tm_log->find_slot (&l, NO_INSERT));
1312 gcc_assert (lp->save_var != NULL);
1314 /* We only care about variables in the current transaction. */
1315 if (lp->entry_block != entry_block)
1316 continue;
1318 stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));
1320 /* Make sure we can create an SSA_NAME for this type. For
1321 instance, aggregates aren't allowed, in which case the system
1322 will create a VOP for us and everything will just work. */
1323 if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
1325 lp->save_var = make_ssa_name (lp->save_var, stmt);
1326 gimple_assign_set_lhs (stmt, lp->save_var);
1329 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1333 /* Emit the restore sequence for the corresponding addresses in the log.
1334 ENTRY_BLOCK is the entry block for the transaction.
1335 BB is the basic block to insert the code in. */
1336 static void
1337 tm_log_emit_restores (basic_block entry_block, basic_block bb)
1339 int i;
1340 struct tm_log_entry l, *lp;
1341 gimple_stmt_iterator gsi;
1342 gimple *stmt;
1344 for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
1346 l.addr = tm_log_save_addresses[i];
1347 lp = *(tm_log->find_slot (&l, NO_INSERT));
1348 gcc_assert (lp->save_var != NULL);
1350 /* We only care about variables in the current transaction. */
1351 if (lp->entry_block != entry_block)
1352 continue;
1354 /* Restores are in LIFO order from the saves in case we have
1355 overlaps. */
1356 gsi = gsi_start_bb (bb);
1358 stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
1359 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1364 static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
1365 struct walk_stmt_info *);
1366 static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
1367 struct walk_stmt_info *);
1369 /* Evaluate an address X being dereferenced and determine if it
1370    originally points to a non-aliased new chunk of memory (malloc,
1371 alloca, etc).
1373 Return MEM_THREAD_LOCAL if it points to a thread-local address.
1374 Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
1375 Return MEM_NON_LOCAL otherwise.
1377 ENTRY_BLOCK is the entry block to the transaction containing the
1378 dereference of X. */
1379 static enum thread_memory_type
1380 thread_private_new_memory (basic_block entry_block, tree x)
1382 gimple *stmt = NULL;
1383 enum tree_code code;
1384 tm_new_mem_map **slot;
1385 tm_new_mem_map elt, *elt_p;
1386 tree val = x;
1387 enum thread_memory_type retval = mem_transaction_local;
1389 if (!entry_block
1390 || TREE_CODE (x) != SSA_NAME
1391 /* Possible uninitialized use, or a function argument. In
1392 either case, we don't care. */
1393 || SSA_NAME_IS_DEFAULT_DEF (x))
1394 return mem_non_local;
1396 /* Look in cache first. */
1397 elt.val = x;
1398 slot = tm_new_mem_hash->find_slot (&elt, INSERT);
1399 elt_p = *slot;
1400 if (elt_p)
1401 return elt_p->local_new_memory;
1403 /* Optimistically assume the memory is transaction local during
1404 processing. This catches recursion into this variable. */
1405 *slot = elt_p = XNEW (tm_new_mem_map);
1406 elt_p->val = val;
1407 elt_p->local_new_memory = mem_transaction_local;
1409 /* Search DEF chain to find the original definition of this address. */
1412 if (ptr_deref_may_alias_global_p (x))
1414 /* Address escapes. This is not thread-private. */
1415 retval = mem_non_local;
1416 goto new_memory_ret;
1419 stmt = SSA_NAME_DEF_STMT (x);
1421 /* If the malloc call is outside the transaction, this is
1422 thread-local. */
1423 if (retval != mem_thread_local
1424 && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
1425 retval = mem_thread_local;
1427 if (is_gimple_assign (stmt))
1429 code = gimple_assign_rhs_code (stmt);
1430 /* x = foo ==> foo */
1431 if (code == SSA_NAME)
1432 x = gimple_assign_rhs1 (stmt);
1433 /* x = foo + n ==> foo */
1434 else if (code == POINTER_PLUS_EXPR)
1435 x = gimple_assign_rhs1 (stmt);
1436 /* x = (cast*) foo ==> foo */
1437 else if (code == VIEW_CONVERT_EXPR || CONVERT_EXPR_CODE_P (code))
1438 x = gimple_assign_rhs1 (stmt);
1439       /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI.  */
1440 else if (code == COND_EXPR)
1442 tree op1 = gimple_assign_rhs2 (stmt);
1443 tree op2 = gimple_assign_rhs3 (stmt);
1444 enum thread_memory_type mem;
1445 retval = thread_private_new_memory (entry_block, op1);
1446 if (retval == mem_non_local)
1447 goto new_memory_ret;
1448 mem = thread_private_new_memory (entry_block, op2);
1449 retval = MIN (retval, mem);
1450 goto new_memory_ret;
1452 else
1454 retval = mem_non_local;
1455 goto new_memory_ret;
1458 else
1460 if (gimple_code (stmt) == GIMPLE_PHI)
1462 unsigned int i;
1463 enum thread_memory_type mem;
1464 tree phi_result = gimple_phi_result (stmt);
1466 /* If any of the ancestors are non-local, we are sure to
1467 be non-local. Otherwise we can avoid doing anything
1468 and inherit what has already been generated. */
1469 retval = mem_max;
1470 for (i = 0; i < gimple_phi_num_args (stmt); ++i)
1472 tree op = PHI_ARG_DEF (stmt, i);
1474 /* Exclude self-assignment. */
1475 if (phi_result == op)
1476 continue;
1478 mem = thread_private_new_memory (entry_block, op);
1479 if (mem == mem_non_local)
1481 retval = mem;
1482 goto new_memory_ret;
1484 retval = MIN (retval, mem);
1486 goto new_memory_ret;
1488 break;
1491 while (TREE_CODE (x) == SSA_NAME);
1493 if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
1494 /* Thread-local or transaction-local. */
1496 else
1497 retval = mem_non_local;
1499 new_memory_ret:
1500 elt_p->local_new_memory = retval;
1501 return retval;
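/* Sketch of the resulting classification (hypothetical user code,
   evaluated during the tmmark pass when ENTRY_BLOCK is known):

	extern char *shared_buf;
	char *p = malloc (64);		// allocated before the transaction
	__transaction_atomic
	  {
	    char *q = malloc (64);	// allocated inside the transaction
	    p[0] = 1;		// mem_thread_local: logged/saved only
	    q[0] = 1;		// mem_transaction_local: no instrumentation
	    shared_buf[0] = 1;	// mem_non_local: needs write barriers
	  }

   The TM_NEW_MEM_HASH cache memoizes the walk, and the optimistic
   mem_transaction_local default breaks recursion through PHIs.  */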
1504 /* Determine whether X has to be instrumented using a read
1505 or write barrier.
1507    ENTRY_BLOCK is the entry block for the region in which STMT resides,
1508    or NULL if unknown.
1510    STMT is the statement in which X occurs.  It is used for thread
1511 private memory instrumentation. If no TPM instrumentation is
1512 desired, STMT should be null. */
1513 static bool
1514 requires_barrier (basic_block entry_block, tree x, gimple *stmt)
1516 tree orig = x;
1517 while (handled_component_p (x))
1518 x = TREE_OPERAND (x, 0);
1520 switch (TREE_CODE (x))
1522 case INDIRECT_REF:
1523 case MEM_REF:
1525 enum thread_memory_type ret;
1527 ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
1528 if (ret == mem_non_local)
1529 return true;
1530 if (stmt && ret == mem_thread_local)
1531 /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */
1532 tm_log_add (entry_block, orig, stmt);
1534 /* Transaction-locals require nothing at all. For malloc, a
1535 transaction restart frees the memory and we reallocate.
1536 For alloca, the stack pointer gets reset by the retry and
1537 we reallocate. */
1538 return false;
1541 case TARGET_MEM_REF:
1542 if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
1543 return true;
1544 x = TREE_OPERAND (TMR_BASE (x), 0);
1545 if (TREE_CODE (x) == PARM_DECL)
1546 return false;
1547 gcc_assert (TREE_CODE (x) == VAR_DECL);
1548 /* FALLTHRU */
1550 case PARM_DECL:
1551 case RESULT_DECL:
1552 case VAR_DECL:
1553 if (DECL_BY_REFERENCE (x))
1555 /* ??? This value is a pointer, but aggregate_value_p has been
1556 jigged to return true which confuses needs_to_live_in_memory.
1557 This ought to be cleaned up generically.
1559 FIXME: Verify this still happens after the next mainline
1560       merge.  Testcase: g++.dg/tm/pr47554.C.
1562 return false;
1565 if (is_global_var (x))
1566 return !TREE_READONLY (x);
1567 if (/* FIXME: This condition should actually go below in the
1568 tm_log_add() call, however is_call_clobbered() depends on
1569 aliasing info which is not available during
1570 gimplification. Since requires_barrier() gets called
1571 during lower_sequence_tm/gimplification, leave the call
1572 to needs_to_live_in_memory until we eliminate
1573 lower_sequence_tm altogether. */
1574 needs_to_live_in_memory (x))
1575 return true;
1576 else
1578 /* For local memory that doesn't escape (aka thread private
1579 memory), we can either save the value at the beginning of
1580 the transaction and restore on restart, or call a tm
1581 function to dynamically save and restore on restart
1582 (ITM_L*). */
1583 if (stmt)
1584 tm_log_add (entry_block, orig, stmt);
1585 return false;
1588 default:
1589 return false;
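/* Putting the cases above together (illustrative sketch, assuming the
   tmmark pass where ENTRY_BLOCK and STMT are known):

	extern int shared;		// writable global
	extern const int table[10];	// read-only global
	void
	g (void)
	{
	  int local = 0;
	  __transaction_atomic
	    {
	      shared = 1;	// true: needs a TM store barrier
	      local = table[2];	// false: read-only global on the RHS
	    }
	}

   Read-only globals and locals that need not live in memory take no
   barrier; the latter are recorded in the TM log instead.  */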
1593 /* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
1594 a transaction region. */
1596 static void
1597 examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
1599 gimple *stmt = gsi_stmt (*gsi);
1601 if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
1602 *state |= GTMA_HAVE_LOAD;
1603 if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
1604 *state |= GTMA_HAVE_STORE;
1607 /* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */
1609 static void
1610 examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
1612 gimple *stmt = gsi_stmt (*gsi);
1613 tree fn;
1615 if (is_tm_pure_call (stmt))
1616 return;
1618 /* Check if this call is a transaction abort. */
1619 fn = gimple_call_fndecl (stmt);
1620 if (is_tm_abort (fn))
1621 *state |= GTMA_HAVE_ABORT;
1623 /* Note that something may happen. */
1624 *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
1627 /* Iterate through the statements in the sequence, moving labels
1628 (and thus edges) of transactions from "label_norm" to "label_uninst". */
1630 static tree
1631 make_tm_uninst (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1632 struct walk_stmt_info *)
1634 gimple *stmt = gsi_stmt (*gsi);
1636 if (gtransaction *txn = dyn_cast <gtransaction *> (stmt))
1638 *handled_ops_p = true;
1639 txn->label_uninst = txn->label_norm;
1640 txn->label_norm = NULL;
1642 else
1643 *handled_ops_p = !gimple_has_substatements (stmt);
1645 return NULL_TREE;
1648 /* Lower a GIMPLE_TRANSACTION statement. */
1650 static void
1651 lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
1653 gimple *g;
1654 gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi));
1655 unsigned int *outer_state = (unsigned int *) wi->info;
1656 unsigned int this_state = 0;
1657 struct walk_stmt_info this_wi;
1659 /* First, lower the body. The scanning that we do inside gives
1660 us some idea of what we're dealing with. */
1661 memset (&this_wi, 0, sizeof (this_wi));
1662 this_wi.info = (void *) &this_state;
1663 walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
1664 lower_sequence_tm, NULL, &this_wi);
1666 /* If there was absolutely nothing transaction related inside the
1667 transaction, we may elide it. Likewise if this is a nested
1668 transaction and does not contain an abort. */
1669 if (this_state == 0
1670 || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
1672 if (outer_state)
1673 *outer_state |= this_state;
1675 gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
1676 GSI_SAME_STMT);
1677 gimple_transaction_set_body (stmt, NULL);
1679 gsi_remove (gsi, true);
1680 wi->removed_stmt = true;
1681 return;
1684 /* Wrap the body of the transaction in a try-finally node so that
1685 the commit call is always properly called. */
1686 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
1687 if (flag_exceptions)
1689 tree ptr;
1690 gimple_seq n_seq, e_seq;
1692 n_seq = gimple_seq_alloc_with_stmt (g);
1693 e_seq = NULL;
1695 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
1696 1, integer_zero_node);
1697 ptr = create_tmp_var (ptr_type_node);
1698 gimple_call_set_lhs (g, ptr);
1699 gimple_seq_add_stmt (&e_seq, g);
1701 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
1702 1, ptr);
1703 gimple_seq_add_stmt (&e_seq, g);
1705 g = gimple_build_eh_else (n_seq, e_seq);
1708 g = gimple_build_try (gimple_transaction_body (stmt),
1709 gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
1711 /* For a (potentially) outer transaction, create two paths. */
1712 gimple_seq uninst = NULL;
1713 if (outer_state == NULL)
1715 uninst = copy_gimple_seq_and_replace_locals (g);
1716 /* In the uninstrumented copy, reset inner transactions to have only
1717 an uninstrumented code path. */
1718 memset (&this_wi, 0, sizeof (this_wi));
1719 walk_gimple_seq (uninst, make_tm_uninst, NULL, &this_wi);
1722 tree label1 = create_artificial_label (UNKNOWN_LOCATION);
1723 gsi_insert_after (gsi, gimple_build_label (label1), GSI_CONTINUE_LINKING);
1724 gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
1725 gimple_transaction_set_label_norm (stmt, label1);
1727 /* If the transaction calls abort or if this is an outer transaction,
1728 add an "over" label afterwards. */
1729 tree label3 = NULL;
1730 if ((this_state & GTMA_HAVE_ABORT)
1731 || outer_state == NULL
1732 || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
1734 label3 = create_artificial_label (UNKNOWN_LOCATION);
1735 gimple_transaction_set_label_over (stmt, label3);
1738 if (uninst != NULL)
1740 gsi_insert_after (gsi, gimple_build_goto (label3), GSI_CONTINUE_LINKING);
1742 tree label2 = create_artificial_label (UNKNOWN_LOCATION);
1743 gsi_insert_after (gsi, gimple_build_label (label2), GSI_CONTINUE_LINKING);
1744 gsi_insert_seq_after (gsi, uninst, GSI_CONTINUE_LINKING);
1745 gimple_transaction_set_label_uninst (stmt, label2);
1748 if (label3 != NULL)
1749 gsi_insert_after (gsi, gimple_build_label (label3), GSI_CONTINUE_LINKING);
1751 gimple_transaction_set_body (stmt, NULL);
1753 /* Record the set of operations found for use later. */
1754 this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
1755 gimple_transaction_set_subcode (stmt, this_state);
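/* The statement layout produced above is roughly (sketch; each label is
   created only under the conditions tested above):

	GIMPLE_TRANSACTION [label_norm=L1, label_uninst=L2, label_over=L3]
	L1:
	  try {
	    ... instrumented body ...
	  } finally {
	    __builtin___tm_commit ();
	  }
	  goto L3;
	L2:
	  try {
	    ... uninstrumented copy of the body ...
	  } finally {
	    __builtin___tm_commit ();
	  }
	L3:

   The flag_exceptions case additionally wraps the commit in an EH_ELSE
   so that BUILT_IN_TM_COMMIT_EH runs on the exception path.  */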
1758 /* Iterate through the statements in the sequence, lowering them all
1759 as appropriate for being in a transaction. */
1761 static tree
1762 lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1763 struct walk_stmt_info *wi)
1765 unsigned int *state = (unsigned int *) wi->info;
1766 gimple *stmt = gsi_stmt (*gsi);
1768 *handled_ops_p = true;
1769 switch (gimple_code (stmt))
1771 case GIMPLE_ASSIGN:
1772 /* Only memory reads/writes need to be instrumented. */
1773 if (gimple_assign_single_p (stmt))
1774 examine_assign_tm (state, gsi);
1775 break;
1777 case GIMPLE_CALL:
1778 examine_call_tm (state, gsi);
1779 break;
1781 case GIMPLE_ASM:
1782 *state |= GTMA_MAY_ENTER_IRREVOCABLE;
1783 break;
1785 case GIMPLE_TRANSACTION:
1786 lower_transaction (gsi, wi);
1787 break;
1789 default:
1790 *handled_ops_p = !gimple_has_substatements (stmt);
1791 break;
1794 return NULL_TREE;
1797 /* Iterate through the statements in the sequence, lowering them all
1798 as appropriate for being outside of a transaction. */
1800 static tree
1801 lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1802 struct walk_stmt_info * wi)
1804 gimple *stmt = gsi_stmt (*gsi);
1806 if (gimple_code (stmt) == GIMPLE_TRANSACTION)
1808 *handled_ops_p = true;
1809 lower_transaction (gsi, wi);
1811 else
1812 *handled_ops_p = !gimple_has_substatements (stmt);
1814 return NULL_TREE;
1817 /* Main entry point for flattening GIMPLE_TRANSACTION constructs. After
1818 this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
1819 been moved out, and all the data required for constructing a proper
1820 CFG has been recorded. */
1822 static unsigned int
1823 execute_lower_tm (void)
1825 struct walk_stmt_info wi;
1826 gimple_seq body;
1828 /* Transactional clones aren't created until a later pass. */
1829 gcc_assert (!decl_is_tm_clone (current_function_decl));
1831 body = gimple_body (current_function_decl);
1832 memset (&wi, 0, sizeof (wi));
1833 walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
1834 gimple_set_body (current_function_decl, body);
1836 return 0;
1839 namespace {
1841 const pass_data pass_data_lower_tm =
1843 GIMPLE_PASS, /* type */
1844 "tmlower", /* name */
1845 OPTGROUP_NONE, /* optinfo_flags */
1846 TV_TRANS_MEM, /* tv_id */
1847 PROP_gimple_lcf, /* properties_required */
1848 0, /* properties_provided */
1849 0, /* properties_destroyed */
1850 0, /* todo_flags_start */
1851 0, /* todo_flags_finish */
1854 class pass_lower_tm : public gimple_opt_pass
1856 public:
1857 pass_lower_tm (gcc::context *ctxt)
1858 : gimple_opt_pass (pass_data_lower_tm, ctxt)
1861 /* opt_pass methods: */
1862 virtual bool gate (function *) { return flag_tm; }
1863 virtual unsigned int execute (function *) { return execute_lower_tm (); }
1865 }; // class pass_lower_tm
1867 } // anon namespace
1869 gimple_opt_pass *
1870 make_pass_lower_tm (gcc::context *ctxt)
1872 return new pass_lower_tm (ctxt);
1875 /* Collect region information for each transaction. */
1877 struct tm_region
1879 public:
1881 /* The field "transaction_stmt" is initially a gtransaction *,
1882      but eventually gets lowered to a gcall * (to BUILT_IN_TM_START).
1884      Helper method to get it as a gtransaction *, with code-checking
1885      in a checked build.  */
1887 gtransaction *
1888 get_transaction_stmt () const
1890 return as_a <gtransaction *> (transaction_stmt);
1893 public:
1895 /* Link to the next unnested transaction. */
1896 struct tm_region *next;
1898 /* Link to the next inner transaction. */
1899 struct tm_region *inner;
1901 /* Link to the next outer transaction. */
1902 struct tm_region *outer;
1904 /* The GIMPLE_TRANSACTION statement beginning this transaction.
1905 After TM_MARK, this gets replaced by a call to
1906 BUILT_IN_TM_START.
1907      Hence this will be either a gtransaction * or a gcall *.  */
1908 gimple *transaction_stmt;
1910 /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
1911 BUILT_IN_TM_START, this field is true if the transaction is an
1912 outer transaction. */
1913 bool original_transaction_was_outer;
1915 /* Return value from BUILT_IN_TM_START. */
1916 tree tm_state;
1918 /* The entry block to this region. This will always be the first
1919 block of the body of the transaction. */
1920 basic_block entry_block;
1922 /* The first block after an expanded call to _ITM_beginTransaction. */
1923 basic_block restart_block;
1925 /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
1926 These blocks are still a part of the region (i.e., the border is
1927 inclusive). Note that this set is only complete for paths in the CFG
1928 starting at ENTRY_BLOCK, and that there is no exit block recorded for
1929 the edge to the "over" label. */
1930 bitmap exit_blocks;
1932   /* The set of all blocks that have a TM_IRREVOCABLE call.  */
1933 bitmap irr_blocks;
1936 /* True if there are pending edge statements to be committed for the
1937 current function being scanned in the tmmark pass. */
1938 bool pending_edge_inserts_p;
1940 static struct tm_region *all_tm_regions;
1941 static bitmap_obstack tm_obstack;
1944 /* A subroutine of tm_region_init. Record the existence of the
1945 GIMPLE_TRANSACTION statement in a tree of tm_region elements. */
1947 static struct tm_region *
1948 tm_region_init_0 (struct tm_region *outer, basic_block bb,
1949 gtransaction *stmt)
1951 struct tm_region *region;
1953 region = (struct tm_region *)
1954 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
1956 if (outer)
1958 region->next = outer->inner;
1959 outer->inner = region;
1961 else
1963 region->next = all_tm_regions;
1964 all_tm_regions = region;
1966 region->inner = NULL;
1967 region->outer = outer;
1969 region->transaction_stmt = stmt;
1970 region->original_transaction_was_outer = false;
1971 region->tm_state = NULL;
1973 /* There are either one or two edges out of the block containing
1974 the GIMPLE_TRANSACTION, one to the actual region and one to the
1975 "over" label if the region contains an abort. The former will
1976 always be the one marked FALLTHRU. */
1977 region->entry_block = FALLTHRU_EDGE (bb)->dest;
1979 region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
1980 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
1982 return region;
1985 /* A subroutine of tm_region_init. Record all the exit and
1986 irrevocable blocks in BB into the region's exit_blocks and
1987 irr_blocks bitmaps. Returns the new region being scanned. */
1989 static struct tm_region *
1990 tm_region_init_1 (struct tm_region *region, basic_block bb)
1992 gimple_stmt_iterator gsi;
1993 gimple *g;
1995 if (!region
1996 || (!region->irr_blocks && !region->exit_blocks))
1997 return region;
1999 /* Check to see if this is the end of a region by seeing if it
2000 contains a call to __builtin_tm_commit{,_eh}. Note that the
2001 outermost region for DECL_IS_TM_CLONE need not collect this. */
2002 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
2004 g = gsi_stmt (gsi);
2005 if (gimple_code (g) == GIMPLE_CALL)
2007 tree fn = gimple_call_fndecl (g);
2008 if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
2010 if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
2011 || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
2012 && region->exit_blocks)
2014 bitmap_set_bit (region->exit_blocks, bb->index);
2015 region = region->outer;
2016 break;
2018 if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
2019 bitmap_set_bit (region->irr_blocks, bb->index);
2023 return region;
2026 /* Collect all of the transaction regions within the current function
2027 and record them in ALL_TM_REGIONS. The REGION parameter may specify
2028 an "outermost" region for use by tm clones. */
2030 static void
2031 tm_region_init (struct tm_region *region)
2033 gimple *g;
2034 edge_iterator ei;
2035 edge e;
2036 basic_block bb;
2037 auto_vec<basic_block> queue;
2038 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2039 struct tm_region *old_region;
2040 auto_vec<tm_region *> bb_regions;
2042 all_tm_regions = region;
2043 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2045 /* We could store this information in bb->aux, but we may get called
2046 through get_all_tm_blocks() from another pass that may be already
2047 using bb->aux. */
2048 bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun));
2050 queue.safe_push (bb);
2051 bb_regions[bb->index] = region;
2054 bb = queue.pop ();
2055 region = bb_regions[bb->index];
2056 bb_regions[bb->index] = NULL;
2058 /* Record exit and irrevocable blocks. */
2059 region = tm_region_init_1 (region, bb);
2061 /* Check for the last statement in the block beginning a new region. */
2062 g = last_stmt (bb);
2063 old_region = region;
2064 if (g)
2065 if (gtransaction *trans_stmt = dyn_cast <gtransaction *> (g))
2066 region = tm_region_init_0 (region, bb, trans_stmt);
2068 /* Process subsequent blocks. */
2069 FOR_EACH_EDGE (e, ei, bb->succs)
2070 if (!bitmap_bit_p (visited_blocks, e->dest->index))
2072 bitmap_set_bit (visited_blocks, e->dest->index);
2073 queue.safe_push (e->dest);
2075 /* If the current block started a new region, make sure that only
2076 the entry block of the new region is associated with this region.
2077 Other successors are still part of the old region. */
2078 if (old_region != region && e->dest != region->entry_block)
2079 bb_regions[e->dest->index] = old_region;
2080 else
2081 bb_regions[e->dest->index] = region;
2084 while (!queue.is_empty ());
2085 BITMAP_FREE (visited_blocks);
2088 /* The "gate" function for all transactional memory expansion and optimization
2089 passes. We collect region information for each top-level transaction, and
2090 if we don't find any, we skip all of the TM passes. Each region will have
2091 all of the exit blocks recorded, and the originating statement. */
2093 static bool
2094 gate_tm_init (void)
2096 if (!flag_tm)
2097 return false;
2099 calculate_dominance_info (CDI_DOMINATORS);
2100 bitmap_obstack_initialize (&tm_obstack);
2102 /* If the function is a TM_CLONE, then the entire function is the region. */
2103 if (decl_is_tm_clone (current_function_decl))
2105 struct tm_region *region = (struct tm_region *)
2106 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
2107 memset (region, 0, sizeof (*region));
2108 region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2109 /* For a clone, the entire function is the region. But even if
2110 we don't need to record any exit blocks, we may need to
2111 record irrevocable blocks. */
2112 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
2114 tm_region_init (region);
2116 else
2118 tm_region_init (NULL);
2120 /* If we didn't find any regions, cleanup and skip the whole tree
2121 of tm-related optimizations. */
2122 if (all_tm_regions == NULL)
2124 bitmap_obstack_release (&tm_obstack);
2125 return false;
2129 return true;
2132 namespace {
2134 const pass_data pass_data_tm_init =
2136 GIMPLE_PASS, /* type */
2137 "*tminit", /* name */
2138 OPTGROUP_NONE, /* optinfo_flags */
2139 TV_TRANS_MEM, /* tv_id */
2140 ( PROP_ssa | PROP_cfg ), /* properties_required */
2141 0, /* properties_provided */
2142 0, /* properties_destroyed */
2143 0, /* todo_flags_start */
2144 0, /* todo_flags_finish */
2147 class pass_tm_init : public gimple_opt_pass
2149 public:
2150 pass_tm_init (gcc::context *ctxt)
2151 : gimple_opt_pass (pass_data_tm_init, ctxt)
2154 /* opt_pass methods: */
2155 virtual bool gate (function *) { return gate_tm_init (); }
2157 }; // class pass_tm_init
2159 } // anon namespace
2161 gimple_opt_pass *
2162 make_pass_tm_init (gcc::context *ctxt)
2164 return new pass_tm_init (ctxt);
2167 /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
2168 represented by REGION. */
2170 static inline void
2171 transaction_subcode_ior (struct tm_region *region, unsigned flags)
2173 if (region && region->transaction_stmt)
2175 gtransaction *transaction_stmt = region->get_transaction_stmt ();
2176 flags |= gimple_transaction_subcode (transaction_stmt);
2177 gimple_transaction_set_subcode (transaction_stmt, flags);
2181 /* Construct a memory load in a transactional context. Return the
2182 gimple statement performing the load, or NULL if there is no
2183 TM_LOAD builtin of the appropriate size to do the load.
2185 LOC is the location to use for the new statement(s). */
2187 static gcall *
2188 build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2190 tree t, type = TREE_TYPE (rhs);
2191 gcall *gcall;
2193 built_in_function code;
2194 if (type == float_type_node)
2195 code = BUILT_IN_TM_LOAD_FLOAT;
2196 else if (type == double_type_node)
2197 code = BUILT_IN_TM_LOAD_DOUBLE;
2198 else if (type == long_double_type_node)
2199 code = BUILT_IN_TM_LOAD_LDOUBLE;
2200 else
2202 if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
2203 return NULL;
2204 unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
2206 if (TREE_CODE (type) == VECTOR_TYPE)
2208 switch (type_size)
2210 case 64:
2211 code = BUILT_IN_TM_LOAD_M64;
2212 break;
2213 case 128:
2214 code = BUILT_IN_TM_LOAD_M128;
2215 break;
2216 case 256:
2217 code = BUILT_IN_TM_LOAD_M256;
2218 break;
2219 default:
2220 goto unhandled_vec;
2222 if (!builtin_decl_explicit_p (code))
2223 goto unhandled_vec;
2225 else
2227 unhandled_vec:
2228 switch (type_size)
2230 case 8:
2231 code = BUILT_IN_TM_LOAD_1;
2232 break;
2233 case 16:
2234 code = BUILT_IN_TM_LOAD_2;
2235 break;
2236 case 32:
2237 code = BUILT_IN_TM_LOAD_4;
2238 break;
2239 case 64:
2240 code = BUILT_IN_TM_LOAD_8;
2241 break;
2242 default:
2243 return NULL;
2248 tree decl = builtin_decl_explicit (code);
2249 gcc_assert (decl);
2251 t = gimplify_addr (gsi, rhs);
2252 gcall = gimple_build_call (decl, 1, t);
2253 gimple_set_location (gcall, loc);
2255 t = TREE_TYPE (TREE_TYPE (decl));
2256 if (useless_type_conversion_p (type, t))
2258 gimple_call_set_lhs (gcall, lhs);
2259 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2261 else
2263 gimple *g;
2264 tree temp;
2266 temp = create_tmp_reg (t);
2267 gimple_call_set_lhs (gcall, temp);
2268 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2270 t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
2271 g = gimple_build_assign (lhs, t);
2272 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2275 return gcall;
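/* A hedged example of the result (assuming a 4-byte integer LHS whose type
   already matches the builtin's return type, so no VIEW_CONVERT_EXPR
   temporary is needed): for an instrumented read of a shared variable Y,

       x = y;

   build_tm_load emits, roughly,

       x = _ITM_RU4 (&y);

   i.e. a call to the BUILT_IN_TM_LOAD_4 decl, inserted before the
   iterator position; the libitm entry-point name is illustrative.  */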
2279 /* Similarly for storing TYPE in a transactional context. */
2281 static gcall *
2282 build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2284 tree t, fn, type = TREE_TYPE (rhs), simple_type;
2285 gcall *gcall;
2287 built_in_function code;
2288 if (type == float_type_node)
2289 code = BUILT_IN_TM_STORE_FLOAT;
2290 else if (type == double_type_node)
2291 code = BUILT_IN_TM_STORE_DOUBLE;
2292 else if (type == long_double_type_node)
2293 code = BUILT_IN_TM_STORE_LDOUBLE;
2294 else
2296 if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
2297 return NULL;
2298 unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
2300 if (TREE_CODE (type) == VECTOR_TYPE)
2302 switch (type_size)
2304 case 64:
2305 code = BUILT_IN_TM_STORE_M64;
2306 break;
2307 case 128:
2308 code = BUILT_IN_TM_STORE_M128;
2309 break;
2310 case 256:
2311 code = BUILT_IN_TM_STORE_M256;
2312 break;
2313 default:
2314 goto unhandled_vec;
2316 if (!builtin_decl_explicit_p (code))
2317 goto unhandled_vec;
2319 else
2321 unhandled_vec:
2322 switch (type_size)
2324 case 8:
2325 code = BUILT_IN_TM_STORE_1;
2326 break;
2327 case 16:
2328 code = BUILT_IN_TM_STORE_2;
2329 break;
2330 case 32:
2331 code = BUILT_IN_TM_STORE_4;
2332 break;
2333 case 64:
2334 code = BUILT_IN_TM_STORE_8;
2335 break;
2336 default:
2337 return NULL;
2342 fn = builtin_decl_explicit (code);
2343 gcc_assert (fn);
2345 simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
2347 if (TREE_CODE (rhs) == CONSTRUCTOR)
2349 /* Handle the easy initialization to zero. */
2350 if (!CONSTRUCTOR_ELTS (rhs))
2351 rhs = build_int_cst (simple_type, 0);
2352 else
2354 /* ...otherwise punt to the caller and probably use
2355 BUILT_IN_TM_MEMMOVE, because we can't wrap a
2356 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
2357 valid gimple. */
2358 return NULL;
2361 else if (!useless_type_conversion_p (simple_type, type))
2363 gimple *g;
2364 tree temp;
2366 temp = create_tmp_reg (simple_type);
2367 t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
2368 g = gimple_build_assign (temp, t);
2369 gimple_set_location (g, loc);
2370 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2372 rhs = temp;
2375 t = gimplify_addr (gsi, lhs);
2376 gcall = gimple_build_call (fn, 2, t, rhs);
2377 gimple_set_location (gcall, loc);
2378 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2380 return gcall;
2384 /* Expand an assignment statement into transactional builtins. */
2386 static void
2387 expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
2389 gimple *stmt = gsi_stmt (*gsi);
2390 location_t loc = gimple_location (stmt);
2391 tree lhs = gimple_assign_lhs (stmt);
2392 tree rhs = gimple_assign_rhs1 (stmt);
2393 bool store_p = requires_barrier (region->entry_block, lhs, NULL);
2394 bool load_p = requires_barrier (region->entry_block, rhs, NULL);
2395 gimple *gcall = NULL;
2397 if (!load_p && !store_p)
2399 /* Add thread private addresses to log if applicable. */
2400 requires_barrier (region->entry_block, lhs, stmt);
2401 gsi_next (gsi);
2402 return;
2405 if (load_p)
2406 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2407 if (store_p)
2408 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2410 // Remove original load/store statement.
2411 gsi_remove (gsi, true);
2413 // Attempt to use a simple load/store helper function.
2414 if (load_p && !store_p)
2415 gcall = build_tm_load (loc, lhs, rhs, gsi);
2416 else if (store_p && !load_p)
2417 gcall = build_tm_store (loc, lhs, rhs, gsi);
2419 // If gcall has not been set, then we do not have a simple helper
2420 // function available for the type. This may be true of larger
2421 // structures, vectors, and non-standard float types.
2422 if (!gcall)
2424 tree lhs_addr, rhs_addr, ltmp = NULL, copy_fn;
2426 // If this is a type that we couldn't handle above, but it's
2427 // in a register, we must spill it to memory for the copy.
2428 if (is_gimple_reg (lhs))
2430 ltmp = create_tmp_var (TREE_TYPE (lhs));
2431 lhs_addr = build_fold_addr_expr (ltmp);
2433 else
2434 lhs_addr = gimplify_addr (gsi, lhs);
2435 if (is_gimple_reg (rhs))
2437 tree rtmp = create_tmp_var (TREE_TYPE (rhs));
2438 rhs_addr = build_fold_addr_expr (rtmp);
2439 gcall = gimple_build_assign (rtmp, rhs);
2440 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2442 else
2443 rhs_addr = gimplify_addr (gsi, rhs);
2445 // Choose the appropriate memory transfer function.
2446 if (load_p && store_p)
2448 // ??? Figure out if there's any possible overlap between
2449 // the LHS and the RHS and if not, use MEMCPY.
2450 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
2452 else if (load_p)
2454 // Note that the store is non-transactional and cannot overlap.
2455 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RTWN);
2457 else
2459 // Note that the load is non-transactional and cannot overlap.
2460 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RNWT);
2463 gcall = gimple_build_call (copy_fn, 3, lhs_addr, rhs_addr,
2464 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
2465 gimple_set_location (gcall, loc);
2466 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2468 if (ltmp)
2470 gcall = gimple_build_assign (lhs, ltmp);
2471 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2475 // Now that we have the load/store in its instrumented form, add
2476 // thread private addresses to the log if applicable.
2477 if (!store_p)
2478 requires_barrier (region->entry_block, lhs, gcall);
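// A hedged summary of the three shapes produced above for "x = y;" inside
// a transaction (runtime names illustrative, 4-byte operands assumed):
//
//   only the read needs a barrier:    x = _ITM_RU4 (&y);
//   only the write needs a barrier:   _ITM_WU4 (&x, y);
//   both need barriers:               _ITM_memmoveRtWt (&x, &y, sizeof (x));
//
// and a single-sided barrier on a type with no sized helper falls back to
// one of the one-sided _ITM_memcpy variants, with register operands first
// spilled to temporaries so that their addresses can be taken.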
2482 /* Expand a call statement as appropriate for a transaction. That is,
2483 either verify that the call does not affect the transaction, or
2484 redirect the call to a clone that handles transactions, or change
2485 the transaction state to IRREVOCABLE. Return true if the call is
2486 one of the builtins that end a transaction. */
2488 static bool
2489 expand_call_tm (struct tm_region *region,
2490 gimple_stmt_iterator *gsi)
2492 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
2493 tree lhs = gimple_call_lhs (stmt);
2494 tree fn_decl;
2495 struct cgraph_node *node;
2496 bool retval = false;
2498 fn_decl = gimple_call_fndecl (stmt);
2500 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
2501 || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
2502 transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
2503 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
2504 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2506 if (is_tm_pure_call (stmt))
2507 return false;
2509 if (fn_decl)
2510 retval = is_tm_ending_fndecl (fn_decl);
2511 if (!retval)
2513 /* Assume all non-const/pure calls write to memory, except
2514 transaction ending builtins. */
2515 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2518 /* For indirect calls, we already generated a call into the runtime. */
2519 if (!fn_decl)
2521 tree fn = gimple_call_fn (stmt);
2523 /* We are guaranteed never to go irrevocable on a safe or pure
2524 call, and the pure call was handled above. */
2525 if (is_tm_safe (fn))
2526 return false;
2527 else
2528 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2530 return false;
2533 node = cgraph_node::get (fn_decl);
2534 /* All calls should have cgraph here. */
2535 if (!node)
2537 /* We can have a nodeless call here if some pass after IPA-tm
2538 added uninstrumented calls. For example, loop distribution
2539 can transform certain loop constructs into __builtin_mem*
2540 calls. In this case, see if we have a suitable TM
2541 replacement and fill in the gaps. */
2542 gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
2543 enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
2544 gcc_assert (code == BUILT_IN_MEMCPY
2545 || code == BUILT_IN_MEMMOVE
2546 || code == BUILT_IN_MEMSET);
2548 tree repl = find_tm_replacement_function (fn_decl);
2549 if (repl)
2551 gimple_call_set_fndecl (stmt, repl);
2552 update_stmt (stmt);
2553 node = cgraph_node::create (repl);
2554 node->local.tm_may_enter_irr = false;
2555 return expand_call_tm (region, gsi);
2557 gcc_unreachable ();
2559 if (node->local.tm_may_enter_irr)
2560 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2562 if (is_tm_abort (fn_decl))
2564 transaction_subcode_ior (region, GTMA_HAVE_ABORT);
2565 return true;
2568 /* Instrument the store if needed.
2570 If the assignment happens inside the function call (return slot
2571 optimization), there is no instrumentation to be done, since
2572 the callee should have done the right thing. */
2573 if (lhs && requires_barrier (region->entry_block, lhs, stmt)
2574 && !gimple_call_return_slot_opt_p (stmt))
2576 tree tmp = create_tmp_reg (TREE_TYPE (lhs));
2577 location_t loc = gimple_location (stmt);
2578 edge fallthru_edge = NULL;
2579 gassign *assign_stmt;
2581 /* Remember if the call was going to throw. */
2582 if (stmt_can_throw_internal (stmt))
2584 edge_iterator ei;
2585 edge e;
2586 basic_block bb = gimple_bb (stmt);
2588 FOR_EACH_EDGE (e, ei, bb->succs)
2589 if (e->flags & EDGE_FALLTHRU)
2591 fallthru_edge = e;
2592 break;
2596 gimple_call_set_lhs (stmt, tmp);
2597 update_stmt (stmt);
2598 assign_stmt = gimple_build_assign (lhs, tmp);
2599 gimple_set_location (assign_stmt, loc);
2601 /* We cannot throw in the middle of a BB. If the call was going
2602 to throw, place the instrumentation on the fallthru edge, so
2603 the call remains the last statement in the block. */
2604 if (fallthru_edge)
2606 gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt);
2607 gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
2608 expand_assign_tm (region, &fallthru_gsi);
2609 gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
2610 pending_edge_inserts_p = true;
2612 else
2614 gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING);
2615 expand_assign_tm (region, gsi);
2618 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2621 return retval;
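/* A hedged example of the LHS instrumentation above: for "x = foo ();"
   where X needs a barrier and the call does not use the return slot, the
   statement is rewritten as, roughly,

       tmp = foo ();
       x = tmp;          // then instrumented by expand_assign_tm

   and if FOO can throw internally, the new assignment is queued on the
   fallthru edge instead, so the call remains last in its block.  */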
2625 /* Expand all statements in BB as appropriate for being inside
2626 a transaction. */
2628 static void
2629 expand_block_tm (struct tm_region *region, basic_block bb)
2631 gimple_stmt_iterator gsi;
2633 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2635 gimple *stmt = gsi_stmt (gsi);
2636 switch (gimple_code (stmt))
2638 case GIMPLE_ASSIGN:
2639 /* Only memory reads/writes need to be instrumented. */
2640 if (gimple_assign_single_p (stmt)
2641 && !gimple_clobber_p (stmt))
2643 expand_assign_tm (region, &gsi);
2644 continue;
2646 break;
2648 case GIMPLE_CALL:
2649 if (expand_call_tm (region, &gsi))
2650 return;
2651 break;
2653 case GIMPLE_ASM:
2654 gcc_unreachable ();
2656 default:
2657 break;
2659 if (!gsi_end_p (gsi))
2660 gsi_next (&gsi);
2664 /* Return the list of basic-blocks in REGION.
2666 STOP_AT_IRREVOCABLE_P is true if the caller is not interested in blocks
2667 following a TM_IRREVOCABLE call.
2669 INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the
2670 uninstrumented code path blocks in the list of basic blocks
2671 returned, false otherwise. */
2673 static vec<basic_block>
2674 get_tm_region_blocks (basic_block entry_block,
2675 bitmap exit_blocks,
2676 bitmap irr_blocks,
2677 bitmap all_region_blocks,
2678 bool stop_at_irrevocable_p,
2679 bool include_uninstrumented_p = true)
2681 vec<basic_block> bbs = vNULL;
2682 unsigned i;
2683 edge e;
2684 edge_iterator ei;
2685 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2687 i = 0;
2688 bbs.safe_push (entry_block);
2689 bitmap_set_bit (visited_blocks, entry_block->index);
2693 basic_block bb = bbs[i++];
2695 if (exit_blocks &&
2696 bitmap_bit_p (exit_blocks, bb->index))
2697 continue;
2699 if (stop_at_irrevocable_p
2700 && irr_blocks
2701 && bitmap_bit_p (irr_blocks, bb->index))
2702 continue;
2704 FOR_EACH_EDGE (e, ei, bb->succs)
2705 if ((include_uninstrumented_p
2706 || !(e->flags & EDGE_TM_UNINSTRUMENTED))
2707 && !bitmap_bit_p (visited_blocks, e->dest->index))
2709 bitmap_set_bit (visited_blocks, e->dest->index);
2710 bbs.safe_push (e->dest);
2713 while (i < bbs.length ());
2715 if (all_region_blocks)
2716 bitmap_ior_into (all_region_blocks, visited_blocks);
2718 BITMAP_FREE (visited_blocks);
2719 return bbs;
2722 // Callback data for collect_bb2reg.
2723 struct bb2reg_stuff
2725 vec<tm_region *> *bb2reg;
2726 bool include_uninstrumented_p;
2729 // Callback for expand_regions, collect innermost region data for each bb.
2730 static void *
2731 collect_bb2reg (struct tm_region *region, void *data)
2733 struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
2734 vec<tm_region *> *bb2reg = stuff->bb2reg;
2735 vec<basic_block> queue;
2736 unsigned int i;
2737 basic_block bb;
2739 queue = get_tm_region_blocks (region->entry_block,
2740 region->exit_blocks,
2741 region->irr_blocks,
2742 NULL,
2743 /*stop_at_irr_p=*/true,
2744 stuff->include_uninstrumented_p);
2746 // We expect expand_region to perform a post-order traversal of the region
2747 // tree. Therefore the last region seen for any bb is the innermost.
2748 FOR_EACH_VEC_ELT (queue, i, bb)
2749 (*bb2reg)[bb->index] = region;
2751 queue.release ();
2752 return NULL;
2755 // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
2756 // which a basic block belongs. Note that we only consider the instrumented
2757 // code paths for the region; the uninstrumented code paths are ignored if
2758 // INCLUDE_UNINSTRUMENTED_P is false.
2760 // ??? This data is very similar to the bb_regions array that is collected
2761 // during tm_region_init. Or, rather, this data is similar to what could
2762 // be used within tm_region_init. The actual computation in tm_region_init
2763 // begins and ends with bb_regions entirely full of NULL pointers, due to
2764 // the way in which pointers are swapped in and out of the array.
2766 // ??? Our callers expect that blocks are not shared between transactions.
2767 // When the optimizers get too smart, and blocks are shared, then during
2768 // the tm_mark phase we'll add log entries to only one of the two transactions,
2769 // and in the tm_edge phase we'll add edges to the CFG that create invalid
3770 // cycles. The symptom is SSA defs that do not dominate their uses.
2771 // Note that the optimizers were locally correct with their transformation,
2772 // as we have no info within the program that suggests that the blocks cannot
2773 // be shared.
2775 // ??? There is currently a hack inside tree-ssa-pre.c to work around the
2776 // only known instance of this block sharing.
2778 static vec<tm_region *>
2779 get_bb_regions_instrumented (bool traverse_clones,
2780 bool include_uninstrumented_p)
2782 unsigned n = last_basic_block_for_fn (cfun);
2783 struct bb2reg_stuff stuff;
2784 vec<tm_region *> ret;
2786 ret.create (n);
2787 ret.safe_grow_cleared (n);
2788 stuff.bb2reg = &ret;
2789 stuff.include_uninstrumented_p = include_uninstrumented_p;
2790 expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);
2792 return ret;
2795 /* Set the IN_TRANSACTION for all gimple statements that appear in a
2796 transaction. */
2798 void
2799 compute_transaction_bits (void)
2801 struct tm_region *region;
2802 vec<basic_block> queue;
2803 unsigned int i;
2804 basic_block bb;
2806 /* ?? Perhaps we need to abstract gate_tm_init further, because we
2807 certainly don't need it to calculate CDI_DOMINATOR info. */
2808 gate_tm_init ();
2810 FOR_EACH_BB_FN (bb, cfun)
2811 bb->flags &= ~BB_IN_TRANSACTION;
2813 for (region = all_tm_regions; region; region = region->next)
2815 queue = get_tm_region_blocks (region->entry_block,
2816 region->exit_blocks,
2817 region->irr_blocks,
2818 NULL,
2819 /*stop_at_irr_p=*/true);
2820 for (i = 0; queue.iterate (i, &bb); ++i)
2821 bb->flags |= BB_IN_TRANSACTION;
2822 queue.release ();
2825 if (all_tm_regions)
2826 bitmap_obstack_release (&tm_obstack);
2829 /* Replace the GIMPLE_TRANSACTION in this region with the corresponding
2830 call to BUILT_IN_TM_START. */
2832 static void *
2833 expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2835 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2836 basic_block transaction_bb = gimple_bb (region->transaction_stmt);
2837 tree tm_state = region->tm_state;
2838 tree tm_state_type = TREE_TYPE (tm_state);
2839 edge abort_edge = NULL;
2840 edge inst_edge = NULL;
2841 edge uninst_edge = NULL;
2842 edge fallthru_edge = NULL;
2844 // Identify the various successors of the transaction start.
2846 edge_iterator i;
2847 edge e;
2848 FOR_EACH_EDGE (e, i, transaction_bb->succs)
2850 if (e->flags & EDGE_TM_ABORT)
2851 abort_edge = e;
2852 else if (e->flags & EDGE_TM_UNINSTRUMENTED)
2853 uninst_edge = e;
2854 else
2855 inst_edge = e;
2856 if (e->flags & EDGE_FALLTHRU)
2857 fallthru_edge = e;
2861 /* ??? There are plenty of bits here we're not computing. */
2863 int subcode = gimple_transaction_subcode (region->get_transaction_stmt ());
2864 int flags = 0;
2865 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2866 flags |= PR_DOESGOIRREVOCABLE;
2867 if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
2868 flags |= PR_HASNOIRREVOCABLE;
2869 /* If the transaction does not have an abort in lexical scope and is not
2870 marked as an outer transaction, then it will never abort. */
2871 if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
2872 flags |= PR_HASNOABORT;
2873 if ((subcode & GTMA_HAVE_STORE) == 0)
2874 flags |= PR_READONLY;
2875 if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
2876 flags |= PR_INSTRUMENTEDCODE;
2877 if (uninst_edge)
2878 flags |= PR_UNINSTRUMENTEDCODE;
2879 if (subcode & GTMA_IS_OUTER)
2880 region->original_transaction_was_outer = true;
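// A hedged worked example of the flag computation above: a transaction
// whose subcode has GTMA_HAVE_LOAD but none of GTMA_HAVE_STORE,
// GTMA_HAVE_ABORT, GTMA_IS_OUTER or GTMA_MAY_ENTER_IRREVOCABLE, and which
// has only an instrumented code path, ends up with
//
//   flags = PR_HASNOIRREVOCABLE | PR_HASNOABORT | PR_READONLY
//           | PR_INSTRUMENTEDCODE;
//
// which becomes the sole argument of the BUILT_IN_TM_START call built below.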
2881 tree t = build_int_cst (tm_state_type, flags);
2882 gcall *call = gimple_build_call (tm_start, 1, t);
2883 gimple_call_set_lhs (call, tm_state);
2884 gimple_set_location (call, gimple_location (region->transaction_stmt));
2886 // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
2887 gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
2888 gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
2889 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2890 gsi_remove (&gsi, true);
2891 region->transaction_stmt = call;
2894 // Generate log saves.
2895 if (!tm_log_save_addresses.is_empty ())
2896 tm_log_emit_saves (region->entry_block, transaction_bb);
2898 // In the beginning, we have no tests to perform on transaction restart.
2899 // Note that after this point, transaction_bb becomes the "most recent
2900 // block containing tests for the transaction".
2901 region->restart_block = region->entry_block;
2903 // Generate log restores.
2904 if (!tm_log_save_addresses.is_empty ())
2906 basic_block test_bb = create_empty_bb (transaction_bb);
2907 basic_block code_bb = create_empty_bb (test_bb);
2908 basic_block join_bb = create_empty_bb (code_bb);
2909 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2910 add_bb_to_loop (code_bb, transaction_bb->loop_father);
2911 add_bb_to_loop (join_bb, transaction_bb->loop_father);
2912 if (region->restart_block == region->entry_block)
2913 region->restart_block = test_bb;
2915 tree t1 = create_tmp_reg (tm_state_type);
2916 tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
2917 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2918 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2919 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2921 t2 = build_int_cst (tm_state_type, 0);
2922 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2923 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2925 tm_log_emit_restores (region->entry_block, code_bb);
2927 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2928 edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
2929 edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
2930 redirect_edge_pred (fallthru_edge, join_bb);
2932 join_bb->frequency = test_bb->frequency = transaction_bb->frequency;
2933 join_bb->count = test_bb->count = transaction_bb->count;
2935 ei->probability = PROB_ALWAYS;
2936 et->probability = PROB_LIKELY;
2937 ef->probability = PROB_UNLIKELY;
2938 et->count = apply_probability (test_bb->count, et->probability);
2939 ef->count = apply_probability (test_bb->count, ef->probability);
2941 code_bb->count = et->count;
2942 code_bb->frequency = EDGE_FREQUENCY (et);
2944 transaction_bb = join_bb;
2947 // If we have an ABORT edge, create a test to perform the abort.
2948 if (abort_edge)
2950 basic_block test_bb = create_empty_bb (transaction_bb);
2951 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2952 if (region->restart_block == region->entry_block)
2953 region->restart_block = test_bb;
2955 tree t1 = create_tmp_reg (tm_state_type);
2956 tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
2957 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2958 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2959 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2961 t2 = build_int_cst (tm_state_type, 0);
2962 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2963 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2965 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2966 test_bb->frequency = transaction_bb->frequency;
2967 test_bb->count = transaction_bb->count;
2968 ei->probability = PROB_ALWAYS;
2970 // Not an abort edge. If both are live, choose one at random, as
2971 // we'll be fixing that up below.
2972 redirect_edge_pred (fallthru_edge, test_bb);
2973 fallthru_edge->flags = EDGE_FALSE_VALUE;
2974 fallthru_edge->probability = PROB_VERY_LIKELY;
2975 fallthru_edge->count
2976 = apply_probability (test_bb->count, fallthru_edge->probability);
2978 // Abort/over edge.
2979 redirect_edge_pred (abort_edge, test_bb);
2980 abort_edge->flags = EDGE_TRUE_VALUE;
2981 abort_edge->probability = PROB_VERY_UNLIKELY;
2982 abort_edge->count
2983 = apply_probability (test_bb->count, abort_edge->probability);
2985 transaction_bb = test_bb;
2988 // If we have both instrumented and uninstrumented code paths, select one.
2989 if (inst_edge && uninst_edge)
2991 basic_block test_bb = create_empty_bb (transaction_bb);
2992 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2993 if (region->restart_block == region->entry_block)
2994 region->restart_block = test_bb;
2996 tree t1 = create_tmp_reg (tm_state_type);
2997 tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);
2999 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
3000 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
3001 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3003 t2 = build_int_cst (tm_state_type, 0);
3004 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
3005 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3007 // Create the edge into test_bb first, as we want to copy values
3008 // out of the fallthru edge.
3009 edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
3010 e->probability = fallthru_edge->probability;
3011 test_bb->count = e->count = fallthru_edge->count;
3012 test_bb->frequency = EDGE_FREQUENCY (e);
3014 // Now update the edges to the inst/uninst implementations.
3015 // For now assume that the paths are equally likely. When using HTM,
3016 // we'll try the uninst path first and fall back to the inst path if HTM
3017 // buffers are exceeded. Without HTM we start with the inst path and
3018 // use the uninst path when falling back to serial mode.
3019 redirect_edge_pred (inst_edge, test_bb);
3020 inst_edge->flags = EDGE_FALSE_VALUE;
3021 inst_edge->probability = REG_BR_PROB_BASE / 2;
3022 inst_edge->count
3023 = apply_probability (test_bb->count, inst_edge->probability);
3025 redirect_edge_pred (uninst_edge, test_bb);
3026 uninst_edge->flags = EDGE_TRUE_VALUE;
3027 uninst_edge->probability = REG_BR_PROB_BASE / 2;
3028 uninst_edge->count
3029 = apply_probability (test_bb->count, uninst_edge->probability);
3032 // If we have no previous special cases, and we have PHIs at the beginning
3033 // of the atomic region, this means we have a loop at the beginning of the
3034 // atomic region that shares the first block. This can cause problems with
3035 // the transaction restart abnormal edges that are added in the tm_edges pass.
3036 // Solve this by adding a new empty block to receive the abnormal edges.
3037 if (region->restart_block == region->entry_block
3038 && phi_nodes (region->entry_block))
3040 basic_block empty_bb = create_empty_bb (transaction_bb);
3041 region->restart_block = empty_bb;
3042 add_bb_to_loop (empty_bb, transaction_bb->loop_father);
3044 redirect_edge_pred (fallthru_edge, empty_bb);
3045 make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
3048 return NULL;
3051 /* Generate the temporary to be used for the return value of
3052 BUILT_IN_TM_START. */
3054 static void *
3055 generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
3057 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
3058 region->tm_state =
3059 create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
3061 // Reset the subcode, post optimizations. We'll fill this in
3062 // again as we process blocks.
3063 if (region->exit_blocks)
3065 gtransaction *transaction_stmt = region->get_transaction_stmt ();
3066 unsigned int subcode = gimple_transaction_subcode (transaction_stmt);
3068 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
3069 subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
3070 | GTMA_MAY_ENTER_IRREVOCABLE
3071 | GTMA_HAS_NO_INSTRUMENTATION);
3072 else
3073 subcode &= GTMA_DECLARATION_MASK;
3074 gimple_transaction_set_subcode (transaction_stmt, subcode);
3077 return NULL;
3080 // Propagate flags from inner transactions outwards.
3081 static void
3082 propagate_tm_flags_out (struct tm_region *region)
3084 if (region == NULL)
3085 return;
3086 propagate_tm_flags_out (region->inner);
3088 if (region->outer && region->outer->transaction_stmt)
3090 unsigned s
3091 = gimple_transaction_subcode (region->get_transaction_stmt ());
3092 s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
3093 | GTMA_MAY_ENTER_IRREVOCABLE);
3094 s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ());
3095 gimple_transaction_set_subcode (region->outer->get_transaction_stmt (),
3099 propagate_tm_flags_out (region->next);
3102 /* Entry point to the MARK phase of TM expansion. Here we replace
3103 transactional memory statements with calls to builtins, and function
3104 calls with their transactional clones (if available). But we don't
3105 yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */
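/* A hedged source-level illustration of the instrumentation (assuming
   -fgnu-tm, 4-byte ints, and illustrative runtime names): the body of

       __transaction_atomic { counter++; }

   is rewritten by this pass into roughly

       t1 = _ITM_RU4 (&counter);     // BUILT_IN_TM_LOAD_4
       t2 = t1 + 1;
       _ITM_WU4 (&counter, t2);      // BUILT_IN_TM_STORE_4

   while the begin/commit bookkeeping and the restart back-edges are dealt
   with by expand_transaction and the tmedge pass.  */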
3107 static unsigned int
3108 execute_tm_mark (void)
3110 pending_edge_inserts_p = false;
3112 expand_regions (all_tm_regions, generate_tm_state, NULL,
3113 /*traverse_clones=*/true);
3115 tm_log_init ();
3117 vec<tm_region *> bb_regions
3118 = get_bb_regions_instrumented (/*traverse_clones=*/true,
3119 /*include_uninstrumented_p=*/false);
3120 struct tm_region *r;
3121 unsigned i;
3123 // Expand memory operations into calls into the runtime.
3124 // This collects log entries as well.
3125 FOR_EACH_VEC_ELT (bb_regions, i, r)
3127 if (r != NULL)
3129 if (r->transaction_stmt)
3131 unsigned sub
3132 = gimple_transaction_subcode (r->get_transaction_stmt ());
3134 /* If we're sure to go irrevocable, there won't be
3135 anything to expand, since the run-time will go
3136 irrevocable right away. */
3137 if (sub & GTMA_DOES_GO_IRREVOCABLE
3138 && sub & GTMA_MAY_ENTER_IRREVOCABLE)
3139 continue;
3141 expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i));
3145 bb_regions.release ();
3147 // Propagate flags from inner transactions outwards.
3148 propagate_tm_flags_out (all_tm_regions);
3150 // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
3151 expand_regions (all_tm_regions, expand_transaction, NULL,
3152 /*traverse_clones=*/false);
3154 tm_log_emit ();
3155 tm_log_delete ();
3157 if (pending_edge_inserts_p)
3158 gsi_commit_edge_inserts ();
3159 free_dominance_info (CDI_DOMINATORS);
3160 return 0;
3163 namespace {
3165 const pass_data pass_data_tm_mark =
3167 GIMPLE_PASS, /* type */
3168 "tmmark", /* name */
3169 OPTGROUP_NONE, /* optinfo_flags */
3170 TV_TRANS_MEM, /* tv_id */
3171 ( PROP_ssa | PROP_cfg ), /* properties_required */
3172 0, /* properties_provided */
3173 0, /* properties_destroyed */
3174 0, /* todo_flags_start */
3175 TODO_update_ssa, /* todo_flags_finish */
3178 class pass_tm_mark : public gimple_opt_pass
3180 public:
3181 pass_tm_mark (gcc::context *ctxt)
3182 : gimple_opt_pass (pass_data_tm_mark, ctxt)
3185 /* opt_pass methods: */
3186 virtual unsigned int execute (function *) { return execute_tm_mark (); }
3188 }; // class pass_tm_mark
3190 } // anon namespace
3192 gimple_opt_pass *
3193 make_pass_tm_mark (gcc::context *ctxt)
3195 return new pass_tm_mark (ctxt);
3199 /* Create an abnormal edge from STMT at iter, splitting the block
3200 as necessary. Adjust *PNEXT as needed for the split block. */
3202 static inline void
3203 split_bb_make_tm_edge (gimple *stmt, basic_block dest_bb,
3204 gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
3206 basic_block bb = gimple_bb (stmt);
3207 if (!gsi_one_before_end_p (iter))
3209 edge e = split_block (bb, stmt);
3210 *pnext = gsi_start_bb (e->dest);
3212 make_edge (bb, dest_bb, EDGE_ABNORMAL);
3214 // Record the need for the edge for the benefit of the rtl passes.
3215 if (cfun->gimple_df->tm_restart == NULL)
3216 cfun->gimple_df->tm_restart
3217 = hash_table<tm_restart_hasher>::create_ggc (31);
3219 struct tm_restart_node dummy;
3220 dummy.stmt = stmt;
3221 dummy.label_or_list = gimple_block_label (dest_bb);
3223 tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy,
3224 INSERT);
3225 struct tm_restart_node *n = *slot;
3226 if (n == NULL)
3228 n = ggc_alloc<tm_restart_node> ();
3229 *n = dummy;
3231 else
3233 tree old = n->label_or_list;
3234 if (TREE_CODE (old) == LABEL_DECL)
3235 old = tree_cons (NULL, old, NULL);
3236 n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
3240 /* Split block BB as necessary for every builtin function we added, and
3241 wire up the abnormal back edges implied by the transaction restart. */
3243 static void
3244 expand_block_edges (struct tm_region *const region, basic_block bb)
3246 gimple_stmt_iterator gsi, next_gsi;
3248 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
3250 gimple *stmt = gsi_stmt (gsi);
3251 gcall *call_stmt;
3253 next_gsi = gsi;
3254 gsi_next (&next_gsi);
3256 // ??? Shouldn't we split for any non-pure, non-irrevocable function?
3257 call_stmt = dyn_cast <gcall *> (stmt);
3258 if ((!call_stmt)
3259 || (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0)
3260 continue;
3262 if (DECL_FUNCTION_CODE (gimple_call_fndecl (call_stmt))
3263 == BUILT_IN_TM_ABORT)
3265 // If we have a ``__transaction_cancel [[outer]]'', there is only
3266 // one abnormal edge: to the transaction marked OUTER.
3267 // All compiler-generated instances of BUILT_IN_TM_ABORT have a
3268 // constant argument, which we can examine here. Users invoking
3269 // TM_ABORT directly get what they deserve.
3270 tree arg = gimple_call_arg (call_stmt, 0);
3271 if (TREE_CODE (arg) == INTEGER_CST
3272 && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
3273 && !decl_is_tm_clone (current_function_decl))
3275 // Find the GTMA_IS_OUTER transaction.
3276 for (struct tm_region *o = region; o; o = o->outer)
3277 if (o->original_transaction_was_outer)
3279 split_bb_make_tm_edge (call_stmt, o->restart_block,
3280 gsi, &next_gsi);
3281 break;
3284 // Otherwise, the front-end should have semantically checked
3285 // outer aborts, but in either case the target region is not
3286 // within this function.
3287 continue;
3290 // Non-outer TM aborts have an abnormal edge to the inner-most
3291 // transaction, the one being aborted.
3292 split_bb_make_tm_edge (call_stmt, region->restart_block, gsi,
3293 &next_gsi);
3296 // All TM builtins have an abnormal edge to the outer-most transaction.
3297 // We never restart inner transactions. For tm clones, we know a priori
3298 // that the outer-most transaction is outside the function.
3299 if (decl_is_tm_clone (current_function_decl))
3300 continue;
3302 if (cfun->gimple_df->tm_restart == NULL)
3303 cfun->gimple_df->tm_restart
3304 = hash_table<tm_restart_hasher>::create_ggc (31);
3306 // All TM builtins have an abnormal edge to the outer-most transaction.
3307 // We never restart inner transactions.
3308 for (struct tm_region *o = region; o; o = o->outer)
3309 if (!o->outer)
3311 split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi);
3312 break;
3315 // Delete any tail-call annotation that may have been added.
3316 // The tail-call pass may have mis-identified the commit as being
3317 // a candidate because we had not yet added this restart edge.
3318 gimple_call_set_tail (call_stmt, false);
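// A hedged illustration of the wiring above: the block ending in the
// region's _ITM_commitTransaction call gets an abnormal edge back to the
// outer-most region's restart_block, so a runtime-detected conflict can
// restart the transaction, while BUILT_IN_TM_ABORT additionally gets an
// edge to the restart block of the aborted (or OUTER-marked) transaction.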
3322 /* Entry point to the final expansion of transactional nodes. */
3324 namespace {
3326 const pass_data pass_data_tm_edges =
3328 GIMPLE_PASS, /* type */
3329 "tmedge", /* name */
3330 OPTGROUP_NONE, /* optinfo_flags */
3331 TV_TRANS_MEM, /* tv_id */
3332 ( PROP_ssa | PROP_cfg ), /* properties_required */
3333 0, /* properties_provided */
3334 0, /* properties_destroyed */
3335 0, /* todo_flags_start */
3336 TODO_update_ssa, /* todo_flags_finish */
3339 class pass_tm_edges : public gimple_opt_pass
3341 public:
3342 pass_tm_edges (gcc::context *ctxt)
3343 : gimple_opt_pass (pass_data_tm_edges, ctxt)
3346 /* opt_pass methods: */
3347 virtual unsigned int execute (function *);
3349 }; // class pass_tm_edges
3351 unsigned int
3352 pass_tm_edges::execute (function *fun)
3354 vec<tm_region *> bb_regions
3355 = get_bb_regions_instrumented (/*traverse_clones=*/false,
3356 /*include_uninstrumented_p=*/true);
3357 struct tm_region *r;
3358 unsigned i;
3360 FOR_EACH_VEC_ELT (bb_regions, i, r)
3361 if (r != NULL)
3362 expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i));
3364 bb_regions.release ();
3366 /* We've got to release the dominance info now, to indicate that it
3367 must be rebuilt completely. Otherwise we'll crash trying to update
3368 the SSA web in the TODO section following this pass. */
3369 free_dominance_info (CDI_DOMINATORS);
3370 bitmap_obstack_release (&tm_obstack);
3371 all_tm_regions = NULL;
3373 return 0;
3376 } // anon namespace
3378 gimple_opt_pass *
3379 make_pass_tm_edges (gcc::context *ctxt)
3381 return new pass_tm_edges (ctxt);
3384 /* Helper function for expand_regions. Expand REGION and recurse to
3385 the inner region. Call CALLBACK on each region. CALLBACK returns
3386 NULL to continue the traversal, otherwise a non-null value which
3387 this function will return as well. TRAVERSE_CLONES is true if we
3388 should traverse transactional clones. */
3390 static void *
3391 expand_regions_1 (struct tm_region *region,
3392 void *(*callback)(struct tm_region *, void *),
3393 void *data,
3394 bool traverse_clones)
3396 void *retval = NULL;
3397 if (region->exit_blocks
3398 || (traverse_clones && decl_is_tm_clone (current_function_decl)))
3400 retval = callback (region, data);
3401 if (retval)
3402 return retval;
3404 if (region->inner)
3406 retval = expand_regions (region->inner, callback, data, traverse_clones);
3407 if (retval)
3408 return retval;
3410 return retval;
3413 /* Traverse the regions enclosed and including REGION. Execute
3414 CALLBACK for each region, passing DATA. CALLBACK returns NULL to
3415 continue the traversal, otherwise a non-null value which this
3416 function will return as well. TRAVERSE_CLONES is true if we should
3417 traverse transactional clones. */
3419 static void *
3420 expand_regions (struct tm_region *region,
3421 void *(*callback)(struct tm_region *, void *),
3422 void *data,
3423 bool traverse_clones)
3425 void *retval = NULL;
3426 while (region)
3428 retval = expand_regions_1 (region, callback, data, traverse_clones);
3429 if (retval)
3430 return retval;
3431 region = region->next;
3433 return retval;
3437 /* A unique TM memory operation. */
3438 struct tm_memop
3440 /* Unique ID that all memory operations to the same location have. */
3441 unsigned int value_id;
3442 /* Address of load/store. */
3443 tree addr;
3446 /* TM memory operation hashtable helpers. */
3448 struct tm_memop_hasher : free_ptr_hash <tm_memop>
3450 static inline hashval_t hash (const tm_memop *);
3451 static inline bool equal (const tm_memop *, const tm_memop *);
3454 /* Htab support. Return a hash value for a `tm_memop'. */
3455 inline hashval_t
3456 tm_memop_hasher::hash (const tm_memop *mem)
3458 tree addr = mem->addr;
3459 /* We drill down to the SSA_NAME/DECL for the hash, but equality is
3460 actually done with operand_equal_p (see tm_memop_hasher::equal). */
3461 if (TREE_CODE (addr) == ADDR_EXPR)
3462 addr = TREE_OPERAND (addr, 0);
3463 return iterative_hash_expr (addr, 0);
3466 /* Htab support. Return true if two tm_memop's are the same. */
3467 inline bool
3468 tm_memop_hasher::equal (const tm_memop *mem1, const tm_memop *mem2)
3470 return operand_equal_p (mem1->addr, mem2->addr, 0);
3473 /* Sets for solving data flow equations in the memory optimization pass. */
3474 struct tm_memopt_bitmaps
3476 /* Stores available to this BB upon entry. Basically, stores that
3477 dominate this BB. */
3478 bitmap store_avail_in;
3479 /* Stores available at the end of this BB. */
3480 bitmap store_avail_out;
3481 bitmap store_antic_in;
3482 bitmap store_antic_out;
3483 /* Reads available to this BB upon entry. Basically, reads that
3484 dominate this BB. */
3485 bitmap read_avail_in;
3486 /* Reads available at the end of this BB. */
3487 bitmap read_avail_out;
3488 /* Reads performed in this BB. */
3489 bitmap read_local;
3490 /* Writes performed in this BB. */
3491 bitmap store_local;
3493 /* Temporary storage for pass. */
3494 /* Is the current BB in the worklist? */
3495 bool avail_in_worklist_p;
3496 /* Have we visited this BB? */
3497 bool visited_p;
3500 static bitmap_obstack tm_memopt_obstack;
3502 /* Unique counter for TM loads and stores. Loads and stores of the
3503 same address get the same ID. */
3504 static unsigned int tm_memopt_value_id;
3505 static hash_table<tm_memop_hasher> *tm_memopt_value_numbers;
3507 #define STORE_AVAIL_IN(BB) \
3508 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
3509 #define STORE_AVAIL_OUT(BB) \
3510 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
3511 #define STORE_ANTIC_IN(BB) \
3512 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
3513 #define STORE_ANTIC_OUT(BB) \
3514 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
3515 #define READ_AVAIL_IN(BB) \
3516 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
3517 #define READ_AVAIL_OUT(BB) \
3518 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
3519 #define READ_LOCAL(BB) \
3520 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
3521 #define STORE_LOCAL(BB) \
3522 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
3523 #define AVAIL_IN_WORKLIST_P(BB) \
3524 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
3525 #define BB_VISITED_P(BB) \
3526 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
3528 /* Given a TM load/store in STMT, return the value number for the address
3529 it accesses. */
3531 static unsigned int
3532 tm_memopt_value_number (gimple *stmt, enum insert_option op)
3534 struct tm_memop tmpmem, *mem;
3535 tm_memop **slot;
3537 gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
3538 tmpmem.addr = gimple_call_arg (stmt, 0);
3539 slot = tm_memopt_value_numbers->find_slot (&tmpmem, op);
3540 if (*slot)
3541 mem = *slot;
3542 else if (op == INSERT)
3544 mem = XNEW (struct tm_memop);
3545 *slot = mem;
3546 mem->value_id = tm_memopt_value_id++;
3547 mem->addr = tmpmem.addr;
3549 else
3550 gcc_unreachable ();
3551 return mem->value_id;
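/* A hedged usage example: tm_memopt_accumulate_memops below numbers every
   TM load and store by its address operand, so

       _ITM_WU4 (&x, 1);     value_id 0, recorded in STORE_LOCAL
       t1 = _ITM_RU4 (&x);   same address, so value_id 0 again, READ_LOCAL
       t2 = _ITM_RU4 (&y);   new address, value_id 1

   which is what lets the dataflow below reason about "the same location"
   across different loads and stores.  */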
3554 /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
3556 static void
3557 tm_memopt_accumulate_memops (basic_block bb)
3559 gimple_stmt_iterator gsi;
3561 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3563 gimple *stmt = gsi_stmt (gsi);
3564 bitmap bits;
3565 unsigned int loc;
3567 if (is_tm_store (stmt))
3568 bits = STORE_LOCAL (bb);
3569 else if (is_tm_load (stmt))
3570 bits = READ_LOCAL (bb);
3571 else
3572 continue;
3574 loc = tm_memopt_value_number (stmt, INSERT);
3575 bitmap_set_bit (bits, loc);
3576 if (dump_file)
3578 fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
3579 is_tm_load (stmt) ? "LOAD" : "STORE", loc,
3580 gimple_bb (stmt)->index);
3581 print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
3582 fprintf (dump_file, "\n");
3587 /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */
3589 static void
3590 dump_tm_memopt_set (const char *set_name, bitmap bits)
3592 unsigned i;
3593 bitmap_iterator bi;
3594 const char *comma = "";
3596 fprintf (dump_file, "TM memopt: %s: [", set_name);
3597 EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
3599 hash_table<tm_memop_hasher>::iterator hi;
3600 struct tm_memop *mem = NULL;
3602 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
3603 FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi)
3604 if (mem->value_id == i)
3605 break;
3606 gcc_assert (mem->value_id == i);
3607 fprintf (dump_file, "%s", comma);
3608 comma = ", ";
3609 print_generic_expr (dump_file, mem->addr, 0);
3611 fprintf (dump_file, "]\n");
3614 /* Prettily dump all of the memopt sets in BLOCKS. */
3616 static void
3617 dump_tm_memopt_sets (vec<basic_block> blocks)
3619 size_t i;
3620 basic_block bb;
3622 for (i = 0; blocks.iterate (i, &bb); ++i)
3624 fprintf (dump_file, "------------BB %d---------\n", bb->index);
3625 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
3626 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
3627 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
3628 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
3629 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
3630 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
3634 /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
3636 static void
3637 tm_memopt_compute_avin (basic_block bb)
3639 edge e;
3640 unsigned ix;
3642 /* Seed with the AVOUT of any predecessor. */
3643 for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
3645 e = EDGE_PRED (bb, ix);
3646 /* Make sure we have already visited this BB, so that it has been
3647 initialized.
3649 If e->src->aux is NULL, this predecessor is actually on an
3650 enclosing transaction. We only care about the current
3651 transaction, so ignore it. */
3652 if (e->src->aux && BB_VISITED_P (e->src))
3654 bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3655 bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3656 break;
3660 for (; ix < EDGE_COUNT (bb->preds); ix++)
3662 e = EDGE_PRED (bb, ix);
3663 if (e->src->aux && BB_VISITED_P (e->src))
3665 bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3666 bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3670 BB_VISITED_P (bb) = true;
3673 /* Compute the STORE_ANTIC_IN for the basic block BB. */
3675 static void
3676 tm_memopt_compute_antin (basic_block bb)
3678 edge e;
3679 unsigned ix;
3681 /* Seed with the ANTIC_OUT of any successor. */
3682 for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
3684 e = EDGE_SUCC (bb, ix);
3685 /* Make sure we have already visited this BB, so that it has been
3686 initialized. */
3687 if (BB_VISITED_P (e->dest))
3689 bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3690 break;
3694 for (; ix < EDGE_COUNT (bb->succs); ix++)
3696 e = EDGE_SUCC (bb, ix);
3697 if (BB_VISITED_P (e->dest))
3698 bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3701 BB_VISITED_P (bb) = true;
3704 /* Compute the AVAIL sets for every basic block in BLOCKS.
3706 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3708 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3709 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
3711 This is basically what we do in lcm's compute_available(), but here
3712 we calculate two sets of sets (one for STOREs and one for READs),
3713 and we work on a region instead of the entire CFG.
3715 REGION is the TM region.
3716 BLOCKS are the basic blocks in the region. */
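/* A hedged worked example on a diamond-shaped region (entry E branching to
   B1 and B2, joining at J), tracking a single location L:

       STORE_LOCAL:   E = {}   B1 = {L}   B2 = {L}   J = {}

       STORE_AVAIL_OUT (E)  = {}
       STORE_AVAIL_IN (B1)  = STORE_AVAIL_IN (B2) = {}
       STORE_AVAIL_OUT (B1) = STORE_AVAIL_OUT (B2) = {L}
       STORE_AVAIL_IN (J)   = intersect ({L}, {L}) = {L}

   so a store to L in J can later be downgraded to a write-after-write
   hint by tm_memopt_transform_blocks.  */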
3718 static void
3719 tm_memopt_compute_available (struct tm_region *region,
3720 vec<basic_block> blocks)
3722 edge e;
3723 basic_block *worklist, *qin, *qout, *qend, bb;
3724 unsigned int qlen, i;
3725 edge_iterator ei;
3726 bool changed;
3728 /* Allocate a worklist array/queue. Entries are only added to the
3729 list if they were not already on the list. So the size is
3730 bounded by the number of basic blocks in the region. */
3731 qlen = blocks.length () - 1;
3732 qin = qout = worklist =
3733 XNEWVEC (basic_block, qlen);
3735 /* Put every block in the region on the worklist. */
3736 for (i = 0; blocks.iterate (i, &bb); ++i)
3738 /* Seed AVAIL_OUT with the LOCAL set. */
3739 bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
3740 bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
3742 AVAIL_IN_WORKLIST_P (bb) = true;
3743 /* No need to insert the entry block, since it has an AVIN of
3744 null, and an AVOUT that has already been seeded in. */
3745 if (bb != region->entry_block)
3746 *qin++ = bb;
3749 /* The entry block has been initialized with the local sets. */
3750 BB_VISITED_P (region->entry_block) = true;
3752 qin = worklist;
3753 qend = &worklist[qlen];
3755 /* Iterate until the worklist is empty. */
3756 while (qlen)
3758 /* Take the first entry off the worklist. */
3759 bb = *qout++;
3760 qlen--;
3762 if (qout >= qend)
3763 qout = worklist;
3765 /* This block can be added to the worklist again if necessary. */
3766 AVAIL_IN_WORKLIST_P (bb) = false;
3767 tm_memopt_compute_avin (bb);
3769 /* Note: We do not add the LOCAL sets here because we already
3770 seeded the AVAIL_OUT sets with them. */
3771 changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
3772 changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
3773 if (changed
3774 && (region->exit_blocks == NULL
3775 || !bitmap_bit_p (region->exit_blocks, bb->index)))
3776 /* If the out state of this block changed, then we need to add
3777 its successors to the worklist if they are not already in. */
3778 FOR_EACH_EDGE (e, ei, bb->succs)
3779 if (!AVAIL_IN_WORKLIST_P (e->dest)
3780 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3782 *qin++ = e->dest;
3783 AVAIL_IN_WORKLIST_P (e->dest) = true;
3784 qlen++;
3786 if (qin >= qend)
3787 qin = worklist;
3791 free (worklist);
3793 if (dump_file)
3794 dump_tm_memopt_sets (blocks);
3797 /* Compute ANTIC sets for every basic block in BLOCKS.
3799 We compute STORE_ANTIC_OUT as follows:
3801 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
3802 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
3804 REGION is the TM region.
3805 BLOCKS are the basic blocks in the region. */
3807 static void
3808 tm_memopt_compute_antic (struct tm_region *region,
3809 vec<basic_block> blocks)
3811 edge e;
3812 basic_block *worklist, *qin, *qout, *qend, bb;
3813 unsigned int qlen;
3814 int i;
3815 edge_iterator ei;
3817 /* Allocate a worklist array/queue. Entries are only added to the
3818 list if they were not already on the list. So the size is
3819 bounded by the number of basic blocks in the region. */
3820 qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());
3822 for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
3824 bb = blocks[i];
3826 /* Seed ANTIC_OUT with the LOCAL set. */
3827 bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
3829 /* Put every block in the region on the worklist. */
3830 AVAIL_IN_WORKLIST_P (bb) = true;
3831 /* No need to insert exit blocks, since their ANTIC_IN is NULL,
3832 and their ANTIC_OUT has already been seeded in. */
3833 if (region->exit_blocks
3834 && !bitmap_bit_p (region->exit_blocks, bb->index))
3836 qlen++;
3837 *qin++ = bb;
3841 /* The exit blocks have been initialized with the local sets. */
3842 if (region->exit_blocks)
3844 unsigned int i;
3845 bitmap_iterator bi;
3846 EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
3847 BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true;
3850 qin = worklist;
3851 qend = &worklist[qlen];
3853 /* Iterate until the worklist is empty. */
3854 while (qlen)
3856 /* Take the first entry off the worklist. */
3857 bb = *qout++;
3858 qlen--;
3860 if (qout >= qend)
3861 qout = worklist;
3863 /* This block can be added to the worklist again if necessary. */
3864 AVAIL_IN_WORKLIST_P (bb) = false;
3865 tm_memopt_compute_antin (bb);
3867 /* Note: We do not add the LOCAL sets here because we already
3868 seeded the ANTIC_OUT sets with them. */
3869 if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
3870 && bb != region->entry_block)
3871 /* If the out state of this block changed, then we need to add
3872 its predecessors to the worklist if they are not already in. */
3873 FOR_EACH_EDGE (e, ei, bb->preds)
3874 if (!AVAIL_IN_WORKLIST_P (e->src))
3876 *qin++ = e->src;
3877 AVAIL_IN_WORKLIST_P (e->src) = true;
3878 qlen++;
3880 if (qin >= qend)
3881 qin = worklist;
3885 free (worklist);
3887 if (dump_file)
3888 dump_tm_memopt_sets (blocks);
3891 /* Offsets of load variants from TM_LOAD. For example,
3892 BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
3893 See gtm-builtins.def. */
3894 #define TRANSFORM_RAR 1
3895 #define TRANSFORM_RAW 2
3896 #define TRANSFORM_RFW 3
3897 /* Offsets of store variants from TM_STORE. */
3898 #define TRANSFORM_WAR 1
3899 #define TRANSFORM_WAW 2
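/* A hedged illustration of the offset trick (libitm names follow the usual
   ITM ABI convention): for a 4-byte load,

       BUILT_IN_TM_LOAD_4                  _ITM_RU4    plain load
       BUILT_IN_TM_LOAD_4 + TRANSFORM_RAR  _ITM_RaRU4  read-after-read
       BUILT_IN_TM_LOAD_4 + TRANSFORM_RAW  _ITM_RaWU4  read-after-write
       BUILT_IN_TM_LOAD_4 + TRANSFORM_RFW  _ITM_RfWU4  read-for-write

   and tm_memopt_transform_stmt below simply adds the offset to the
   builtin's DECL_FUNCTION_CODE to select the variant.  */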
3901 /* Inform about a load/store optimization. */
3903 static void
3904 dump_tm_memopt_transform (gimple *stmt)
3906 if (dump_file)
3908 fprintf (dump_file, "TM memopt: transforming: ");
3909 print_gimple_stmt (dump_file, stmt, 0, 0);
3910 fprintf (dump_file, "\n");
3914 /* Perform a read/write optimization. Replaces the TM builtin in STMT
3915 by a builtin that is OFFSET entries down in the builtins table in
3916 gtm-builtins.def. */
3918 static void
3919 tm_memopt_transform_stmt (unsigned int offset,
3920 gcall *stmt,
3921 gimple_stmt_iterator *gsi)
3923 tree fn = gimple_call_fn (stmt);
3924 gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
3925 TREE_OPERAND (fn, 0)
3926 = builtin_decl_explicit ((enum built_in_function)
3927 (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
3928 + offset));
3929 gimple_call_set_fn (stmt, fn);
3930 gsi_replace (gsi, stmt, true);
3931 dump_tm_memopt_transform (stmt);
3934 /* Perform the actual TM memory optimization transformations in the
3935 basic blocks in BLOCKS. */
3937 static void
3938 tm_memopt_transform_blocks (vec<basic_block> blocks)
3940 size_t i;
3941 basic_block bb;
3942 gimple_stmt_iterator gsi;
3944 for (i = 0; blocks.iterate (i, &bb); ++i)
3946 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3948 gimple *stmt = gsi_stmt (gsi);
3949 bitmap read_avail = READ_AVAIL_IN (bb);
3950 bitmap store_avail = STORE_AVAIL_IN (bb);
3951 bitmap store_antic = STORE_ANTIC_OUT (bb);
3952 unsigned int loc;
3954 if (is_tm_simple_load (stmt))
3956 gcall *call_stmt = as_a <gcall *> (stmt);
3957 loc = tm_memopt_value_number (stmt, NO_INSERT);
3958 if (store_avail && bitmap_bit_p (store_avail, loc))
3959 tm_memopt_transform_stmt (TRANSFORM_RAW, call_stmt, &gsi);
3960 else if (store_antic && bitmap_bit_p (store_antic, loc))
3962 tm_memopt_transform_stmt (TRANSFORM_RFW, call_stmt, &gsi);
3963 bitmap_set_bit (store_avail, loc);
3965 else if (read_avail && bitmap_bit_p (read_avail, loc))
3966 tm_memopt_transform_stmt (TRANSFORM_RAR, call_stmt, &gsi);
3967 else
3968 bitmap_set_bit (read_avail, loc);
3970 else if (is_tm_simple_store (stmt))
3972 gcall *call_stmt = as_a <gcall *> (stmt);
3973 loc = tm_memopt_value_number (stmt, NO_INSERT);
3974 if (store_avail && bitmap_bit_p (store_avail, loc))
3975 tm_memopt_transform_stmt (TRANSFORM_WAW, call_stmt, &gsi);
3976 else
3978 if (read_avail && bitmap_bit_p (read_avail, loc))
3979 tm_memopt_transform_stmt (TRANSFORM_WAR, call_stmt, &gsi);
3980 bitmap_set_bit (store_avail, loc);
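/* A small worked example (a sketch; the builtin actually chosen
   depends on the access size):

       __transaction_atomic { x = 1; y = x; x = 2; }

   is instrumented as a TM store to &x, a TM load from &x, and a second
   TM store to &x.  At the load the earlier store is available, so the
   load becomes its read-after-write (TRANSFORM_RAW) variant; at the
   second store the first store is available, so it becomes
   write-after-write (TRANSFORM_WAW).  If the store to &x only occurred
   after the load but was anticipated (ANTIC) at that point, the load
   would instead become read-for-write (TRANSFORM_RFW).  */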
3987 /* Return a new set of bitmaps for a BB. */
3989 static struct tm_memopt_bitmaps *
3990 tm_memopt_init_sets (void)
3992 struct tm_memopt_bitmaps *b
3993 = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
3994 b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3995 b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3996 b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
3997 b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
3999 b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
4000 b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
4001 b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
4002 b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
4003 return b;
4006 /* Free sets computed for each BB. */
4008 static void
4009 tm_memopt_free_sets (vec<basic_block> blocks)
4011 size_t i;
4012 basic_block bb;
4014 for (i = 0; blocks.iterate (i, &bb); ++i)
4015 bb->aux = NULL;
4018 /* Clear the visited bit for every basic block in BLOCKS. */
4020 static void
4021 tm_memopt_clear_visited (vec<basic_block> blocks)
4023 size_t i;
4024 basic_block bb;
4026 for (i = 0; blocks.iterate (i, &bb); ++i)
4027 BB_VISITED_P (bb) = false;
4030 /* Replace TM load/stores with hints for the runtime. We handle
4031 things like read-after-write, write-after-read, read-after-read,
4032 read-for-write, etc. */
4034 static unsigned int
4035 execute_tm_memopt (void)
4037 struct tm_region *region;
4038 vec<basic_block> bbs;
4040 tm_memopt_value_id = 0;
4041 tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10);
4043 for (region = all_tm_regions; region; region = region->next)
4045 /* All the TM stores/loads in the current region. */
4046 size_t i;
4047 basic_block bb;
4049 bitmap_obstack_initialize (&tm_memopt_obstack);
4051 /* Save all BBs for the current region. */
4052 bbs = get_tm_region_blocks (region->entry_block,
4053 region->exit_blocks,
4054 region->irr_blocks,
4055 NULL,
4056 false);
4058 /* Collect all the memory operations. */
4059 for (i = 0; bbs.iterate (i, &bb); ++i)
4061 bb->aux = tm_memopt_init_sets ();
4062 tm_memopt_accumulate_memops (bb);
4065 /* Solve data flow equations and transform each block accordingly. */
4066 tm_memopt_clear_visited (bbs);
4067 tm_memopt_compute_available (region, bbs);
4068 tm_memopt_clear_visited (bbs);
4069 tm_memopt_compute_antic (region, bbs);
4070 tm_memopt_transform_blocks (bbs);
4072 tm_memopt_free_sets (bbs);
4073 bbs.release ();
4074 bitmap_obstack_release (&tm_memopt_obstack);
4075 tm_memopt_value_numbers->empty ();
4078 delete tm_memopt_value_numbers;
4079 tm_memopt_value_numbers = NULL;
4080 return 0;
4083 namespace {
4085 const pass_data pass_data_tm_memopt =
4087 GIMPLE_PASS, /* type */
4088 "tmmemopt", /* name */
4089 OPTGROUP_NONE, /* optinfo_flags */
4090 TV_TRANS_MEM, /* tv_id */
4091 ( PROP_ssa | PROP_cfg ), /* properties_required */
4092 0, /* properties_provided */
4093 0, /* properties_destroyed */
4094 0, /* todo_flags_start */
4095 0, /* todo_flags_finish */
4098 class pass_tm_memopt : public gimple_opt_pass
4100 public:
4101 pass_tm_memopt (gcc::context *ctxt)
4102 : gimple_opt_pass (pass_data_tm_memopt, ctxt)
4105 /* opt_pass methods: */
4106 virtual bool gate (function *) { return flag_tm && optimize > 0; }
4107 virtual unsigned int execute (function *) { return execute_tm_memopt (); }
4109 }; // class pass_tm_memopt
4111 } // anon namespace
4113 gimple_opt_pass *
4114 make_pass_tm_memopt (gcc::context *ctxt)
4116 return new pass_tm_memopt (ctxt);
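/* Usage note (a sketch, assuming the usual per-pass dump-flag naming):
   the pass runs only for -fgnu-tm with optimization enabled, and its
   decisions can be inspected in its tree dump, e.g.

       gcc -fgnu-tm -O2 -fdump-tree-tmmemopt foo.c

   which records the "TM memopt: transforming: ..." lines emitted by
   dump_tm_memopt_transform above.  */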
4120 /* Interprocedural analysis for the creation of transactional clones.
4121 The aim of this pass is to find which functions are referenced in
4122 a non-irrevocable transaction context, and for those over which
4123 we have control (or user directive), create a version of the
4124 function which uses only the transactional interface to reference
4125 protected memories. This analysis proceeds in several steps:
4127 (1) Collect the set of all possible transactional clones:
4129 (a) For all local public functions marked tm_callable, push
4130 them onto the tm_callee queue.
4132 (b) For all local functions, scan for calls in transaction blocks.
4133 Push the caller and callee onto the tm_caller and tm_callee
4134 queues. Count the number of callers for each callee.
4136 (c) For each local function on the callee list, assume we will
4137 create a transactional clone. Push *all* calls onto the
4138 callee queues; count the number of clone callers separately
4139 from the number of original callers.
4141 (2) Propagate irrevocable status up the dominator tree:
4143 (a) Any external function on the callee list that is not marked
4144 tm_callable is irrevocable. Push all callers of such onto
4145 a worklist.
4147 (b) For each function on the worklist, mark each block that
4148 contains an irrevocable call. Use the AND operator to
4149 propagate that mark up the dominator tree.
4151 (c) If we reach the entry block for a possible transactional
4152 clone, then the transactional clone is irrevocable, and
4153 we should not create the clone after all. Push all
4154 callers onto the worklist.
4156 (d) Place tm_irrevocable calls at the beginning of the relevant
4157 blocks. Special case here is the entry block for the entire
4158 transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
4159 the library to begin the region in serial mode. Decrement
4160 the call count for all callees in the irrevocable region.
4162 (3) Create the transactional clones:
4164 Any tm_callee that still has a non-zero call count is cloned.
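/* A worked example of the scheme above (an illustrative sketch using
   the GNU TM attribute spellings):

       __attribute__((transaction_callable)) void api (void) { ... }
       static void helper (void) { ... nothing irrevocable ... }
       static void other (void) { ... }

       void user (void)
       {
         __transaction_relaxed { helper (); }
         other ();
       }

   Step (1) queues api (explicitly tm_callable) and helper (called
   inside a transaction); other is never referenced transactionally and
   is not queued.  Assuming helper really contains nothing irrevocable,
   step (3) creates transactional clones of api and helper, named via
   tm_mangle below (for the plain C symbol helper that is
   _ZGTt6helper), and the call to helper inside the transaction is
   redirected to the clone.  other is left untouched.  */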
4167 /* This structure is stored in the AUX field of each cgraph_node. */
4168 struct tm_ipa_cg_data
4170 /* The clone of the function that got created. */
4171 struct cgraph_node *clone;
4173 /* The tm regions in the normal function. */
4174 struct tm_region *all_tm_regions;
4176 /* The blocks of the normal/clone functions that contain irrevocable
4177 calls, or blocks that are post-dominated by irrevocable calls. */
4178 bitmap irrevocable_blocks_normal;
4179 bitmap irrevocable_blocks_clone;
4181 /* The blocks of the normal function that are involved in transactions. */
4182 bitmap transaction_blocks_normal;
4184 /* The number of callers to the transactional clone of this function
4185 from normal and transactional clones respectively. */
4186 unsigned tm_callers_normal;
4187 unsigned tm_callers_clone;
4189 /* True if all calls to this function's transactional clone
4190 are irrevocable. Also automatically true if the function
4191 has no transactional clone. */
4192 bool is_irrevocable;
4194 /* Flags indicating the presence of this function in various queues. */
4195 bool in_callee_queue;
4196 bool in_worklist;
4198 /* Flags indicating the kind of scan desired while in the worklist. */
4199 bool want_irr_scan_normal;
4202 typedef vec<cgraph_node *> cgraph_node_queue;
4204 /* Return the ipa data associated with NODE, allocating zeroed memory
4205 if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
4206 and set *NODE accordingly. */
4208 static struct tm_ipa_cg_data *
4209 get_cg_data (struct cgraph_node **node, bool traverse_aliases)
4211 struct tm_ipa_cg_data *d;
4213 if (traverse_aliases && (*node)->alias)
4214 *node = (*node)->get_alias_target ();
4216 d = (struct tm_ipa_cg_data *) (*node)->aux;
4218 if (d == NULL)
4220 d = (struct tm_ipa_cg_data *)
4221 obstack_alloc (&tm_obstack.obstack, sizeof (*d));
4222 (*node)->aux = (void *) d;
4223 memset (d, 0, sizeof (*d));
4226 return d;
4229 /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
4230 it is already present. */
4232 static void
4233 maybe_push_queue (struct cgraph_node *node,
4234 cgraph_node_queue *queue_p, bool *in_queue_p)
4236 if (!*in_queue_p)
4238 *in_queue_p = true;
4239 queue_p->safe_push (node);
4243 /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
4244 Queue all callees within block BB. */
4246 static void
4247 ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
4248 basic_block bb, bool for_clone)
4250 gimple_stmt_iterator gsi;
4252 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4254 gimple *stmt = gsi_stmt (gsi);
4255 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4257 tree fndecl = gimple_call_fndecl (stmt);
4258 if (fndecl)
4260 struct tm_ipa_cg_data *d;
4261 unsigned *pcallers;
4262 struct cgraph_node *node;
4264 if (is_tm_ending_fndecl (fndecl))
4265 continue;
4266 if (find_tm_replacement_function (fndecl))
4267 continue;
4269 node = cgraph_node::get (fndecl);
4270 gcc_assert (node != NULL);
4271 d = get_cg_data (&node, true);
4273 pcallers = (for_clone ? &d->tm_callers_clone
4274 : &d->tm_callers_normal);
4275 *pcallers += 1;
4277 maybe_push_queue (node, callees_p, &d->in_callee_queue);
4283 /* Scan all calls within the transaction regions of the current function
4284 (whose IPA data is D), and push the callees onto CALLEES_P. */
4286 static void
4287 ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
4288 cgraph_node_queue *callees_p)
4290 d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
4291 d->all_tm_regions = all_tm_regions;
4293 for (tm_region *r = all_tm_regions; r; r = r->next)
4295 vec<basic_block> bbs;
4296 basic_block bb;
4297 unsigned i;
4299 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
4300 d->transaction_blocks_normal, false, false);
4302 FOR_EACH_VEC_ELT (bbs, i, bb)
4303 ipa_tm_scan_calls_block (callees_p, bb, false);
4305 bbs.release ();
4309 /* Scan all calls in NODE as if this is the transactional clone,
4310 and push the destinations into the callee queue. */
4312 static void
4313 ipa_tm_scan_calls_clone (struct cgraph_node *node,
4314 cgraph_node_queue *callees_p)
4316 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
4317 basic_block bb;
4319 FOR_EACH_BB_FN (bb, fn)
4320 ipa_tm_scan_calls_block (callees_p, bb, true);
4323 /* The function NODE has been detected to be irrevocable. Push all
4324 of its callers onto WORKLIST for the purpose of re-scanning them. */
4326 static void
4327 ipa_tm_note_irrevocable (struct cgraph_node *node,
4328 cgraph_node_queue *worklist_p)
4330 struct tm_ipa_cg_data *d = get_cg_data (&node, true);
4331 struct cgraph_edge *e;
4333 d->is_irrevocable = true;
4335 for (e = node->callers; e ; e = e->next_caller)
4337 basic_block bb;
4338 struct cgraph_node *caller;
4340 /* Don't examine recursive calls. */
4341 if (e->caller == node)
4342 continue;
4343 /* Even if we think we can go irrevocable, believe the user
4344 above all. */
4345 if (is_tm_safe_or_pure (e->caller->decl))
4346 continue;
4348 caller = e->caller;
4349 d = get_cg_data (&caller, true);
4351 /* Check if the call site lies within a transaction region of the caller.
4352 If so, schedule the caller for a normal re-scan as well. */
4353 bb = gimple_bb (e->call_stmt);
4354 gcc_assert (bb != NULL);
4355 if (d->transaction_blocks_normal
4356 && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
4357 d->want_irr_scan_normal = true;
4359 maybe_push_queue (caller, worklist_p, &d->in_worklist);
4363 /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
4364 within the block is irrevocable. */
4366 static bool
4367 ipa_tm_scan_irr_block (basic_block bb)
4369 gimple_stmt_iterator gsi;
4370 tree fn;
4372 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4374 gimple *stmt = gsi_stmt (gsi);
4375 switch (gimple_code (stmt))
4377 case GIMPLE_ASSIGN:
4378 if (gimple_assign_single_p (stmt))
4380 tree lhs = gimple_assign_lhs (stmt);
4381 tree rhs = gimple_assign_rhs1 (stmt);
4382 if (volatile_lvalue_p (lhs) || volatile_lvalue_p (rhs))
4383 return true;
4385 break;
4387 case GIMPLE_CALL:
4389 tree lhs = gimple_call_lhs (stmt);
4390 if (lhs && volatile_lvalue_p (lhs))
4391 return true;
4393 if (is_tm_pure_call (stmt))
4394 break;
4396 fn = gimple_call_fn (stmt);
4398 /* Functions with the attribute are by definition irrevocable. */
4399 if (is_tm_irrevocable (fn))
4400 return true;
4402 /* For direct function calls, go ahead and check for replacement
4403 functions, or transitive irrevocable functions. For indirect
4404 functions, we'll ask the runtime. */
4405 if (TREE_CODE (fn) == ADDR_EXPR)
4407 struct tm_ipa_cg_data *d;
4408 struct cgraph_node *node;
4410 fn = TREE_OPERAND (fn, 0);
4411 if (is_tm_ending_fndecl (fn))
4412 break;
4413 if (find_tm_replacement_function (fn))
4414 break;
4416 node = cgraph_node::get (fn);
4417 d = get_cg_data (&node, true);
4419 /* Return true if irrevocable, but above all, believe
4420 the user. */
4421 if (d->is_irrevocable
4422 && !is_tm_safe_or_pure (fn))
4423 return true;
4425 break;
4428 case GIMPLE_ASM:
4429 /* ??? The Approved Method of indicating that an inline
4430 assembly statement is not relevant to the transaction
4431 is to wrap it in a __tm_waiver block. This is not
4432 yet implemented, so we can't check for it. */
4433 if (is_tm_safe (current_function_decl))
4435 tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
4436 SET_EXPR_LOCATION (t, gimple_location (stmt));
4437 error ("%Kasm not allowed in %<transaction_safe%> function", t);
4439 return true;
4441 default:
4442 break;
4446 return false;
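/* For example (a sketch): either of the statements

       *(volatile int *) p = 1;
       asm volatile ("" ::: "memory");

   makes the containing block irrevocable, and the asm is additionally
   rejected with an error when it appears in a function marked
   transaction_safe.  */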
4449 /* For each of the blocks seeded in PQUEUE, walk the CFG looking
4450 for new irrevocable blocks, marking them in NEW_IRR. Don't bother
4451 scanning past OLD_IRR or EXIT_BLOCKS. */
4453 static bool
4454 ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
4455 bitmap old_irr, bitmap exit_blocks)
4457 bool any_new_irr = false;
4458 edge e;
4459 edge_iterator ei;
4460 bitmap visited_blocks = BITMAP_ALLOC (NULL);
4464 basic_block bb = pqueue->pop ();
4466 /* Don't re-scan blocks we know already are irrevocable. */
4467 if (old_irr && bitmap_bit_p (old_irr, bb->index))
4468 continue;
4470 if (ipa_tm_scan_irr_block (bb))
4472 bitmap_set_bit (new_irr, bb->index);
4473 any_new_irr = true;
4475 else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
4477 FOR_EACH_EDGE (e, ei, bb->succs)
4478 if (!bitmap_bit_p (visited_blocks, e->dest->index))
4480 bitmap_set_bit (visited_blocks, e->dest->index);
4481 pqueue->safe_push (e->dest);
4485 while (!pqueue->is_empty ());
4487 BITMAP_FREE (visited_blocks);
4489 return any_new_irr;
4492 /* Propagate the irrevocable property both up and down the dominator tree.
4493 ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
4494 are the exit blocks of the TM region; OLD_IRR holds the results of a
4495 previous, fully propagated scan of the dominator tree; NEW_IRR is the set
4496 of new blocks gaining the irrevocable property during the current scan. */
4498 static void
4499 ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
4500 bitmap old_irr, bitmap exit_blocks)
4502 vec<basic_block> bbs;
4503 bitmap all_region_blocks;
4505 /* If this block is in the old set, no need to rescan. */
4506 if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
4507 return;
4509 all_region_blocks = BITMAP_ALLOC (&tm_obstack);
4510 bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
4511 all_region_blocks, false);
4514 basic_block bb = bbs.pop ();
4515 bool this_irr = bitmap_bit_p (new_irr, bb->index);
4516 bool all_son_irr = false;
4517 edge_iterator ei;
4518 edge e;
4520 /* Propagate up. If my children are, I am too, but we must have
4521 at least one child that is. */
4522 if (!this_irr)
4524 FOR_EACH_EDGE (e, ei, bb->succs)
4526 if (!bitmap_bit_p (new_irr, e->dest->index))
4528 all_son_irr = false;
4529 break;
4531 else
4532 all_son_irr = true;
4534 if (all_son_irr)
4536 /* Add block to new_irr if it hasn't already been processed. */
4537 if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
4539 bitmap_set_bit (new_irr, bb->index);
4540 this_irr = true;
4545 /* Propagate down to everyone we immediately dominate. */
4546 if (this_irr)
4548 basic_block son;
4549 for (son = first_dom_son (CDI_DOMINATORS, bb);
4550 son;
4551 son = next_dom_son (CDI_DOMINATORS, son))
4553 /* Make sure block is actually in a TM region, and it
4554 isn't already in old_irr. */
4555 if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
4556 && bitmap_bit_p (all_region_blocks, son->index))
4557 bitmap_set_bit (new_irr, son->index);
4561 while (!bbs.is_empty ());
4563 BITMAP_FREE (all_region_blocks);
4564 bbs.release ();
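/* A small example of the propagation (a sketch): for a diamond inside
   the region,

            bb1
           /   \
         bb2   bb3
           \   /
            bb4

   if the scan marked both bb2 and bb3 in NEW_IRR, the upward step also
   marks bb1 (all of its successors are irrevocable), and the downward
   walk over the dominator tree then marks the join bb4 as well.  */

/* Walk block BB and, for every call to a function that may have a
   transactional clone, decrement that callee's recorded caller count
   (the clone or the normal count, per FOR_CLONE).  Used for blocks
   that have just been found to be irrevocable.  */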
4567 static void
4568 ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
4570 gimple_stmt_iterator gsi;
4572 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4574 gimple *stmt = gsi_stmt (gsi);
4575 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4577 tree fndecl = gimple_call_fndecl (stmt);
4578 if (fndecl)
4580 struct tm_ipa_cg_data *d;
4581 unsigned *pcallers;
4582 struct cgraph_node *tnode;
4584 if (is_tm_ending_fndecl (fndecl))
4585 continue;
4586 if (find_tm_replacement_function (fndecl))
4587 continue;
4589 tnode = cgraph_node::get (fndecl);
4590 d = get_cg_data (&tnode, true);
4592 pcallers = (for_clone ? &d->tm_callers_clone
4593 : &d->tm_callers_normal);
4595 gcc_assert (*pcallers > 0);
4596 *pcallers -= 1;
4602 /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
4603 as well as other irrevocable actions such as inline assembly. Mark all
4604 such blocks as irrevocable and decrement the number of calls to
4605 transactional clones. Return true if, for the transactional clone, the
4606 entire function is irrevocable. */
4608 static bool
4609 ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
4611 struct tm_ipa_cg_data *d;
4612 bitmap new_irr, old_irr;
4613 bool ret = false;
4615 /* Builtin operators (operator new, and such). */
4616 if (DECL_STRUCT_FUNCTION (node->decl) == NULL
4617 || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
4618 return false;
4620 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4621 calculate_dominance_info (CDI_DOMINATORS);
4623 d = get_cg_data (&node, true);
4624 auto_vec<basic_block, 10> queue;
4625 new_irr = BITMAP_ALLOC (&tm_obstack);
4627 /* Scan each tm region, propagating irrevocable status through the tree. */
4628 if (for_clone)
4630 old_irr = d->irrevocable_blocks_clone;
4631 queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4632 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
4634 ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
4635 new_irr,
4636 old_irr, NULL);
4637 ret = bitmap_bit_p (new_irr,
4638 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
4641 else
4643 struct tm_region *region;
4645 old_irr = d->irrevocable_blocks_normal;
4646 for (region = d->all_tm_regions; region; region = region->next)
4648 queue.quick_push (region->entry_block);
4649 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
4650 region->exit_blocks))
4651 ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
4652 region->exit_blocks);
4656 /* If we found any new irrevocable blocks, reduce the call count for
4657 transactional clones within the irrevocable blocks. Save the new
4658 set of irrevocable blocks for next time. */
4659 if (!bitmap_empty_p (new_irr))
4661 bitmap_iterator bmi;
4662 unsigned i;
4664 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4665 ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
4666 for_clone);
4668 if (old_irr)
4670 bitmap_ior_into (old_irr, new_irr);
4671 BITMAP_FREE (new_irr);
4673 else if (for_clone)
4674 d->irrevocable_blocks_clone = new_irr;
4675 else
4676 d->irrevocable_blocks_normal = new_irr;
4678 if (dump_file && new_irr)
4680 const char *dname;
4681 bitmap_iterator bmi;
4682 unsigned i;
4684 dname = lang_hooks.decl_printable_name (current_function_decl, 2);
4685 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4686 fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
4689 else
4690 BITMAP_FREE (new_irr);
4692 pop_cfun ();
4694 return ret;
4697 /* Return true if, for the transactional clone of NODE, any call
4698 may enter irrevocable mode. */
4700 static bool
4701 ipa_tm_mayenterirr_function (struct cgraph_node *node)
4703 struct tm_ipa_cg_data *d;
4704 tree decl;
4705 unsigned flags;
4707 d = get_cg_data (&node, true);
4708 decl = node->decl;
4709 flags = flags_from_decl_or_type (decl);
4711 /* Handle some TM builtins. Ordinarily these aren't actually generated
4712 at this point, but handling these functions when written in by the
4713 user makes it easier to build unit tests. */
4714 if (flags & ECF_TM_BUILTIN)
4715 return false;
4717 /* Filter out all functions that are marked. */
4718 if (flags & ECF_TM_PURE)
4719 return false;
4720 if (is_tm_safe (decl))
4721 return false;
4722 if (is_tm_irrevocable (decl))
4723 return true;
4724 if (is_tm_callable (decl))
4725 return true;
4726 if (find_tm_replacement_function (decl))
4727 return true;
4729 /* If we aren't seeing the final version of the function we don't
4730 know what it will contain at runtime. */
4731 if (node->get_availability () < AVAIL_AVAILABLE)
4732 return true;
4734 /* If the function must go irrevocable, then of course true. */
4735 if (d->is_irrevocable)
4736 return true;
4738 /* If there are any blocks marked irrevocable, then the function
4739 as a whole may enter irrevocable. */
4740 if (d->irrevocable_blocks_clone)
4741 return true;
4743 /* We may have previously marked this function as tm_may_enter_irr;
4744 see pass_diagnose_tm_blocks. */
4745 if (node->local.tm_may_enter_irr)
4746 return true;
4748 /* Recurse on the main body for aliases. In general, this will
4749 result in one of the bits above being set so that we will not
4750 have to recurse next time. */
4751 if (node->alias)
4752 return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));
4754 /* What remains is unmarked local functions without items that force
4755 the function to go irrevocable. */
4756 return false;
4759 /* Diagnose calls from transaction_safe functions to unmarked
4760 functions that are determined to not be safe. */
4762 static void
4763 ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
4765 struct cgraph_edge *e;
4767 for (e = node->callees; e ; e = e->next_callee)
4768 if (!is_tm_callable (e->callee->decl)
4769 && e->callee->local.tm_may_enter_irr)
4770 error_at (gimple_location (e->call_stmt),
4771 "unsafe function call %qD within "
4772 "%<transaction_safe%> function", e->callee->decl);
4775 /* Diagnose calls from atomic transactions to unmarked functions
4776 that are determined to not be safe. */
4778 static void
4779 ipa_tm_diagnose_transaction (struct cgraph_node *node,
4780 struct tm_region *all_tm_regions)
4782 struct tm_region *r;
4784 for (r = all_tm_regions; r ; r = r->next)
4785 if (gimple_transaction_subcode (r->get_transaction_stmt ())
4786 & GTMA_IS_RELAXED)
4788 /* Atomic transactions can be nested inside relaxed. */
4789 if (r->inner)
4790 ipa_tm_diagnose_transaction (node, r->inner);
4792 else
4794 vec<basic_block> bbs;
4795 gimple_stmt_iterator gsi;
4796 basic_block bb;
4797 size_t i;
4799 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
4800 r->irr_blocks, NULL, false);
4802 for (i = 0; bbs.iterate (i, &bb); ++i)
4803 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4805 gimple *stmt = gsi_stmt (gsi);
4806 tree fndecl;
4808 if (gimple_code (stmt) == GIMPLE_ASM)
4810 error_at (gimple_location (stmt),
4811 "asm not allowed in atomic transaction");
4812 continue;
4815 if (!is_gimple_call (stmt))
4816 continue;
4817 fndecl = gimple_call_fndecl (stmt);
4819 /* Indirect function calls have been diagnosed already. */
4820 if (!fndecl)
4821 continue;
4823 /* Stop at the end of the transaction. */
4824 if (is_tm_ending_fndecl (fndecl))
4826 if (bitmap_bit_p (r->exit_blocks, bb->index))
4827 break;
4828 continue;
4831 /* Marked functions have been diagnosed already. */
4832 if (is_tm_pure_call (stmt))
4833 continue;
4834 if (is_tm_callable (fndecl))
4835 continue;
4837 if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
4838 error_at (gimple_location (stmt),
4839 "unsafe function call %qD within "
4840 "atomic transaction", fndecl);
4843 bbs.release ();
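/* For example (a sketch):

       extern void unmarked (void);
       void f (void) { __transaction_atomic { unmarked (); } }

   is rejected with "unsafe function call 'unmarked' within atomic
   transaction", since nothing guarantees a transactional clone of
   unmarked exists.  The same call inside __transaction_relaxed is
   accepted; it merely forces the transaction into serial-irrevocable
   mode.  */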
4847 /* Return a transactionally mangled version of the assembler-name
4848 identifier OLD_ASM_ID. The returned value is an IDENTIFIER_NODE;
4849 the caller need not free anything. */
4851 static tree
4852 tm_mangle (tree old_asm_id)
4854 const char *old_asm_name;
4855 char *tm_name;
4856 void *alloc = NULL;
4857 struct demangle_component *dc;
4858 tree new_asm_id;
4860 /* Determine if the symbol is already a valid C++ mangled name. Do this
4861 even for C, which might be interfacing with C++ code via appropriately
4862 ugly identifiers. */
4863 /* ??? We could probably do just as well checking for "_Z" and be done. */
4864 old_asm_name = IDENTIFIER_POINTER (old_asm_id);
4865 dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
4867 if (dc == NULL)
4869 char length[8];
4871 do_unencoded:
4872 sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
4873 tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
4875 else
4877 old_asm_name += 2; /* Skip _Z */
4879 switch (dc->type)
4881 case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
4882 case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
4883 /* Don't play silly games, you! */
4884 goto do_unencoded;
4886 case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
4887 /* I'd really like to know if we can ever be passed one of
4888 these from the C++ front end. The Logical Thing would
4889 seem that hidden-alias should be outer-most, so that we
4890 get hidden-alias of a transaction-clone and not vice-versa. */
4891 old_asm_name += 2;
4892 break;
4894 default:
4895 break;
4898 tm_name = concat ("_ZGTt", old_asm_name, NULL);
4900 free (alloc);
4902 new_asm_id = get_identifier (tm_name);
4903 free (tm_name);
4905 return new_asm_id;
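/* Two examples of the mapping (illustrative only):

       "read_account"  ->  "_ZGTt12read_account"   (plain C identifier)
       "_Z3foov"       ->  "_ZGTt3foov"            (C++ foo() -> its TM clone)

   i.e. unencoded names become _ZGTt<length><name>, while names that
   are already C++ mangled have their _Z prefix rewritten to _ZGTt, the
   transaction-clone component.  */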
4908 static inline void
4909 ipa_tm_mark_force_output_node (struct cgraph_node *node)
4911 node->mark_force_output ();
4912 node->analyzed = true;
4915 static inline void
4916 ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
4918 node->forced_by_abi = true;
4919 node->analyzed = true;
4922 /* Callback data for ipa_tm_create_version_alias. */
4923 struct create_version_alias_info
4925 struct cgraph_node *old_node;
4926 tree new_decl;
4929 /* A subroutine of ipa_tm_create_version, called via
4930 cgraph_for_node_and_aliases. Create new tm clones for each of
4931 the existing aliases. */
4932 static bool
4933 ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
4935 struct create_version_alias_info *info
4936 = (struct create_version_alias_info *)data;
4937 tree old_decl, new_decl, tm_name;
4938 struct cgraph_node *new_node;
4940 if (!node->cpp_implicit_alias)
4941 return false;
4943 old_decl = node->decl;
4944 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4945 new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
4946 TREE_CODE (old_decl), tm_name,
4947 TREE_TYPE (old_decl));
4949 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4950 SET_DECL_RTL (new_decl, NULL);
4952 /* Based loosely on C++'s make_alias_for(). */
4953 TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
4954 DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
4955 DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
4956 TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
4957 DECL_EXTERNAL (new_decl) = 0;
4958 DECL_ARTIFICIAL (new_decl) = 1;
4959 TREE_ADDRESSABLE (new_decl) = 1;
4960 TREE_USED (new_decl) = 1;
4961 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4963 /* Perform the same remapping to the comdat group. */
4964 if (DECL_ONE_ONLY (new_decl))
4965 varpool_node::get (new_decl)->set_comdat_group
4966 (tm_mangle (decl_comdat_group_id (old_decl)));
4968 new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
4969 new_node->tm_clone = true;
4970 new_node->externally_visible = info->old_node->externally_visible;
4971 new_node->no_reorder = info->old_node->no_reorder;
4972 /* ?? Do not traverse aliases here. */
4973 get_cg_data (&node, false)->clone = new_node;
4975 record_tm_clone_pair (old_decl, new_decl);
4977 if (info->old_node->force_output
4978 || info->old_node->ref_list.first_referring ())
4979 ipa_tm_mark_force_output_node (new_node);
4980 if (info->old_node->forced_by_abi)
4981 ipa_tm_mark_forced_by_abi_node (new_node);
4982 return false;
4985 /* Create a copy of the function (possibly declaration only) of OLD_NODE,
4986 appropriate for the transactional clone. */
4988 static void
4989 ipa_tm_create_version (struct cgraph_node *old_node)
4991 tree new_decl, old_decl, tm_name;
4992 struct cgraph_node *new_node;
4994 old_decl = old_node->decl;
4995 new_decl = copy_node (old_decl);
4997 /* DECL_ASSEMBLER_NAME needs to be set before we call
4998 cgraph_copy_node_for_versioning below, because cgraph_node will
4999 fill the assembler_name_hash. */
5000 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
5001 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
5002 SET_DECL_RTL (new_decl, NULL);
5003 TREE_SYMBOL_REFERENCED (tm_name) = 1;
5005 /* Perform the same remapping to the comdat group. */
5006 if (DECL_ONE_ONLY (new_decl))
5007 varpool_node::get (new_decl)->set_comdat_group
5008 (tm_mangle (DECL_COMDAT_GROUP (old_decl)));
5010 gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
5011 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
5012 new_node->local.local = false;
5013 new_node->externally_visible = old_node->externally_visible;
5014 new_node->lowered = true;
5015 new_node->tm_clone = 1;
5016 if (!old_node->implicit_section)
5017 new_node->set_section (old_node->get_section ());
5018 get_cg_data (&old_node, true)->clone = new_node;
5020 if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
5022 /* Remap extern inline to static inline. */
5023 /* ??? Is it worth trying to use make_decl_one_only? */
5024 if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
5026 DECL_EXTERNAL (new_decl) = 0;
5027 TREE_PUBLIC (new_decl) = 0;
5028 DECL_WEAK (new_decl) = 0;
5031 tree_function_versioning (old_decl, new_decl,
5032 NULL, false, NULL,
5033 false, NULL, NULL);
5036 record_tm_clone_pair (old_decl, new_decl);
5038 symtab->call_cgraph_insertion_hooks (new_node);
5039 if (old_node->force_output
5040 || old_node->ref_list.first_referring ())
5041 ipa_tm_mark_force_output_node (new_node);
5042 if (old_node->forced_by_abi)
5043 ipa_tm_mark_forced_by_abi_node (new_node);
5045 /* Do the same thing, but for any aliases of the original node. */
5047 struct create_version_alias_info data;
5048 data.old_node = old_node;
5049 data.new_decl = new_decl;
5050 old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
5051 &data, true);
5055 /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */
5057 static void
5058 ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
5059 basic_block bb)
5061 gimple_stmt_iterator gsi;
5062 gcall *g;
5064 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
5066 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
5067 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
5069 split_block_after_labels (bb);
5070 gsi = gsi_after_labels (bb);
5071 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
5073 node->create_edge (cgraph_node::get_create
5074 (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
5075 g, 0,
5076 compute_call_stmt_bb_frequency (node->decl,
5077 gimple_bb (g)));
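/* The effect, roughly (a sketch of the resulting block layout):

       <bb N>:
       <call to BUILT_IN_TM_IRREVOCABLE> (MODE_SERIALIRREVOCABLE);
       ... original statements of BB ...

   together with GTMA_MAY_ENTER_IRREVOCABLE on the region's subcode, so
   the runtime switches to serial-irrevocable mode before the
   irrevocable code is reached.  */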
5080 /* Construct a call to TM_GETTMCLONE and insert it before GSI. */
5082 static bool
5083 ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
5084 struct tm_region *region,
5085 gimple_stmt_iterator *gsi, gcall *stmt)
5087 tree gettm_fn, ret, old_fn, callfn;
5088 gcall *g;
5089 gassign *g2;
5090 bool safe;
5092 old_fn = gimple_call_fn (stmt);
5094 if (TREE_CODE (old_fn) == ADDR_EXPR)
5096 tree fndecl = TREE_OPERAND (old_fn, 0);
5097 tree clone = get_tm_clone_pair (fndecl);
5099 /* By transforming the call into a TM_GETTMCLONE, we are
5100 technically taking the address of the original function and
5101 its clone. Explain this so inlining will know this function
5102 is needed. */
5103 cgraph_node::get (fndecl)->mark_address_taken ();
5104 if (clone)
5105 cgraph_node::get (clone)->mark_address_taken ();
5108 safe = is_tm_safe (TREE_TYPE (old_fn));
5109 gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
5110 : BUILT_IN_TM_GETTMCLONE_IRR);
5111 ret = create_tmp_var (ptr_type_node);
5113 if (!safe)
5114 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
5116 /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */
5117 if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
5118 old_fn = OBJ_TYPE_REF_EXPR (old_fn);
5120 g = gimple_build_call (gettm_fn, 1, old_fn);
5121 ret = make_ssa_name (ret, g);
5122 gimple_call_set_lhs (g, ret);
5124 gsi_insert_before (gsi, g, GSI_SAME_STMT);
5126 node->create_edge (cgraph_node::get_create (gettm_fn), g, 0,
5127 compute_call_stmt_bb_frequency (node->decl,
5128 gimple_bb (g)));
5130 /* Cast return value from tm_gettmclone* into appropriate function
5131 pointer. */
5132 callfn = create_tmp_var (TREE_TYPE (old_fn));
5133 g2 = gimple_build_assign (callfn,
5134 fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
5135 callfn = make_ssa_name (callfn, g2);
5136 gimple_assign_set_lhs (g2, callfn);
5137 gsi_insert_before (gsi, g2, GSI_SAME_STMT);
5139 /* ??? This is a hack to preserve the NOTHROW bit on the call,
5140 which we would have derived from the decl. Failure to save
5141 this bit means we might have to split the basic block. */
5142 if (gimple_call_nothrow_p (stmt))
5143 gimple_call_set_nothrow (stmt, true);
5145 gimple_call_set_fn (stmt, callfn);
5147 /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
5148 for a call statement. Fix it. */
5150 tree lhs = gimple_call_lhs (stmt);
5151 tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
5152 if (lhs
5153 && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
5155 tree temp;
5157 temp = create_tmp_reg (rettype);
5158 gimple_call_set_lhs (stmt, temp);
5160 g2 = gimple_build_assign (lhs,
5161 fold_build1 (VIEW_CONVERT_EXPR,
5162 TREE_TYPE (lhs), temp));
5163 gsi_insert_after (gsi, g2, GSI_SAME_STMT);
5167 update_stmt (stmt);
5168 cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
5169 if (e && e->indirect_info)
5170 e->indirect_info->polymorphic = false;
5172 return true;
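/* The net transformation for an indirect call (a sketch; SSA names and
   types simplified):

       fnptr (args);

   becomes

       ret = <BUILT_IN_TM_GETTMCLONE_IRR> (fnptr);  // _SAFE for tm_safe types
       callfn = (call_type) ret;
       callfn (args);

   so the runtime either returns the transactional clone of the callee
   or arranges for the transaction to go serial-irrevocable.  */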
5175 /* Helper function for ipa_tm_transform_calls*. Given a call
5176 statement in GSI which resides inside transaction REGION, redirect
5177 the call to either its wrapper function, or its clone. */
5179 static void
5180 ipa_tm_transform_calls_redirect (struct cgraph_node *node,
5181 struct tm_region *region,
5182 gimple_stmt_iterator *gsi,
5183 bool *need_ssa_rename_p)
5185 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
5186 struct cgraph_node *new_node;
5187 struct cgraph_edge *e = node->get_edge (stmt);
5188 tree fndecl = gimple_call_fndecl (stmt);
5190 /* For indirect calls, pass the address through the runtime. */
5191 if (fndecl == NULL)
5193 *need_ssa_rename_p |=
5194 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5195 return;
5198 /* Handle some TM builtins. Ordinarily these aren't actually generated
5199 at this point, but handling these functions when written in by the
5200 user makes it easier to build unit tests. */
5201 if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
5202 return;
5204 /* Fixup recursive calls inside clones. */
5205 /* ??? Why did cgraph_copy_node_for_versioning update the call edges
5206 for recursion but not update the call statements themselves? */
5207 if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
5209 gimple_call_set_fndecl (stmt, current_function_decl);
5210 return;
5213 /* If there is a replacement, use it. */
5214 fndecl = find_tm_replacement_function (fndecl);
5215 if (fndecl)
5217 new_node = cgraph_node::get_create (fndecl);
5219 /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
5221 We can't do this earlier in record_tm_replacement because
5222 cgraph_remove_unreachable_nodes is called before we inject
5223 references to the node. Further, we can't do this in some
5224 nice central place in ipa_tm_execute because we don't have
5225 the exact list of wrapper functions that would be used.
5226 Marking more wrappers than necessary results in the creation
5227 of unnecessary cgraph_nodes, which can cause some of the
5228 other IPA passes to crash.
5230 We do need to mark these nodes so that we get the proper
5231 result in expand_call_tm. */
5232 /* ??? This seems broken. How is it that we're marking the
5233 CALLEE as may_enter_irr? Surely we should be marking the
5234 CALLER. Also note that find_tm_replacement_function also
5235 contains mappings into the TM runtime, e.g. memcpy. These
5236 we know won't go irrevocable. */
5237 new_node->local.tm_may_enter_irr = 1;
5239 else
5241 struct tm_ipa_cg_data *d;
5242 struct cgraph_node *tnode = e->callee;
5244 d = get_cg_data (&tnode, true);
5245 new_node = d->clone;
5247 /* As we've already skipped pure calls and appropriate builtins,
5248 and we've already marked irrevocable blocks, if we can't come
5249 up with a static replacement, then ask the runtime. */
5250 if (new_node == NULL)
5252 *need_ssa_rename_p |=
5253 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5254 return;
5257 fndecl = new_node->decl;
5260 e->redirect_callee (new_node);
5261 gimple_call_set_fndecl (stmt, fndecl);
5264 /* Helper function for ipa_tm_transform_calls. For a given BB,
5265 install calls to tm_irrevocable when IRR_BLOCKS are reached,
5266 redirect other calls to the generated transactional clone. */
5268 static bool
5269 ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
5270 basic_block bb, bitmap irr_blocks)
5272 gimple_stmt_iterator gsi;
5273 bool need_ssa_rename = false;
5275 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5277 ipa_tm_insert_irr_call (node, region, bb);
5278 return true;
5281 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5283 gimple *stmt = gsi_stmt (gsi);
5285 if (!is_gimple_call (stmt))
5286 continue;
5287 if (is_tm_pure_call (stmt))
5288 continue;
5290 /* Redirect edges to the appropriate replacement or clone. */
5291 ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
5294 return need_ssa_rename;
5297 /* Walk the CFG for REGION, beginning at BB. Install calls to
5298 tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
5299 the generated transactional clone. */
5301 static bool
5302 ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
5303 basic_block bb, bitmap irr_blocks)
5305 bool need_ssa_rename = false;
5306 edge e;
5307 edge_iterator ei;
5308 auto_vec<basic_block> queue;
5309 bitmap visited_blocks = BITMAP_ALLOC (NULL);
5311 queue.safe_push (bb);
5314 bb = queue.pop ();
5316 need_ssa_rename |=
5317 ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
5319 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5320 continue;
5322 if (region && bitmap_bit_p (region->exit_blocks, bb->index))
5323 continue;
5325 FOR_EACH_EDGE (e, ei, bb->succs)
5326 if (!bitmap_bit_p (visited_blocks, e->dest->index))
5328 bitmap_set_bit (visited_blocks, e->dest->index);
5329 queue.safe_push (e->dest);
5332 while (!queue.is_empty ());
5334 BITMAP_FREE (visited_blocks);
5336 return need_ssa_rename;
5339 /* Transform the calls within the TM regions within NODE. */
5341 static void
5342 ipa_tm_transform_transaction (struct cgraph_node *node)
5344 struct tm_ipa_cg_data *d;
5345 struct tm_region *region;
5346 bool need_ssa_rename = false;
5348 d = get_cg_data (&node, true);
5350 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5351 calculate_dominance_info (CDI_DOMINATORS);
5353 for (region = d->all_tm_regions; region; region = region->next)
5355 /* If we're sure to go irrevocable, don't transform anything. */
5356 if (d->irrevocable_blocks_normal
5357 && bitmap_bit_p (d->irrevocable_blocks_normal,
5358 region->entry_block->index))
5360 transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
5361 | GTMA_MAY_ENTER_IRREVOCABLE
5362 | GTMA_HAS_NO_INSTRUMENTATION);
5363 continue;
5366 need_ssa_rename |=
5367 ipa_tm_transform_calls (node, region, region->entry_block,
5368 d->irrevocable_blocks_normal);
5371 if (need_ssa_rename)
5372 update_ssa (TODO_update_ssa_only_virtuals);
5374 pop_cfun ();
5377 /* Transform the calls within the transactional clone of NODE. */
5379 static void
5380 ipa_tm_transform_clone (struct cgraph_node *node)
5382 struct tm_ipa_cg_data *d;
5383 bool need_ssa_rename;
5385 d = get_cg_data (&node, true);
5387 /* If this function makes no calls and has no irrevocable blocks,
5388 then there's nothing to do. */
5389 /* ??? Remove non-aborting top-level transactions. */
5390 if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
5391 return;
5393 push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
5394 calculate_dominance_info (CDI_DOMINATORS);
5396 need_ssa_rename =
5397 ipa_tm_transform_calls (d->clone, NULL,
5398 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
5399 d->irrevocable_blocks_clone);
5401 if (need_ssa_rename)
5402 update_ssa (TODO_update_ssa_only_virtuals);
5404 pop_cfun ();
5407 /* Main entry point for the transactional memory IPA pass. */
5409 static unsigned int
5410 ipa_tm_execute (void)
5412 cgraph_node_queue tm_callees = cgraph_node_queue ();
5413 /* List of functions that will go irrevocable. */
5414 cgraph_node_queue irr_worklist = cgraph_node_queue ();
5416 struct cgraph_node *node;
5417 struct tm_ipa_cg_data *d;
5418 enum availability a;
5419 unsigned int i;
5421 cgraph_node::checking_verify_cgraph_nodes ();
5423 bitmap_obstack_initialize (&tm_obstack);
5424 initialize_original_copy_tables ();
5426 /* For all local functions marked tm_callable, queue them. */
5427 FOR_EACH_DEFINED_FUNCTION (node)
5428 if (is_tm_callable (node->decl)
5429 && node->get_availability () >= AVAIL_INTERPOSABLE)
5431 d = get_cg_data (&node, true);
5432 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5435 /* For all local reachable functions... */
5436 FOR_EACH_DEFINED_FUNCTION (node)
5437 if (node->lowered
5438 && node->get_availability () >= AVAIL_INTERPOSABLE)
5440 /* ... marked tm_pure, record that fact for the runtime by
5441 indicating that the pure function is its own tm_callable.
5442 No need to do this if the function's address can't be taken. */
5443 if (is_tm_pure (node->decl))
5445 if (!node->local.local)
5446 record_tm_clone_pair (node->decl, node->decl);
5447 continue;
5450 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5451 calculate_dominance_info (CDI_DOMINATORS);
5453 tm_region_init (NULL);
5454 if (all_tm_regions)
5456 d = get_cg_data (&node, true);
5458 /* Scan for calls that are in each transaction, and
5459 generate the uninstrumented code path. */
5460 ipa_tm_scan_calls_transaction (d, &tm_callees);
5462 /* Put it in the worklist so we can scan the function
5463 later (ipa_tm_scan_irr_function) and mark the
5464 irrevocable blocks. */
5465 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5466 d->want_irr_scan_normal = true;
5469 pop_cfun ();
5472 /* For every local function on the callee list, scan as if we will be
5473 creating a transactional clone, queueing all new functions we find
5474 along the way. */
5475 for (i = 0; i < tm_callees.length (); ++i)
5477 node = tm_callees[i];
5478 a = node->get_availability ();
5479 d = get_cg_data (&node, true);
5481 /* Put it in the worklist so we can scan the function later
5482 (ipa_tm_scan_irr_function) and mark the irrevocable
5483 blocks. */
5484 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5486 /* Some callees cannot be arbitrarily cloned. These will always be
5487 irrevocable. Mark these now, so that we need not scan them. */
5488 if (is_tm_irrevocable (node->decl))
5489 ipa_tm_note_irrevocable (node, &irr_worklist);
5490 else if (a <= AVAIL_NOT_AVAILABLE
5491 && !is_tm_safe_or_pure (node->decl))
5492 ipa_tm_note_irrevocable (node, &irr_worklist);
5493 else if (a >= AVAIL_INTERPOSABLE)
5495 if (!tree_versionable_function_p (node->decl))
5496 ipa_tm_note_irrevocable (node, &irr_worklist);
5497 else if (!d->is_irrevocable)
5499 /* If this is an alias, make sure its base is queued as well.
5500 We need not scan the callees now, as the base will do. */
5501 if (node->alias)
5503 node = cgraph_node::get (node->thunk.alias);
5504 d = get_cg_data (&node, true);
5505 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5506 continue;
5509 /* Add all nodes called by this function into
5510 tm_callees as well. */
5511 ipa_tm_scan_calls_clone (node, &tm_callees);
5516 /* Iterate scans until no more work to be done. Prefer not to use
5517 vec::pop because the worklist tends to follow a breadth-first
5518 search of the callgraph, which should allow convergence with a
5519 minimum number of scans. But we also don't want the worklist
5520 array to grow without bound, so we shift the array up periodically. */
5521 for (i = 0; i < irr_worklist.length (); ++i)
5523 if (i > 256 && i == irr_worklist.length () / 8)
5525 irr_worklist.block_remove (0, i);
5526 i = 0;
5529 node = irr_worklist[i];
5530 d = get_cg_data (&node, true);
5531 d->in_worklist = false;
5533 if (d->want_irr_scan_normal)
5535 d->want_irr_scan_normal = false;
5536 ipa_tm_scan_irr_function (node, false);
5538 if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
5539 ipa_tm_note_irrevocable (node, &irr_worklist);
5542 /* For every function on the callee list, collect the tm_may_enter_irr
5543 bit on the node. */
5544 irr_worklist.truncate (0);
5545 for (i = 0; i < tm_callees.length (); ++i)
5547 node = tm_callees[i];
5548 if (ipa_tm_mayenterirr_function (node))
5550 d = get_cg_data (&node, true);
5551 gcc_assert (d->in_worklist == false);
5552 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5556 /* Propagate the tm_may_enter_irr bit to callers until stable. */
5557 for (i = 0; i < irr_worklist.length (); ++i)
5559 struct cgraph_node *caller;
5560 struct cgraph_edge *e;
5561 struct ipa_ref *ref;
5563 if (i > 256 && i == irr_worklist.length () / 8)
5565 irr_worklist.block_remove (0, i);
5566 i = 0;
5569 node = irr_worklist[i];
5570 d = get_cg_data (&node, true);
5571 d->in_worklist = false;
5572 node->local.tm_may_enter_irr = true;
5574 /* Propagate back to normal callers. */
5575 for (e = node->callers; e ; e = e->next_caller)
5577 caller = e->caller;
5578 if (!is_tm_safe_or_pure (caller->decl)
5579 && !caller->local.tm_may_enter_irr)
5581 d = get_cg_data (&caller, true);
5582 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5586 /* Propagate back to referring aliases as well. */
5587 FOR_EACH_ALIAS (node, ref)
5589 caller = dyn_cast<cgraph_node *> (ref->referring);
5590 if (!caller->local.tm_may_enter_irr)
5592 /* ?? Do not traverse aliases here. */
5593 d = get_cg_data (&caller, false);
5594 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5599 /* Now validate all tm_safe functions, and all atomic regions in
5600 other functions. */
5601 FOR_EACH_DEFINED_FUNCTION (node)
5602 if (node->lowered
5603 && node->get_availability () >= AVAIL_INTERPOSABLE)
5605 d = get_cg_data (&node, true);
5606 if (is_tm_safe (node->decl))
5607 ipa_tm_diagnose_tm_safe (node);
5608 else if (d->all_tm_regions)
5609 ipa_tm_diagnose_transaction (node, d->all_tm_regions);
5612 /* Create clones. Do those that are not irrevocable and have a
5613 positive call count. Do those publicly visible functions that
5614 the user directed us to clone. */
5615 for (i = 0; i < tm_callees.length (); ++i)
5617 bool doit = false;
5619 node = tm_callees[i];
5620 if (node->cpp_implicit_alias)
5621 continue;
5623 a = node->get_availability ();
5624 d = get_cg_data (&node, true);
5626 if (a <= AVAIL_NOT_AVAILABLE)
5627 doit = is_tm_callable (node->decl);
5628 else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
5629 doit = true;
5630 else if (!d->is_irrevocable
5631 && d->tm_callers_normal + d->tm_callers_clone > 0)
5632 doit = true;
5634 if (doit)
5635 ipa_tm_create_version (node);
5638 /* Redirect calls to the new clones, and insert irrevocable marks. */
5639 for (i = 0; i < tm_callees.length (); ++i)
5641 node = tm_callees[i];
5642 if (node->analyzed)
5644 d = get_cg_data (&node, true);
5645 if (d->clone)
5646 ipa_tm_transform_clone (node);
5649 FOR_EACH_DEFINED_FUNCTION (node)
5650 if (node->lowered
5651 && node->get_availability () >= AVAIL_INTERPOSABLE)
5653 d = get_cg_data (&node, true);
5654 if (d->all_tm_regions)
5655 ipa_tm_transform_transaction (node);
5658 /* Free and clear all data structures. */
5659 tm_callees.release ();
5660 irr_worklist.release ();
5661 bitmap_obstack_release (&tm_obstack);
5662 free_original_copy_tables ();
5664 FOR_EACH_FUNCTION (node)
5665 node->aux = NULL;
5667 cgraph_node::checking_verify_cgraph_nodes ();
5669 return 0;
5672 namespace {
5674 const pass_data pass_data_ipa_tm =
5676 SIMPLE_IPA_PASS, /* type */
5677 "tmipa", /* name */
5678 OPTGROUP_NONE, /* optinfo_flags */
5679 TV_TRANS_MEM, /* tv_id */
5680 ( PROP_ssa | PROP_cfg ), /* properties_required */
5681 0, /* properties_provided */
5682 0, /* properties_destroyed */
5683 0, /* todo_flags_start */
5684 0, /* todo_flags_finish */
5687 class pass_ipa_tm : public simple_ipa_opt_pass
5689 public:
5690 pass_ipa_tm (gcc::context *ctxt)
5691 : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
5694 /* opt_pass methods: */
5695 virtual bool gate (function *) { return flag_tm; }
5696 virtual unsigned int execute (function *) { return ipa_tm_execute (); }
5698 }; // class pass_ipa_tm
5700 } // anon namespace
5702 simple_ipa_opt_pass *
5703 make_pass_ipa_tm (gcc::context *ctxt)
5705 return new pass_ipa_tm (ctxt);
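/* Usage note (a sketch, assuming the usual per-pass dump-flag naming):
   the IPA pass is gated on -fgnu-tm, and its cloning and
   irrevocability decisions can be inspected in its dump, e.g.

       gcc -fgnu-tm -O2 -fdump-ipa-tmipa foo.c

   which contains the "bb N goes irrevocable" notes printed above.  */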
5708 #include "gt-trans-mem.h"