gcc/trans-mem.c
/* Passes for transactional memory support.
   Copyright (C) 2008-2013 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "hash-table.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "tree-flow.h"
27 #include "tree-pass.h"
28 #include "tree-inline.h"
29 #include "diagnostic-core.h"
30 #include "demangle.h"
31 #include "output.h"
32 #include "trans-mem.h"
33 #include "params.h"
34 #include "target.h"
35 #include "langhooks.h"
36 #include "gimple-pretty-print.h"
37 #include "cfgloop.h"
#define PROB_VERY_UNLIKELY	(REG_BR_PROB_BASE / 2000 - 1)
#define PROB_VERY_LIKELY	(PROB_ALWAYS - PROB_VERY_UNLIKELY)
#define PROB_UNLIKELY		(REG_BR_PROB_BASE / 5 - 1)
#define PROB_LIKELY		(PROB_ALWAYS - PROB_UNLIKELY)
#define PROB_ALWAYS		(REG_BR_PROB_BASE)
#define A_RUNINSTRUMENTEDCODE	0x0001
#define A_RUNUNINSTRUMENTEDCODE	0x0002
#define A_SAVELIVEVARIABLES	0x0004
#define A_RESTORELIVEVARIABLES	0x0008
#define A_ABORTTRANSACTION	0x0010

#define AR_USERABORT		0x0001
#define AR_USERRETRY		0x0002
#define AR_TMCONFLICT		0x0004
#define AR_EXCEPTIONBLOCKABORT	0x0008
#define AR_OUTERABORT		0x0010

#define MODE_SERIALIRREVOCABLE	0x0000
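
/* For illustration, a sketch (not verbatim from any pass) of how the
   A_* bits are consumed: they mirror the libitm ABI properties, and
   the value returned by BUILT_IN_TM_START is tested against them when
   the transaction is expanded:

	x = __builtin___tm_start (...);
	if (x & A_ABORTTRANSACTION)
	  goto over;			// aborted; skip the body
	if (x & A_RESTORELIVEVARIABLES)
	  ...				// replay the save/restore log

   The AR_* codes are the abort reasons passed to BUILT_IN_TM_ABORT.  */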
/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction {
	  local++;
	  if (++global == 10)
	    __tm_abort;
	}

  During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
  trivially replaced with a GIMPLE_TRANSACTION node.

  During pass_lower_tm, we examine the body of transactions looking
  for aborts.  Transactions that do not contain an abort may be
  merged into an outer transaction.  We also add a TRY-FINALLY node
  to arrange for the transaction to be committed on any exit.

  [??? Think about how this arrangement affects throw-with-commit
  and throw-with-abort operations.  In this case we want the TRY to
  handle gotos, but not to catch any exceptions because the transaction
  will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    local = local + 1;
	    t0 = global;
	    t1 = t0 + 1;
	    global = t1;
	    if (t1 == 10)
	      __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

  During pass_lower_eh, we create EH regions for the transactions,
  intermixed with the regular EH stuff.  This gives us a nice persistent
  mapping (all the way through rtl) from transactional memory operation
  back to the transaction, which allows us to get the abnormal edges
  correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	local = local + 1;
	t0 = global;
	t1 = t0 + 1;
	global = t1;
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:

  This is the end of all_lowering_passes, and so is what is present
  during the IPA passes, and through all of the optimization passes.

  During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
  functions and mark functions for cloning.

  At the end of gimple optimization, before exiting SSA form,
  pass_tm_edges replaces statements that perform transactional
  memory operations with the appropriate TM builtins, and swaps
  out function calls with their transactional clones.  At this
  point we introduce the abnormal transaction restart edges and
  complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	eh_label:
	if (x & abort_transaction)
	  goto over;
	local = local + 1;
	t0 = __builtin___tm_load (global);
	t1 = t0 + 1;
	__builtin___tm_store (&global, t1);
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:
*/
static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);
/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));
      break;

    default:
      if (TYPE_P (x))
	return NULL;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}
/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}
/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}
/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}
/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}
/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}
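
/* For illustration, a sketch of the attribute ladder these predicates
   walk, assuming the standard TM attribute spellings:

	void f1 (void) __attribute__((transaction_safe));
	void f2 (void) __attribute__((transaction_may_cancel_outer));
	void f3 (void) __attribute__((transaction_callable));
	void f4 (void) __attribute__((transaction_unsafe));

   is_tm_safe accepts f1 and f2; is_tm_callable accepts f1, f2 and f3;
   is_tm_irrevocable accepts f4.  */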
/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}
/* Return true for built-in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}
/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}
/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}
/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}
/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}
/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}
/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}
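
/* For illustration, a rough sketch of the result: for

	__transaction_atomic { ... __transaction_cancel; ... }

   the front end calls build_tm_abort_call and the cancel becomes, in
   the notation used at the top of this file,

	__builtin___tm_abort (AR_USERABORT);

   with AR_OUTERABORT or'd into the argument when the cancel targets an
   outer transaction.  */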
/* Common gating function for several of the TM passes.  */

static bool
gate_tm (void)
{
  return flag_tm;
}
/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
  htab_t tm_wrap_map;

void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);

  h = ggc_alloc_tree_map ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = (struct tree_map **)
    htab_find_slot_with_hash (tm_wrap_map, h, h->hash, INSERT);
  *slot = h;
}
/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = (struct tree_map *) htab_find_with_hash (tm_wrap_map, &in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we already have these defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here:  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	return NULL;
      }

  return NULL;
}
/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */

void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing...  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}
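
/* For illustration, a sketch of the effect: once the replacement is
   recorded, a call such as

	__transaction_relaxed { p = malloc (32); }

   is redirected by the TM passes to the transactional allocator
   (BUILT_IN_TM_MALLOC), so the runtime can release the memory if the
   transaction rolls back.  TREE_NOTHROW is cleared above so the
   replacement is not assumed to be nothrow.  */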
/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple stmt;
};
/* Return true if T is a volatile variable of some kind.  */

static bool
volatile_var_p (tree t)
{
  return (SSA_VAR_P (t)
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}
/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (volatile_var_p (*tp)
      && d->block_flags & DIAG_TM_SAFE
      && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      error_at (gimple_location (d->stmt),
		"invalid volatile use of %qD inside transaction",
		*tp);
    }

  return NULL_TREE;
}
static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  /* Save stmt for use in leaf analysis.  */
  d->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fn = gimple_call_fn (stmt);

	if ((d->summary_flags & DIAG_TM_OUTER) == 0
	    && is_tm_may_cancel_outer (fn))
	  error_at (gimple_location (stmt),
		    "%<transaction_may_cancel_outer%> function call not within"
		    " outer transaction or %<transaction_may_cancel_outer%>");

	if (d->summary_flags & DIAG_TM_SAFE)
	  {
	    bool is_safe, direct_call_p;
	    tree replacement;

	    if (TREE_CODE (fn) == ADDR_EXPR
		&& TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
	      {
		direct_call_p = true;
		replacement = TREE_OPERAND (fn, 0);
		replacement = find_tm_replacement_function (replacement);
		if (replacement)
		  fn = replacement;
	      }
	    else
	      {
		direct_call_p = false;
		replacement = NULL_TREE;
	      }

	    if (is_tm_safe_or_pure (fn))
	      is_safe = true;
	    else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
	      {
		/* A function explicitly marked transaction_callable as
		   opposed to transaction_safe is being defined to be
		   unsafe as part of its ABI, regardless of its contents.  */
		is_safe = false;
	      }
	    else if (direct_call_p)
	      {
		if (flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
		  is_safe = true;
		else if (replacement)
		  {
		    /* ??? At present we've been considering replacements
		       merely transaction_callable, and therefore might
		       enter irrevocable.  The tm_wrap attribute has not
		       yet made it into the new language spec.  */
		    is_safe = false;
		  }
		else
		  {
		    /* ??? Diagnostics for unmarked direct calls moved into
		       the IPA pass.  Section 3.2 of the spec details how
		       functions not marked should be considered "implicitly
		       safe" based on having examined the function body.  */
		    is_safe = true;
		  }
	      }
	    else
	      {
		/* An unmarked indirect call.  Consider it unsafe even
		   though optimization may yet figure out how to inline.  */
		is_safe = false;
	      }

	    if (!is_safe)
	      {
		if (TREE_CODE (fn) == ADDR_EXPR)
		  fn = TREE_OPERAND (fn, 0);
		if (d->block_flags & DIAG_TM_SAFE)
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"atomic transaction", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "atomic transaction", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "atomic transaction");
		      }
		  }
		else
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"%<transaction_safe%> function", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "%<transaction_safe%> function", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "%<transaction_safe%> function");
		      }
		  }
	      }
	  }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
	 asm statements, and then add "transaction_safe" to it.
	 Either that or get the language spec to resurrect __tm_waiver.  */
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in atomic transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
	unsigned char inner_flags = DIAG_TM_SAFE;

	if (gimple_transaction_subcode (stmt) & GTMA_IS_RELAXED)
	  {
	    if (d->block_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in atomic transaction");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in %<transaction_safe%> function");
	    inner_flags = DIAG_TM_RELAXED;
	  }
	else if (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)
	  {
	    if (d->block_flags)
	      error_at (gimple_location (stmt),
			"outer transaction in transaction");
	    else if (d->func_flags & DIAG_TM_OUTER)
	      error_at (gimple_location (stmt),
			"outer transaction in "
			"%<transaction_may_cancel_outer%> function");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"outer transaction in %<transaction_safe%> function");
	    inner_flags |= DIAG_TM_OUTER;
	  }

	*handled_ops_p = true;
	if (gimple_transaction_body (stmt))
	  {
	    struct walk_stmt_info wi_inner;
	    struct diagnose_tm d_inner;

	    memset (&d_inner, 0, sizeof (d_inner));
	    d_inner.func_flags = d->func_flags;
	    d_inner.block_flags = d->block_flags | inner_flags;
	    d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;

	    memset (&wi_inner, 0, sizeof (wi_inner));
	    wi_inner.info = &d_inner;

	    walk_gimple_seq (gimple_transaction_body (stmt),
			     diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
	  }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}
static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
		   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}
struct gimple_opt_pass pass_diagnose_tm_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_tm_blocks",		/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  gate_tm,				/* gate */
  diagnose_tm_blocks,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TRANS_MEM,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};
/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

       struct large { int x[1000]; };
       struct large lala = { 0 };
       __transaction {
	 lala.x[i] = 123;
	 ...
       }

   We can either save/restore:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       if (trxn & a_saveLiveVariables)
	 tmp_lala1 = lala.x[i];
       else if (a & a_restoreLiveVariables)
	 lala.x[i] = tmp_lala1;

   or use the logging functions:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       _ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree as possible, to shadow all of the writes
   to a given location (thus reducing the total number of logging
   calls), but not so high as to be called on a path that does not
   perform a write.  */
/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
typedef struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  gimple_vec stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
} *tm_log_entry_t;
/* Log entry hashtable helpers.  */

struct log_entry_hasher
{
  typedef tm_log_entry value_type;
  typedef tm_log_entry compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};
/* Htab support.  Return hash value for a `tm_log_entry'.  */
inline hashval_t
log_entry_hasher::hash (const value_type *log)
{
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
inline bool
log_entry_hasher::equal (const value_type *log1, const compare_type *log2)
{
  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
inline void
log_entry_hasher::remove (value_type *lp)
{
  lp->stmts.release ();
  free (lp);
}
/* The actual log.  */
static hash_table <log_entry_hasher> tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static vec<tree> tm_log_save_addresses;
enum thread_memory_type
  {
    mem_non_local = 0,
    mem_thread_local,
    mem_transaction_local,
    mem_max
  };

typedef struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
} tm_new_mem_map_t;
/* Hashtable helpers.  */

struct tm_mem_map_hasher : typed_free_remove <tm_new_mem_map_t>
{
  typedef tm_new_mem_map_t value_type;
  typedef tm_new_mem_map_t compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
tm_mem_map_hasher::hash (const value_type *v)
{
  return (intptr_t)v->val >> 4;
}

inline bool
tm_mem_map_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->val == c->val;
}
/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc).  */
static hash_table <tm_mem_map_hasher> tm_new_mem_hash;
/* Initialize logging data structures.  */
static void
tm_log_init (void)
{
  tm_log.create (10);
  tm_new_mem_hash.create (5);
  tm_log_save_addresses.create (5);
}

/* Free logging data structures.  */
static void
tm_log_delete (void)
{
  tm_log.dispose ();
  tm_new_mem_hash.dispose ();
  tm_log_save_addresses.release ();
}
/* Return true if MEM is a transaction invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
      basic_block def_bb;

      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return def_bb != region_entry_block
	     && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
    }

  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}
/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry block for the transaction, or NULL if not
   known.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.  */
static void
tm_log_add (basic_block entry_block, tree addr, gimple stmt)
{
  tm_log_entry **slot;
  struct tm_log_entry l, *lp;

  l.addr = addr;
  slot = tm_log.find_slot (&l, INSERT);
  if (!*slot)
    {
      tree type = TREE_TYPE (addr);

      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      *slot = lp;

      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
	  && transaction_invariant_address_p (lp->addr, entry_block)
	  && TYPE_SIZE_UNIT (type) != NULL
	  && host_integerp (TYPE_SIZE_UNIT (type), 1)
	  && (tree_low_cst (TYPE_SIZE_UNIT (type), 1)
	      < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
	  /* We must be able to copy this type normally.  I.e., no
	     special constructors and the like.  */
	  && !TREE_ADDRESSABLE (type))
	{
	  lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
	  lp->stmts.create (0);
	  lp->entry_block = entry_block;
	  /* Save addresses separately in dominator order so we don't
	     get confused by overlapping addresses in the save/restore
	     sequence.  */
	  tm_log_save_addresses.safe_push (lp->addr);
	}
      else
	{
	  /* Use the logging functions.  */
	  lp->stmts.create (5);
	  lp->stmts.quick_push (stmt);
	  lp->save_var = NULL;
	}
    }
  else
    {
      size_t i;
      gimple oldstmt;

      lp = *slot;

      /* If we're generating a save/restore sequence, we don't care
	 about statements.  */
      if (lp->save_var)
	return;

      for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
	{
	  if (stmt == oldstmt)
	    return;
	  /* We already have a store to the same address, higher up the
	     dominator tree.  Nothing to do.  */
	  if (dominated_by_p (CDI_DOMINATORS,
			      gimple_bb (stmt), gimple_bb (oldstmt)))
	    return;
	  /* We should be processing blocks in dominator tree order.  */
	  gcc_assert (!dominated_by_p (CDI_DOMINATORS,
				       gimple_bb (oldstmt), gimple_bb (stmt)));
	}
      /* Store is on a different code path.  */
      lp->stmts.safe_push (stmt);
    }
}
/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */

static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}
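
/* For illustration: given `lala.x[i]', gimplify_addr builds the tree
   `&lala.x[i]' and forces it into a fresh SSA temporary suitable as a
   call argument; for a TARGET_MEM_REF it reconstructs the equivalent
   address expression via tree_mem_ref_addr first.  */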
/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
static void
tm_log_emit_stmt (tree addr, gimple stmt)
{
  tree type = TREE_TYPE (addr);
  tree size = TYPE_SIZE_UNIT (type);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple log;
  enum built_in_function code = BUILT_IN_TM_LOG;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (host_integerp (size, 1))
    {
      unsigned int n = tree_low_cst (size, 1);
      switch (n)
	{
	case 1:
	  code = BUILT_IN_TM_LOG_1;
	  break;
	case 2:
	  code = BUILT_IN_TM_LOG_2;
	  break;
	case 4:
	  code = BUILT_IN_TM_LOG_4;
	  break;
	case 8:
	  code = BUILT_IN_TM_LOG_8;
	  break;
	default:
	  code = BUILT_IN_TM_LOG;
	  if (TREE_CODE (type) == VECTOR_TYPE)
	    {
	      if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
		code = BUILT_IN_TM_LOG_M64;
	      else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
		code = BUILT_IN_TM_LOG_M128;
	      else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
		code = BUILT_IN_TM_LOG_M256;
	    }
	  break;
	}
    }

  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
  else
    log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}
/* Go through the log and instrument addresses that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   later.  */
static void
tm_log_emit (void)
{
  hash_table <log_entry_hasher>::iterator hi;
  struct tm_log_entry *lp;

  FOR_EACH_HASH_TABLE_ELEMENT (tm_log, lp, tm_log_entry_t, hi)
    {
      size_t i;
      gimple stmt;

      if (dump_file)
	{
	  fprintf (dump_file, "TM thread private mem logging: ");
	  print_generic_expr (dump_file, lp->addr, 0);
	  fprintf (dump_file, "\n");
	}

      if (lp->save_var)
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING to variable\n");
	  continue;
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING with logging functions\n");
	  for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
	    tm_log_emit_stmt (lp->addr, stmt);
	}
    }
}
/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple stmt;
  struct tm_log_entry l, *lp;

  for (i = 0; i < tm_log_save_addresses.length (); ++i)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log.find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));

      /* Make sure we can create an SSA_NAME for this type.  For
	 instance, aggregates aren't allowed, in which case the system
	 will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
	{
	  lp->save_var = make_ssa_name (lp->save_var, stmt);
	  gimple_assign_set_lhs (stmt, lp->save_var);
	}

      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
}
/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
  int i;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple stmt;

  for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log.find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
      gsi = gsi_start_bb (bb);

      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
}
static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
			       struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
				  struct walk_stmt_info *);
/* Evaluate an address X being dereferenced and determine if it
   originally points to a non-aliased new chunk of memory (malloc,
   alloca, etc).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
  gimple stmt = NULL;
  enum tree_code code;
  tm_new_mem_map_t **slot;
  tm_new_mem_map_t elt, *elt_p;
  tree val = x;
  enum thread_memory_type retval = mem_transaction_local;

  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
	 either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;

  /* Look in cache first.  */
  elt.val = x;
  slot = tm_new_mem_hash.find_slot (&elt, INSERT);
  elt_p = *slot;
  if (elt_p)
    return elt_p->local_new_memory;

  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map_t);
  elt_p->val = val;
  elt_p->local_new_memory = mem_transaction_local;

  /* Search DEF chain to find the original definition of this address.  */
  do
    {
      if (ptr_deref_may_alias_global_p (x))
	{
	  /* Address escapes.  This is not thread-private.  */
	  retval = mem_non_local;
	  goto new_memory_ret;
	}

      stmt = SSA_NAME_DEF_STMT (x);

      /* If the malloc call is outside the transaction, this is
	 thread-local.  */
      if (retval != mem_thread_local
	  && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
	retval = mem_thread_local;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  /* x = foo ==> foo */
	  if (code == SSA_NAME)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = foo + n ==> foo */
	  else if (code == POINTER_PLUS_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = (cast*) foo ==> foo */
	  else if (code == VIEW_CONVERT_EXPR || code == NOP_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
	  else if (code == COND_EXPR)
	    {
	      tree op1 = gimple_assign_rhs2 (stmt);
	      tree op2 = gimple_assign_rhs3 (stmt);
	      enum thread_memory_type mem;
	      retval = thread_private_new_memory (entry_block, op1);
	      if (retval == mem_non_local)
		goto new_memory_ret;
	      mem = thread_private_new_memory (entry_block, op2);
	      retval = MIN (retval, mem);
	      goto new_memory_ret;
	    }
	  else
	    {
	      retval = mem_non_local;
	      goto new_memory_ret;
	    }
	}
      else
	{
	  if (gimple_code (stmt) == GIMPLE_PHI)
	    {
	      unsigned int i;
	      enum thread_memory_type mem;
	      tree phi_result = gimple_phi_result (stmt);

	      /* If any of the ancestors are non-local, we are sure to
		 be non-local.  Otherwise we can avoid doing anything
		 and inherit what has already been generated.  */
	      retval = mem_max;
	      for (i = 0; i < gimple_phi_num_args (stmt); ++i)
		{
		  tree op = PHI_ARG_DEF (stmt, i);

		  /* Exclude self-assignment.  */
		  if (phi_result == op)
		    continue;

		  mem = thread_private_new_memory (entry_block, op);
		  if (mem == mem_non_local)
		    {
		      retval = mem;
		      goto new_memory_ret;
		    }
		  retval = MIN (retval, mem);
		}
	      goto new_memory_ret;
	    }
	  break;
	}
    }
  while (TREE_CODE (x) == SSA_NAME);

  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    ;
  else
    retval = mem_non_local;

 new_memory_ret:
  elt_p->local_new_memory = retval;
  return retval;
}
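
/* For illustration, a sketch of the classification:

	p_1 = malloc (64);
	__transaction_atomic { p_1->x = 1; }	// p_1: mem_thread_local

	__transaction_atomic {
	  q_1 = malloc (64);
	  q_1->x = 1;				// q_1: mem_transaction_local
	}

   Anything that may alias global memory, including function arguments
   and uninitialized uses, classifies as mem_non_local.  */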
/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region where STMT resides;
   NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
static bool
requires_barrier (basic_block entry_block, tree x, gimple stmt)
{
  tree orig = x;
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);

  switch (TREE_CODE (x))
    {
    case INDIRECT_REF:
    case MEM_REF:
      {
	enum thread_memory_type ret;

	ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
	if (ret == mem_non_local)
	  return true;
	if (stmt && ret == mem_thread_local)
	  /* ?? Should we pass `orig', or the INDIRECT_REF X.  ?? */
	  tm_log_add (entry_block, orig, stmt);

	/* Transaction-locals require nothing at all.  For malloc, a
	   transaction restart frees the memory and we reallocate.
	   For alloca, the stack pointer gets reset by the retry and
	   we reallocate.  */
	return false;
      }

    case TARGET_MEM_REF:
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
	return true;
      x = TREE_OPERAND (TMR_BASE (x), 0);
      if (TREE_CODE (x) == PARM_DECL)
	return false;
      gcc_assert (TREE_CODE (x) == VAR_DECL);
      /* FALLTHRU */

    case PARM_DECL:
    case RESULT_DECL:
    case VAR_DECL:
      if (DECL_BY_REFERENCE (x))
	{
	  /* ??? This value is a pointer, but aggregate_value_p has been
	     jigged to return true which confuses needs_to_live_in_memory.
	     This ought to be cleaned up generically.

	     FIXME: Verify this still happens after the next mainline
	     merge.  Testcase is g++.dg/tm/pr47554.C.  */
	  return false;
	}

      if (is_global_var (x))
	return !TREE_READONLY (x);
      if (/* FIXME: This condition should actually go below in the
	     tm_log_add() call, however is_call_clobbered() depends on
	     aliasing info which is not available during
	     gimplification.  Since requires_barrier() gets called
	     during lower_sequence_tm/gimplification, leave the call
	     to needs_to_live_in_memory until we eliminate
	     lower_sequence_tm altogether.  */
	  needs_to_live_in_memory (x))
	return true;
      else
	{
	  /* For local memory that doesn't escape (aka thread private
	     memory), we can either save the value at the beginning of
	     the transaction and restore on restart, or call a tm
	     function to dynamically save and restore on restart
	     (ITM_L*).  */
	  if (stmt)
	    tm_log_add (entry_block, orig, stmt);
	  return false;
	}

    default:
      return false;
    }
}
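
/* For illustration, a sketch of the decisions above:

	extern int g;
	static const int r = 42;
	void f (int *p)
	{
	  int local;
	  __transaction_atomic {
	    g = 1;	// writable global: barrier required
	    local = r;	// read-only global: no barrier
	    local = 2;	// non-escaping local: logged, no barrier
	    *p = 3;	// unknown pointer: barrier required
	  }
	}
   */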
/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */

static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
    *state |= GTMA_HAVE_LOAD;
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
    *state |= GTMA_HAVE_STORE;
}
/* Mark a GIMPLE_CALL as appropriate for being inside a transaction.  */

static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree fn;

  if (is_tm_pure_call (stmt))
    return;

  /* Check if this call is a transaction abort.  */
  fn = gimple_call_fndecl (stmt);
  if (is_tm_abort (fn))
    *state |= GTMA_HAVE_ABORT;

  /* Note that something may happen.  */
  *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}
/* Lower a GIMPLE_TRANSACTION statement.  */

static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
  gimple g, stmt = gsi_stmt (*gsi);
  unsigned int *outer_state = (unsigned int *) wi->info;
  unsigned int this_state = 0;
  struct walk_stmt_info this_wi;

  /* First, lower the body.  The scanning that we do inside gives
     us some idea of what we're dealing with.  */
  memset (&this_wi, 0, sizeof (this_wi));
  this_wi.info = (void *) &this_state;
  walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
		       lower_sequence_tm, NULL, &this_wi);

  /* If there was absolutely nothing transaction related inside the
     transaction, we may elide it.  Likewise if this is a nested
     transaction and does not contain an abort.  */
  if (this_state == 0
      || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
    {
      if (outer_state)
	*outer_state |= this_state;

      gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
			     GSI_SAME_STMT);
      gimple_transaction_set_body (stmt, NULL);

      gsi_remove (gsi, true);
      wi->removed_stmt = true;
      return;
    }

  /* Wrap the body of the transaction in a try-finally node so that
     the commit call is always properly called.  */
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
  if (flag_exceptions)
    {
      tree ptr;
      gimple_seq n_seq, e_seq;

      n_seq = gimple_seq_alloc_with_stmt (g);
      e_seq = NULL;

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
			     1, integer_zero_node);
      ptr = create_tmp_var (ptr_type_node, NULL);
      gimple_call_set_lhs (g, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
			     1, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_eh_else (n_seq, e_seq);
    }

  g = gimple_build_try (gimple_transaction_body (stmt),
			gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
  gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);

  gimple_transaction_set_body (stmt, NULL);

  /* If the transaction calls abort or if this is an outer transaction,
     add an "over" label afterwards.  */
  if ((this_state & (GTMA_HAVE_ABORT))
      || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
    {
      tree label = create_artificial_label (UNKNOWN_LOCATION);
      gimple_transaction_set_label (stmt, label);
      gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
    }

  /* Record the set of operations found for use later.  */
  this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
  gimple_transaction_set_subcode (stmt, this_state);
}
/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being in a transaction.  */

static tree
lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		   struct walk_stmt_info *wi)
{
  unsigned int *state = (unsigned int *) wi->info;
  gimple stmt = gsi_stmt (*gsi);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* Only memory reads/writes need to be instrumented.  */
      if (gimple_assign_single_p (stmt))
	examine_assign_tm (state, gsi);
      break;

    case GIMPLE_CALL:
      examine_call_tm (state, gsi);
      break;

    case GIMPLE_ASM:
      *state |= GTMA_MAY_ENTER_IRREVOCABLE;
      break;

    case GIMPLE_TRANSACTION:
      lower_transaction (gsi, wi);
      break;

    default:
      *handled_ops_p = !gimple_has_substatements (stmt);
      break;
    }

  return NULL_TREE;
}
/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being outside of a transaction.  */

static tree
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		      struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_TRANSACTION)
    {
      *handled_ops_p = true;
      lower_transaction (gsi, wi);
    }
  else
    *handled_ops_p = !gimple_has_substatements (stmt);

  return NULL_TREE;
}
/* Main entry point for flattening GIMPLE_TRANSACTION constructs.  After
   this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
   been moved out, and all the data required for constructing a proper
   CFG has been recorded.  */

static unsigned int
execute_lower_tm (void)
{
  struct walk_stmt_info wi;
  gimple_seq body;

  /* Transactional clones aren't created until a later pass.  */
  gcc_assert (!decl_is_tm_clone (current_function_decl));

  body = gimple_body (current_function_decl);
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
  gimple_set_body (current_function_decl, body);

  return 0;
}
struct gimple_opt_pass pass_lower_tm =
{
 {
  GIMPLE_PASS,
  "tmlower",				/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  gate_tm,				/* gate */
  execute_lower_tm,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TRANS_MEM,				/* tv_id */
  PROP_gimple_lcf,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};
/* Collect region information for each transaction.  */

struct tm_region
{
  /* Link to the next unnested transaction.  */
  struct tm_region *next;

  /* Link to the next inner transaction.  */
  struct tm_region *inner;

  /* Link to the next outer transaction.  */
  struct tm_region *outer;

  /* The GIMPLE_TRANSACTION statement beginning this transaction.
     After TM_MARK, this gets replaced by a call to
     BUILT_IN_TM_START.  */
  gimple transaction_stmt;

  /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
     BUILT_IN_TM_START, this field is true if the transaction is an
     outer transaction.  */
  bool original_transaction_was_outer;

  /* Return value from BUILT_IN_TM_START.  */
  tree tm_state;

  /* The entry block to this region.  This will always be the first
     block of the body of the transaction.  */
  basic_block entry_block;

  /* The first block after an expanded call to _ITM_beginTransaction.  */
  basic_block restart_block;

  /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
     These blocks are still a part of the region (i.e., the border is
     inclusive).  Note that this set is only complete for paths in the CFG
     starting at ENTRY_BLOCK, and that there is no exit block recorded for
     the edge to the "over" label.  */
  bitmap exit_blocks;

  /* The set of all blocks that have a TM_IRREVOCABLE call.  */
  bitmap irr_blocks;
};
typedef struct tm_region *tm_region_p;

/* True if there are pending edge statements to be committed for the
   current function being scanned in the tmmark pass.  */
bool pending_edge_inserts_p;

static struct tm_region *all_tm_regions;
static bitmap_obstack tm_obstack;
/* A subroutine of tm_region_init.  Record the existence of the
   GIMPLE_TRANSACTION statement in a tree of tm_region elements.  */

static struct tm_region *
tm_region_init_0 (struct tm_region *outer, basic_block bb, gimple stmt)
{
  struct tm_region *region;

  region = (struct tm_region *)
    obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));

  if (outer)
    {
      region->next = outer->inner;
      outer->inner = region;
    }
  else
    {
      region->next = all_tm_regions;
      all_tm_regions = region;
    }
  region->inner = NULL;
  region->outer = outer;

  region->transaction_stmt = stmt;
  region->original_transaction_was_outer = false;
  region->tm_state = NULL;

  /* There are either one or two edges out of the block containing
     the GIMPLE_TRANSACTION, one to the actual region and one to the
     "over" label if the region contains an abort.  The former will
     always be the one marked FALLTHRU.  */
  region->entry_block = FALLTHRU_EDGE (bb)->dest;

  region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
  region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

  return region;
}
/* A subroutine of tm_region_init.  Record all the exit and
   irrevocable blocks in BB into the region's exit_blocks and
   irr_blocks bitmaps.  Returns the new region being scanned.  */

static struct tm_region *
tm_region_init_1 (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple g;

  if (!region
      || (!region->irr_blocks && !region->exit_blocks))
    return region;

  /* Check to see if this is the end of a region by seeing if it
     contains a call to __builtin_tm_commit{,_eh}.  Note that the
     outermost region for DECL_IS_TM_CLONE need not collect this.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_CALL)
	{
	  tree fn = gimple_call_fndecl (g);
	  if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
	    {
	      if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
		   || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
		  && region->exit_blocks)
		{
		  bitmap_set_bit (region->exit_blocks, bb->index);
		  region = region->outer;
		  break;
		}
	      if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
		bitmap_set_bit (region->irr_blocks, bb->index);
	    }
	}
    }

  return region;
}
/* Collect all of the transaction regions within the current function
   and record them in ALL_TM_REGIONS.  The REGION parameter may specify
   an "outermost" region for use by tm clones.  */

static void
tm_region_init (struct tm_region *region)
{
  gimple g;
  edge_iterator ei;
  edge e;
  basic_block bb;
  vec<basic_block> queue = vNULL;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
  struct tm_region *old_region;
  vec<tm_region_p> bb_regions = vNULL;

  all_tm_regions = region;
  bb = single_succ (ENTRY_BLOCK_PTR);

  /* We could store this information in bb->aux, but we may get called
     through get_all_tm_blocks() from another pass that may be already
     using bb->aux.  */
  bb_regions.safe_grow_cleared (last_basic_block);

  queue.safe_push (bb);
  bb_regions[bb->index] = region;
  do
    {
      bb = queue.pop ();
      region = bb_regions[bb->index];
      bb_regions[bb->index] = NULL;

      /* Record exit and irrevocable blocks.  */
      region = tm_region_init_1 (region, bb);

      /* Check for the last statement in the block beginning a new region.  */
      g = last_stmt (bb);
      old_region = region;
      if (g && gimple_code (g) == GIMPLE_TRANSACTION)
	region = tm_region_init_0 (region, bb, g);

      /* Process subsequent blocks.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    queue.safe_push (e->dest);

	    /* If the current block started a new region, make sure that only
	       the entry block of the new region is associated with this
	       region.  Other successors are still part of the old region.  */
	    if (old_region != region && e->dest != region->entry_block)
	      bb_regions[e->dest->index] = old_region;
	    else
	      bb_regions[e->dest->index] = region;
	  }
    }
  while (!queue.is_empty ());
  queue.release ();
  BITMAP_FREE (visited_blocks);
  bb_regions.release ();
}
1927 /* The "gate" function for all transactional memory expansion and optimization
1928 passes. We collect region information for each top-level transaction, and
1929 if we don't find any, we skip all of the TM passes. Each region will have
1930 all of the exit blocks recorded, and the originating statement. */
1932 static bool
1933 gate_tm_init (void)
1935 if (!flag_tm)
1936 return false;
1938 calculate_dominance_info (CDI_DOMINATORS);
1939 bitmap_obstack_initialize (&tm_obstack);
1941 /* If the function is a TM_CLONE, then the entire function is the region. */
1942 if (decl_is_tm_clone (current_function_decl))
1944 struct tm_region *region = (struct tm_region *)
1945 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
1946 memset (region, 0, sizeof (*region));
1947 region->entry_block = single_succ (ENTRY_BLOCK_PTR);
1948 /* For a clone, the entire function is the region. But even if
1949 we don't need to record any exit blocks, we may need to
1950 record irrevocable blocks. */
1951 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
1953 tm_region_init (region);
1955 else
1957 tm_region_init (NULL);
1959 /* If we didn't find any regions, cleanup and skip the whole tree
1960 of tm-related optimizations. */
1961 if (all_tm_regions == NULL)
1963 bitmap_obstack_release (&tm_obstack);
1964 return false;
1968 return true;
struct gimple_opt_pass pass_tm_init =
{
 {
  GIMPLE_PASS,
  "*tminit",				/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  gate_tm_init,				/* gate */
  NULL,					/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TRANS_MEM,				/* tv_id */
  PROP_ssa | PROP_cfg,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};
/* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
   represented by REGION.  */

static inline void
transaction_subcode_ior (struct tm_region *region, unsigned flags)
{
  if (region && region->transaction_stmt)
    {
      flags |= gimple_transaction_subcode (region->transaction_stmt);
      gimple_transaction_set_subcode (region->transaction_stmt, flags);
    }
}
2004 /* Construct a memory load in a transactional context. Return the
2005 gimple statement performing the load, or NULL if there is no
2006 TM_LOAD builtin of the appropriate size to do the load.
2008 LOC is the location to use for the new statement(s). */
2010 static gimple
2011 build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2013 enum built_in_function code = END_BUILTINS;
2014 tree t, type = TREE_TYPE (rhs), decl;
2015 gimple gcall;
2017 if (type == float_type_node)
2018 code = BUILT_IN_TM_LOAD_FLOAT;
2019 else if (type == double_type_node)
2020 code = BUILT_IN_TM_LOAD_DOUBLE;
2021 else if (type == long_double_type_node)
2022 code = BUILT_IN_TM_LOAD_LDOUBLE;
2023 else if (TYPE_SIZE_UNIT (type) != NULL
2024 && host_integerp (TYPE_SIZE_UNIT (type), 1))
2026 switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
2028 case 1:
2029 code = BUILT_IN_TM_LOAD_1;
2030 break;
2031 case 2:
2032 code = BUILT_IN_TM_LOAD_2;
2033 break;
2034 case 4:
2035 code = BUILT_IN_TM_LOAD_4;
2036 break;
2037 case 8:
2038 code = BUILT_IN_TM_LOAD_8;
2039 break;
2043 if (code == END_BUILTINS)
2045 decl = targetm.vectorize.builtin_tm_load (type);
2046 if (!decl)
2047 return NULL;
2049 else
2050 decl = builtin_decl_explicit (code);
2052 t = gimplify_addr (gsi, rhs);
2053 gcall = gimple_build_call (decl, 1, t);
2054 gimple_set_location (gcall, loc);
2056 t = TREE_TYPE (TREE_TYPE (decl));
2057 if (useless_type_conversion_p (type, t))
2059 gimple_call_set_lhs (gcall, lhs);
2060 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2062 else
2064 gimple g;
2065 tree temp;
2067 temp = create_tmp_reg (t, NULL);
2068 gimple_call_set_lhs (gcall, temp);
2069 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2071 t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
2072 g = gimple_build_assign (lhs, t);
2073 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2076 return gcall;
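/* For example (a sketch; the _ITM_* names are the libitm entry points
   assumed from gtm-builtins.def): inside a transaction, a 4-byte load

       int x, *p;
       ...
       x = *p;

   is replaced by a call to the BUILT_IN_TM_LOAD_4 builtin,

       x = _ITM_RU4 (p);

   going through a temporary and a VIEW_CONVERT_EXPR when the builtin's
   return type does not match the type of the original load.  */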
2080 /* Similarly for storing RHS in a transactional context. */
2082 static gimple
2083 build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2085 enum built_in_function code = END_BUILTINS;
2086 tree t, fn, type = TREE_TYPE (rhs), simple_type;
2087 gimple gcall;
2089 if (type == float_type_node)
2090 code = BUILT_IN_TM_STORE_FLOAT;
2091 else if (type == double_type_node)
2092 code = BUILT_IN_TM_STORE_DOUBLE;
2093 else if (type == long_double_type_node)
2094 code = BUILT_IN_TM_STORE_LDOUBLE;
2095 else if (TYPE_SIZE_UNIT (type) != NULL
2096 && host_integerp (TYPE_SIZE_UNIT (type), 1))
2098 switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
2100 case 1:
2101 code = BUILT_IN_TM_STORE_1;
2102 break;
2103 case 2:
2104 code = BUILT_IN_TM_STORE_2;
2105 break;
2106 case 4:
2107 code = BUILT_IN_TM_STORE_4;
2108 break;
2109 case 8:
2110 code = BUILT_IN_TM_STORE_8;
2111 break;
2115 if (code == END_BUILTINS)
2117 fn = targetm.vectorize.builtin_tm_store (type);
2118 if (!fn)
2119 return NULL;
2121 else
2122 fn = builtin_decl_explicit (code);
2124 simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
2126 if (TREE_CODE (rhs) == CONSTRUCTOR)
2128 /* Handle the easy initialization to zero. */
2129 if (!CONSTRUCTOR_ELTS (rhs))
2130 rhs = build_int_cst (simple_type, 0);
2131 else
2133 /* ...otherwise punt to the caller and probably use
2134 BUILT_IN_TM_MEMMOVE, because we can't wrap a
2135 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
2136 valid gimple. */
2137 return NULL;
2140 else if (!useless_type_conversion_p (simple_type, type))
2142 gimple g;
2143 tree temp;
2145 temp = create_tmp_reg (simple_type, NULL);
2146 t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
2147 g = gimple_build_assign (temp, t);
2148 gimple_set_location (g, loc);
2149 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2151 rhs = temp;
2154 t = gimplify_addr (gsi, lhs);
2155 gcall = gimple_build_call (fn, 2, t, rhs);
2156 gimple_set_location (gcall, loc);
2157 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2159 return gcall;
2163 /* Expand an assignment statement into transactional builtins. */
2165 static void
2166 expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
2168 gimple stmt = gsi_stmt (*gsi);
2169 location_t loc = gimple_location (stmt);
2170 tree lhs = gimple_assign_lhs (stmt);
2171 tree rhs = gimple_assign_rhs1 (stmt);
2172 bool store_p = requires_barrier (region->entry_block, lhs, NULL);
2173 bool load_p = requires_barrier (region->entry_block, rhs, NULL);
2174 gimple gcall = NULL;
2176 if (!load_p && !store_p)
2178 /* Add thread private addresses to log if applicable. */
2179 requires_barrier (region->entry_block, lhs, stmt);
2180 gsi_next (gsi);
2181 return;
2184 // Remove original load/store statement.
2185 gsi_remove (gsi, true);
2187 if (load_p && !store_p)
2189 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2190 gcall = build_tm_load (loc, lhs, rhs, gsi);
2192 else if (store_p && !load_p)
2194 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2195 gcall = build_tm_store (loc, lhs, rhs, gsi);
2197 if (!gcall)
2199 tree lhs_addr, rhs_addr, tmp;
2201 if (load_p)
2202 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2203 if (store_p)
2204 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2206 /* ??? Figure out if there's any possible overlap between the LHS
2207 and the RHS and if not, use MEMCPY. */
2209 if (load_p && is_gimple_reg (lhs))
2211 tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
2212 lhs_addr = build_fold_addr_expr (tmp);
2214 else
2216 tmp = NULL_TREE;
2217 lhs_addr = gimplify_addr (gsi, lhs);
2219 rhs_addr = gimplify_addr (gsi, rhs);
2220 gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
2221 3, lhs_addr, rhs_addr,
2222 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
2223 gimple_set_location (gcall, loc);
2224 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2226 if (tmp)
2228 gcall = gimple_build_assign (lhs, tmp);
2229 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2233 /* Now that we have the load/store in its instrumented form, add
2234 thread private addresses to the log if applicable. */
2235 if (!store_p)
2236 requires_barrier (region->entry_block, lhs, gcall);
2238 // The calls to build_tm_{store,load} above inserted the instrumented
2239 // call into the stream.
2240 // gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
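/* For example (a sketch; _ITM_memmoveRtWt is the libitm entry point
   assumed for BUILT_IN_TM_MEMMOVE): an assignment with no sized TM
   builtin, such as a 13-byte structure copy

       struct S { char buf[13]; } a, b;
       ...
       a = b;

   falls through to the memmove path above and becomes roughly

       _ITM_memmoveRtWt (&a, &b, 13);

   whereas a simple 4-byte store would have been handled by
   build_tm_store as _ITM_WU4 (&a, t).  */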
2244 /* Expand a call statement as appropriate for a transaction. That is,
2245 either verify that the call does not affect the transaction, or
2246 redirect the call to a clone that handles transactions, or change
2247 the transaction state to IRREVOCABLE. Return true if the call is
2248 one of the builtins that end a transaction. */
2250 static bool
2251 expand_call_tm (struct tm_region *region,
2252 gimple_stmt_iterator *gsi)
2254 gimple stmt = gsi_stmt (*gsi);
2255 tree lhs = gimple_call_lhs (stmt);
2256 tree fn_decl;
2257 struct cgraph_node *node;
2258 bool retval = false;
2260 fn_decl = gimple_call_fndecl (stmt);
2262 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
2263 || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
2264 transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
2265 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
2266 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2268 if (is_tm_pure_call (stmt))
2269 return false;
2271 if (fn_decl)
2272 retval = is_tm_ending_fndecl (fn_decl);
2273 if (!retval)
2275 /* Assume all non-const/pure calls write to memory, except
2276 transaction ending builtins. */
2277 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2280 /* For indirect calls, we already generated a call into the runtime. */
2281 if (!fn_decl)
2283 tree fn = gimple_call_fn (stmt);
2285 /* We are guaranteed never to go irrevocable on a safe or pure
2286 call, and the pure call was handled above. */
2287 if (is_tm_safe (fn))
2288 return false;
2289 else
2290 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2292 return false;
2295 node = cgraph_get_node (fn_decl);
2296 /* All calls should have cgraph here. */
2297 if (!node)
2299 /* We can have a nodeless call here if some pass after IPA-tm
2300 added uninstrumented calls. For example, loop distribution
2301 can transform certain loop constructs into __builtin_mem*
2302 calls. In this case, see if we have a suitable TM
2303 replacement and fill in the gaps. */
2304 gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
2305 enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
2306 gcc_assert (code == BUILT_IN_MEMCPY
2307 || code == BUILT_IN_MEMMOVE
2308 || code == BUILT_IN_MEMSET);
2310 tree repl = find_tm_replacement_function (fn_decl);
2311 if (repl)
2313 gimple_call_set_fndecl (stmt, repl);
2314 update_stmt (stmt);
2315 node = cgraph_create_node (repl);
2316 node->local.tm_may_enter_irr = false;
2317 return expand_call_tm (region, gsi);
2319 gcc_unreachable ();
2321 if (node->local.tm_may_enter_irr)
2322 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2324 if (is_tm_abort (fn_decl))
2326 transaction_subcode_ior (region, GTMA_HAVE_ABORT);
2327 return true;
2330 /* Instrument the store if needed.
2332 If the assignment happens inside the function call (return slot
2333 optimization), there is no instrumentation to be done, since
2334 the callee should have done the right thing. */
2335 if (lhs && requires_barrier (region->entry_block, lhs, stmt)
2336 && !gimple_call_return_slot_opt_p (stmt))
2338 tree tmp = create_tmp_reg (TREE_TYPE (lhs), NULL);
2339 location_t loc = gimple_location (stmt);
2340 edge fallthru_edge = NULL;
2342 /* Remember if the call was going to throw. */
2343 if (stmt_can_throw_internal (stmt))
2345 edge_iterator ei;
2346 edge e;
2347 basic_block bb = gimple_bb (stmt);
2349 FOR_EACH_EDGE (e, ei, bb->succs)
2350 if (e->flags & EDGE_FALLTHRU)
2352 fallthru_edge = e;
2353 break;
2357 gimple_call_set_lhs (stmt, tmp);
2358 update_stmt (stmt);
2359 stmt = gimple_build_assign (lhs, tmp);
2360 gimple_set_location (stmt, loc);
2362 /* We cannot throw in the middle of a BB. If the call was going
2363 to throw, place the instrumentation on the fallthru edge, so
2364 the call remains the last statement in the block. */
2365 if (fallthru_edge)
2367 gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (stmt);
2368 gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
2369 expand_assign_tm (region, &fallthru_gsi);
2370 gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
2371 pending_edge_inserts_p = true;
2373 else
2375 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
2376 expand_assign_tm (region, gsi);
2379 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2382 return retval;
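/* For example: a call whose return value lands in instrumented
   memory, such as

       global = foo ();

   inside a transaction is rewritten above into

       tmp = foo ();
       global = tmp;     /* then expanded by expand_assign_tm */

   with the new assignment placed on the fallthru edge when foo can
   throw, so that the call remains the last statement in its block.  */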
2386 /* Expand all statements in BB as appropriate for being inside
2387 a transaction. */
2389 static void
2390 expand_block_tm (struct tm_region *region, basic_block bb)
2392 gimple_stmt_iterator gsi;
2394 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2396 gimple stmt = gsi_stmt (gsi);
2397 switch (gimple_code (stmt))
2399 case GIMPLE_ASSIGN:
2400 /* Only memory reads/writes need to be instrumented. */
2401 if (gimple_assign_single_p (stmt)
2402 && !gimple_clobber_p (stmt))
2404 expand_assign_tm (region, &gsi);
2405 continue;
2407 break;
2409 case GIMPLE_CALL:
2410 if (expand_call_tm (region, &gsi))
2411 return;
2412 break;
2414 case GIMPLE_ASM:
2415 gcc_unreachable ();
2417 default:
2418 break;
2420 if (!gsi_end_p (gsi))
2421 gsi_next (&gsi);
2425 /* Return the list of basic blocks in the region rooted at ENTRY_BLOCK.
2427 STOP_AT_IRREVOCABLE_P is true if the caller is uninterested in blocks
2428 following a TM_IRREVOCABLE call.
2430 INCLUDE_UNINSTRUMENTED_P is true if we should include the
2431 uninstrumented code path blocks in the list of basic blocks
2432 returned, false otherwise. */
2434 static vec<basic_block>
2435 get_tm_region_blocks (basic_block entry_block,
2436 bitmap exit_blocks,
2437 bitmap irr_blocks,
2438 bitmap all_region_blocks,
2439 bool stop_at_irrevocable_p,
2440 bool include_uninstrumented_p = true)
2442 vec<basic_block> bbs = vNULL;
2443 unsigned i;
2444 edge e;
2445 edge_iterator ei;
2446 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2448 i = 0;
2449 bbs.safe_push (entry_block);
2450 bitmap_set_bit (visited_blocks, entry_block->index);
2454 basic_block bb = bbs[i++];
2456 if (exit_blocks &&
2457 bitmap_bit_p (exit_blocks, bb->index))
2458 continue;
2460 if (stop_at_irrevocable_p
2461 && irr_blocks
2462 && bitmap_bit_p (irr_blocks, bb->index))
2463 continue;
2465 FOR_EACH_EDGE (e, ei, bb->succs)
2466 if ((include_uninstrumented_p
2467 || !(e->flags & EDGE_TM_UNINSTRUMENTED))
2468 && !bitmap_bit_p (visited_blocks, e->dest->index))
2470 bitmap_set_bit (visited_blocks, e->dest->index);
2471 bbs.safe_push (e->dest);
2474 while (i < bbs.length ());
2476 if (all_region_blocks)
2477 bitmap_ior_into (all_region_blocks, visited_blocks);
2479 BITMAP_FREE (visited_blocks);
2480 return bbs;
2483 // Callback data for collect_bb2reg.
2484 struct bb2reg_stuff
2486 vec<tm_region_p> *bb2reg;
2487 bool include_uninstrumented_p;
2490 // Callback for expand_regions, collect innermost region data for each bb.
2491 static void *
2492 collect_bb2reg (struct tm_region *region, void *data)
2494 struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
2495 vec<tm_region_p> *bb2reg = stuff->bb2reg;
2496 vec<basic_block> queue;
2497 unsigned int i;
2498 basic_block bb;
2500 queue = get_tm_region_blocks (region->entry_block,
2501 region->exit_blocks,
2502 region->irr_blocks,
2503 NULL,
2504 /*stop_at_irr_p=*/true,
2505 stuff->include_uninstrumented_p);
2507 // We expect expand_region to perform a post-order traversal of the region
2508 // tree. Therefore the last region seen for any bb is the innermost.
2509 FOR_EACH_VEC_ELT (queue, i, bb)
2510 (*bb2reg)[bb->index] = region;
2512 queue.release ();
2513 return NULL;
2516 // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
2517 // which a basic block belongs. Note that we only consider the instrumented
2518 // code paths for the region; the uninstrumented code paths are ignored if
2519 // INCLUDE_UNINSTRUMENTED_P is false.
2521 // ??? This data is very similar to the bb_regions array that is collected
2522 // during tm_region_init. Or, rather, this data is similar to what could
2523 // be used within tm_region_init. The actual computation in tm_region_init
2524 // begins and ends with bb_regions entirely full of NULL pointers, due to
2525 // the way in which pointers are swapped in and out of the array.
2527 // ??? Our callers expect that blocks are not shared between transactions.
2528 // When the optimizers get too smart, and blocks are shared, then during
2529 // the tm_mark phase we'll add log entries to only one of the two transactions,
2530 // and in the tm_edge phase we'll add edges to the CFG that create invalid
2531 // cycles. The symptom being SSA defs that do not dominate their uses.
2532 // Note that the optimizers were locally correct with their transformation,
2533 // as we have no info within the program that suggests that the blocks cannot
2534 // be shared.
2536 // ??? There is currently a hack inside tree-ssa-pre.c to work around the
2537 // only known instance of this block sharing.
2539 static vec<tm_region_p>
2540 get_bb_regions_instrumented (bool traverse_clones,
2541 bool include_uninstrumented_p)
2543 unsigned n = last_basic_block;
2544 struct bb2reg_stuff stuff;
2545 vec<tm_region_p> ret;
2547 ret.create (n);
2548 ret.safe_grow_cleared (n);
2549 stuff.bb2reg = &ret;
2550 stuff.include_uninstrumented_p = include_uninstrumented_p;
2551 expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);
2553 return ret;
2556 /* Set the BB_IN_TRANSACTION flag for all basic blocks that appear
2557 within a transaction. */
2559 void
2560 compute_transaction_bits (void)
2562 struct tm_region *region;
2563 vec<basic_block> queue;
2564 unsigned int i;
2565 basic_block bb;
2567 /* ??? Perhaps we need to abstract gate_tm_init further, because we
2568 certainly don't need it to calculate CDI_DOMINATORS info. */
2569 gate_tm_init ();
2571 FOR_EACH_BB (bb)
2572 bb->flags &= ~BB_IN_TRANSACTION;
2574 for (region = all_tm_regions; region; region = region->next)
2576 queue = get_tm_region_blocks (region->entry_block,
2577 region->exit_blocks,
2578 region->irr_blocks,
2579 NULL,
2580 /*stop_at_irr_p=*/true);
2581 for (i = 0; queue.iterate (i, &bb); ++i)
2582 bb->flags |= BB_IN_TRANSACTION;
2583 queue.release ();
2586 if (all_tm_regions)
2587 bitmap_obstack_release (&tm_obstack);
2590 /* Replace the GIMPLE_TRANSACTION in this region with the corresponding
2591 call to BUILT_IN_TM_START. */
2593 static void *
2594 expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2596 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2597 basic_block transaction_bb = gimple_bb (region->transaction_stmt);
2598 tree tm_state = region->tm_state;
2599 tree tm_state_type = TREE_TYPE (tm_state);
2600 edge abort_edge = NULL;
2601 edge inst_edge = NULL;
2602 edge uninst_edge = NULL;
2603 edge fallthru_edge = NULL;
2605 // Identify the various successors of the transaction start.
2607 edge_iterator i;
2608 edge e;
2609 FOR_EACH_EDGE (e, i, transaction_bb->succs)
2611 if (e->flags & EDGE_TM_ABORT)
2612 abort_edge = e;
2613 else if (e->flags & EDGE_TM_UNINSTRUMENTED)
2614 uninst_edge = e;
2615 else
2616 inst_edge = e;
2617 if (e->flags & EDGE_FALLTHRU)
2618 fallthru_edge = e;
2622 /* ??? There are plenty of bits here we're not computing. */
2624 int subcode = gimple_transaction_subcode (region->transaction_stmt);
2625 int flags = 0;
2626 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2627 flags |= PR_DOESGOIRREVOCABLE;
2628 if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
2629 flags |= PR_HASNOIRREVOCABLE;
2630 /* If the transaction does not have an abort in lexical scope and is not
2631 marked as an outer transaction, then it will never abort. */
2632 if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
2633 flags |= PR_HASNOABORT;
2634 if ((subcode & GTMA_HAVE_STORE) == 0)
2635 flags |= PR_READONLY;
2636 if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
2637 flags |= PR_INSTRUMENTEDCODE;
2638 if (uninst_edge)
2639 flags |= PR_UNINSTRUMENTEDCODE;
2640 if (subcode & GTMA_IS_OUTER)
2641 region->original_transaction_was_outer = true;
2642 tree t = build_int_cst (tm_state_type, flags);
2643 gimple call = gimple_build_call (tm_start, 1, t);
2644 gimple_call_set_lhs (call, tm_state);
2645 gimple_set_location (call, gimple_location (region->transaction_stmt));
2647 // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
2648 gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
2649 gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
2650 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2651 gsi_remove (&gsi, true);
2652 region->transaction_stmt = call;
2655 // Generate log saves.
2656 if (!tm_log_save_addresses.is_empty ())
2657 tm_log_emit_saves (region->entry_block, transaction_bb);
2659 // In the beginning, we've no tests to perform on transaction restart.
2660 // Note that after this point, transaction_bb becomes the "most recent
2661 // block containing tests for the transaction".
2662 region->restart_block = region->entry_block;
2664 // Generate log restores.
2665 if (!tm_log_save_addresses.is_empty ())
2667 basic_block test_bb = create_empty_bb (transaction_bb);
2668 basic_block code_bb = create_empty_bb (test_bb);
2669 basic_block join_bb = create_empty_bb (code_bb);
2670 if (current_loops && transaction_bb->loop_father)
2672 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2673 add_bb_to_loop (code_bb, transaction_bb->loop_father);
2674 add_bb_to_loop (join_bb, transaction_bb->loop_father);
2676 if (region->restart_block == region->entry_block)
2677 region->restart_block = test_bb;
2679 tree t1 = create_tmp_reg (tm_state_type, NULL);
2680 tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
2681 gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
2682 tm_state, t2);
2683 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2684 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2686 t2 = build_int_cst (tm_state_type, 0);
2687 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2688 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2690 tm_log_emit_restores (region->entry_block, code_bb);
2692 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2693 edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
2694 edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
2695 redirect_edge_pred (fallthru_edge, join_bb);
2697 join_bb->frequency = test_bb->frequency = transaction_bb->frequency;
2698 join_bb->count = test_bb->count = transaction_bb->count;
2700 ei->probability = PROB_ALWAYS;
2701 et->probability = PROB_LIKELY;
2702 ef->probability = PROB_UNLIKELY;
2703 et->count = apply_probability(test_bb->count, et->probability);
2704 ef->count = apply_probability(test_bb->count, ef->probability);
2706 code_bb->count = et->count;
2707 code_bb->frequency = EDGE_FREQUENCY (et);
2709 transaction_bb = join_bb;
2712 // If we have an ABORT edge, create a test to perform the abort.
2713 if (abort_edge)
2715 basic_block test_bb = create_empty_bb (transaction_bb);
2716 if (current_loops && transaction_bb->loop_father)
2717 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2718 if (region->restart_block == region->entry_block)
2719 region->restart_block = test_bb;
2721 tree t1 = create_tmp_reg (tm_state_type, NULL);
2722 tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
2723 gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
2724 tm_state, t2);
2725 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2726 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2728 t2 = build_int_cst (tm_state_type, 0);
2729 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2730 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2732 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2733 test_bb->frequency = transaction_bb->frequency;
2734 test_bb->count = transaction_bb->count;
2735 ei->probability = PROB_ALWAYS;
2737 // Non-abort edge. If both are live, choose one at random as we'll
2738 // be fixing that up below.
2739 redirect_edge_pred (fallthru_edge, test_bb);
2740 fallthru_edge->flags = EDGE_FALSE_VALUE;
2741 fallthru_edge->probability = PROB_VERY_LIKELY;
2742 fallthru_edge->count
2743 = apply_probability(test_bb->count, fallthru_edge->probability);
2745 // Abort/over edge.
2746 redirect_edge_pred (abort_edge, test_bb);
2747 abort_edge->flags = EDGE_TRUE_VALUE;
2748 abort_edge->probability = PROB_VERY_UNLIKELY;
2749 abort_edge->count
2750 = apply_probability(test_bb->count, abort_edge->probability);
2752 transaction_bb = test_bb;
2755 // If we have both instrumented and uninstrumented code paths, select one.
2756 if (inst_edge && uninst_edge)
2758 basic_block test_bb = create_empty_bb (transaction_bb);
2759 if (current_loops && transaction_bb->loop_father)
2760 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2761 if (region->restart_block == region->entry_block)
2762 region->restart_block = test_bb;
2764 tree t1 = create_tmp_reg (tm_state_type, NULL);
2765 tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);
2767 gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
2768 tm_state, t2);
2769 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2770 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2772 t2 = build_int_cst (tm_state_type, 0);
2773 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2774 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2776 // Create the edge into test_bb first, as we want to copy values
2777 // out of the fallthru edge.
2778 edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
2779 e->probability = fallthru_edge->probability;
2780 test_bb->count = e->count = fallthru_edge->count;
2781 test_bb->frequency = EDGE_FREQUENCY (e);
2783 // Now update the edges to the inst/uninst implementations.
2784 // For now assume that the paths are equally likely. When using HTM,
2785 // we'll try the uninst path first and fall back to the inst path if
2786 // HTM buffers are exceeded. Without HTM we start with the inst path
2787 // and use the uninst path when falling back to serial mode.
2788 redirect_edge_pred (inst_edge, test_bb);
2789 inst_edge->flags = EDGE_FALSE_VALUE;
2790 inst_edge->probability = REG_BR_PROB_BASE / 2;
2791 inst_edge->count
2792 = apply_probability(test_bb->count, inst_edge->probability);
2794 redirect_edge_pred (uninst_edge, test_bb);
2795 uninst_edge->flags = EDGE_TRUE_VALUE;
2796 uninst_edge->probability = REG_BR_PROB_BASE / 2;
2797 uninst_edge->count
2798 = apply_probability(test_bb->count, uninst_edge->probability);
2801 // If we have no previous special cases, and we have PHIs at the beginning
2802 // of the atomic region, this means we have a loop at the beginning of the
2803 // atomic region that shares the first block. This can cause problems for
2804 // the transaction restart abnormal edges that will be added in the tm_edges pass.
2805 // Solve this by adding a new empty block to receive the abnormal edges.
2806 if (region->restart_block == region->entry_block
2807 && phi_nodes (region->entry_block))
2809 basic_block empty_bb = create_empty_bb (transaction_bb);
2810 region->restart_block = empty_bb;
2811 if (current_loops && transaction_bb->loop_father)
2812 add_bb_to_loop (empty_bb, transaction_bb->loop_father);
2814 redirect_edge_pred (fallthru_edge, empty_bb);
2815 make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
2818 return NULL;
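/* For example (a sketch using the A_* bits defined at the top of this
   file; _ITM_beginTransaction is the libitm name assumed for
   BUILT_IN_TM_START): after expansion, the head of a transaction with
   log saves, an abort edge, and both code paths looks roughly like

       tm_state = _ITM_beginTransaction (flags);  // also restart target
       if (tm_state & A_RESTORELIVEVARIABLES)
         ... restore logged values ...
       if (tm_state & A_ABORTTRANSACTION)
         goto over;
       if (tm_state & A_RUNUNINSTRUMENTEDCODE)
         goto uninstrumented_code_path;
       ... instrumented code path ...  */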
2821 /* Generate the temporary to be used for the return value of
2822 BUILT_IN_TM_START. */
2824 static void *
2825 generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2827 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2828 region->tm_state =
2829 create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
2831 // Reset the subcode, post optimizations. We'll fill this in
2832 // again as we process blocks.
2833 if (region->exit_blocks)
2835 unsigned int subcode
2836 = gimple_transaction_subcode (region->transaction_stmt);
2838 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2839 subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
2840 | GTMA_MAY_ENTER_IRREVOCABLE
2841 | GTMA_HAS_NO_INSTRUMENTATION);
2842 else
2843 subcode &= GTMA_DECLARATION_MASK;
2844 gimple_transaction_set_subcode (region->transaction_stmt, subcode);
2847 return NULL;
2850 // Propagate flags from inner transactions outwards.
2851 static void
2852 propagate_tm_flags_out (struct tm_region *region)
2854 if (region == NULL)
2855 return;
2856 propagate_tm_flags_out (region->inner);
2858 if (region->outer && region->outer->transaction_stmt)
2860 unsigned s = gimple_transaction_subcode (region->transaction_stmt);
2861 s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
2862 | GTMA_MAY_ENTER_IRREVOCABLE);
2863 s |= gimple_transaction_subcode (region->outer->transaction_stmt);
2864 gimple_transaction_set_subcode (region->outer->transaction_stmt, s);
2867 propagate_tm_flags_out (region->next);
2870 /* Entry point to the MARK phase of TM expansion. Here we replace
2871 transactional memory statements with calls to builtins, and function
2872 calls with their transactional clones (if available). But we don't
2873 yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */
2875 static unsigned int
2876 execute_tm_mark (void)
2878 pending_edge_inserts_p = false;
2880 expand_regions (all_tm_regions, generate_tm_state, NULL,
2881 /*traverse_clones=*/true);
2883 tm_log_init ();
2885 vec<tm_region_p> bb_regions
2886 = get_bb_regions_instrumented (/*traverse_clones=*/true,
2887 /*include_uninstrumented_p=*/false);
2888 struct tm_region *r;
2889 unsigned i;
2891 // Expand memory operations into calls into the runtime.
2892 // This collects log entries as well.
2893 FOR_EACH_VEC_ELT (bb_regions, i, r)
2895 if (r != NULL)
2897 if (r->transaction_stmt)
2899 unsigned sub = gimple_transaction_subcode (r->transaction_stmt);
2901 /* If we're sure to go irrevocable, there won't be
2902 anything to expand, since the run-time will go
2903 irrevocable right away. */
2904 if (sub & GTMA_DOES_GO_IRREVOCABLE
2905 && sub & GTMA_MAY_ENTER_IRREVOCABLE)
2906 continue;
2908 expand_block_tm (r, BASIC_BLOCK (i));
2912 bb_regions.release ();
2914 // Propagate flags from inner transactions outwards.
2915 propagate_tm_flags_out (all_tm_regions);
2917 // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
2918 expand_regions (all_tm_regions, expand_transaction, NULL,
2919 /*traverse_clones=*/false);
2921 tm_log_emit ();
2922 tm_log_delete ();
2924 if (pending_edge_inserts_p)
2925 gsi_commit_edge_inserts ();
2926 free_dominance_info (CDI_DOMINATORS);
2927 return 0;
2930 struct gimple_opt_pass pass_tm_mark =
2933 GIMPLE_PASS,
2934 "tmmark", /* name */
2935 OPTGROUP_NONE, /* optinfo_flags */
2936 NULL, /* gate */
2937 execute_tm_mark, /* execute */
2938 NULL, /* sub */
2939 NULL, /* next */
2940 0, /* static_pass_number */
2941 TV_TRANS_MEM, /* tv_id */
2942 PROP_ssa | PROP_cfg, /* properties_required */
2943 0, /* properties_provided */
2944 0, /* properties_destroyed */
2945 0, /* todo_flags_start */
2946 TODO_update_ssa
2947 | TODO_verify_ssa, /* todo_flags_finish */
2952 /* Create an abnormal edge from the block containing STMT (at ITER) to
2953 DEST_BB, splitting the block as necessary. Adjust *PNEXT as needed for the split block. */
2955 static inline void
2956 split_bb_make_tm_edge (gimple stmt, basic_block dest_bb,
2957 gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
2959 basic_block bb = gimple_bb (stmt);
2960 if (!gsi_one_before_end_p (iter))
2962 edge e = split_block (bb, stmt);
2963 *pnext = gsi_start_bb (e->dest);
2965 make_edge (bb, dest_bb, EDGE_ABNORMAL);
2967 // Record the need for the edge for the benefit of the rtl passes.
2968 if (cfun->gimple_df->tm_restart == NULL)
2969 cfun->gimple_df->tm_restart = htab_create_ggc (31, struct_ptr_hash,
2970 struct_ptr_eq, ggc_free);
2972 struct tm_restart_node dummy;
2973 dummy.stmt = stmt;
2974 dummy.label_or_list = gimple_block_label (dest_bb);
2976 void **slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, INSERT);
2977 struct tm_restart_node *n = (struct tm_restart_node *) *slot;
2978 if (n == NULL)
2980 n = ggc_alloc_tm_restart_node ();
2981 *n = dummy;
2983 else
2985 tree old = n->label_or_list;
2986 if (TREE_CODE (old) == LABEL_DECL)
2987 old = tree_cons (NULL, old, NULL);
2988 n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
2992 /* Split block BB as necessary for every builtin function we added, and
2993 wire up the abnormal back edges implied by the transaction restart. */
2995 static void
2996 expand_block_edges (struct tm_region *const region, basic_block bb)
2998 gimple_stmt_iterator gsi, next_gsi;
3000 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
3002 gimple stmt = gsi_stmt (gsi);
3004 next_gsi = gsi;
3005 gsi_next (&next_gsi);
3007 // ??? Shouldn't we split for any non-pure, non-irrevocable function?
3008 if (gimple_code (stmt) != GIMPLE_CALL
3009 || (gimple_call_flags (stmt) & ECF_TM_BUILTIN) == 0)
3010 continue;
3012 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)) == BUILT_IN_TM_ABORT)
3014 // If we have a ``__transaction_cancel [[outer]]'', there is only
3015 // one abnormal edge: to the transaction marked OUTER.
3016 // All compiler-generated instances of BUILT_IN_TM_ABORT have a
3017 // constant argument, which we can examine here. Users invoking
3018 // TM_ABORT directly get what they deserve.
3019 tree arg = gimple_call_arg (stmt, 0);
3020 if (TREE_CODE (arg) == INTEGER_CST
3021 && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
3022 && !decl_is_tm_clone (current_function_decl))
3024 // Find the GTMA_IS_OUTER transaction.
3025 for (struct tm_region *o = region; o; o = o->outer)
3026 if (o->original_transaction_was_outer)
3028 split_bb_make_tm_edge (stmt, o->restart_block,
3029 gsi, &next_gsi);
3030 break;
3033 // Otherwise, the front-end should have semantically checked
3034 // outer aborts, but in either case the target region is not
3035 // within this function.
3036 continue;
3039 // Non-outer TM aborts have an abnormal edge to the inner-most
3040 // transaction, the one being aborted.
3041 split_bb_make_tm_edge (stmt, region->restart_block, gsi, &next_gsi);
3044 // All TM builtins have an abnormal edge to the outer-most transaction.
3045 // We never restart inner transactions. For tm clones, we know a priori
3046 // that the outer-most transaction is outside the function.
3047 if (decl_is_tm_clone (current_function_decl))
3048 continue;
3050 if (cfun->gimple_df->tm_restart == NULL)
3051 cfun->gimple_df->tm_restart
3052 = htab_create_ggc (31, struct_ptr_hash, struct_ptr_eq, ggc_free);
3054 // All TM builtins have an abnormal edge to the outer-most transaction.
3055 // We never restart inner transactions.
3056 for (struct tm_region *o = region; o; o = o->outer)
3057 if (!o->outer)
3059 split_bb_make_tm_edge (stmt, o->restart_block, gsi, &next_gsi);
3060 break;
3063 // Delete any tail-call annotation that may have been added.
3064 // The tail-call pass may have mis-identified the commit as being
3065 // a candidate because we had not yet added this restart edge.
3066 gimple_call_set_tail (stmt, false);
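/* For example: for

       __transaction_atomic { if (c) __transaction_cancel; ... }

   the _ITM_abortTransaction call generated for the cancel (the libitm
   name assumed for BUILT_IN_TM_ABORT) gets an abnormal edge back to
   the inner-most transaction's restart_block, while every other TM
   builtin in the region gets an abnormal edge to the outer-most
   transaction's restart_block, modelling restarts performed by the
   runtime.  */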
3070 /* Entry point to the final expansion of transactional nodes. */
3072 static unsigned int
3073 execute_tm_edges (void)
3075 vec<tm_region_p> bb_regions
3076 = get_bb_regions_instrumented (/*traverse_clones=*/false,
3077 /*include_uninstrumented_p=*/true);
3078 struct tm_region *r;
3079 unsigned i;
3081 FOR_EACH_VEC_ELT (bb_regions, i, r)
3082 if (r != NULL)
3083 expand_block_edges (r, BASIC_BLOCK (i));
3085 bb_regions.release ();
3087 /* We've got to release the dominance info now, to indicate that it
3088 must be rebuilt completely. Otherwise we'll crash trying to update
3089 the SSA web in the TODO section following this pass. */
3090 free_dominance_info (CDI_DOMINATORS);
3091 bitmap_obstack_release (&tm_obstack);
3092 all_tm_regions = NULL;
3094 return 0;
3097 struct gimple_opt_pass pass_tm_edges =
3100 GIMPLE_PASS,
3101 "tmedge", /* name */
3102 OPTGROUP_NONE, /* optinfo_flags */
3103 NULL, /* gate */
3104 execute_tm_edges, /* execute */
3105 NULL, /* sub */
3106 NULL, /* next */
3107 0, /* static_pass_number */
3108 TV_TRANS_MEM, /* tv_id */
3109 PROP_ssa | PROP_cfg, /* properties_required */
3110 0, /* properties_provided */
3111 0, /* properties_destroyed */
3112 0, /* todo_flags_start */
3113 TODO_update_ssa
3114 | TODO_verify_ssa, /* todo_flags_finish */
3118 /* Helper function for expand_regions. Expand REGION and recurse to
3119 the inner region. Call CALLBACK on each region. CALLBACK returns
3120 NULL to continue the traversal, otherwise a non-null value which
3121 this function will return as well. TRAVERSE_CLONES is true if we
3122 should traverse transactional clones. */
3124 static void *
3125 expand_regions_1 (struct tm_region *region,
3126 void *(*callback)(struct tm_region *, void *),
3127 void *data,
3128 bool traverse_clones)
3130 void *retval = NULL;
3131 if (region->exit_blocks
3132 || (traverse_clones && decl_is_tm_clone (current_function_decl)))
3134 retval = callback (region, data);
3135 if (retval)
3136 return retval;
3138 if (region->inner)
3140 retval = expand_regions (region->inner, callback, data, traverse_clones);
3141 if (retval)
3142 return retval;
3144 return retval;
3147 /* Traverse the regions enclosed and including REGION. Execute
3148 CALLBACK for each region, passing DATA. CALLBACK returns NULL to
3149 continue the traversal, otherwise a non-null value which this
3150 function will return as well. TRAVERSE_CLONES is true if we should
3151 traverse transactional clones. */
3153 static void *
3154 expand_regions (struct tm_region *region,
3155 void *(*callback)(struct tm_region *, void *),
3156 void *data,
3157 bool traverse_clones)
3159 void *retval = NULL;
3160 while (region)
3162 retval = expand_regions_1 (region, callback, data, traverse_clones);
3163 if (retval)
3164 return retval;
3165 region = region->next;
3167 return retval;
3171 /* A unique TM memory operation. */
3172 typedef struct tm_memop
3174 /* Unique ID that all memory operations to the same location have. */
3175 unsigned int value_id;
3176 /* Address of load/store. */
3177 tree addr;
3178 } *tm_memop_t;
3180 /* TM memory operation hashtable helpers. */
3182 struct tm_memop_hasher : typed_free_remove <tm_memop>
3184 typedef tm_memop value_type;
3185 typedef tm_memop compare_type;
3186 static inline hashval_t hash (const value_type *);
3187 static inline bool equal (const value_type *, const compare_type *);
3190 /* Htab support. Return a hash value for a `tm_memop'. */
3191 inline hashval_t
3192 tm_memop_hasher::hash (const value_type *mem)
3194 tree addr = mem->addr;
3195 /* We drill down to the SSA_NAME/DECL for the hash, but equality is
3196 actually done with operand_equal_p (see tm_memop_hasher::equal). */
3197 if (TREE_CODE (addr) == ADDR_EXPR)
3198 addr = TREE_OPERAND (addr, 0);
3199 return iterative_hash_expr (addr, 0);
3202 /* Htab support. Return true if two tm_memop's are the same. */
3203 inline bool
3204 tm_memop_hasher::equal (const value_type *mem1, const compare_type *mem2)
3206 return operand_equal_p (mem1->addr, mem2->addr, 0);
3209 /* Sets for solving data flow equations in the memory optimization pass. */
3210 struct tm_memopt_bitmaps
3212 /* Stores available to this BB upon entry. Basically, stores that
3213 dominate this BB. */
3214 bitmap store_avail_in;
3215 /* Stores available at the end of this BB. */
3216 bitmap store_avail_out;
3217 bitmap store_antic_in;
3218 bitmap store_antic_out;
3219 /* Reads available to this BB upon entry. Basically, reads that
3220 dominate this BB. */
3221 bitmap read_avail_in;
3222 /* Reads available at the end of this BB. */
3223 bitmap read_avail_out;
3224 /* Reads performed in this BB. */
3225 bitmap read_local;
3226 /* Writes performed in this BB. */
3227 bitmap store_local;
3229 /* Temporary storage for pass. */
3230 /* Is the current BB in the worklist? */
3231 bool avail_in_worklist_p;
3232 /* Have we visited this BB? */
3233 bool visited_p;
3236 static bitmap_obstack tm_memopt_obstack;
3238 /* Unique counter for TM loads and stores. Loads and stores of the
3239 same address get the same ID. */
3240 static unsigned int tm_memopt_value_id;
3241 static hash_table <tm_memop_hasher> tm_memopt_value_numbers;
3243 #define STORE_AVAIL_IN(BB) \
3244 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
3245 #define STORE_AVAIL_OUT(BB) \
3246 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
3247 #define STORE_ANTIC_IN(BB) \
3248 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
3249 #define STORE_ANTIC_OUT(BB) \
3250 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
3251 #define READ_AVAIL_IN(BB) \
3252 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
3253 #define READ_AVAIL_OUT(BB) \
3254 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
3255 #define READ_LOCAL(BB) \
3256 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
3257 #define STORE_LOCAL(BB) \
3258 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
3259 #define AVAIL_IN_WORKLIST_P(BB) \
3260 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
3261 #define BB_VISITED_P(BB) \
3262 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
3264 /* Given a TM load/store in STMT, return the value number for the address
3265 it accesses. */
3267 static unsigned int
3268 tm_memopt_value_number (gimple stmt, enum insert_option op)
3270 struct tm_memop tmpmem, *mem;
3271 tm_memop **slot;
3273 gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
3274 tmpmem.addr = gimple_call_arg (stmt, 0);
3275 slot = tm_memopt_value_numbers.find_slot (&tmpmem, op);
3276 if (*slot)
3277 mem = *slot;
3278 else if (op == INSERT)
3280 mem = XNEW (struct tm_memop);
3281 *slot = mem;
3282 mem->value_id = tm_memopt_value_id++;
3283 mem->addr = tmpmem.addr;
3285 else
3286 gcc_unreachable ();
3287 return mem->value_id;
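/* For example: after the mark phase, the two memory operations in

       _ITM_WU4 (&x, t1);
       t2 = _ITM_RU4 (&x);

   receive the same value number, because their address arguments
   compare equal under operand_equal_p; this is what lets the
   dataflow below relate stores to later loads of the same location.  */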
3290 /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
3292 static void
3293 tm_memopt_accumulate_memops (basic_block bb)
3295 gimple_stmt_iterator gsi;
3297 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3299 gimple stmt = gsi_stmt (gsi);
3300 bitmap bits;
3301 unsigned int loc;
3303 if (is_tm_store (stmt))
3304 bits = STORE_LOCAL (bb);
3305 else if (is_tm_load (stmt))
3306 bits = READ_LOCAL (bb);
3307 else
3308 continue;
3310 loc = tm_memopt_value_number (stmt, INSERT);
3311 bitmap_set_bit (bits, loc);
3312 if (dump_file)
3314 fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
3315 is_tm_load (stmt) ? "LOAD" : "STORE", loc,
3316 gimple_bb (stmt)->index);
3317 print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
3318 fprintf (dump_file, "\n");
3323 /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */
3325 static void
3326 dump_tm_memopt_set (const char *set_name, bitmap bits)
3328 unsigned i;
3329 bitmap_iterator bi;
3330 const char *comma = "";
3332 fprintf (dump_file, "TM memopt: %s: [", set_name);
3333 EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
3335 hash_table <tm_memop_hasher>::iterator hi;
3336 struct tm_memop *mem = NULL;
3338 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
3339 FOR_EACH_HASH_TABLE_ELEMENT (tm_memopt_value_numbers, mem, tm_memop_t, hi)
3340 if (mem->value_id == i)
3341 break;
3342 gcc_assert (mem->value_id == i);
3343 fprintf (dump_file, "%s", comma);
3344 comma = ", ";
3345 print_generic_expr (dump_file, mem->addr, 0);
3347 fprintf (dump_file, "]\n");
3350 /* Prettily dump all of the memopt sets in BLOCKS. */
3352 static void
3353 dump_tm_memopt_sets (vec<basic_block> blocks)
3355 size_t i;
3356 basic_block bb;
3358 for (i = 0; blocks.iterate (i, &bb); ++i)
3360 fprintf (dump_file, "------------BB %d---------\n", bb->index);
3361 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
3362 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
3363 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
3364 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
3365 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
3366 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
3370 /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
3372 static void
3373 tm_memopt_compute_avin (basic_block bb)
3375 edge e;
3376 unsigned ix;
3378 /* Seed with the AVOUT of any predecessor. */
3379 for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
3381 e = EDGE_PRED (bb, ix);
3382 /* Make sure we have already visited this BB, and that it is thus
3383 initialized.
3385 If e->src->aux is NULL, this predecessor is actually on an
3386 enclosing transaction. We only care about the current
3387 transaction, so ignore it. */
3388 if (e->src->aux && BB_VISITED_P (e->src))
3390 bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3391 bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3392 break;
3396 for (; ix < EDGE_COUNT (bb->preds); ix++)
3398 e = EDGE_PRED (bb, ix);
3399 if (e->src->aux && BB_VISITED_P (e->src))
3401 bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3402 bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3406 BB_VISITED_P (bb) = true;
3409 /* Compute the STORE_ANTIC_IN for the basic block BB. */
3411 static void
3412 tm_memopt_compute_antin (basic_block bb)
3414 edge e;
3415 unsigned ix;
3417 /* Seed with the ANTIC_OUT of any successor. */
3418 for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
3420 e = EDGE_SUCC (bb, ix);
3421 /* Make sure we have already visited this BB, and that it is thus
3422 initialized. */
3423 if (BB_VISITED_P (e->dest))
3425 bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3426 break;
3430 for (; ix < EDGE_COUNT (bb->succs); ix++)
3432 e = EDGE_SUCC (bb, ix);
3433 if (BB_VISITED_P (e->dest))
3434 bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3437 BB_VISITED_P (bb) = true;
3440 /* Compute the AVAIL sets for every basic block in BLOCKS.
3442 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3444 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3445 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
3447 This is basically what we do in lcm's compute_available(), but here
3448 we calculate two sets of sets (one for STOREs and one for READs),
3449 and we work on a region instead of the entire CFG.
3451 REGION is the TM region.
3452 BLOCKS are the basic blocks in the region. */
3454 static void
3455 tm_memopt_compute_available (struct tm_region *region,
3456 vec<basic_block> blocks)
3458 edge e;
3459 basic_block *worklist, *qin, *qout, *qend, bb;
3460 unsigned int qlen, i;
3461 edge_iterator ei;
3462 bool changed;
3464 /* Allocate a worklist array/queue. Entries are only added to the
3465 list if they were not already on the list. So the size is
3466 bounded by the number of basic blocks in the region. */
3467 qlen = blocks.length () - 1;
3468 qin = qout = worklist =
3469 XNEWVEC (basic_block, qlen);
3471 /* Put every block in the region on the worklist. */
3472 for (i = 0; blocks.iterate (i, &bb); ++i)
3474 /* Seed AVAIL_OUT with the LOCAL set. */
3475 bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
3476 bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
3478 AVAIL_IN_WORKLIST_P (bb) = true;
3479 /* No need to insert the entry block, since it has an AVIN of
3480 null, and an AVOUT that has already been seeded in. */
3481 if (bb != region->entry_block)
3482 *qin++ = bb;
3485 /* The entry block has been initialized with the local sets. */
3486 BB_VISITED_P (region->entry_block) = true;
3488 qin = worklist;
3489 qend = &worklist[qlen];
3491 /* Iterate until the worklist is empty. */
3492 while (qlen)
3494 /* Take the first entry off the worklist. */
3495 bb = *qout++;
3496 qlen--;
3498 if (qout >= qend)
3499 qout = worklist;
3501 /* This block can be added to the worklist again if necessary. */
3502 AVAIL_IN_WORKLIST_P (bb) = false;
3503 tm_memopt_compute_avin (bb);
3505 /* Note: We do not add the LOCAL sets here because we already
3506 seeded the AVAIL_OUT sets with them. */
3507 changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
3508 changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
3509 if (changed
3510 && (region->exit_blocks == NULL
3511 || !bitmap_bit_p (region->exit_blocks, bb->index)))
3512 /* If the out state of this block changed, then we need to add
3513 its successors to the worklist if they are not already in. */
3514 FOR_EACH_EDGE (e, ei, bb->succs)
3515 if (!AVAIL_IN_WORKLIST_P (e->dest) && e->dest != EXIT_BLOCK_PTR)
3517 *qin++ = e->dest;
3518 AVAIL_IN_WORKLIST_P (e->dest) = true;
3519 qlen++;
3521 if (qin >= qend)
3522 qin = worklist;
3526 free (worklist);
3528 if (dump_file)
3529 dump_tm_memopt_sets (blocks);
3532 /* Compute ANTIC sets for every basic block in BLOCKS.
3534 We compute STORE_ANTIC_OUT as follows:
3536 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
3537 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
3539 REGION is the TM region.
3540 BLOCKS are the basic blocks in the region. */
3542 static void
3543 tm_memopt_compute_antic (struct tm_region *region,
3544 vec<basic_block> blocks)
3546 edge e;
3547 basic_block *worklist, *qin, *qout, *qend, bb;
3548 unsigned int qlen;
3549 int i;
3550 edge_iterator ei;
3552 /* Allocate a worklist array/queue. Entries are only added to the
3553 list if they were not already on the list. So the size is
3554 bounded by the number of basic blocks in the region. */
3555 qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());
3557 for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
3559 bb = blocks[i];
3561 /* Seed ANTIC_OUT with the LOCAL set. */
3562 bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
3564 /* Put every block in the region on the worklist. */
3565 AVAIL_IN_WORKLIST_P (bb) = true;
3566 /* No need to insert exit blocks, since their ANTIC_IN is NULL,
3567 and their ANTIC_OUT has already been seeded in. */
3568 if (region->exit_blocks
3569 && !bitmap_bit_p (region->exit_blocks, bb->index))
3571 qlen++;
3572 *qin++ = bb;
3576 /* The exit blocks have been initialized with the local sets. */
3577 if (region->exit_blocks)
3579 unsigned int i;
3580 bitmap_iterator bi;
3581 EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
3582 BB_VISITED_P (BASIC_BLOCK (i)) = true;
3585 qin = worklist;
3586 qend = &worklist[qlen];
3588 /* Iterate until the worklist is empty. */
3589 while (qlen)
3591 /* Take the first entry off the worklist. */
3592 bb = *qout++;
3593 qlen--;
3595 if (qout >= qend)
3596 qout = worklist;
3598 /* This block can be added to the worklist again if necessary. */
3599 AVAIL_IN_WORKLIST_P (bb) = false;
3600 tm_memopt_compute_antin (bb);
3602 /* Note: We do not add the LOCAL sets here because we already
3603 seeded the ANTIC_OUT sets with them. */
3604 if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
3605 && bb != region->entry_block)
3606 /* If the out state of this block changed, then we need to add
3607 its predecessors to the worklist if they are not already in. */
3608 FOR_EACH_EDGE (e, ei, bb->preds)
3609 if (!AVAIL_IN_WORKLIST_P (e->src))
3611 *qin++ = e->src;
3612 AVAIL_IN_WORKLIST_P (e->src) = true;
3613 qlen++;
3615 if (qin >= qend)
3616 qin = worklist;
3620 free (worklist);
3622 if (dump_file)
3623 dump_tm_memopt_sets (blocks);
3626 /* Offsets of load variants from TM_LOAD. For example,
3627 BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
3628 See gtm-builtins.def. */
3629 #define TRANSFORM_RAR 1
3630 #define TRANSFORM_RAW 2
3631 #define TRANSFORM_RFW 3
3632 /* Offsets of store variants from TM_STORE. */
3633 #define TRANSFORM_WAR 1
3634 #define TRANSFORM_WAW 2
3636 /* Inform about a load/store optimization. */
3638 static void
3639 dump_tm_memopt_transform (gimple stmt)
3641 if (dump_file)
3643 fprintf (dump_file, "TM memopt: transforming: ");
3644 print_gimple_stmt (dump_file, stmt, 0, 0);
3645 fprintf (dump_file, "\n");
3649 /* Perform a read/write optimization. Replaces the TM builtin in STMT
3650 by a builtin that is OFFSET entries down in the builtins table in
3651 gtm-builtins.def. */
3653 static void
3654 tm_memopt_transform_stmt (unsigned int offset,
3655 gimple stmt,
3656 gimple_stmt_iterator *gsi)
3658 tree fn = gimple_call_fn (stmt);
3659 gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
3660 TREE_OPERAND (fn, 0)
3661 = builtin_decl_explicit ((enum built_in_function)
3662 (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
3663 + offset));
3664 gimple_call_set_fn (stmt, fn);
3665 gsi_replace (gsi, stmt, true);
3666 dump_tm_memopt_transform (stmt);
3669 /* Perform the actual TM memory optimization transformations in the
3670 basic blocks in BLOCKS. */
3672 static void
3673 tm_memopt_transform_blocks (vec<basic_block> blocks)
3675 size_t i;
3676 basic_block bb;
3677 gimple_stmt_iterator gsi;
3679 for (i = 0; blocks.iterate (i, &bb); ++i)
3681 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3683 gimple stmt = gsi_stmt (gsi);
3684 bitmap read_avail = READ_AVAIL_IN (bb);
3685 bitmap store_avail = STORE_AVAIL_IN (bb);
3686 bitmap store_antic = STORE_ANTIC_OUT (bb);
3687 unsigned int loc;
3689 if (is_tm_simple_load (stmt))
3691 loc = tm_memopt_value_number (stmt, NO_INSERT);
3692 if (store_avail && bitmap_bit_p (store_avail, loc))
3693 tm_memopt_transform_stmt (TRANSFORM_RAW, stmt, &gsi);
3694 else if (store_antic && bitmap_bit_p (store_antic, loc))
3696 tm_memopt_transform_stmt (TRANSFORM_RFW, stmt, &gsi);
3697 bitmap_set_bit (store_avail, loc);
3699 else if (read_avail && bitmap_bit_p (read_avail, loc))
3700 tm_memopt_transform_stmt (TRANSFORM_RAR, stmt, &gsi);
3701 else
3702 bitmap_set_bit (read_avail, loc);
3704 else if (is_tm_simple_store (stmt))
3706 loc = tm_memopt_value_number (stmt, NO_INSERT);
3707 if (store_avail && bitmap_bit_p (store_avail, loc))
3708 tm_memopt_transform_stmt (TRANSFORM_WAW, stmt, &gsi);
3709 else
3711 if (read_avail && bitmap_bit_p (read_avail, loc))
3712 tm_memopt_transform_stmt (TRANSFORM_WAR, stmt, &gsi);
3713 bitmap_set_bit (store_avail, loc);
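/* For example (a sketch; the _ITM_* names are the libitm entry points
   assumed from gtm-builtins.def): with the sets computed above,

       _ITM_WU4 (&x, t1);     // makes &x store-available
       t2 = _ITM_RU4 (&x);    // read-after-write

   is rewritten into

       _ITM_WU4 (&x, t1);
       t2 = _ITM_RaWU4 (&x);  // TRANSFORM_RAW

   and a load whose location is store-anticipated on every path out of
   the block becomes the read-for-write variant _ITM_RfWU4
   (TRANSFORM_RFW).  */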
3720 /* Return a new set of bitmaps for a BB. */
3722 static struct tm_memopt_bitmaps *
3723 tm_memopt_init_sets (void)
3725 struct tm_memopt_bitmaps *b
3726 = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
3727 b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3728 b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3729 b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
3730 b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
3732 b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3733 b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3734 b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
3735 b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
3736 return b;
3739 /* Free sets computed for each BB. */
3741 static void
3742 tm_memopt_free_sets (vec<basic_block> blocks)
3744 size_t i;
3745 basic_block bb;
3747 for (i = 0; blocks.iterate (i, &bb); ++i)
3748 bb->aux = NULL;
3751 /* Clear the visited bit for every basic block in BLOCKS. */
3753 static void
3754 tm_memopt_clear_visited (vec<basic_block> blocks)
3756 size_t i;
3757 basic_block bb;
3759 for (i = 0; blocks.iterate (i, &bb); ++i)
3760 BB_VISITED_P (bb) = false;
3763 /* Replace TM load/stores with hints for the runtime. We handle
3764 things like read-after-write, write-after-read, read-after-read,
3765 read-for-write, etc. */
3767 static unsigned int
3768 execute_tm_memopt (void)
3770 struct tm_region *region;
3771 vec<basic_block> bbs;
3773 tm_memopt_value_id = 0;
3774 tm_memopt_value_numbers.create (10);
3776 for (region = all_tm_regions; region; region = region->next)
3778 /* All the TM stores/loads in the current region. */
3779 size_t i;
3780 basic_block bb;
3782 bitmap_obstack_initialize (&tm_memopt_obstack);
3784 /* Save all BBs for the current region. */
3785 bbs = get_tm_region_blocks (region->entry_block,
3786 region->exit_blocks,
3787 region->irr_blocks,
3788 NULL,
3789 false);
3791 /* Collect all the memory operations. */
3792 for (i = 0; bbs.iterate (i, &bb); ++i)
3794 bb->aux = tm_memopt_init_sets ();
3795 tm_memopt_accumulate_memops (bb);
3798 /* Solve data flow equations and transform each block accordingly. */
3799 tm_memopt_clear_visited (bbs);
3800 tm_memopt_compute_available (region, bbs);
3801 tm_memopt_clear_visited (bbs);
3802 tm_memopt_compute_antic (region, bbs);
3803 tm_memopt_transform_blocks (bbs);
3805 tm_memopt_free_sets (bbs);
3806 bbs.release ();
3807 bitmap_obstack_release (&tm_memopt_obstack);
3808 tm_memopt_value_numbers.empty ();
3811 tm_memopt_value_numbers.dispose ();
3812 return 0;
3815 static bool
3816 gate_tm_memopt (void)
3818 return flag_tm && optimize > 0;
3821 struct gimple_opt_pass pass_tm_memopt =
3824 GIMPLE_PASS,
3825 "tmmemopt", /* name */
3826 OPTGROUP_NONE, /* optinfo_flags */
3827 gate_tm_memopt, /* gate */
3828 execute_tm_memopt, /* execute */
3829 NULL, /* sub */
3830 NULL, /* next */
3831 0, /* static_pass_number */
3832 TV_TRANS_MEM, /* tv_id */
3833 PROP_ssa | PROP_cfg, /* properties_required */
3834 0, /* properties_provided */
3835 0, /* properties_destroyed */
3836 0, /* todo_flags_start */
3837 0, /* todo_flags_finish */
3842 /* Interprocedural analysis for the creation of transactional clones.
3843 The aim of this pass is to find which functions are referenced in
3844 a non-irrevocable transaction context, and for those over which
3845 we have control (or user directive), create a version of the
3846 function which uses only the transactional interface to reference
3847 protected memories. This analysis proceeds in several steps:
3849 (1) Collect the set of all possible transactional clones:
3851 (a) For all local public functions marked tm_callable, push
3852 them onto the tm_callee queue.
3854 (b) For all local functions, scan for calls in transaction blocks.
3855 Push the caller and callee onto the tm_caller and tm_callee
3856 queues. Count the number of callers for each callee.
3858 (c) For each local function on the callee list, assume we will
3859 create a transactional clone. Push *all* calls onto the
3860 callee queues; count the number of clone callers separately
3861 from the number of original callers.
3863 (2) Propagate irrevocable status up the dominator tree:
3865 (a) Any external function on the callee list that is not marked
3866 tm_callable is irrevocable. Push all callers of such onto
3867 a worklist.
3869 (b) For each function on the worklist, mark each block that
3870 contains an irrevocable call. Use the AND operator to
3871 propagate that mark up the dominator tree.
3873 (c) If we reach the entry block for a possible transactional
3874 clone, then the transactional clone is irrevocable, and
3875 we should not create the clone after all. Push all
3876 callers onto the worklist.
3878 (d) Place tm_irrevocable calls at the beginning of the relevant
3879 blocks. Special case here is the entry block for the entire
3880 transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
3881 the library to begin the region in serial mode. Decrement
3882 the call count for all callees in the irrevocable region.
3884 (3) Create the transactional clones:
3886 Any tm_callee that still has a non-zero call count is cloned.
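/* For example (a sketch; the _ZGTt mangling prefix is assumed from the
   Intel TM ABI): given

       __attribute__((transaction_safe)) int f (int);

   calls to f inside a transaction are redirected to its transactional
   clone (e.g. _ZGTt1f), while an external callee that is neither
   transaction_safe nor transaction_callable forces the region
   irrevocable per step (2a) above.  */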
3889 /* This structure is stored in the AUX field of each cgraph_node. */
3890 struct tm_ipa_cg_data
3892 /* The clone of the function that got created. */
3893 struct cgraph_node *clone;
3895 /* The tm regions in the normal function. */
3896 struct tm_region *all_tm_regions;
3898 /* The blocks of the normal/clone functions that contain irrevocable
3899 calls, or blocks that are post-dominated by irrevocable calls. */
3900 bitmap irrevocable_blocks_normal;
3901 bitmap irrevocable_blocks_clone;
3903 /* The blocks of the normal function that are involved in transactions. */
3904 bitmap transaction_blocks_normal;
3906 /* The number of callers to the transactional clone of this function
3907 from normal functions and from transactional clones, respectively. */
3908 unsigned tm_callers_normal;
3909 unsigned tm_callers_clone;
3911 /* True if all calls to this function's transactional clone
3912 are irrevocable. Also automatically true if the function
3913 has no transactional clone. */
3914 bool is_irrevocable;
3916 /* Flags indicating the presence of this function in various queues. */
3917 bool in_callee_queue;
3918 bool in_worklist;
3920 /* Flags indicating the kind of scan desired while in the worklist. */
3921 bool want_irr_scan_normal;
3924 typedef vec<cgraph_node_ptr> cgraph_node_queue;
3926 /* Return the IPA data associated with NODE, allocating zeroed memory
3927 if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
3928 and set *NODE accordingly. */
3930 static struct tm_ipa_cg_data *
3931 get_cg_data (struct cgraph_node **node, bool traverse_aliases)
3933 struct tm_ipa_cg_data *d;
3935 if (traverse_aliases && (*node)->symbol.alias)
3936 *node = cgraph_alias_target (*node);
3938 d = (struct tm_ipa_cg_data *) (*node)->symbol.aux;
3940 if (d == NULL)
3942 d = (struct tm_ipa_cg_data *)
3943 obstack_alloc (&tm_obstack.obstack, sizeof (*d));
3944 (*node)->symbol.aux = (void *) d;
3945 memset (d, 0, sizeof (*d));
3948 return d;
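/* A typical use, mirroring the callers later in this file; note that
   with TRAVERSE_ALIASES set, NODE may be rewritten to the alias target:

     struct cgraph_node *node = cgraph_get_node (fndecl);
     struct tm_ipa_cg_data *d = get_cg_data (&node, true);
     d->tm_callers_normal += 1;
*/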
3951 /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
3952 it is already present. */
3954 static void
3955 maybe_push_queue (struct cgraph_node *node,
3956 cgraph_node_queue *queue_p, bool *in_queue_p)
3958 if (!*in_queue_p)
3960 *in_queue_p = true;
3961 queue_p->safe_push (node);
3965 /* Duplicate the basic blocks in QUEUE for use in the uninstrumented
3966 code path. QUEUE holds the basic blocks inside the transaction
3967 represented by REGION.
3969 Later in split_code_paths() we will add the conditional to choose
3970 between the two alternatives. */
3972 static void
3973 ipa_uninstrument_transaction (struct tm_region *region,
3974 vec<basic_block> queue)
3976 gimple transaction = region->transaction_stmt;
3977 basic_block transaction_bb = gimple_bb (transaction);
3978 int n = queue.length ();
3979 basic_block *new_bbs = XNEWVEC (basic_block, n);
3981 copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb,
3982 true);
3983 edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED);
3984 add_phi_args_after_copy (new_bbs, n, e);
3986 // Now we will have a GIMPLE_TRANSACTION with 3 possible edges out of it:
3987 // a) EDGE_FALLTHRU into the transaction
3988 // b) EDGE_TM_ABORT out of the transaction
3989 // c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks.
3991 free (new_bbs);
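/* The resulting CFG shape, roughly (a sketch, not actual dump output):

                  GIMPLE_TRANSACTION
                 /         |         \
         FALLTHRU      TM_ABORT    TM_UNINSTRUMENTED
            |                           |
     instrumented blocks        uninstrumented copy

   split_code_paths() later supplies the runtime conditional that
   selects which of the two copies executes.  */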
3994 /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
3995 Queue all callees within block BB. */
3997 static void
3998 ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
3999 basic_block bb, bool for_clone)
4001 gimple_stmt_iterator gsi;
4003 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4005 gimple stmt = gsi_stmt (gsi);
4006 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4008 tree fndecl = gimple_call_fndecl (stmt);
4009 if (fndecl)
4011 struct tm_ipa_cg_data *d;
4012 unsigned *pcallers;
4013 struct cgraph_node *node;
4015 if (is_tm_ending_fndecl (fndecl))
4016 continue;
4017 if (find_tm_replacement_function (fndecl))
4018 continue;
4020 node = cgraph_get_node (fndecl);
4021 gcc_assert (node != NULL);
4022 d = get_cg_data (&node, true);
4024 pcallers = (for_clone ? &d->tm_callers_clone
4025 : &d->tm_callers_normal);
4026 *pcallers += 1;
4028 maybe_push_queue (node, callees_p, &d->in_callee_queue);
4034 /* Scan all calls in NODE that are within a transaction region,
4035 and push the resulting nodes into the callee queue. */
4037 static void
4038 ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
4039 cgraph_node_queue *callees_p)
4041 struct tm_region *r;
4043 d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
4044 d->all_tm_regions = all_tm_regions;
4046 for (r = all_tm_regions; r; r = r->next)
4048 vec<basic_block> bbs;
4049 basic_block bb;
4050 unsigned i;
4052 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
4053 d->transaction_blocks_normal, false);
4055 // Generate the uninstrumented code path for this transaction.
4056 ipa_uninstrument_transaction (r, bbs);
4058 FOR_EACH_VEC_ELT (bbs, i, bb)
4059 ipa_tm_scan_calls_block (callees_p, bb, false);
4061 bbs.release ();
4064 // ??? copy_bbs should maintain cgraph edges for the blocks as it is
4065 // copying them, rather than forcing us to do this externally.
4066 rebuild_cgraph_edges ();
4068 // ??? In ipa_uninstrument_transaction we don't try to update dominators
4069 // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects.
4070 // Instead, just release dominators here so update_ssa recomputes them.
4071 free_dominance_info (CDI_DOMINATORS);
4073 // When building the uninstrumented code path, copy_bbs will have invoked
4074 // create_new_def_for starting an "ssa update context". There is only one
4075 // instance of this context, so resolve ssa updates before moving on to
4076 // the next function.
4077 update_ssa (TODO_update_ssa);
4080 /* Scan all calls in NODE as if this is the transactional clone,
4081 and push the destinations into the callee queue. */
4083 static void
4084 ipa_tm_scan_calls_clone (struct cgraph_node *node,
4085 cgraph_node_queue *callees_p)
4087 struct function *fn = DECL_STRUCT_FUNCTION (node->symbol.decl);
4088 basic_block bb;
4090 FOR_EACH_BB_FN (bb, fn)
4091 ipa_tm_scan_calls_block (callees_p, bb, true);
4094 /* The function NODE has been detected to be irrevocable. Push all
4095 of its callers onto WORKLIST for the purpose of re-scanning them. */
4097 static void
4098 ipa_tm_note_irrevocable (struct cgraph_node *node,
4099 cgraph_node_queue *worklist_p)
4101 struct tm_ipa_cg_data *d = get_cg_data (&node, true);
4102 struct cgraph_edge *e;
4104 d->is_irrevocable = true;
4106 for (e = node->callers; e ; e = e->next_caller)
4108 basic_block bb;
4109 struct cgraph_node *caller;
4111 /* Don't examine recursive calls. */
4112 if (e->caller == node)
4113 continue;
4114 /* Even if we think we can go irrevocable, believe the user
4115 above all. */
4116 if (is_tm_safe_or_pure (e->caller->symbol.decl))
4117 continue;
4119 caller = e->caller;
4120 d = get_cg_data (&caller, true);
4122 /* Check if the callee is in a transactional region. If so,
4123 schedule the function for normal re-scan as well. */
4124 bb = gimple_bb (e->call_stmt);
4125 gcc_assert (bb != NULL);
4126 if (d->transaction_blocks_normal
4127 && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
4128 d->want_irr_scan_normal = true;
4130 maybe_push_queue (caller, worklist_p, &d->in_worklist);
4134 /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
4135 within the block is irrevocable. */
4137 static bool
4138 ipa_tm_scan_irr_block (basic_block bb)
4140 gimple_stmt_iterator gsi;
4141 tree fn;
4143 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4145 gimple stmt = gsi_stmt (gsi);
4146 switch (gimple_code (stmt))
4148 case GIMPLE_ASSIGN:
4149 if (gimple_assign_single_p (stmt))
4151 tree lhs = gimple_assign_lhs (stmt);
4152 tree rhs = gimple_assign_rhs1 (stmt);
4153 if (volatile_var_p (lhs) || volatile_var_p (rhs))
4154 return true;
4156 break;
4158 case GIMPLE_CALL:
4160 tree lhs = gimple_call_lhs (stmt);
4161 if (lhs && volatile_var_p (lhs))
4162 return true;
4164 if (is_tm_pure_call (stmt))
4165 break;
4167 fn = gimple_call_fn (stmt);
4169 /* Functions with the transaction_unsafe attribute are by definition irrevocable. */
4170 if (is_tm_irrevocable (fn))
4171 return true;
4173 /* For direct function calls, go ahead and check for replacement
4174 functions, or transitive irrevocable functions. For indirect
4175 functions, we'll ask the runtime. */
4176 if (TREE_CODE (fn) == ADDR_EXPR)
4178 struct tm_ipa_cg_data *d;
4179 struct cgraph_node *node;
4181 fn = TREE_OPERAND (fn, 0);
4182 if (is_tm_ending_fndecl (fn))
4183 break;
4184 if (find_tm_replacement_function (fn))
4185 break;
4187 node = cgraph_get_node (fn);
4188 d = get_cg_data (&node, true);
4190 /* Return true if irrevocable, but above all, believe
4191 the user. */
4192 if (d->is_irrevocable
4193 && !is_tm_safe_or_pure (fn))
4194 return true;
4196 break;
4199 case GIMPLE_ASM:
4200 /* ??? The Approved Method of indicating that an inline
4201 assembly statement is not relevant to the transaction
4202 is to wrap it in a __tm_waiver block. This is not
4203 yet implemented, so we can't check for it. */
4204 if (is_tm_safe (current_function_decl))
4206 tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
4207 SET_EXPR_LOCATION (t, gimple_location (stmt));
4208 error ("%Kasm not allowed in %<transaction_safe%> function", t);
4210 return true;
4212 default:
4213 break;
4217 return false;
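/* Illustrative cases (hypothetical user code) that the scan above flags
   as irrevocable:

     volatile int v;
     __transaction_relaxed { v = 1; }         // volatile store
     __transaction_relaxed { asm (""); }      // inline assembly
     __transaction_relaxed { unsafe_fn (); }  // call to a transaction_unsafe function

   whereas tm_pure calls and TM-ending builtins pass the scan.  */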
4220 /* For each of the blocks seeded within PQUEUE, walk the CFG looking
4221 for new irrevocable blocks, marking them in NEW_IRR. Don't bother
4222 scanning past OLD_IRR or EXIT_BLOCKS. */
4224 static bool
4225 ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
4226 bitmap old_irr, bitmap exit_blocks)
4228 bool any_new_irr = false;
4229 edge e;
4230 edge_iterator ei;
4231 bitmap visited_blocks = BITMAP_ALLOC (NULL);
4235 basic_block bb = pqueue->pop ();
4237 /* Don't re-scan blocks we know already are irrevocable. */
4238 if (old_irr && bitmap_bit_p (old_irr, bb->index))
4239 continue;
4241 if (ipa_tm_scan_irr_block (bb))
4243 bitmap_set_bit (new_irr, bb->index);
4244 any_new_irr = true;
4246 else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
4248 FOR_EACH_EDGE (e, ei, bb->succs)
4249 if (!bitmap_bit_p (visited_blocks, e->dest->index))
4251 bitmap_set_bit (visited_blocks, e->dest->index);
4252 pqueue->safe_push (e->dest);
4256 while (!pqueue->is_empty ());
4258 BITMAP_FREE (visited_blocks);
4260 return any_new_irr;
4263 /* Propagate the irrevocable property both up and down the dominator tree.
4264 ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
4265 is the bitmap of the TM region's exit blocks; OLD_IRR holds the results
4266 of a previous, fully propagated scan of the dominator tree; NEW_IRR is
4267 the set of new blocks gaining the irrevocable property during this scan. */
4269 static void
4270 ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
4271 bitmap old_irr, bitmap exit_blocks)
4273 vec<basic_block> bbs;
4274 bitmap all_region_blocks;
4276 /* If this block is in the old set, no need to rescan. */
4277 if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
4278 return;
4280 all_region_blocks = BITMAP_ALLOC (&tm_obstack);
4281 bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
4282 all_region_blocks, false);
4285 basic_block bb = bbs.pop ();
4286 bool this_irr = bitmap_bit_p (new_irr, bb->index);
4287 bool all_son_irr = false;
4288 edge_iterator ei;
4289 edge e;
4291 /* Propagate up. If all of my immediate successors are irrevocable,
4292 then so am I, provided there is at least one of them. */
4293 if (!this_irr)
4295 FOR_EACH_EDGE (e, ei, bb->succs)
4297 if (!bitmap_bit_p (new_irr, e->dest->index))
4299 all_son_irr = false;
4300 break;
4302 else
4303 all_son_irr = true;
4305 if (all_son_irr)
4307 /* Add block to new_irr if it hasn't already been processed. */
4308 if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
4310 bitmap_set_bit (new_irr, bb->index);
4311 this_irr = true;
4316 /* Propagate down to everyone we immediately dominate. */
4317 if (this_irr)
4319 basic_block son;
4320 for (son = first_dom_son (CDI_DOMINATORS, bb);
4321 son;
4322 son = next_dom_son (CDI_DOMINATORS, son))
4324 /* Make sure block is actually in a TM region, and it
4325 isn't already in old_irr. */
4326 if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
4327 && bitmap_bit_p (all_region_blocks, son->index))
4328 bitmap_set_bit (new_irr, son->index);
4332 while (!bbs.is_empty ());
4334 BITMAP_FREE (all_region_blocks);
4335 bbs.release ();
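/* A small example of the propagation (hypothetical region):

            B1
           /  \
         B2    B3
           \  /
            B4

   If B2 and B3 are both marked irrevocable, the upward step marks B1
   too (all of its successors are irrevocable); the downward step then
   marks B4 and anything else B1 dominates inside the region.  */

/* Subtract one from the recorded number of callers to the transactional
   clone for every function called from BB, mirroring the counting done
   in ipa_tm_scan_calls_block.  FOR_CLONE selects which counter.  */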
4338 static void
4339 ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
4341 gimple_stmt_iterator gsi;
4343 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4345 gimple stmt = gsi_stmt (gsi);
4346 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4348 tree fndecl = gimple_call_fndecl (stmt);
4349 if (fndecl)
4351 struct tm_ipa_cg_data *d;
4352 unsigned *pcallers;
4353 struct cgraph_node *tnode;
4355 if (is_tm_ending_fndecl (fndecl))
4356 continue;
4357 if (find_tm_replacement_function (fndecl))
4358 continue;
4360 tnode = cgraph_get_node (fndecl);
4361 d = get_cg_data (&tnode, true);
4363 pcallers = (for_clone ? &d->tm_callers_clone
4364 : &d->tm_callers_normal);
4366 gcc_assert (*pcallers > 0);
4367 *pcallers -= 1;
4373 /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
4374 as well as other irrevocable actions such as inline assembly. Mark all
4375 such blocks as irrevocable and decrement the number of calls to
4376 transactional clones. Return true if, for the transactional clone, the
4377 entire function is irrevocable. */
4379 static bool
4380 ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
4382 struct tm_ipa_cg_data *d;
4383 bitmap new_irr, old_irr;
4384 vec<basic_block> queue;
4385 bool ret = false;
4387 /* Builtin operators (operator new, and such). */
4388 if (DECL_STRUCT_FUNCTION (node->symbol.decl) == NULL
4389 || DECL_STRUCT_FUNCTION (node->symbol.decl)->cfg == NULL)
4390 return false;
4392 push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));
4393 calculate_dominance_info (CDI_DOMINATORS);
4395 d = get_cg_data (&node, true);
4396 queue.create (10);
4397 new_irr = BITMAP_ALLOC (&tm_obstack);
4399 /* Scan each tm region, propagating irrevocable status through the tree. */
4400 if (for_clone)
4402 old_irr = d->irrevocable_blocks_clone;
4403 queue.quick_push (single_succ (ENTRY_BLOCK_PTR));
4404 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
4406 ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR), new_irr,
4407 old_irr, NULL);
4408 ret = bitmap_bit_p (new_irr, single_succ (ENTRY_BLOCK_PTR)->index);
4411 else
4413 struct tm_region *region;
4415 old_irr = d->irrevocable_blocks_normal;
4416 for (region = d->all_tm_regions; region; region = region->next)
4418 queue.quick_push (region->entry_block);
4419 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
4420 region->exit_blocks))
4421 ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
4422 region->exit_blocks);
4426 /* If we found any new irrevocable blocks, reduce the call count for
4427 transactional clones within the irrevocable blocks. Save the new
4428 set of irrevocable blocks for next time. */
4429 if (!bitmap_empty_p (new_irr))
4431 bitmap_iterator bmi;
4432 unsigned i;
4434 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4435 ipa_tm_decrement_clone_counts (BASIC_BLOCK (i), for_clone);
4437 if (old_irr)
4439 bitmap_ior_into (old_irr, new_irr);
4440 BITMAP_FREE (new_irr);
4442 else if (for_clone)
4443 d->irrevocable_blocks_clone = new_irr;
4444 else
4445 d->irrevocable_blocks_normal = new_irr;
4447 if (dump_file && new_irr)
4449 const char *dname;
4450 bitmap_iterator bmi;
4451 unsigned i;
4453 dname = lang_hooks.decl_printable_name (current_function_decl, 2);
4454 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4455 fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
4458 else
4459 BITMAP_FREE (new_irr);
4461 queue.release ();
4462 pop_cfun ();
4464 return ret;
4467 /* Return true if, for the transactional clone of NODE, any call
4468 may enter irrevocable mode. */
4470 static bool
4471 ipa_tm_mayenterirr_function (struct cgraph_node *node)
4473 struct tm_ipa_cg_data *d;
4474 tree decl;
4475 unsigned flags;
4477 d = get_cg_data (&node, true);
4478 decl = node->symbol.decl;
4479 flags = flags_from_decl_or_type (decl);
4481 /* Handle some TM builtins. Ordinarily these aren't actually generated
4482 at this point, but handling these functions when written directly by the
4483 user makes it easier to build unit tests. */
4484 if (flags & ECF_TM_BUILTIN)
4485 return false;
4487 /* Filter out all functions that are marked. */
4488 if (flags & ECF_TM_PURE)
4489 return false;
4490 if (is_tm_safe (decl))
4491 return false;
4492 if (is_tm_irrevocable (decl))
4493 return true;
4494 if (is_tm_callable (decl))
4495 return true;
4496 if (find_tm_replacement_function (decl))
4497 return true;
4499 /* If we aren't seeing the final version of the function we don't
4500 know what it will contain at runtime. */
4501 if (cgraph_function_body_availability (node) < AVAIL_AVAILABLE)
4502 return true;
4504 /* If the function must go irrevocable, then of course true. */
4505 if (d->is_irrevocable)
4506 return true;
4508 /* If there are any blocks marked irrevocable, then the function
4509 as a whole may enter irrevocable. */
4510 if (d->irrevocable_blocks_clone)
4511 return true;
4513 /* We may have previously marked this function as tm_may_enter_irr;
4514 see pass_diagnose_tm_blocks. */
4515 if (node->local.tm_may_enter_irr)
4516 return true;
4518 /* Recurse on the main body for aliases. In general, this will
4519 result in one of the bits above being set so that we will not
4520 have to recurse next time. */
4521 if (node->symbol.alias)
4522 return ipa_tm_mayenterirr_function (cgraph_get_node (node->thunk.alias));
4524 /* What remains are unmarked local functions without anything that
4525 forces the function to go irrevocable. */
4526 return false;
4529 /* Diagnose calls from transaction_safe functions to unmarked
4530 functions that are determined not to be safe. */
4532 static void
4533 ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
4535 struct cgraph_edge *e;
4537 for (e = node->callees; e ; e = e->next_callee)
4538 if (!is_tm_callable (e->callee->symbol.decl)
4539 && e->callee->local.tm_may_enter_irr)
4540 error_at (gimple_location (e->call_stmt),
4541 "unsafe function call %qD within "
4542 "%<transaction_safe%> function", e->callee->symbol.decl);
4545 /* Diagnose calls from atomic transactions to unmarked functions
4546 that are determined not to be safe. */
4548 static void
4549 ipa_tm_diagnose_transaction (struct cgraph_node *node,
4550 struct tm_region *all_tm_regions)
4552 struct tm_region *r;
4554 for (r = all_tm_regions; r ; r = r->next)
4555 if (gimple_transaction_subcode (r->transaction_stmt) & GTMA_IS_RELAXED)
4557 /* Atomic transactions can be nested inside relaxed. */
4558 if (r->inner)
4559 ipa_tm_diagnose_transaction (node, r->inner);
4561 else
4563 vec<basic_block> bbs;
4564 gimple_stmt_iterator gsi;
4565 basic_block bb;
4566 size_t i;
4568 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
4569 r->irr_blocks, NULL, false);
4571 for (i = 0; bbs.iterate (i, &bb); ++i)
4572 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4574 gimple stmt = gsi_stmt (gsi);
4575 tree fndecl;
4577 if (gimple_code (stmt) == GIMPLE_ASM)
4579 error_at (gimple_location (stmt),
4580 "asm not allowed in atomic transaction");
4581 continue;
4584 if (!is_gimple_call (stmt))
4585 continue;
4586 fndecl = gimple_call_fndecl (stmt);
4588 /* Indirect function calls have been diagnosed already. */
4589 if (!fndecl)
4590 continue;
4592 /* Stop at the end of the transaction. */
4593 if (is_tm_ending_fndecl (fndecl))
4595 if (bitmap_bit_p (r->exit_blocks, bb->index))
4596 break;
4597 continue;
4600 /* Marked functions have been diagnosed already. */
4601 if (is_tm_pure_call (stmt))
4602 continue;
4603 if (is_tm_callable (fndecl))
4604 continue;
4606 if (cgraph_local_info (fndecl)->tm_may_enter_irr)
4607 error_at (gimple_location (stmt),
4608 "unsafe function call %qD within "
4609 "atomic transaction", fndecl);
4612 bbs.release ();
4616 /* Return a transactionally mangled name for the identifier OLD_ASM_ID,
4617 normally a DECL_ASSEMBLER_NAME. The returned value is a GC-managed
4618 IDENTIFIER_NODE; the caller need not free anything. */
4620 static tree
4621 tm_mangle (tree old_asm_id)
4623 const char *old_asm_name;
4624 char *tm_name;
4625 void *alloc = NULL;
4626 struct demangle_component *dc;
4627 tree new_asm_id;
4629 /* Determine if the symbol is already a valid C++ mangled name. Do this
4630 even for C, which might be interfacing with C++ code via appropriately
4631 ugly identifiers. */
4632 /* ??? We could probably do just as well checking for "_Z" and be done. */
4633 old_asm_name = IDENTIFIER_POINTER (old_asm_id);
4634 dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
4636 if (dc == NULL)
4638 char length[8];
4640 do_unencoded:
4641 sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
4642 tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
4644 else
4646 old_asm_name += 2; /* Skip _Z */
4648 switch (dc->type)
4650 case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
4651 case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
4652 /* Don't play silly games, you! */
4653 goto do_unencoded;
4655 case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
4656 /* I'd really like to know if we can ever be passed one of
4657 these from the C++ front end. The Logical Thing would seem
4658 to be that hidden-alias should be outermost, so that we
4659 get hidden-alias of a transaction-clone and not vice-versa. */
4660 old_asm_name += 2;
4661 break;
4663 default:
4664 break;
4667 tm_name = concat ("_ZGTt", old_asm_name, NULL);
4669 free (alloc);
4671 new_asm_id = get_identifier (tm_name);
4672 free (tm_name);
4674 return new_asm_id;
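/* For illustration: a plain C symbol "foo" (length 3) takes the
   unencoded path and yields "_ZGTt3foo", while the C++ symbol
   "_Z3foov" for foo() has its "_Z" stripped and yields "_ZGTt3foov",
   the standard transactional-clone mangling.  */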
4677 static inline void
4678 ipa_tm_mark_force_output_node (struct cgraph_node *node)
4680 cgraph_mark_force_output_node (node);
4681 node->symbol.analyzed = true;
4684 static inline void
4685 ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
4687 node->symbol.forced_by_abi = true;
4688 node->symbol.analyzed = true;
4691 /* Callback data for ipa_tm_create_version_alias. */
4692 struct create_version_alias_info
4694 struct cgraph_node *old_node;
4695 tree new_decl;
4698 /* A subroutine of ipa_tm_create_version, called via
4699 cgraph_for_node_and_aliases. Create new tm clones for each of
4700 the existing aliases. */
4701 static bool
4702 ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
4704 struct create_version_alias_info *info
4705 = (struct create_version_alias_info *)data;
4706 tree old_decl, new_decl, tm_name;
4707 struct cgraph_node *new_node;
4709 if (!node->symbol.cpp_implicit_alias)
4710 return false;
4712 old_decl = node->symbol.decl;
4713 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4714 new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
4715 TREE_CODE (old_decl), tm_name,
4716 TREE_TYPE (old_decl));
4718 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4719 SET_DECL_RTL (new_decl, NULL);
4721 /* Based loosely on C++'s make_alias_for(). */
4722 TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
4723 DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
4724 DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
4725 TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
4726 DECL_EXTERNAL (new_decl) = 0;
4727 DECL_ARTIFICIAL (new_decl) = 1;
4728 TREE_ADDRESSABLE (new_decl) = 1;
4729 TREE_USED (new_decl) = 1;
4730 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4732 /* Perform the same remapping to the comdat group. */
4733 if (DECL_ONE_ONLY (new_decl))
4734 DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
4736 new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl);
4737 new_node->tm_clone = true;
4738 new_node->symbol.externally_visible = info->old_node->symbol.externally_visible;
4739 /* ?? Do not traverse aliases here. */
4740 get_cg_data (&node, false)->clone = new_node;
4742 record_tm_clone_pair (old_decl, new_decl);
4744 if (info->old_node->symbol.force_output
4745 || ipa_ref_list_first_referring (&info->old_node->symbol.ref_list))
4746 ipa_tm_mark_force_output_node (new_node);
4747 if (info->old_node->symbol.forced_by_abi)
4748 ipa_tm_mark_forced_by_abi_node (new_node);
4749 return false;
4752 /* Create a copy of the function (possibly declaration only) of OLD_NODE,
4753 appropriate for the transactional clone. */
4755 static void
4756 ipa_tm_create_version (struct cgraph_node *old_node)
4758 tree new_decl, old_decl, tm_name;
4759 struct cgraph_node *new_node;
4761 old_decl = old_node->symbol.decl;
4762 new_decl = copy_node (old_decl);
4764 /* DECL_ASSEMBLER_NAME needs to be set before we call
4765 cgraph_copy_node_for_versioning below, because cgraph_node will
4766 fill the assembler_name_hash. */
4767 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4768 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4769 SET_DECL_RTL (new_decl, NULL);
4770 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4772 /* Perform the same remapping to the comdat group. */
4773 if (DECL_ONE_ONLY (new_decl))
4774 DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
4776 new_node = cgraph_copy_node_for_versioning (old_node, new_decl, vNULL, NULL);
4777 new_node->symbol.externally_visible = old_node->symbol.externally_visible;
4778 new_node->lowered = true;
4779 new_node->tm_clone = 1;
4780 get_cg_data (&old_node, true)->clone = new_node;
4782 if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE)
4784 /* Remap extern inline to static inline. */
4785 /* ??? Is it worth trying to use make_decl_one_only? */
4786 if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
4788 DECL_EXTERNAL (new_decl) = 0;
4789 TREE_PUBLIC (new_decl) = 0;
4790 DECL_WEAK (new_decl) = 0;
4793 tree_function_versioning (old_decl, new_decl,
4794 NULL, false, NULL,
4795 false, NULL, NULL);
4798 record_tm_clone_pair (old_decl, new_decl);
4800 cgraph_call_function_insertion_hooks (new_node);
4801 if (old_node->symbol.force_output
4802 || ipa_ref_list_first_referring (&old_node->symbol.ref_list))
4803 ipa_tm_mark_force_output_node (new_node);
4804 if (old_node->symbol.forced_by_abi)
4805 ipa_tm_mark_forced_by_abi_node (new_node);
4807 /* Do the same thing, but for any aliases of the original node. */
4809 struct create_version_alias_info data;
4810 data.old_node = old_node;
4811 data.new_decl = new_decl;
4812 cgraph_for_node_and_aliases (old_node, ipa_tm_create_version_alias,
4813 &data, true);
4817 /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */
4819 static void
4820 ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
4821 basic_block bb)
4823 gimple_stmt_iterator gsi;
4824 gimple g;
4826 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
4828 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
4829 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
4831 split_block_after_labels (bb);
4832 gsi = gsi_after_labels (bb);
4833 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
4835 cgraph_create_edge (node,
4836 cgraph_get_create_node
4837 (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
4838 g, 0,
4839 compute_call_stmt_bb_frequency (node->symbol.decl,
4840 gimple_bb (g)));
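/* Sketch of the inserted code (GIMPLE-like; assuming the builtin
   expands to libitm's _ITM_changeTransactionMode entry point):

     __builtin__ITM_changeTransactionMode (MODE_SERIALIRREVOCABLE);
     <original first real statement of BB>

   The block is split after its labels so the call precedes any real
   statements.  */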
4843 /* Construct a call to TM_GETTMCLONE and insert it before GSI. */
4845 static bool
4846 ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
4847 struct tm_region *region,
4848 gimple_stmt_iterator *gsi, gimple stmt)
4850 tree gettm_fn, ret, old_fn, callfn;
4851 gimple g, g2;
4852 bool safe;
4854 old_fn = gimple_call_fn (stmt);
4856 if (TREE_CODE (old_fn) == ADDR_EXPR)
4858 tree fndecl = TREE_OPERAND (old_fn, 0);
4859 tree clone = get_tm_clone_pair (fndecl);
4861 /* By transforming the call into a TM_GETTMCLONE, we are
4862 technically taking the address of the original function and
4863 its clone. Record this so the inliner knows these functions
4864 are needed. */
4865 cgraph_mark_address_taken_node (cgraph_get_node (fndecl));
4866 if (clone)
4867 cgraph_mark_address_taken_node (cgraph_get_node (clone));
4870 safe = is_tm_safe (TREE_TYPE (old_fn));
4871 gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
4872 : BUILT_IN_TM_GETTMCLONE_IRR);
4873 ret = create_tmp_var (ptr_type_node, NULL);
4875 if (!safe)
4876 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
4878 /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */
4879 if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
4880 old_fn = OBJ_TYPE_REF_EXPR (old_fn);
4882 g = gimple_build_call (gettm_fn, 1, old_fn);
4883 ret = make_ssa_name (ret, g);
4884 gimple_call_set_lhs (g, ret);
4886 gsi_insert_before (gsi, g, GSI_SAME_STMT);
4888 cgraph_create_edge (node, cgraph_get_create_node (gettm_fn), g, 0,
4889 compute_call_stmt_bb_frequency (node->symbol.decl,
4890 gimple_bb (g)));
4892 /* Cast return value from tm_gettmclone* into appropriate function
4893 pointer. */
4894 callfn = create_tmp_var (TREE_TYPE (old_fn), NULL);
4895 g2 = gimple_build_assign (callfn,
4896 fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
4897 callfn = make_ssa_name (callfn, g2);
4898 gimple_assign_set_lhs (g2, callfn);
4899 gsi_insert_before (gsi, g2, GSI_SAME_STMT);
4901 /* ??? This is a hack to preserve the NOTHROW bit on the call,
4902 which we would have derived from the decl. Failure to save
4903 this bit means we might have to split the basic block. */
4904 if (gimple_call_nothrow_p (stmt))
4905 gimple_call_set_nothrow (stmt, true);
4907 gimple_call_set_fn (stmt, callfn);
4909 /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
4910 for a call statement. Fix it. */
4912 tree lhs = gimple_call_lhs (stmt);
4913 tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
4914 if (lhs
4915 && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
4917 tree temp;
4919 temp = create_tmp_reg (rettype, NULL);
4920 gimple_call_set_lhs (stmt, temp);
4922 g2 = gimple_build_assign (lhs,
4923 fold_build1 (VIEW_CONVERT_EXPR,
4924 TREE_TYPE (lhs), temp));
4925 gsi_insert_after (gsi, g2, GSI_SAME_STMT);
4929 update_stmt (stmt);
4931 return true;
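/* A sketch of the whole indirect-call transform (GIMPLE-like, SSA
   names illustrative; the builtin shown is the irrevocable variant,
   libitm's _ITM_getTMCloneOrIrrevocable):

     before:  r = fn_ptr (x);
     after:   ret_1 = __builtin__ITM_getTMCloneOrIrrevocable (fn_ptr);
              callfn_2 = (fn_type) ret_1;
              r = callfn_2 (x);
*/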
4934 /* Helper function for ipa_tm_transform_calls*. Given a call
4935 statement in GSI which resides inside transaction REGION, redirect
4936 the call to either its wrapper function, or its clone. */
4938 static void
4939 ipa_tm_transform_calls_redirect (struct cgraph_node *node,
4940 struct tm_region *region,
4941 gimple_stmt_iterator *gsi,
4942 bool *need_ssa_rename_p)
4944 gimple stmt = gsi_stmt (*gsi);
4945 struct cgraph_node *new_node;
4946 struct cgraph_edge *e = cgraph_edge (node, stmt);
4947 tree fndecl = gimple_call_fndecl (stmt);
4949 /* For indirect calls, pass the address through the runtime. */
4950 if (fndecl == NULL)
4952 *need_ssa_rename_p |=
4953 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
4954 return;
4957 /* Handle some TM builtins. Ordinarily these aren't actually generated
4958 at this point, but handling these functions when written directly by the
4959 user makes it easier to build unit tests. */
4960 if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
4961 return;
4963 /* Fixup recursive calls inside clones. */
4964 /* ??? Why did cgraph_copy_node_for_versioning update the call edges
4965 for recursion but not update the call statements themselves? */
4966 if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
4968 gimple_call_set_fndecl (stmt, current_function_decl);
4969 return;
4972 /* If there is a replacement, use it. */
4973 fndecl = find_tm_replacement_function (fndecl);
4974 if (fndecl)
4976 new_node = cgraph_get_create_node (fndecl);
4978 /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
4980 We can't do this earlier in record_tm_replacement because
4981 cgraph_remove_unreachable_nodes is called before we inject
4982 references to the node. Further, we can't do this in some
4983 nice central place in ipa_tm_execute because we don't have
4984 the exact list of wrapper functions that would be used.
4985 Marking more wrappers than necessary results in the creation
4986 of unnecessary cgraph_nodes, which can cause some of the
4987 other IPA passes to crash.
4989 We do need to mark these nodes so that we get the proper
4990 result in expand_call_tm. */
4991 /* ??? This seems broken. How is it that we're marking the
4992 CALLEE as may_enter_irr? Surely we should be marking the
4993 CALLER. Also note that find_tm_replacement_function also
4994 contains mappings into the TM runtime, e.g. memcpy. These
4995 we know won't go irrevocable. */
4996 new_node->local.tm_may_enter_irr = 1;
4998 else
5000 struct tm_ipa_cg_data *d;
5001 struct cgraph_node *tnode = e->callee;
5003 d = get_cg_data (&tnode, true);
5004 new_node = d->clone;
5006 /* As we've already skipped pure calls and appropriate builtins,
5007 and we've already marked irrevocable blocks, if we can't come
5008 up with a static replacement, then ask the runtime. */
5009 if (new_node == NULL)
5011 *need_ssa_rename_p |=
5012 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5013 return;
5016 fndecl = new_node->symbol.decl;
5019 cgraph_redirect_edge_callee (e, new_node);
5020 gimple_call_set_fndecl (stmt, fndecl);
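/* For a direct call inside a transaction the net effect is simply
   (names illustrative, continuing the earlier example):

     bump ();    becomes    _ZGTt4bump ();

   with the corresponding cgraph edge redirected to the clone.  */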
5023 /* Helper function for ipa_tm_transform_calls. For a given BB,
5024 install calls to tm_irrevocable when IRR_BLOCKS are reached,
5025 redirect other calls to the generated transactional clone. */
5027 static bool
5028 ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
5029 basic_block bb, bitmap irr_blocks)
5031 gimple_stmt_iterator gsi;
5032 bool need_ssa_rename = false;
5034 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5036 ipa_tm_insert_irr_call (node, region, bb);
5037 return true;
5040 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5042 gimple stmt = gsi_stmt (gsi);
5044 if (!is_gimple_call (stmt))
5045 continue;
5046 if (is_tm_pure_call (stmt))
5047 continue;
5049 /* Redirect edges to the appropriate replacement or clone. */
5050 ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
5053 return need_ssa_rename;
5056 /* Walk the CFG for REGION, beginning at BB. Install calls to
5057 tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
5058 the generated transactional clone. */
5060 static bool
5061 ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
5062 basic_block bb, bitmap irr_blocks)
5064 bool need_ssa_rename = false;
5065 edge e;
5066 edge_iterator ei;
5067 vec<basic_block> queue = vNULL;
5068 bitmap visited_blocks = BITMAP_ALLOC (NULL);
5070 queue.safe_push (bb);
5073 bb = queue.pop ();
5075 need_ssa_rename |=
5076 ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
5078 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5079 continue;
5081 if (region && bitmap_bit_p (region->exit_blocks, bb->index))
5082 continue;
5084 FOR_EACH_EDGE (e, ei, bb->succs)
5085 if (!bitmap_bit_p (visited_blocks, e->dest->index))
5087 bitmap_set_bit (visited_blocks, e->dest->index);
5088 queue.safe_push (e->dest);
5091 while (!queue.is_empty ());
5093 queue.release ();
5094 BITMAP_FREE (visited_blocks);
5096 return need_ssa_rename;
5099 /* Transform the calls within the TM regions within NODE. */
5101 static void
5102 ipa_tm_transform_transaction (struct cgraph_node *node)
5104 struct tm_ipa_cg_data *d;
5105 struct tm_region *region;
5106 bool need_ssa_rename = false;
5108 d = get_cg_data (&node, true);
5110 push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));
5111 calculate_dominance_info (CDI_DOMINATORS);
5113 for (region = d->all_tm_regions; region; region = region->next)
5115 /* If we're sure to go irrevocable, don't transform anything. */
5116 if (d->irrevocable_blocks_normal
5117 && bitmap_bit_p (d->irrevocable_blocks_normal,
5118 region->entry_block->index))
5120 transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
5121 | GTMA_MAY_ENTER_IRREVOCABLE
5122 | GTMA_HAS_NO_INSTRUMENTATION);
5123 continue;
5126 need_ssa_rename |=
5127 ipa_tm_transform_calls (node, region, region->entry_block,
5128 d->irrevocable_blocks_normal);
5131 if (need_ssa_rename)
5132 update_ssa (TODO_update_ssa_only_virtuals);
5134 pop_cfun ();
5137 /* Transform the calls within the transactional clone of NODE. */
5139 static void
5140 ipa_tm_transform_clone (struct cgraph_node *node)
5142 struct tm_ipa_cg_data *d;
5143 bool need_ssa_rename;
5145 d = get_cg_data (&node, true);
5147 /* If this function makes no calls and has no irrevocable blocks,
5148 then there's nothing to do. */
5149 /* ??? Remove non-aborting top-level transactions. */
5150 if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
5151 return;
5153 push_cfun (DECL_STRUCT_FUNCTION (d->clone->symbol.decl));
5154 calculate_dominance_info (CDI_DOMINATORS);
5156 need_ssa_rename =
5157 ipa_tm_transform_calls (d->clone, NULL, single_succ (ENTRY_BLOCK_PTR),
5158 d->irrevocable_blocks_clone);
5160 if (need_ssa_rename)
5161 update_ssa (TODO_update_ssa_only_virtuals);
5163 pop_cfun ();
5166 /* Main entry point for the transactional memory IPA pass. */
5168 static unsigned int
5169 ipa_tm_execute (void)
5171 cgraph_node_queue tm_callees = cgraph_node_queue ();
5172 /* List of functions that will go irrevocable. */
5173 cgraph_node_queue irr_worklist = cgraph_node_queue ();
5175 struct cgraph_node *node;
5176 struct tm_ipa_cg_data *d;
5177 enum availability a;
5178 unsigned int i;
5180 #ifdef ENABLE_CHECKING
5181 verify_cgraph ();
5182 #endif
5184 bitmap_obstack_initialize (&tm_obstack);
5185 initialize_original_copy_tables ();
5187 /* For all local functions marked tm_callable, queue them. */
5188 FOR_EACH_DEFINED_FUNCTION (node)
5189 if (is_tm_callable (node->symbol.decl)
5190 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
5192 d = get_cg_data (&node, true);
5193 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5196 /* For all local reachable functions... */
5197 FOR_EACH_DEFINED_FUNCTION (node)
5198 if (node->lowered
5199 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
5201 /* ... marked tm_pure, record that fact for the runtime by
5202 indicating that the pure function is its own tm_callable.
5203 No need to do this if the function's address can't be taken. */
5204 if (is_tm_pure (node->symbol.decl))
5206 if (!node->local.local)
5207 record_tm_clone_pair (node->symbol.decl, node->symbol.decl);
5208 continue;
5211 push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));
5212 calculate_dominance_info (CDI_DOMINATORS);
5214 tm_region_init (NULL);
5215 if (all_tm_regions)
5217 d = get_cg_data (&node, true);
5219 /* Scan for calls that are in each transaction, and
5220 generate the uninstrumented code path. */
5221 ipa_tm_scan_calls_transaction (d, &tm_callees);
5223 /* Put it in the worklist so we can scan the function
5224 later (ipa_tm_scan_irr_function) and mark the
5225 irrevocable blocks. */
5226 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5227 d->want_irr_scan_normal = true;
5230 pop_cfun ();
5233 /* For every local function on the callee list, scan as if we will be
5234 creating a transactional clone, queueing all new functions we find
5235 along the way. */
5236 for (i = 0; i < tm_callees.length (); ++i)
5238 node = tm_callees[i];
5239 a = cgraph_function_body_availability (node);
5240 d = get_cg_data (&node, true);
5242 /* Put it in the worklist so we can scan the function later
5243 (ipa_tm_scan_irr_function) and mark the irrevocable
5244 blocks. */
5245 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5247 /* Some callees cannot be arbitrarily cloned. These will always be
5248 irrevocable. Mark these now, so that we need not scan them. */
5249 if (is_tm_irrevocable (node->symbol.decl))
5250 ipa_tm_note_irrevocable (node, &irr_worklist);
5251 else if (a <= AVAIL_NOT_AVAILABLE
5252 && !is_tm_safe_or_pure (node->symbol.decl))
5253 ipa_tm_note_irrevocable (node, &irr_worklist);
5254 else if (a >= AVAIL_OVERWRITABLE)
5256 if (!tree_versionable_function_p (node->symbol.decl))
5257 ipa_tm_note_irrevocable (node, &irr_worklist);
5258 else if (!d->is_irrevocable)
5260 /* If this is an alias, make sure its base is queued as well.
5261 We need not scan the callees now, as the base will do. */
5262 if (node->symbol.alias)
5264 node = cgraph_get_node (node->thunk.alias);
5265 d = get_cg_data (&node, true);
5266 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5267 continue;
5270 /* Add all nodes called by this function into
5271 tm_callees as well. */
5272 ipa_tm_scan_calls_clone (node, &tm_callees);
5277 /* Iterate scans until there is no more work to be done. Prefer not to
5278 use vec::pop because the worklist tends to follow a breadth-first
5279 search of the callgraph, which should allow convergence with a
5280 minimum number of scans. But we also don't want the worklist
5281 array to grow without bound, so we shift the array up periodically. */
5282 for (i = 0; i < irr_worklist.length (); ++i)
5284 if (i > 256 && i == irr_worklist.length () / 8)
5286 irr_worklist.block_remove (0, i);
5287 i = 0;
5290 node = irr_worklist[i];
5291 d = get_cg_data (&node, true);
5292 d->in_worklist = false;
5294 if (d->want_irr_scan_normal)
5296 d->want_irr_scan_normal = false;
5297 ipa_tm_scan_irr_function (node, false);
5299 if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
5300 ipa_tm_note_irrevocable (node, &irr_worklist);
5303 /* For every function on the callee list, collect the tm_may_enter_irr
5304 bit on the node. */
5305 irr_worklist.truncate (0);
5306 for (i = 0; i < tm_callees.length (); ++i)
5308 node = tm_callees[i];
5309 if (ipa_tm_mayenterirr_function (node))
5311 d = get_cg_data (&node, true);
5312 gcc_assert (d->in_worklist == false);
5313 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5317 /* Propagate the tm_may_enter_irr bit to callers until stable. */
5318 for (i = 0; i < irr_worklist.length (); ++i)
5320 struct cgraph_node *caller;
5321 struct cgraph_edge *e;
5322 struct ipa_ref *ref;
5323 unsigned j;
5325 if (i > 256 && i == irr_worklist.length () / 8)
5327 irr_worklist.block_remove (0, i);
5328 i = 0;
5331 node = irr_worklist[i];
5332 d = get_cg_data (&node, true);
5333 d->in_worklist = false;
5334 node->local.tm_may_enter_irr = true;
5336 /* Propagate back to normal callers. */
5337 for (e = node->callers; e ; e = e->next_caller)
5339 caller = e->caller;
5340 if (!is_tm_safe_or_pure (caller->symbol.decl)
5341 && !caller->local.tm_may_enter_irr)
5343 d = get_cg_data (&caller, true);
5344 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5348 /* Propagate back to referring aliases as well. */
5349 for (j = 0; ipa_ref_list_referring_iterate (&node->symbol.ref_list, j, ref); j++)
5351 caller = cgraph (ref->referring);
5352 if (ref->use == IPA_REF_ALIAS
5353 && !caller->local.tm_may_enter_irr)
5355 /* ?? Do not traverse aliases here. */
5356 d = get_cg_data (&caller, false);
5357 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5362 /* Now validate all tm_safe functions, and all atomic regions in
5363 other functions. */
5364 FOR_EACH_DEFINED_FUNCTION (node)
5365 if (node->lowered
5366 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
5368 d = get_cg_data (&node, true);
5369 if (is_tm_safe (node->symbol.decl))
5370 ipa_tm_diagnose_tm_safe (node);
5371 else if (d->all_tm_regions)
5372 ipa_tm_diagnose_transaction (node, d->all_tm_regions);
5375 /* Create clones. Do those that are not irrevocable and have a
5376 positive call count. Do those publicly visible functions that
5377 the user directed us to clone. */
5378 for (i = 0; i < tm_callees.length (); ++i)
5380 bool doit = false;
5382 node = tm_callees[i];
5383 if (node->symbol.cpp_implicit_alias)
5384 continue;
5386 a = cgraph_function_body_availability (node);
5387 d = get_cg_data (&node, true);
5389 if (a <= AVAIL_NOT_AVAILABLE)
5390 doit = is_tm_callable (node->symbol.decl);
5391 else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->symbol.decl))
5392 doit = true;
5393 else if (!d->is_irrevocable
5394 && d->tm_callers_normal + d->tm_callers_clone > 0)
5395 doit = true;
5397 if (doit)
5398 ipa_tm_create_version (node);
5401 /* Redirect calls to the new clones, and insert irrevocable marks. */
5402 for (i = 0; i < tm_callees.length (); ++i)
5404 node = tm_callees[i];
5405 if (node->symbol.analyzed)
5407 d = get_cg_data (&node, true);
5408 if (d->clone)
5409 ipa_tm_transform_clone (node);
5412 FOR_EACH_DEFINED_FUNCTION (node)
5413 if (node->lowered
5414 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
5416 d = get_cg_data (&node, true);
5417 if (d->all_tm_regions)
5418 ipa_tm_transform_transaction (node);
5421 /* Free and clear all data structures. */
5422 tm_callees.release ();
5423 irr_worklist.release ();
5424 bitmap_obstack_release (&tm_obstack);
5425 free_original_copy_tables ();
5427 FOR_EACH_FUNCTION (node)
5428 node->symbol.aux = NULL;
5430 #ifdef ENABLE_CHECKING
5431 verify_cgraph ();
5432 #endif
5434 return 0;
5437 struct simple_ipa_opt_pass pass_ipa_tm =
5440 SIMPLE_IPA_PASS,
5441 "tmipa", /* name */
5442 OPTGROUP_NONE, /* optinfo_flags */
5443 gate_tm, /* gate */
5444 ipa_tm_execute, /* execute */
5445 NULL, /* sub */
5446 NULL, /* next */
5447 0, /* static_pass_number */
5448 TV_TRANS_MEM, /* tv_id */
5449 PROP_ssa | PROP_cfg, /* properties_required */
5450 0, /* properties_provided */
5451 0, /* properties_destroyed */
5452 0, /* todo_flags_start */
5453 0, /* todo_flags_finish */
5457 #include "gt-trans-mem.h"