/* Passes for transactional memory support.
   Copyright (C) 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "gimple.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-inline.h"
#include "diagnostic-core.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
#include "params.h"
#include "target.h"
#include "langhooks.h"
#include "gimple-pretty-print.h"
#include "cfgloop.h"

#define PROB_VERY_UNLIKELY	(REG_BR_PROB_BASE / 2000 - 1)
#define PROB_ALWAYS		(REG_BR_PROB_BASE)

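/* The A_*, AR_*, and MODE_* bits below mirror the transaction
   properties, abort reasons, and modes exchanged with the libitm ABI
   (e.g. the values passed to and returned from _ITM_beginTransaction);
   they are assumed to match libitm's definitions and must be kept in
   sync with them.  */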
#define A_RUNINSTRUMENTEDCODE	0x0001
#define A_RUNUNINSTRUMENTEDCODE	0x0002
#define A_SAVELIVEVARIABLES	0x0004
#define A_RESTORELIVEVARIABLES	0x0008
#define A_ABORTTRANSACTION	0x0010

#define AR_USERABORT		0x0001
#define AR_USERRETRY		0x0002
#define AR_TMCONFLICT		0x0004
#define AR_EXCEPTIONBLOCKABORT	0x0008
#define AR_OUTERABORT		0x0010

#define MODE_SERIALIRREVOCABLE	0x0000

/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction {
	  local++;
	  if (++global == 10)
	    __tm_abort;
	}

   During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
   trivially replaced with a GIMPLE_TRANSACTION node.

   During pass_lower_tm, we examine the body of transactions looking
   for aborts.  Transactions that do not contain an abort may be
   merged into an outer transaction.  We also add a TRY-FINALLY node
   to arrange for the transaction to be committed on any exit.

   [??? Think about how this arrangement affects throw-with-commit
   and throw-with-abort operations.  In this case we want the TRY to
   handle gotos, but not to catch any exceptions because the transaction
   will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    local = local + 1;
	    t0 = global;
	    t1 = t0 + 1;
	    global = t1;
	    if (t1 == 10)
	      __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

   During pass_lower_eh, we create EH regions for the transactions,
   intermixed with the regular EH stuff.  This gives us a nice persistent
   mapping (all the way through rtl) from transactional memory operation
   back to the transaction, which allows us to get the abnormal edges
   correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	local = local + 1;
	t0 = global;
	t1 = t0 + 1;
	global = t1;
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:

   This is the end of all_lowering_passes, and so is what is present
   during the IPA passes, and through all of the optimization passes.

   During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
   functions and mark functions for cloning.

   At the end of gimple optimization, before exiting SSA form,
   pass_tm_edges replaces statements that perform transactional
   memory operations with the appropriate TM builtins, and swaps
   out function calls with their transactional clones.  At this
   point we introduce the abnormal transaction restart edges and
   complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	eh_label:
	  if (x & abort_transaction)
	    goto over;
	  local = local + 1;
	  t0 = __builtin___tm_load (global);
	  t1 = t0 + 1;
	  __builtin___tm_store (&global, t1);
	  if (t1 == 10)
	    __builtin___tm_abort ();
	  __builtin___tm_commit ();
	over:
*/

/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));
      break;

    default:
      if (TYPE_P (x))
	return NULL;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}

/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}

/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is, by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}

/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}

/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}

/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}

/* Return true for built-in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}

/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc. optimized variants.  */

static bool
is_tm_simple_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}

/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc. optimized variants.  */

static bool
is_tm_simple_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}

/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}

/* Common gating function for several of the TM passes.  */

static bool
gate_tm (void)
{
  return flag_tm;
}

/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
     htab_t tm_wrap_map;

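/* Record in TM_WRAP_MAP that calls to FROM are to be redirected to TO
   inside transactions, and keep FROM from being inlined away before
   that replacement can happen.  */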
void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);

  h = ggc_alloc_tree_map ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = (struct tree_map **)
    htab_find_slot_with_hash (tm_wrap_map, h, h->hash, INSERT);
  *slot = h;
}

/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = (struct tree_map *) htab_find_with_hash (tm_wrap_map, &in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we already have these defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here:  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	return NULL;
      }

  return NULL;
}

/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */

void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing...  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}

/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_blocks, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4

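/* State carried through the diagnostic walk.  FUNC_FLAGS describe the
   enclosing function, BLOCK_FLAGS the innermost __transaction block,
   and SUMMARY_FLAGS is their union.  STMT is the statement currently
   being examined, saved for diagnostic locations.  */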
struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple stmt;
};

/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
  enum tree_code code = TREE_CODE (*tp);

  if ((code == VAR_DECL
       || code == RESULT_DECL
       || code == PARM_DECL)
      && d->block_flags & (DIAG_TM_SAFE | DIAG_TM_RELAXED)
      && TREE_THIS_VOLATILE (TREE_TYPE (*tp))
      && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      error_at (gimple_location (d->stmt),
		"invalid volatile use of %qD inside transaction",
		*tp);
    }

  return NULL_TREE;
}

static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  /* Save stmt for use in leaf analysis.  */
  d->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fn = gimple_call_fn (stmt);

	if ((d->summary_flags & DIAG_TM_OUTER) == 0
	    && is_tm_may_cancel_outer (fn))
	  error_at (gimple_location (stmt),
		    "%<transaction_may_cancel_outer%> function call not within"
		    " outer transaction or %<transaction_may_cancel_outer%>");

	if (d->summary_flags & DIAG_TM_SAFE)
	  {
	    bool is_safe, direct_call_p;
	    tree replacement;

	    if (TREE_CODE (fn) == ADDR_EXPR
		&& TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
	      {
		direct_call_p = true;
		replacement = TREE_OPERAND (fn, 0);
		replacement = find_tm_replacement_function (replacement);
		if (replacement)
		  fn = replacement;
	      }
	    else
	      {
		direct_call_p = false;
		replacement = NULL_TREE;
	      }

	    if (is_tm_safe_or_pure (fn))
	      is_safe = true;
	    else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
	      {
		/* A function explicitly marked transaction_callable as
		   opposed to transaction_safe is being defined to be
		   unsafe as part of its ABI, regardless of its contents.  */
		is_safe = false;
	      }
	    else if (direct_call_p)
	      {
		if (flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
		  is_safe = true;
		else if (replacement)
		  {
		    /* ??? At present we've been considering replacements
		       merely transaction_callable, and therefore might
		       enter irrevocable.  The tm_wrap attribute has not
		       yet made it into the new language spec.  */
		    is_safe = false;
		  }
		else
		  {
		    /* ??? Diagnostics for unmarked direct calls moved into
		       the IPA pass.  Section 3.2 of the spec details how
		       functions not marked should be considered "implicitly
		       safe" based on having examined the function body.  */
		    is_safe = true;
		  }
	      }
	    else
	      {
		/* An unmarked indirect call.  Consider it unsafe even
		   though optimization may yet figure out how to inline.  */
		is_safe = false;
	      }

	    if (!is_safe)
	      {
		if (TREE_CODE (fn) == ADDR_EXPR)
		  fn = TREE_OPERAND (fn, 0);
		if (d->block_flags & DIAG_TM_SAFE)
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"atomic transaction", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "atomic transaction", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "atomic transaction");
		      }
		  }
		else
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"%<transaction_safe%> function", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "%<transaction_safe%> function", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "%<transaction_safe%> function");
		      }
		  }
	      }
	  }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
	 asm statements, and then add "transaction_safe" to it.
	 Either that or get the language spec to resurrect __tm_waiver.  */
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in atomic transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
	unsigned char inner_flags = DIAG_TM_SAFE;

	if (gimple_transaction_subcode (stmt) & GTMA_IS_RELAXED)
	  {
	    if (d->block_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in atomic transaction");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in %<transaction_safe%> function");
	    inner_flags = DIAG_TM_RELAXED;
	  }
	else if (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)
	  {
	    if (d->block_flags)
	      error_at (gimple_location (stmt),
			"outer transaction in transaction");
	    else if (d->func_flags & DIAG_TM_OUTER)
	      error_at (gimple_location (stmt),
			"outer transaction in "
			"%<transaction_may_cancel_outer%> function");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"outer transaction in %<transaction_safe%> function");
	    inner_flags |= DIAG_TM_OUTER;
	  }

	*handled_ops_p = true;
	if (gimple_transaction_body (stmt))
	  {
	    struct walk_stmt_info wi_inner;
	    struct diagnose_tm d_inner;

	    memset (&d_inner, 0, sizeof (d_inner));
	    d_inner.func_flags = d->func_flags;
	    d_inner.block_flags = d->block_flags | inner_flags;
	    d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;

	    memset (&wi_inner, 0, sizeof (wi_inner));
	    wi_inner.info = &d_inner;

	    walk_gimple_seq (gimple_transaction_body (stmt),
			     diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
	  }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}

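/* Entry point for the diagnostics above: walk the body of the current
   function, diagnosing invalid TM constructs.  */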
static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
		   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}

struct gimple_opt_pass pass_diagnose_tm_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_tm_blocks",		/* name */
  gate_tm,				/* gate */
  diagnose_tm_blocks,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TRANS_MEM,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

       struct large { int x[1000]; };
       struct large lala = { 0 };
       __transaction {
	 lala.x[i] = 123;
	 ...
       }

   We can either save/restore:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       if (trxn & a_saveLiveVariables)
	 tmp_lala1 = lala.x[i];
       else if (a & a_restoreLiveVariables)
	 lala.x[i] = tmp_lala1;

   or use the logging functions:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       _ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree as possible, so that one call shadows all
   of the writes to a given location (thus reducing the total number
   of logging calls), but not so high as to be called on a path that
   does not perform a write.  */

/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
typedef struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  gimple_vec stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
} *tm_log_entry_t;

/* The actual log.  */
static htab_t tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static VEC(tree,heap) *tm_log_save_addresses;

/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc).  */
static htab_t tm_new_mem_hash;

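/* Classification of a memory access as computed by
   thread_private_new_memory below.  The values are ordered so that the
   MIN of two classifications is the more conservative (less local)
   one.  */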
enum thread_memory_type
  {
    mem_non_local = 0,
    mem_thread_local,
    mem_transaction_local,
    mem_max
  };

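/* A cache entry for thread_private_new_memory, mapping an SSA_NAME to
   its previously computed classification.  */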
typedef struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
} tm_new_mem_map_t;

/* Htab support.  Return hash value for a `tm_log_entry'.  */
static hashval_t
tm_log_hash (const void *p)
{
  const struct tm_log_entry *log = (const struct tm_log_entry *) p;
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
static int
tm_log_eq (const void *p1, const void *p2)
{
  const struct tm_log_entry *log1 = (const struct tm_log_entry *) p1;
  const struct tm_log_entry *log2 = (const struct tm_log_entry *) p2;

  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
static void
tm_log_free (void *p)
{
  struct tm_log_entry *lp = (struct tm_log_entry *) p;
  VEC_free (gimple, heap, lp->stmts);
  free (lp);
}

/* Initialize logging data structures.  */
static void
tm_log_init (void)
{
  tm_log = htab_create (10, tm_log_hash, tm_log_eq, tm_log_free);
  tm_new_mem_hash = htab_create (5, struct_ptr_hash, struct_ptr_eq, free);
  tm_log_save_addresses = VEC_alloc (tree, heap, 5);
}

/* Free logging data structures.  */
static void
tm_log_delete (void)
{
  htab_delete (tm_log);
  htab_delete (tm_new_mem_hash);
  VEC_free (tree, heap, tm_log_save_addresses);
}

/* Return true if MEM is a transaction-invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
      basic_block def_bb;

      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return def_bb != region_entry_block
	&& dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
    }

  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}

/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry block for the transaction, or NULL if not
   known.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.  */
static void
tm_log_add (basic_block entry_block, tree addr, gimple stmt)
{
  void **slot;
  struct tm_log_entry l, *lp;

  l.addr = addr;
  slot = htab_find_slot (tm_log, &l, INSERT);
  if (!*slot)
    {
      tree type = TREE_TYPE (addr);

      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      *slot = lp;

      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
	  && transaction_invariant_address_p (lp->addr, entry_block)
	  && TYPE_SIZE_UNIT (type) != NULL
	  && host_integerp (TYPE_SIZE_UNIT (type), 1)
	  && (tree_low_cst (TYPE_SIZE_UNIT (type), 1)
	      < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
	  /* We must be able to copy this type normally.  I.e., no
	     special constructors and the like.  */
	  && !TREE_ADDRESSABLE (type))
	{
	  lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
	  lp->stmts = NULL;
	  lp->entry_block = entry_block;
	  /* Save addresses separately in dominator order so we don't
	     get confused by overlapping addresses in the save/restore
	     sequence.  */
	  VEC_safe_push (tree, heap, tm_log_save_addresses, lp->addr);
	}
      else
	{
	  /* Use the logging functions.  */
	  lp->stmts = VEC_alloc (gimple, heap, 5);
	  VEC_quick_push (gimple, lp->stmts, stmt);
	  lp->save_var = NULL;
	}
    }
  else
    {
      size_t i;
      gimple oldstmt;

      lp = (struct tm_log_entry *) *slot;

      /* If we're generating a save/restore sequence, we don't care
	 about statements.  */
      if (lp->save_var)
	return;

      for (i = 0; VEC_iterate (gimple, lp->stmts, i, oldstmt); ++i)
	{
	  if (stmt == oldstmt)
	    return;
	  /* We already have a store to the same address, higher up the
	     dominator tree.  Nothing to do.  */
	  if (dominated_by_p (CDI_DOMINATORS,
			      gimple_bb (stmt), gimple_bb (oldstmt)))
	    return;
	  /* We should be processing blocks in dominator tree order.  */
	  gcc_assert (!dominated_by_p (CDI_DOMINATORS,
				       gimple_bb (oldstmt), gimple_bb (stmt)));
	}
      /* Store is on a different code path.  */
      VEC_safe_push (gimple, heap, lp->stmts, stmt);
    }
}

/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */

static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}

/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
static void
tm_log_emit_stmt (tree addr, gimple stmt)
{
  tree type = TREE_TYPE (addr);
  tree size = TYPE_SIZE_UNIT (type);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple log;
  enum built_in_function code = BUILT_IN_TM_LOG;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (host_integerp (size, 1))
    {
      unsigned int n = tree_low_cst (size, 1);
      switch (n)
	{
	case 1:
	  code = BUILT_IN_TM_LOG_1;
	  break;
	case 2:
	  code = BUILT_IN_TM_LOG_2;
	  break;
	case 4:
	  code = BUILT_IN_TM_LOG_4;
	  break;
	case 8:
	  code = BUILT_IN_TM_LOG_8;
	  break;
	default:
	  code = BUILT_IN_TM_LOG;
	  if (TREE_CODE (type) == VECTOR_TYPE)
	    {
	      if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
		code = BUILT_IN_TM_LOG_M64;
	      else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
		code = BUILT_IN_TM_LOG_M128;
	      else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
		code = BUILT_IN_TM_LOG_M256;
	    }
	  break;
	}
    }

  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
  else
    log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}

/* Go through the log and instrument addresses that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   later.  */
static void
tm_log_emit (void)
{
  htab_iterator hi;
  struct tm_log_entry *lp;

  FOR_EACH_HTAB_ELEMENT (tm_log, lp, tm_log_entry_t, hi)
    {
      size_t i;
      gimple stmt;

      if (dump_file)
	{
	  fprintf (dump_file, "TM thread private mem logging: ");
	  print_generic_expr (dump_file, lp->addr, 0);
	  fprintf (dump_file, "\n");
	}

      if (lp->save_var)
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING to variable\n");
	  continue;
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING with logging functions\n");
	  for (i = 0; VEC_iterate (gimple, lp->stmts, i, stmt); ++i)
	    tm_log_emit_stmt (lp->addr, stmt);
	}
    }
}

/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple stmt;
  struct tm_log_entry l, *lp;

  for (i = 0; i < VEC_length (tree, tm_log_save_addresses); ++i)
    {
      l.addr = VEC_index (tree, tm_log_save_addresses, i);
      lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));

      /* Make sure we can create an SSA_NAME for this type.  For
	 instance, aggregates aren't allowed, in which case the system
	 will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
	{
	  lp->save_var = make_ssa_name (lp->save_var, stmt);
	  gimple_assign_set_lhs (stmt, lp->save_var);
	}

      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
}

/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
  int i;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple stmt;

  for (i = VEC_length (tree, tm_log_save_addresses) - 1; i >= 0; i--)
    {
      l.addr = VEC_index (tree, tm_log_save_addresses, i);
      lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
      gsi = gsi_start_bb (bb);

      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
}

/* Emit the checks for performing either a save or a restore sequence.

   TRXN_PROP is either A_SAVELIVEVARIABLES or A_RESTORELIVEVARIABLES.

   The code sequence is inserted in a new basic block created in
   END_BB which is inserted between BEFORE_BB and the destination of
   FALLTHRU_EDGE.

   STATUS is the return value from _ITM_beginTransaction.
   ENTRY_BLOCK is the entry block for the transaction.
   EMITF is a callback to emit the actual save/restore code.

   The basic block containing the conditional checking for TRXN_PROP
   is returned.  */
static basic_block
tm_log_emit_save_or_restores (basic_block entry_block,
			      unsigned trxn_prop,
			      tree status,
			      void (*emitf)(basic_block, basic_block),
			      basic_block before_bb,
			      edge fallthru_edge,
			      basic_block *end_bb)
{
  basic_block cond_bb, code_bb;
  gimple cond_stmt, stmt;
  gimple_stmt_iterator gsi;
  tree t1, t2;
  int old_flags = fallthru_edge->flags;

  cond_bb = create_empty_bb (before_bb);
  code_bb = create_empty_bb (cond_bb);
  *end_bb = create_empty_bb (code_bb);
  if (current_loops && before_bb->loop_father)
    {
      add_bb_to_loop (cond_bb, before_bb->loop_father);
      add_bb_to_loop (code_bb, before_bb->loop_father);
      add_bb_to_loop (*end_bb, before_bb->loop_father);
    }
  redirect_edge_pred (fallthru_edge, *end_bb);
  fallthru_edge->flags = EDGE_FALLTHRU;
  make_edge (before_bb, cond_bb, old_flags);

  set_immediate_dominator (CDI_DOMINATORS, cond_bb, before_bb);
  set_immediate_dominator (CDI_DOMINATORS, code_bb, cond_bb);

  gsi = gsi_last_bb (cond_bb);

  /* t1 = status & A_{property}.  */
  t1 = create_tmp_reg (TREE_TYPE (status), NULL);
  t2 = build_int_cst (TREE_TYPE (status), trxn_prop);
  stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, status, t2);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  /* if (t1).  */
  t2 = build_int_cst (TREE_TYPE (status), 0);
  cond_stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
  gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);

  emitf (entry_block, code_bb);

  make_edge (cond_bb, code_bb, EDGE_TRUE_VALUE);
  make_edge (cond_bb, *end_bb, EDGE_FALSE_VALUE);
  make_edge (code_bb, *end_bb, EDGE_FALLTHRU);

  return cond_bb;
}

static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
			       struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
				  struct walk_stmt_info *);

/* Evaluate an address X being dereferenced and determine if it
   originally points to a non-aliased new chunk of memory (malloc,
   alloca, etc).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
  gimple stmt = NULL;
  enum tree_code code;
  void **slot;
  tm_new_mem_map_t elt, *elt_p;
  tree val = x;
  enum thread_memory_type retval = mem_transaction_local;

  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
	 either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;

  /* Look in cache first.  */
  elt.val = x;
  slot = htab_find_slot (tm_new_mem_hash, &elt, INSERT);
  elt_p = (tm_new_mem_map_t *) *slot;
  if (elt_p)
    return elt_p->local_new_memory;

  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map_t);
  elt_p->val = val;
  elt_p->local_new_memory = mem_transaction_local;

  /* Search DEF chain to find the original definition of this address.  */
  do
    {
      if (ptr_deref_may_alias_global_p (x))
	{
	  /* Address escapes.  This is not thread-private.  */
	  retval = mem_non_local;
	  goto new_memory_ret;
	}

      stmt = SSA_NAME_DEF_STMT (x);

      /* If the malloc call is outside the transaction, this is
	 thread-local.  */
      if (retval != mem_thread_local
	  && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
	retval = mem_thread_local;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  /* x = foo ==> foo */
	  if (code == SSA_NAME)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = foo + n ==> foo */
	  else if (code == POINTER_PLUS_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = (cast*) foo ==> foo */
	  else if (code == VIEW_CONVERT_EXPR || code == NOP_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  else
	    {
	      retval = mem_non_local;
	      goto new_memory_ret;
	    }
	}
      else
	{
	  if (gimple_code (stmt) == GIMPLE_PHI)
	    {
	      unsigned int i;
	      enum thread_memory_type mem;
	      tree phi_result = gimple_phi_result (stmt);

	      /* If any of the ancestors are non-local, we are sure to
		 be non-local.  Otherwise we can avoid doing anything
		 and inherit what has already been generated.  */
	      retval = mem_max;
	      for (i = 0; i < gimple_phi_num_args (stmt); ++i)
		{
		  tree op = PHI_ARG_DEF (stmt, i);

		  /* Exclude self-assignment.  */
		  if (phi_result == op)
		    continue;

		  mem = thread_private_new_memory (entry_block, op);
		  if (mem == mem_non_local)
		    {
		      retval = mem;
		      goto new_memory_ret;
		    }
		  retval = MIN (retval, mem);
		}
	      goto new_memory_ret;
	    }
	  break;
	}
    }
  while (TREE_CODE (x) == SSA_NAME);

  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    ;
  else
    retval = mem_non_local;

 new_memory_ret:
  elt_p->local_new_memory = retval;
  return retval;
}

/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region in which STMT
   resides.  NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
static bool
requires_barrier (basic_block entry_block, tree x, gimple stmt)
{
  tree orig = x;
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);

  switch (TREE_CODE (x))
    {
    case INDIRECT_REF:
    case MEM_REF:
      {
	enum thread_memory_type ret;

	ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
	if (ret == mem_non_local)
	  return true;
	if (stmt && ret == mem_thread_local)
	  /* ?? Should we pass `orig', or the INDIRECT_REF X.  ?? */
	  tm_log_add (entry_block, orig, stmt);

	/* Transaction-locals require nothing at all.  For malloc, a
	   transaction restart frees the memory and we reallocate.
	   For alloca, the stack pointer gets reset by the retry and
	   we reallocate.  */
	return false;
      }

    case TARGET_MEM_REF:
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
	return true;
      x = TREE_OPERAND (TMR_BASE (x), 0);
      if (TREE_CODE (x) == PARM_DECL)
	return false;
      gcc_assert (TREE_CODE (x) == VAR_DECL);
      /* FALLTHRU */

    case PARM_DECL:
    case RESULT_DECL:
    case VAR_DECL:
      if (DECL_BY_REFERENCE (x))
	{
	  /* ??? This value is a pointer, but aggregate_value_p has been
	     jigged to return true which confuses needs_to_live_in_memory.
	     This ought to be cleaned up generically.

	     FIXME: Verify this still happens after the next mainline
	     merge.  Testcase, e.g., g++.dg/tm/pr47554.C.  */
	  return false;
	}

      if (is_global_var (x))
	return !TREE_READONLY (x);
      if (/* FIXME: This condition should actually go below in the
	     tm_log_add() call, however is_call_clobbered() depends on
	     aliasing info which is not available during
	     gimplification.  Since requires_barrier() gets called
	     during lower_sequence_tm/gimplification, leave the call
	     to needs_to_live_in_memory until we eliminate
	     lower_sequence_tm altogether.  */
	  needs_to_live_in_memory (x))
	return true;
      else
	{
	  /* For local memory that doesn't escape (aka thread private
	     memory), we can either save the value at the beginning of
	     the transaction and restore on restart, or call a tm
	     function to dynamically save and restore on restart
	     (ITM_L*).  */
	  if (stmt)
	    tm_log_add (entry_block, orig, stmt);
	  return false;
	}

    default:
      return false;
    }
}

/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */

static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
    *state |= GTMA_HAVE_LOAD;
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
    *state |= GTMA_HAVE_STORE;
}

/* Mark a GIMPLE_CALL as appropriate for being inside a transaction.  */

static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree fn;

  if (is_tm_pure_call (stmt))
    return;

  /* Check if this call is a transaction abort.  */
  fn = gimple_call_fndecl (stmt);
  if (is_tm_abort (fn))
    *state |= GTMA_HAVE_ABORT;

  /* Note that something may happen.  */
  *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}

/* Lower a GIMPLE_TRANSACTION statement.  */

static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
  gimple g, stmt = gsi_stmt (*gsi);
  unsigned int *outer_state = (unsigned int *) wi->info;
  unsigned int this_state = 0;
  struct walk_stmt_info this_wi;

  /* First, lower the body.  The scanning that we do inside gives
     us some idea of what we're dealing with.  */
  memset (&this_wi, 0, sizeof (this_wi));
  this_wi.info = (void *) &this_state;
  walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
		       lower_sequence_tm, NULL, &this_wi);

  /* If there was absolutely nothing transaction related inside the
     transaction, we may elide it.  Likewise if this is a nested
     transaction and does not contain an abort.  */
  if (this_state == 0
      || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
    {
      if (outer_state)
	*outer_state |= this_state;

      gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
			     GSI_SAME_STMT);
      gimple_transaction_set_body (stmt, NULL);

      gsi_remove (gsi, true);
      wi->removed_stmt = true;
      return;
    }

  /* Wrap the body of the transaction in a try-finally node so that
     the commit call is always properly called.  */
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
  if (flag_exceptions)
    {
      tree ptr;
      gimple_seq n_seq, e_seq;

      n_seq = gimple_seq_alloc_with_stmt (g);
      e_seq = NULL;

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
			     1, integer_zero_node);
      ptr = create_tmp_var (ptr_type_node, NULL);
      gimple_call_set_lhs (g, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
			     1, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_eh_else (n_seq, e_seq);
    }

  g = gimple_build_try (gimple_transaction_body (stmt),
			gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
  gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);

  gimple_transaction_set_body (stmt, NULL);

  /* If the transaction calls abort or if this is an outer transaction,
     add an "over" label afterwards.  */
  if ((this_state & GTMA_HAVE_ABORT)
      || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
    {
      tree label = create_artificial_label (UNKNOWN_LOCATION);
      gimple_transaction_set_label (stmt, label);
      gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
    }

  /* Record the set of operations found for use later.  */
  this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
  gimple_transaction_set_subcode (stmt, this_state);
}

/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being in a transaction.  */

static tree
lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		   struct walk_stmt_info *wi)
{
  unsigned int *state = (unsigned int *) wi->info;
  gimple stmt = gsi_stmt (*gsi);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* Only memory reads/writes need to be instrumented.  */
      if (gimple_assign_single_p (stmt))
	examine_assign_tm (state, gsi);
      break;

    case GIMPLE_CALL:
      examine_call_tm (state, gsi);
      break;

    case GIMPLE_ASM:
      *state |= GTMA_MAY_ENTER_IRREVOCABLE;
      break;

    case GIMPLE_TRANSACTION:
      lower_transaction (gsi, wi);
      break;

    default:
      *handled_ops_p = !gimple_has_substatements (stmt);
      break;
    }

  return NULL_TREE;
}

/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being outside of a transaction.  */

static tree
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		      struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_TRANSACTION)
    {
      *handled_ops_p = true;
      lower_transaction (gsi, wi);
    }
  else
    *handled_ops_p = !gimple_has_substatements (stmt);

  return NULL_TREE;
}

/* Main entry point for flattening GIMPLE_TRANSACTION constructs.  After
   this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
   been moved out, and all the data required for constructing a proper
   CFG has been recorded.  */

static unsigned int
execute_lower_tm (void)
{
  struct walk_stmt_info wi;
  gimple_seq body;

  /* Transactional clones aren't created until a later pass.  */
  gcc_assert (!decl_is_tm_clone (current_function_decl));

  body = gimple_body (current_function_decl);
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
  gimple_set_body (current_function_decl, body);

  return 0;
}

struct gimple_opt_pass pass_lower_tm =
{
 {
  GIMPLE_PASS,
  "tmlower",				/* name */
  gate_tm,				/* gate */
  execute_lower_tm,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TRANS_MEM,				/* tv_id */
  PROP_gimple_lcf,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

/* Collect region information for each transaction.  */

struct tm_region
{
  /* Link to the next unnested transaction.  */
  struct tm_region *next;

  /* Link to the next inner transaction.  */
  struct tm_region *inner;

  /* Link to the next outer transaction.  */
  struct tm_region *outer;

  /* The GIMPLE_TRANSACTION statement beginning this transaction.  */
  gimple transaction_stmt;

  /* The entry block to this region.  */
  basic_block entry_block;

  /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
     These blocks are still a part of the region (i.e., the border is
     inclusive).  Note that this set is only complete for paths in the CFG
     starting at ENTRY_BLOCK, and that there is no exit block recorded for
     the edge to the "over" label.  */
  bitmap exit_blocks;

  /* The set of all blocks that have a TM_IRREVOCABLE call.  */
  bitmap irr_blocks;
};

typedef struct tm_region *tm_region_p;
DEF_VEC_P (tm_region_p);
DEF_VEC_ALLOC_P (tm_region_p, heap);

/* True if there are pending edge statements to be committed for the
   current function being scanned in the tmmark pass.  */
bool pending_edge_inserts_p;

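/* The list of outermost transaction regions in the current function,
   and the obstack used for allocating the per-region bitmaps.  */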
static struct tm_region *all_tm_regions;
static bitmap_obstack tm_obstack;

/* A subroutine of tm_region_init.  Record the existence of the
   GIMPLE_TRANSACTION statement in a tree of tm_region elements.  */

static struct tm_region *
tm_region_init_0 (struct tm_region *outer, basic_block bb, gimple stmt)
{
  struct tm_region *region;

  region = (struct tm_region *)
    obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));

  if (outer)
    {
      region->next = outer->inner;
      outer->inner = region;
    }
  else
    {
      region->next = all_tm_regions;
      all_tm_regions = region;
    }
  region->inner = NULL;
  region->outer = outer;

  region->transaction_stmt = stmt;

  /* There are either one or two edges out of the block containing
     the GIMPLE_TRANSACTION, one to the actual region and one to the
     "over" label if the region contains an abort.  The former will
     always be the one marked FALLTHRU.  */
  region->entry_block = FALLTHRU_EDGE (bb)->dest;

  region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
  region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

  return region;
}

/* A subroutine of tm_region_init.  Record all the exit and
   irrevocable blocks in BB into the region's exit_blocks and
   irr_blocks bitmaps.  Returns the new region being scanned.  */

static struct tm_region *
tm_region_init_1 (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple g;

  if (!region
      || (!region->irr_blocks && !region->exit_blocks))
    return region;

  /* Check to see if this is the end of a region by seeing if it
     contains a call to __builtin_tm_commit{,_eh}.  Note that the
     outermost region for DECL_IS_TM_CLONE need not collect this.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_CALL)
	{
	  tree fn = gimple_call_fndecl (g);
	  if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
	    {
	      if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
		   || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
		  && region->exit_blocks)
		{
		  bitmap_set_bit (region->exit_blocks, bb->index);
		  region = region->outer;
		  break;
		}
	      if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
		bitmap_set_bit (region->irr_blocks, bb->index);
	    }
	}
    }

  return region;
}

/* Collect all of the transaction regions within the current function
   and record them in ALL_TM_REGIONS.  The REGION parameter may specify
   an "outermost" region for use by tm clones.  */

static void
tm_region_init (struct tm_region *region)
{
  gimple g;
  edge_iterator ei;
  edge e;
  basic_block bb;
  VEC(basic_block, heap) *queue = NULL;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
  struct tm_region *old_region;
  VEC(tm_region_p, heap) *bb_regions = NULL;

  all_tm_regions = region;
  bb = single_succ (ENTRY_BLOCK_PTR);

  /* We could store this information in bb->aux, but we may get called
     through get_all_tm_blocks() from another pass that may be already
     using bb->aux.  */
  VEC_safe_grow_cleared (tm_region_p, heap, bb_regions, last_basic_block);

  VEC_safe_push (basic_block, heap, queue, bb);
  VEC_replace (tm_region_p, bb_regions, bb->index, region);
  do
    {
      bb = VEC_pop (basic_block, queue);
      region = VEC_index (tm_region_p, bb_regions, bb->index);
      VEC_replace (tm_region_p, bb_regions, bb->index, NULL);

      /* Record exit and irrevocable blocks.  */
      region = tm_region_init_1 (region, bb);

      /* Check for the last statement in the block beginning a new region.  */
      g = last_stmt (bb);
      old_region = region;
      if (g && gimple_code (g) == GIMPLE_TRANSACTION)
	region = tm_region_init_0 (region, bb, g);

      /* Process subsequent blocks.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    VEC_safe_push (basic_block, heap, queue, e->dest);

	    /* If the current block started a new region, make sure that only
	       the entry block of the new region is associated with this region.
	       Other successors are still part of the old region.  */
	    if (old_region != region && e->dest != region->entry_block)
	      VEC_replace (tm_region_p, bb_regions, e->dest->index, old_region);
	    else
	      VEC_replace (tm_region_p, bb_regions, e->dest->index, region);
	  }
    }
  while (!VEC_empty (basic_block, queue));
  VEC_free (basic_block, heap, queue);
  BITMAP_FREE (visited_blocks);
  VEC_free (tm_region_p, heap, bb_regions);
}

/* The "gate" function for all transactional memory expansion and optimization
   passes.  We collect region information for each top-level transaction, and
   if we don't find any, we skip all of the TM passes.  Each region will have
   all of the exit blocks recorded, and the originating statement.  */

static bool
gate_tm_init (void)
{
  if (!flag_tm)
    return false;

  calculate_dominance_info (CDI_DOMINATORS);
  bitmap_obstack_initialize (&tm_obstack);

  /* If the function is a TM_CLONE, then the entire function is the region.  */
  if (decl_is_tm_clone (current_function_decl))
    {
      struct tm_region *region = (struct tm_region *)
	obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
      memset (region, 0, sizeof (*region));
      region->entry_block = single_succ (ENTRY_BLOCK_PTR);
      /* For a clone, the entire function is the region.  But even if
	 we don't need to record any exit blocks, we may need to
	 record irrevocable blocks.  */
      region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

      tm_region_init (region);
    }
  else
    {
      tm_region_init (NULL);

      /* If we didn't find any regions, cleanup and skip the whole tree
	 of tm-related optimizations.  */
      if (all_tm_regions == NULL)
	{
	  bitmap_obstack_release (&tm_obstack);
	  return false;
	}
    }

  return true;
}

struct gimple_opt_pass pass_tm_init =
{
 {
  GIMPLE_PASS,
  "*tminit",				/* name */
  gate_tm_init,				/* gate */
  NULL,					/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TRANS_MEM,				/* tv_id */
  PROP_ssa | PROP_cfg,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

/* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
   represented by REGION.  */

static inline void
transaction_subcode_ior (struct tm_region *region, unsigned flags)
{
  if (region && region->transaction_stmt)
    {
      flags |= gimple_transaction_subcode (region->transaction_stmt);
      gimple_transaction_set_subcode (region->transaction_stmt, flags);
    }
}

1997 /* Construct a memory load in a transactional context. Return the
1998 gimple statement performing the load, or NULL if there is no
1999 TM_LOAD builtin of the appropriate size to do the load.
2001 LOC is the location to use for the new statement(s). */
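/* A sketch of the common case, using the 4-byte variant (the builtin
   actually chosen depends on the type of RHS):

	x = y;

   becomes

	x = _ITM_RU4 (&y);

   where _ITM_RU4 is the runtime routine behind BUILT_IN_TM_LOAD_4.  */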
2003 static gimple
2004 build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2006 enum built_in_function code = END_BUILTINS;
2007 tree t, type = TREE_TYPE (rhs), decl;
2008 gimple gcall;
2010 if (type == float_type_node)
2011 code = BUILT_IN_TM_LOAD_FLOAT;
2012 else if (type == double_type_node)
2013 code = BUILT_IN_TM_LOAD_DOUBLE;
2014 else if (type == long_double_type_node)
2015 code = BUILT_IN_TM_LOAD_LDOUBLE;
2016 else if (TYPE_SIZE_UNIT (type) != NULL
2017 && host_integerp (TYPE_SIZE_UNIT (type), 1))
2019 switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
2021 case 1:
2022 code = BUILT_IN_TM_LOAD_1;
2023 break;
2024 case 2:
2025 code = BUILT_IN_TM_LOAD_2;
2026 break;
2027 case 4:
2028 code = BUILT_IN_TM_LOAD_4;
2029 break;
2030 case 8:
2031 code = BUILT_IN_TM_LOAD_8;
2032 break;
2036 if (code == END_BUILTINS)
2038 decl = targetm.vectorize.builtin_tm_load (type);
2039 if (!decl)
2040 return NULL;
2042 else
2043 decl = builtin_decl_explicit (code);
2045 t = gimplify_addr (gsi, rhs);
2046 gcall = gimple_build_call (decl, 1, t);
2047 gimple_set_location (gcall, loc);
2049 t = TREE_TYPE (TREE_TYPE (decl));
2050 if (useless_type_conversion_p (type, t))
2052 gimple_call_set_lhs (gcall, lhs);
2053 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2055 else
2057 gimple g;
2058 tree temp;
2060 temp = create_tmp_reg (t, NULL);
2061 gimple_call_set_lhs (gcall, temp);
2062 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2064 t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
2065 g = gimple_build_assign (lhs, t);
2066 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2069 return gcall;
2073 /* Similarly for storing TYPE in a transactional context. */
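/* A sketch of the corresponding store, again for a 4-byte type:

	x = y;

   becomes

	_ITM_WU4 (&x, y);

   with _ITM_WU4 being the runtime routine behind BUILT_IN_TM_STORE_4.  */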
2075 static gimple
2076 build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2078 enum built_in_function code = END_BUILTINS;
2079 tree t, fn, type = TREE_TYPE (rhs), simple_type;
2080 gimple gcall;
2082 if (type == float_type_node)
2083 code = BUILT_IN_TM_STORE_FLOAT;
2084 else if (type == double_type_node)
2085 code = BUILT_IN_TM_STORE_DOUBLE;
2086 else if (type == long_double_type_node)
2087 code = BUILT_IN_TM_STORE_LDOUBLE;
2088 else if (TYPE_SIZE_UNIT (type) != NULL
2089 && host_integerp (TYPE_SIZE_UNIT (type), 1))
2091 switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
2093 case 1:
2094 code = BUILT_IN_TM_STORE_1;
2095 break;
2096 case 2:
2097 code = BUILT_IN_TM_STORE_2;
2098 break;
2099 case 4:
2100 code = BUILT_IN_TM_STORE_4;
2101 break;
2102 case 8:
2103 code = BUILT_IN_TM_STORE_8;
2104 break;
2108 if (code == END_BUILTINS)
2110 fn = targetm.vectorize.builtin_tm_store (type);
2111 if (!fn)
2112 return NULL;
2114 else
2115 fn = builtin_decl_explicit (code);
2117 simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
2119 if (TREE_CODE (rhs) == CONSTRUCTOR)
2121 /* Handle the easy initialization to zero. */
2122 if (CONSTRUCTOR_ELTS (rhs) == 0)
2123 rhs = build_int_cst (simple_type, 0);
2124 else
2126 /* ...otherwise punt to the caller and probably use
2127 BUILT_IN_TM_MEMMOVE, because we can't wrap a
2128 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
2129 valid gimple. */
2130 return NULL;
2133 else if (!useless_type_conversion_p (simple_type, type))
2135 gimple g;
2136 tree temp;
2138 temp = create_tmp_reg (simple_type, NULL);
2139 t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
2140 g = gimple_build_assign (temp, t);
2141 gimple_set_location (g, loc);
2142 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2144 rhs = temp;
2147 t = gimplify_addr (gsi, lhs);
2148 gcall = gimple_build_call (fn, 2, t, rhs);
2149 gimple_set_location (gcall, loc);
2150 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2152 return gcall;
2156 /* Expand an assignment statement into transactional builtins. */
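/* In outline: a pure load becomes a TM_LOAD builtin, a pure store becomes
   a TM_STORE builtin, and anything else (both sides need a barrier, or no
   sized builtin exists) falls back to BUILT_IN_TM_MEMMOVE on the two
   addresses.  A load whose LHS is a gimple register goes through a
   temporary, since the memmove fallback needs an address to store to.  */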
2158 static void
2159 expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
2161 gimple stmt = gsi_stmt (*gsi);
2162 location_t loc = gimple_location (stmt);
2163 tree lhs = gimple_assign_lhs (stmt);
2164 tree rhs = gimple_assign_rhs1 (stmt);
2165 bool store_p = requires_barrier (region->entry_block, lhs, NULL);
2166 bool load_p = requires_barrier (region->entry_block, rhs, NULL);
2167 gimple gcall = NULL;
2169 if (!load_p && !store_p)
2171 /* Add thread private addresses to log if applicable. */
2172 requires_barrier (region->entry_block, lhs, stmt);
2173 gsi_next (gsi);
2174 return;
2177 gsi_remove (gsi, true);
2179 if (load_p && !store_p)
2181 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2182 gcall = build_tm_load (loc, lhs, rhs, gsi);
2184 else if (store_p && !load_p)
2186 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2187 gcall = build_tm_store (loc, lhs, rhs, gsi);
2189 if (!gcall)
2191 tree lhs_addr, rhs_addr, tmp;
2193 if (load_p)
2194 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2195 if (store_p)
2196 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2198 /* ??? Figure out if there's any possible overlap between the LHS
2199 and the RHS and if not, use MEMCPY. */
2201 if (load_p && is_gimple_reg (lhs))
2203 tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
2204 lhs_addr = build_fold_addr_expr (tmp);
2206 else
2208 tmp = NULL_TREE;
2209 lhs_addr = gimplify_addr (gsi, lhs);
2211 rhs_addr = gimplify_addr (gsi, rhs);
2212 gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
2213 3, lhs_addr, rhs_addr,
2214 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
2215 gimple_set_location (gcall, loc);
2216 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2218 if (tmp)
2220 gcall = gimple_build_assign (lhs, tmp);
2221 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2225 /* Now that we have the load/store in its instrumented form, add
2226 thread private addresses to the log if applicable. */
2227 if (!store_p)
2228 requires_barrier (region->entry_block, lhs, gcall);
2230 /* add_stmt_to_tm_region (region, gcall); */
2234 /* Expand a call statement as appropriate for a transaction. That is,
2235 either verify that the call does not affect the transaction, or
2236 redirect the call to a clone that handles transactions, or change
2237 the transaction state to IRREVOCABLE. Return true if the call is
2238 one of the builtins that end a transaction. */
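/* In outline, the cases handled below: the TM memcpy/memmove builtins
   mark the region as having both loads and stores, TM memset as having
   stores; pure calls need no work; indirect calls that are not known
   transaction_safe may enter irrevocable mode; direct calls consult the
   callee's tm_may_enter_irr flag; and a call whose returned value needs
   a barrier is rewritten to store through a temporary so that the call
   stays last in its block.  */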
2240 static bool
2241 expand_call_tm (struct tm_region *region,
2242 gimple_stmt_iterator *gsi)
2244 gimple stmt = gsi_stmt (*gsi);
2245 tree lhs = gimple_call_lhs (stmt);
2246 tree fn_decl;
2247 struct cgraph_node *node;
2248 bool retval = false;
2250 fn_decl = gimple_call_fndecl (stmt);
2252 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
2253 || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
2254 transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
2255 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
2256 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2258 if (is_tm_pure_call (stmt))
2259 return false;
2261 if (fn_decl)
2262 retval = is_tm_ending_fndecl (fn_decl);
2263 if (!retval)
2265 /* Assume all non-const/pure calls write to memory, except
2266 transaction ending builtins. */
2267 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2270 /* For indirect calls, we already generated a call into the runtime. */
2271 if (!fn_decl)
2273 tree fn = gimple_call_fn (stmt);
2275 /* We are guaranteed never to go irrevocable on a safe or pure
2276 call, and the pure call was handled above. */
2277 if (is_tm_safe (fn))
2278 return false;
2279 else
2280 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2282 return false;
2285 node = cgraph_get_node (fn_decl);
2286 /* All calls should have cgraph here. */
2287 gcc_assert (node);
2288 if (node->local.tm_may_enter_irr)
2289 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2291 if (is_tm_abort (fn_decl))
2293 transaction_subcode_ior (region, GTMA_HAVE_ABORT);
2294 return true;
2297 /* Instrument the store if needed.
2299 If the assignment happens inside the function call (return slot
2300 optimization), there is no instrumentation to be done, since
2301 the callee should have done the right thing. */
2302 if (lhs && requires_barrier (region->entry_block, lhs, stmt)
2303 && !gimple_call_return_slot_opt_p (stmt))
2305 tree tmp = create_tmp_reg (TREE_TYPE (lhs), NULL);
2306 location_t loc = gimple_location (stmt);
2307 edge fallthru_edge = NULL;
2309 /* Remember if the call was going to throw. */
2310 if (stmt_can_throw_internal (stmt))
2312 edge_iterator ei;
2313 edge e;
2314 basic_block bb = gimple_bb (stmt);
2316 FOR_EACH_EDGE (e, ei, bb->succs)
2317 if (e->flags & EDGE_FALLTHRU)
2319 fallthru_edge = e;
2320 break;
2324 gimple_call_set_lhs (stmt, tmp);
2325 update_stmt (stmt);
2326 stmt = gimple_build_assign (lhs, tmp);
2327 gimple_set_location (stmt, loc);
2329 /* We cannot throw in the middle of a BB. If the call was going
2330 to throw, place the instrumentation on the fallthru edge, so
2331 the call remains the last statement in the block. */
2332 if (fallthru_edge)
2334 gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (stmt);
2335 gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
2336 expand_assign_tm (region, &fallthru_gsi);
2337 gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
2338 pending_edge_inserts_p = true;
2340 else
2342 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
2343 expand_assign_tm (region, gsi);
2346 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2349 return retval;
2353 /* Expand all statements in BB as appropriate for being inside
2354 a transaction. */
2356 static void
2357 expand_block_tm (struct tm_region *region, basic_block bb)
2359 gimple_stmt_iterator gsi;
2361 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2363 gimple stmt = gsi_stmt (gsi);
2364 switch (gimple_code (stmt))
2366 case GIMPLE_ASSIGN:
2367 /* Only memory reads/writes need to be instrumented. */
2368 if (gimple_assign_single_p (stmt)
2369 && !gimple_clobber_p (stmt))
2371 expand_assign_tm (region, &gsi);
2372 continue;
2374 break;
2376 case GIMPLE_CALL:
2377 if (expand_call_tm (region, &gsi))
2378 return;
2379 break;
2381 case GIMPLE_ASM:
2382 gcc_unreachable ();
2384 default:
2385 break;
2387 if (!gsi_end_p (gsi))
2388 gsi_next (&gsi);
2392 /* Return the list of basic blocks in the region rooted at ENTRY_BLOCK.
2394 STOP_AT_IRREVOCABLE_P is true if the caller is not interested in blocks
2395 following a TM_IRREVOCABLE call. */
2397 static VEC (basic_block, heap) *
2398 get_tm_region_blocks (basic_block entry_block,
2399 bitmap exit_blocks,
2400 bitmap irr_blocks,
2401 bitmap all_region_blocks,
2402 bool stop_at_irrevocable_p)
2404 VEC(basic_block, heap) *bbs = NULL;
2405 unsigned i;
2406 edge e;
2407 edge_iterator ei;
2408 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2410 i = 0;
2411 VEC_safe_push (basic_block, heap, bbs, entry_block);
2412 bitmap_set_bit (visited_blocks, entry_block->index);
2416 basic_block bb = VEC_index (basic_block, bbs, i++);
2418 if (exit_blocks
2419 && bitmap_bit_p (exit_blocks, bb->index))
2420 continue;
2422 if (stop_at_irrevocable_p
2423 && irr_blocks
2424 && bitmap_bit_p (irr_blocks, bb->index))
2425 continue;
2427 FOR_EACH_EDGE (e, ei, bb->succs)
2428 if (!bitmap_bit_p (visited_blocks, e->dest->index))
2430 bitmap_set_bit (visited_blocks, e->dest->index);
2431 VEC_safe_push (basic_block, heap, bbs, e->dest);
2434 while (i < VEC_length (basic_block, bbs));
2436 if (all_region_blocks)
2437 bitmap_ior_into (all_region_blocks, visited_blocks);
2439 BITMAP_FREE (visited_blocks);
2440 return bbs;
2443 /* Set BB_IN_TRANSACTION for all basic blocks that appear in a
2444 transaction. */
2446 void
2447 compute_transaction_bits (void)
2449 struct tm_region *region;
2450 VEC (basic_block, heap) *queue;
2451 unsigned int i;
2452 basic_block bb;
2454 /* ?? Perhaps we need to abstract gate_tm_init further, because we
2455 certainly don't need it to calculate CDI_DOMINATORS info. */
2456 gate_tm_init ();
2458 FOR_EACH_BB (bb)
2459 bb->flags &= ~BB_IN_TRANSACTION;
2461 for (region = all_tm_regions; region; region = region->next)
2463 queue = get_tm_region_blocks (region->entry_block,
2464 region->exit_blocks,
2465 region->irr_blocks,
2466 NULL,
2467 /*stop_at_irr_p=*/true);
2468 for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
2469 bb->flags |= BB_IN_TRANSACTION;
2470 VEC_free (basic_block, heap, queue);
2473 if (all_tm_regions)
2474 bitmap_obstack_release (&tm_obstack);
2477 /* Entry point to the MARK phase of TM expansion. Here we replace
2478 transactional memory statements with calls to builtins, and function
2479 calls with their transactional clones (if available). But we don't
2480 yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */
2482 static unsigned int
2483 execute_tm_mark (void)
2485 struct tm_region *region;
2486 basic_block bb;
2487 VEC (basic_block, heap) *queue;
2488 size_t i;
2490 queue = VEC_alloc (basic_block, heap, 10);
2491 pending_edge_inserts_p = false;
2493 for (region = all_tm_regions; region ; region = region->next)
2495 tm_log_init ();
2496 /* If we have a transaction... */
2497 if (region->exit_blocks)
2499 unsigned int subcode
2500 = gimple_transaction_subcode (region->transaction_stmt);
2502 /* Collect a new SUBCODE set, now that optimizations are done... */
2503 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2504 subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
2505 | GTMA_MAY_ENTER_IRREVOCABLE);
2506 else
2507 subcode &= GTMA_DECLARATION_MASK;
2508 gimple_transaction_set_subcode (region->transaction_stmt, subcode);
2511 queue = get_tm_region_blocks (region->entry_block,
2512 region->exit_blocks,
2513 region->irr_blocks,
2514 NULL,
2515 /*stop_at_irr_p=*/true);
2516 for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
2517 expand_block_tm (region, bb);
2518 VEC_free (basic_block, heap, queue);
2520 tm_log_emit ();
2523 if (pending_edge_inserts_p)
2524 gsi_commit_edge_inserts ();
2525 return 0;
2528 struct gimple_opt_pass pass_tm_mark =
2531 GIMPLE_PASS,
2532 "tmmark", /* name */
2533 NULL, /* gate */
2534 execute_tm_mark, /* execute */
2535 NULL, /* sub */
2536 NULL, /* next */
2537 0, /* static_pass_number */
2538 TV_TRANS_MEM, /* tv_id */
2539 PROP_ssa | PROP_cfg, /* properties_required */
2540 0, /* properties_provided */
2541 0, /* properties_destroyed */
2542 0, /* todo_flags_start */
2543 TODO_update_ssa
2544 | TODO_verify_ssa, /* todo_flags_finish */
2548 /* Create an abnormal call edge from BB to the first block of the region
2549 represented by REGION. Also record the edge in the TM_RESTART map. */
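/* The TM_RESTART map remembers, for each such statement, the label of
   the region entry block (or, when one statement can restart several
   nested regions, a TREE_LIST of such labels); later phases use this to
   keep the abnormal restart edges reconstructible.  */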
2551 static inline void
2552 make_tm_edge (gimple stmt, basic_block bb, struct tm_region *region)
2554 void **slot;
2555 struct tm_restart_node *n, dummy;
2557 if (cfun->gimple_df->tm_restart == NULL)
2558 cfun->gimple_df->tm_restart = htab_create_ggc (31, struct_ptr_hash,
2559 struct_ptr_eq, ggc_free);
2561 dummy.stmt = stmt;
2562 dummy.label_or_list = gimple_block_label (region->entry_block);
2563 slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, INSERT);
2564 n = (struct tm_restart_node *) *slot;
2565 if (n == NULL)
2567 n = ggc_alloc_tm_restart_node ();
2568 *n = dummy;
2570 else
2572 tree old = n->label_or_list;
2573 if (TREE_CODE (old) == LABEL_DECL)
2574 old = tree_cons (NULL, old, NULL);
2575 n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
2578 make_edge (bb, region->entry_block, EDGE_ABNORMAL);
2582 /* Split block BB as necessary for every builtin function we added, and
2583 wire up the abnormal back edges implied by the transaction restart. */
2585 static void
2586 expand_block_edges (struct tm_region *region, basic_block bb)
2588 gimple_stmt_iterator gsi;
2590 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2592 bool do_next = true;
2593 gimple stmt = gsi_stmt (gsi);
2595 /* ??? TM_COMMIT (and any other tm builtin function) in a nested
2596 transaction has an abnormal edge back to the outer-most transaction
2597 (there are no nested retries), while a TM_ABORT also has an abnormal
2598 backedge to the inner-most transaction. We haven't actually saved
2599 the inner-most transaction here. We should be able to get to it
2600 via the region_nr saved on STMT, and read the transaction_stmt from
2601 that, and find the first region block from there. */
2602 /* ??? Shouldn't we split for any non-pure, non-irrevocable function? */
2603 if (gimple_code (stmt) == GIMPLE_CALL
2604 && (gimple_call_flags (stmt) & ECF_TM_BUILTIN) != 0)
2606 if (gsi_one_before_end_p (gsi))
2607 make_tm_edge (stmt, bb, region);
2608 else
2610 edge e = split_block (bb, stmt);
2611 make_tm_edge (stmt, bb, region);
2612 bb = e->dest;
2613 gsi = gsi_start_bb (bb);
2614 do_next = false;
2617 /* Delete any tail-call annotation that may have been added.
2618 The tail-call pass may have mis-identified the commit as being
2619 a candidate because we had not yet added this restart edge. */
2620 gimple_call_set_tail (stmt, false);
2623 if (do_next)
2624 gsi_next (&gsi);
2628 /* Expand the GIMPLE_TRANSACTION statement into the STM library call. */
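/* A sketch of the result (the live-variable slice and the abort test
   are each emitted only when needed):

	tm_state = _ITM_beginTransaction (PR_* flags);
	<restore live variables if tm_state & A_RESTORELIVEVARIABLES>
	if (tm_state & A_ABORTTRANSACTION)
	  goto <abort label>;

   where _ITM_beginTransaction is the runtime routine behind
   BUILT_IN_TM_START.  */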
2630 static void
2631 expand_transaction (struct tm_region *region)
2633 tree status, tm_start;
2634 basic_block atomic_bb, slice_bb;
2635 gimple_stmt_iterator gsi;
2636 tree t1, t2;
2637 gimple g;
2638 int flags, subcode;
2640 tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2641 status = create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
2643 /* ??? There are plenty of bits here we're not computing. */
2644 subcode = gimple_transaction_subcode (region->transaction_stmt);
2645 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2646 flags = PR_DOESGOIRREVOCABLE | PR_UNINSTRUMENTEDCODE;
2647 else
2648 flags = PR_INSTRUMENTEDCODE;
2649 if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
2650 flags |= PR_HASNOIRREVOCABLE;
2651 /* If the transaction does not have an abort in lexical scope and is not
2652 marked as an outer transaction, then it will never abort. */
2653 if ((subcode & GTMA_HAVE_ABORT) == 0
2654 && (subcode & GTMA_IS_OUTER) == 0)
2655 flags |= PR_HASNOABORT;
2656 if ((subcode & GTMA_HAVE_STORE) == 0)
2657 flags |= PR_READONLY;
2658 t2 = build_int_cst (TREE_TYPE (status), flags);
2659 g = gimple_build_call (tm_start, 1, t2);
2660 gimple_call_set_lhs (g, status);
2661 gimple_set_location (g, gimple_location (region->transaction_stmt));
2663 atomic_bb = gimple_bb (region->transaction_stmt);
2665 if (!VEC_empty (tree, tm_log_save_addresses))
2666 tm_log_emit_saves (region->entry_block, atomic_bb);
2668 gsi = gsi_last_bb (atomic_bb);
2669 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2670 gsi_remove (&gsi, true);
2672 if (!VEC_empty (tree, tm_log_save_addresses))
2673 region->entry_block =
2674 tm_log_emit_save_or_restores (region->entry_block,
2675 A_RESTORELIVEVARIABLES,
2676 status,
2677 tm_log_emit_restores,
2678 atomic_bb,
2679 FALLTHRU_EDGE (atomic_bb),
2680 &slice_bb);
2681 else
2682 slice_bb = atomic_bb;
2684 /* If we have an ABORT statement, create a test following the start
2685 call to perform the abort. */
2686 if (gimple_transaction_label (region->transaction_stmt))
2688 edge e;
2689 basic_block test_bb;
2691 test_bb = create_empty_bb (slice_bb);
2692 if (current_loops && slice_bb->loop_father)
2693 add_bb_to_loop (test_bb, slice_bb->loop_father);
2694 if (VEC_empty (tree, tm_log_save_addresses))
2695 region->entry_block = test_bb;
2696 gsi = gsi_last_bb (test_bb);
2698 t1 = create_tmp_reg (TREE_TYPE (status), NULL);
2699 t2 = build_int_cst (TREE_TYPE (status), A_ABORTTRANSACTION);
2700 g = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, status, t2);
2701 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
2703 t2 = build_int_cst (TREE_TYPE (status), 0);
2704 g = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2705 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
2707 e = FALLTHRU_EDGE (slice_bb);
2708 redirect_edge_pred (e, test_bb);
2709 e->flags = EDGE_FALSE_VALUE;
2710 e->probability = PROB_ALWAYS - PROB_VERY_UNLIKELY;
2712 e = BRANCH_EDGE (atomic_bb);
2713 redirect_edge_pred (e, test_bb);
2714 e->flags = EDGE_TRUE_VALUE;
2715 e->probability = PROB_VERY_UNLIKELY;
2717 e = make_edge (slice_bb, test_bb, EDGE_FALLTHRU);
2720 /* If we have no abort, but we do have PHIs at the beginning of the atomic
2721 region, then we have a loop at the beginning of the atomic region that
2722 shares the first block. This can cause problems with the abnormal
2723 edges we're about to add for the transaction restart. Solve this by
2724 adding a new empty block to receive the abnormal edges. */
2725 else if (phi_nodes (region->entry_block))
2727 edge e;
2728 basic_block empty_bb;
2730 region->entry_block = empty_bb = create_empty_bb (atomic_bb);
2731 if (current_loops && atomic_bb->loop_father)
2732 add_bb_to_loop (empty_bb, atomic_bb->loop_father);
2734 e = FALLTHRU_EDGE (atomic_bb);
2735 redirect_edge_pred (e, empty_bb);
2737 e = make_edge (atomic_bb, empty_bb, EDGE_FALLTHRU);
2740 /* The GIMPLE_TRANSACTION statement no longer exists. */
2741 region->transaction_stmt = NULL;
2744 static void expand_regions (struct tm_region *);
2746 /* Helper function for expand_regions. Expand REGION and recurse to
2747 the inner region. */
2749 static void
2750 expand_regions_1 (struct tm_region *region)
2752 if (region->exit_blocks)
2754 unsigned int i;
2755 basic_block bb;
2756 VEC (basic_block, heap) *queue;
2758 /* Collect the set of blocks in this region. Do this before
2759 splitting edges, so that we don't have to play with the
2760 dominator tree in the middle. */
2761 queue = get_tm_region_blocks (region->entry_block,
2762 region->exit_blocks,
2763 region->irr_blocks,
2764 NULL,
2765 /*stop_at_irr_p=*/false);
2766 expand_transaction (region);
2767 for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
2768 expand_block_edges (region, bb);
2769 VEC_free (basic_block, heap, queue);
2771 if (region->inner)
2772 expand_regions (region->inner);
2775 /* Expand regions starting at REGION. */
2777 static void
2778 expand_regions (struct tm_region *region)
2780 while (region)
2782 expand_regions_1 (region);
2783 region = region->next;
2787 /* Entry point to the final expansion of transactional nodes. */
2789 static unsigned int
2790 execute_tm_edges (void)
2792 expand_regions (all_tm_regions);
2793 tm_log_delete ();
2795 /* We've got to release the dominance info now, to indicate that it
2796 must be rebuilt completely. Otherwise we'll crash trying to update
2797 the SSA web in the TODO section following this pass. */
2798 free_dominance_info (CDI_DOMINATORS);
2799 bitmap_obstack_release (&tm_obstack);
2800 all_tm_regions = NULL;
2802 return 0;
2805 struct gimple_opt_pass pass_tm_edges =
2808 GIMPLE_PASS,
2809 "tmedge", /* name */
2810 NULL, /* gate */
2811 execute_tm_edges, /* execute */
2812 NULL, /* sub */
2813 NULL, /* next */
2814 0, /* static_pass_number */
2815 TV_TRANS_MEM, /* tv_id */
2816 PROP_ssa | PROP_cfg, /* properties_required */
2817 0, /* properties_provided */
2818 0, /* properties_destroyed */
2819 0, /* todo_flags_start */
2820 TODO_update_ssa
2821 | TODO_verify_ssa, /* todo_flags_finish */
2825 /* A unique TM memory operation. */
2826 typedef struct tm_memop
2828 /* Unique ID that all memory operations to the same location have. */
2829 unsigned int value_id;
2830 /* Address of load/store. */
2831 tree addr;
2832 } *tm_memop_t;
2834 /* Sets for solving data flow equations in the memory optimization pass. */
2835 struct tm_memopt_bitmaps
2837 /* Stores available to this BB upon entry. Basically, stores that
2838 dominate this BB. */
2839 bitmap store_avail_in;
2840 /* Stores available at the end of this BB. */
2841 bitmap store_avail_out;
2842 bitmap store_antic_in;
2843 bitmap store_antic_out;
2844 /* Reads available to this BB upon entry. Basically, reads that
2845 dominate this BB. */
2846 bitmap read_avail_in;
2847 /* Reads available at the end of this BB. */
2848 bitmap read_avail_out;
2849 /* Reads performed in this BB. */
2850 bitmap read_local;
2851 /* Writes performed in this BB. */
2852 bitmap store_local;
2854 /* Temporary storage for pass. */
2855 /* Is the current BB in the worklist? */
2856 bool avail_in_worklist_p;
2857 /* Have we visited this BB? */
2858 bool visited_p;
2861 static bitmap_obstack tm_memopt_obstack;
2863 /* Unique counter for TM loads and stores. Loads and stores of the
2864 same address get the same ID. */
2865 static unsigned int tm_memopt_value_id;
2866 static htab_t tm_memopt_value_numbers;
2868 #define STORE_AVAIL_IN(BB) \
2869 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
2870 #define STORE_AVAIL_OUT(BB) \
2871 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
2872 #define STORE_ANTIC_IN(BB) \
2873 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
2874 #define STORE_ANTIC_OUT(BB) \
2875 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
2876 #define READ_AVAIL_IN(BB) \
2877 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
2878 #define READ_AVAIL_OUT(BB) \
2879 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
2880 #define READ_LOCAL(BB) \
2881 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
2882 #define STORE_LOCAL(BB) \
2883 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
2884 #define AVAIL_IN_WORKLIST_P(BB) \
2885 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
2886 #define BB_VISITED_P(BB) \
2887 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
2889 /* Htab support. Return a hash value for a `tm_memop'. */
2890 static hashval_t
2891 tm_memop_hash (const void *p)
2893 const struct tm_memop *mem = (const struct tm_memop *) p;
2894 tree addr = mem->addr;
2895 /* We drill down to the SSA_NAME/DECL for the hash, but equality is
2896 actually done with operand_equal_p (see tm_memop_eq). */
2897 if (TREE_CODE (addr) == ADDR_EXPR)
2898 addr = TREE_OPERAND (addr, 0);
2899 return iterative_hash_expr (addr, 0);
2902 /* Htab support. Return true if two tm_memop's are the same. */
2903 static int
2904 tm_memop_eq (const void *p1, const void *p2)
2906 const struct tm_memop *mem1 = (const struct tm_memop *) p1;
2907 const struct tm_memop *mem2 = (const struct tm_memop *) p2;
2909 return operand_equal_p (mem1->addr, mem2->addr, 0);
2912 /* Given a TM load/store in STMT, return the value number for the address
2913 it accesses. */
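/* E.g. a load _ITM_RU4 (&x.f) and a later store _ITM_WU4 (&x.f) receive
   the same value number, because equality of the address expressions is
   decided by operand_equal_p via tm_memop_eq above.  */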
2915 static unsigned int
2916 tm_memopt_value_number (gimple stmt, enum insert_option op)
2918 struct tm_memop tmpmem, *mem;
2919 void **slot;
2921 gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
2922 tmpmem.addr = gimple_call_arg (stmt, 0);
2923 slot = htab_find_slot (tm_memopt_value_numbers, &tmpmem, op);
2924 if (*slot)
2925 mem = (struct tm_memop *) *slot;
2926 else if (op == INSERT)
2928 mem = XNEW (struct tm_memop);
2929 *slot = mem;
2930 mem->value_id = tm_memopt_value_id++;
2931 mem->addr = tmpmem.addr;
2933 else
2934 gcc_unreachable ();
2935 return mem->value_id;
2938 /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
2940 static void
2941 tm_memopt_accumulate_memops (basic_block bb)
2943 gimple_stmt_iterator gsi;
2945 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2947 gimple stmt = gsi_stmt (gsi);
2948 bitmap bits;
2949 unsigned int loc;
2951 if (is_tm_store (stmt))
2952 bits = STORE_LOCAL (bb);
2953 else if (is_tm_load (stmt))
2954 bits = READ_LOCAL (bb);
2955 else
2956 continue;
2958 loc = tm_memopt_value_number (stmt, INSERT);
2959 bitmap_set_bit (bits, loc);
2960 if (dump_file)
2962 fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
2963 is_tm_load (stmt) ? "LOAD" : "STORE", loc,
2964 gimple_bb (stmt)->index);
2965 print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
2966 fprintf (dump_file, "\n");
2971 /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */
2973 static void
2974 dump_tm_memopt_set (const char *set_name, bitmap bits)
2976 unsigned i;
2977 bitmap_iterator bi;
2978 const char *comma = "";
2980 fprintf (dump_file, "TM memopt: %s: [", set_name);
2981 EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
2983 htab_iterator hi;
2984 struct tm_memop *mem;
2986 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
2987 FOR_EACH_HTAB_ELEMENT (tm_memopt_value_numbers, mem, tm_memop_t, hi)
2988 if (mem->value_id == i)
2989 break;
2990 gcc_assert (mem->value_id == i);
2991 fprintf (dump_file, "%s", comma);
2992 comma = ", ";
2993 print_generic_expr (dump_file, mem->addr, 0);
2995 fprintf (dump_file, "]\n");
2998 /* Prettily dump all of the memopt sets in BLOCKS. */
3000 static void
3001 dump_tm_memopt_sets (VEC (basic_block, heap) *blocks)
3003 size_t i;
3004 basic_block bb;
3006 for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
3008 fprintf (dump_file, "------------BB %d---------\n", bb->index);
3009 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
3010 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
3011 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
3012 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
3013 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
3014 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
3018 /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
3020 static void
3021 tm_memopt_compute_avin (basic_block bb)
3023 edge e;
3024 unsigned ix;
3026 /* Seed with the AVOUT of any predecessor. */
3027 for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
3029 e = EDGE_PRED (bb, ix);
3030 /* Make sure we have already visited this BB, and that it is thus
3031 initialized.
3033 If e->src->aux is NULL, this predecessor is actually on an
3034 enclosing transaction. We only care about the current
3035 transaction, so ignore it. */
3036 if (e->src->aux && BB_VISITED_P (e->src))
3038 bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3039 bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3040 break;
3044 for (; ix < EDGE_COUNT (bb->preds); ix++)
3046 e = EDGE_PRED (bb, ix);
3047 if (e->src->aux && BB_VISITED_P (e->src))
3049 bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3050 bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3054 BB_VISITED_P (bb) = true;
3057 /* Compute the STORE_ANTIC_IN for the basic block BB. */
3059 static void
3060 tm_memopt_compute_antin (basic_block bb)
3062 edge e;
3063 unsigned ix;
3065 /* Seed with the ANTIC_OUT of any successor. */
3066 for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
3068 e = EDGE_SUCC (bb, ix);
3069 /* Make sure we have already visited this BB, and that it is thus
3070 initialized. */
3071 if (BB_VISITED_P (e->dest))
3073 bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3074 break;
3078 for (; ix < EDGE_COUNT (bb->succs); ix++)
3080 e = EDGE_SUCC (bb, ix);
3081 if (BB_VISITED_P (e->dest))
3082 bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3085 BB_VISITED_P (bb) = true;
3088 /* Compute the AVAIL sets for every basic block in BLOCKS.
3090 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3092 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3093 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
3095 This is basically what we do in lcm's compute_available(), but here
3096 we calculate two sets of sets (one for STOREs and one for READs),
3097 and we work on a region instead of the entire CFG.
3099 REGION is the TM region.
3100 BLOCKS are the basic blocks in the region. */
3102 static void
3103 tm_memopt_compute_available (struct tm_region *region,
3104 VEC (basic_block, heap) *blocks)
3106 edge e;
3107 basic_block *worklist, *qin, *qout, *qend, bb;
3108 unsigned int qlen, i;
3109 edge_iterator ei;
3110 bool changed;
3112 /* Allocate a worklist array/queue. Entries are only added to the
3113 list if they were not already on the list. So the size is
3114 bounded by the number of basic blocks in the region. */
3115 qlen = VEC_length (basic_block, blocks) - 1;
3116 qin = qout = worklist =
3117 XNEWVEC (basic_block, qlen);
3119 /* Put every block in the region on the worklist. */
3120 for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
3122 /* Seed AVAIL_OUT with the LOCAL set. */
3123 bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
3124 bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
3126 AVAIL_IN_WORKLIST_P (bb) = true;
3127 /* No need to insert the entry block, since it has an AVIN of
3128 null, and an AVOUT that has already been seeded in. */
3129 if (bb != region->entry_block)
3130 *qin++ = bb;
3133 /* The entry block has been initialized with the local sets. */
3134 BB_VISITED_P (region->entry_block) = true;
3136 qin = worklist;
3137 qend = &worklist[qlen];
3139 /* Iterate until the worklist is empty. */
3140 while (qlen)
3142 /* Take the first entry off the worklist. */
3143 bb = *qout++;
3144 qlen--;
3146 if (qout >= qend)
3147 qout = worklist;
3149 /* This block can be added to the worklist again if necessary. */
3150 AVAIL_IN_WORKLIST_P (bb) = false;
3151 tm_memopt_compute_avin (bb);
3153 /* Note: We do not add the LOCAL sets here because we already
3154 seeded the AVAIL_OUT sets with them. */
3155 changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
3156 changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
3157 if (changed
3158 && (region->exit_blocks == NULL
3159 || !bitmap_bit_p (region->exit_blocks, bb->index)))
3160 /* If the out state of this block changed, then we need to add
3161 its successors to the worklist if they are not already in. */
3162 FOR_EACH_EDGE (e, ei, bb->succs)
3163 if (!AVAIL_IN_WORKLIST_P (e->dest) && e->dest != EXIT_BLOCK_PTR)
3165 *qin++ = e->dest;
3166 AVAIL_IN_WORKLIST_P (e->dest) = true;
3167 qlen++;
3169 if (qin >= qend)
3170 qin = worklist;
3174 free (worklist);
3176 if (dump_file)
3177 dump_tm_memopt_sets (blocks);
3180 /* Compute ANTIC sets for every basic block in BLOCKS.
3182 We compute STORE_ANTIC_OUT as follows:
3184 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
3185 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
3187 REGION is the TM region.
3188 BLOCKS are the basic blocks in the region. */
3190 static void
3191 tm_memopt_compute_antic (struct tm_region *region,
3192 VEC (basic_block, heap) *blocks)
3194 edge e;
3195 basic_block *worklist, *qin, *qout, *qend, bb;
3196 unsigned int qlen;
3197 int i;
3198 edge_iterator ei;
3200 /* Allocate a worklist array/queue. Entries are only added to the
3201 list if they were not already on the list. So the size is
3202 bounded by the number of basic blocks in the region. */
3203 qin = qout = worklist =
3204 XNEWVEC (basic_block, VEC_length (basic_block, blocks));
3206 for (qlen = 0, i = VEC_length (basic_block, blocks) - 1; i >= 0; --i)
3208 bb = VEC_index (basic_block, blocks, i);
3210 /* Seed ANTIC_OUT with the LOCAL set. */
3211 bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
3213 /* Put every block in the region on the worklist. */
3214 AVAIL_IN_WORKLIST_P (bb) = true;
3215 /* No need to insert exit blocks, since their ANTIC_IN is NULL,
3216 and their ANTIC_OUT has already been seeded in. */
3217 if (region->exit_blocks
3218 && !bitmap_bit_p (region->exit_blocks, bb->index))
3220 qlen++;
3221 *qin++ = bb;
3225 /* The exit blocks have been initialized with the local sets. */
3226 if (region->exit_blocks)
3228 unsigned int i;
3229 bitmap_iterator bi;
3230 EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
3231 BB_VISITED_P (BASIC_BLOCK (i)) = true;
3234 qin = worklist;
3235 qend = &worklist[qlen];
3237 /* Iterate until the worklist is empty. */
3238 while (qlen)
3240 /* Take the first entry off the worklist. */
3241 bb = *qout++;
3242 qlen--;
3244 if (qout >= qend)
3245 qout = worklist;
3247 /* This block can be added to the worklist again if necessary. */
3248 AVAIL_IN_WORKLIST_P (bb) = false;
3249 tm_memopt_compute_antin (bb);
3251 /* Note: We do not add the LOCAL sets here because we already
3252 seeded the ANTIC_OUT sets with them. */
3253 if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
3254 && bb != region->entry_block)
3255 /* If the out state of this block changed, then we need to add
3256 its predecessors to the worklist if they are not already in. */
3257 FOR_EACH_EDGE (e, ei, bb->preds)
3258 if (!AVAIL_IN_WORKLIST_P (e->src))
3260 *qin++ = e->src;
3261 AVAIL_IN_WORKLIST_P (e->src) = true;
3262 qlen++;
3264 if (qin >= qend)
3265 qin = worklist;
3269 free (worklist);
3271 if (dump_file)
3272 dump_tm_memopt_sets (blocks);
3275 /* Offsets of load variants from TM_LOAD. For example,
3276 BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
3277 See gtm-builtins.def. */
3278 #define TRANSFORM_RAR 1
3279 #define TRANSFORM_RAW 2
3280 #define TRANSFORM_RFW 3
3281 /* Offsets of store variants from TM_STORE. */
3282 #define TRANSFORM_WAR 1
3283 #define TRANSFORM_WAW 2
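/* For example: a transactional load from a location with an available
   dominating store is a read-after-write, so a call to _ITM_RU4 is
   rewritten into _ITM_RaWU4 (offset TRANSFORM_RAW in the builtins
   table), which the runtime can service with fewer conflict checks
   since the store has already claimed the location.  */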
3285 /* Inform about a load/store optimization. */
3287 static void
3288 dump_tm_memopt_transform (gimple stmt)
3290 if (dump_file)
3292 fprintf (dump_file, "TM memopt: transforming: ");
3293 print_gimple_stmt (dump_file, stmt, 0, 0);
3294 fprintf (dump_file, "\n");
3298 /* Perform a read/write optimization. Replaces the TM builtin in STMT
3299 by a builtin that is OFFSET entries down in the builtins table in
3300 gtm-builtins.def. */
3302 static void
3303 tm_memopt_transform_stmt (unsigned int offset,
3304 gimple stmt,
3305 gimple_stmt_iterator *gsi)
3307 tree fn = gimple_call_fn (stmt);
3308 gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
3309 TREE_OPERAND (fn, 0)
3310 = builtin_decl_explicit ((enum built_in_function)
3311 (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
3312 + offset));
3313 gimple_call_set_fn (stmt, fn);
3314 gsi_replace (gsi, stmt, true);
3315 dump_tm_memopt_transform (stmt);
3318 /* Perform the actual TM memory optimization transformations in the
3319 basic blocks in BLOCKS. */
3321 static void
3322 tm_memopt_transform_blocks (VEC (basic_block, heap) *blocks)
3324 size_t i;
3325 basic_block bb;
3326 gimple_stmt_iterator gsi;
3328 for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
3330 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3332 gimple stmt = gsi_stmt (gsi);
3333 bitmap read_avail = READ_AVAIL_IN (bb);
3334 bitmap store_avail = STORE_AVAIL_IN (bb);
3335 bitmap store_antic = STORE_ANTIC_OUT (bb);
3336 unsigned int loc;
3338 if (is_tm_simple_load (stmt))
3340 loc = tm_memopt_value_number (stmt, NO_INSERT);
3341 if (store_avail && bitmap_bit_p (store_avail, loc))
3342 tm_memopt_transform_stmt (TRANSFORM_RAW, stmt, &gsi);
3343 else if (store_antic && bitmap_bit_p (store_antic, loc))
3345 tm_memopt_transform_stmt (TRANSFORM_RFW, stmt, &gsi);
3346 bitmap_set_bit (store_avail, loc);
3348 else if (read_avail && bitmap_bit_p (read_avail, loc))
3349 tm_memopt_transform_stmt (TRANSFORM_RAR, stmt, &gsi);
3350 else
3351 bitmap_set_bit (read_avail, loc);
3353 else if (is_tm_simple_store (stmt))
3355 loc = tm_memopt_value_number (stmt, NO_INSERT);
3356 if (store_avail && bitmap_bit_p (store_avail, loc))
3357 tm_memopt_transform_stmt (TRANSFORM_WAW, stmt, &gsi);
3358 else
3360 if (read_avail && bitmap_bit_p (read_avail, loc))
3361 tm_memopt_transform_stmt (TRANSFORM_WAR, stmt, &gsi);
3362 bitmap_set_bit (store_avail, loc);
3369 /* Return a new set of bitmaps for a BB. */
3371 static struct tm_memopt_bitmaps *
3372 tm_memopt_init_sets (void)
3374 struct tm_memopt_bitmaps *b
3375 = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
3376 b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3377 b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3378 b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
3379 b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
3381 b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3382 b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3383 b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
3384 b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
3385 return b;
3388 /* Free sets computed for each BB. */
3390 static void
3391 tm_memopt_free_sets (VEC (basic_block, heap) *blocks)
3393 size_t i;
3394 basic_block bb;
3396 for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
3397 bb->aux = NULL;
3400 /* Clear the visited bit for every basic block in BLOCKS. */
3402 static void
3403 tm_memopt_clear_visited (VEC (basic_block, heap) *blocks)
3405 size_t i;
3406 basic_block bb;
3408 for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
3409 BB_VISITED_P (bb) = false;
3412 /* Replace TM load/stores with hints for the runtime. We handle
3413 things like read-after-write, write-after-read, read-after-read,
3414 read-for-write, etc. */
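/* The per-region pipeline, in outline: assign a value number to every
   load/store address, accumulate the READ_LOCAL/STORE_LOCAL sets per
   block, solve the AVAIL and ANTIC data flow problems over the region,
   and finally rewrite each load/store into its hinted variant based on
   those sets.  */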
3416 static unsigned int
3417 execute_tm_memopt (void)
3419 struct tm_region *region;
3420 VEC (basic_block, heap) *bbs;
3422 tm_memopt_value_id = 0;
3423 tm_memopt_value_numbers = htab_create (10, tm_memop_hash, tm_memop_eq, free);
3425 for (region = all_tm_regions; region; region = region->next)
3427 /* All the TM stores/loads in the current region. */
3428 size_t i;
3429 basic_block bb;
3431 bitmap_obstack_initialize (&tm_memopt_obstack);
3433 /* Save all BBs for the current region. */
3434 bbs = get_tm_region_blocks (region->entry_block,
3435 region->exit_blocks,
3436 region->irr_blocks,
3437 NULL,
3438 false);
3440 /* Collect all the memory operations. */
3441 for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
3443 bb->aux = tm_memopt_init_sets ();
3444 tm_memopt_accumulate_memops (bb);
3447 /* Solve data flow equations and transform each block accordingly. */
3448 tm_memopt_clear_visited (bbs);
3449 tm_memopt_compute_available (region, bbs);
3450 tm_memopt_clear_visited (bbs);
3451 tm_memopt_compute_antic (region, bbs);
3452 tm_memopt_transform_blocks (bbs);
3454 tm_memopt_free_sets (bbs);
3455 VEC_free (basic_block, heap, bbs);
3456 bitmap_obstack_release (&tm_memopt_obstack);
3457 htab_empty (tm_memopt_value_numbers);
3460 htab_delete (tm_memopt_value_numbers);
3461 return 0;
3464 static bool
3465 gate_tm_memopt (void)
3467 return flag_tm && optimize > 0;
3470 struct gimple_opt_pass pass_tm_memopt =
3473 GIMPLE_PASS,
3474 "tmmemopt", /* name */
3475 gate_tm_memopt, /* gate */
3476 execute_tm_memopt, /* execute */
3477 NULL, /* sub */
3478 NULL, /* next */
3479 0, /* static_pass_number */
3480 TV_TRANS_MEM, /* tv_id */
3481 PROP_ssa | PROP_cfg, /* properties_required */
3482 0, /* properties_provided */
3483 0, /* properties_destroyed */
3484 0, /* todo_flags_start */
3485 0, /* todo_flags_finish */
3490 /* Interprocedural analysis for the creation of transactional clones.
3491 The aim of this pass is to find which functions are referenced in
3492 a non-irrevocable transaction context, and for those over which
3493 we have control (or user directive), create a version of the
3494 function which uses only the transactional interface to reference
3495 protected memories. This analysis proceeds in several steps:
3497 (1) Collect the set of all possible transactional clones:
3499 (a) For all local public functions marked tm_callable, push
3500 them onto the tm_callee queue.
3502 (b) For all local functions, scan for calls in transaction blocks.
3503 Push the caller and callee onto the tm_caller and tm_callee
3504 queues. Count the number of callers for each callee.
3506 (c) For each local function on the callee list, assume we will
3507 create a transactional clone. Push *all* calls onto the
3508 callee queues; count the number of clone callers separately
3509 from the number of original callers.
3511 (2) Propagate irrevocable status up the dominator tree:
3513 (a) Any external function on the callee list that is not marked
3514 tm_callable is irrevocable. Push all callers of such onto
3515 a worklist.
3517 (b) For each function on the worklist, mark each block that
3518 contains an irrevocable call. Use the AND operator to
3519 propagate that mark up the dominator tree.
3521 (c) If we reach the entry block for a possible transactional
3522 clone, then the transactional clone is irrevocable, and
3523 we should not create the clone after all. Push all
3524 callers onto the worklist.
3526 (d) Place tm_irrevocable calls at the beginning of the relevant
3527 blocks. The special case here is the entry block for the entire
3528 transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE so that
3529 the library begins the region in serial mode. Decrement
3530 the call count for all callees in the irrevocable region.
3532 (3) Create the transactional clones:
3534 Any tm_callee that still has a non-zero call count is cloned.
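/* As an illustration of step (3) (a sketch of the symbol naming only):
   a C++ function mangled _Z3foov that survives the analysis above gets
   a transactional clone emitted under the TM mangling prefix, e.g.
   _ZGTt3foov, and calls to it from within transactions are redirected
   to that clone.  */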
3537 /* This structure is stored in the AUX field of each cgraph_node. */
3538 struct tm_ipa_cg_data
3540 /* The clone of the function that got created. */
3541 struct cgraph_node *clone;
3543 /* The tm regions in the normal function. */
3544 struct tm_region *all_tm_regions;
3546 /* The blocks of the normal/clone functions that contain irrevocable
3547 calls, or blocks that are post-dominated by irrevocable calls. */
3548 bitmap irrevocable_blocks_normal;
3549 bitmap irrevocable_blocks_clone;
3551 /* The blocks of the normal function that are involved in transactions. */
3552 bitmap transaction_blocks_normal;
3554 /* The number of callers to the transactional clone of this function
3555 from normal and transactional clones respectively. */
3556 unsigned tm_callers_normal;
3557 unsigned tm_callers_clone;
3559 /* True if all calls to this function's transactional clone
3560 are irrevocable. Also automatically true if the function
3561 has no transactional clone. */
3562 bool is_irrevocable;
3564 /* Flags indicating the presence of this function in various queues. */
3565 bool in_callee_queue;
3566 bool in_worklist;
3568 /* Flags indicating the kind of scan desired while in the worklist. */
3569 bool want_irr_scan_normal;
3572 typedef struct cgraph_node *cgraph_node_p;
3574 DEF_VEC_P (cgraph_node_p);
3575 DEF_VEC_ALLOC_P (cgraph_node_p, heap);
3577 typedef VEC (cgraph_node_p, heap) *cgraph_node_queue;
3579 /* Return the ipa data associated with NODE, allocating zeroed memory
3580 if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
3581 and set *NODE accordingly. */
3583 static struct tm_ipa_cg_data *
3584 get_cg_data (struct cgraph_node **node, bool traverse_aliases)
3586 struct tm_ipa_cg_data *d;
3588 if (traverse_aliases && (*node)->alias)
3589 *node = cgraph_get_node ((*node)->thunk.alias);
3591 d = (struct tm_ipa_cg_data *) (*node)->symbol.aux;
3593 if (d == NULL)
3595 d = (struct tm_ipa_cg_data *)
3596 obstack_alloc (&tm_obstack.obstack, sizeof (*d));
3597 (*node)->symbol.aux = (void *) d;
3598 memset (d, 0, sizeof (*d));
3601 return d;
3604 /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
3605 it is already present. */
3607 static void
3608 maybe_push_queue (struct cgraph_node *node,
3609 cgraph_node_queue *queue_p, bool *in_queue_p)
3611 if (!*in_queue_p)
3613 *in_queue_p = true;
3614 VEC_safe_push (cgraph_node_p, heap, *queue_p, node);
3618 /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
3619 Queue all callees within block BB. */
3621 static void
3622 ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
3623 basic_block bb, bool for_clone)
3625 gimple_stmt_iterator gsi;
3627 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3629 gimple stmt = gsi_stmt (gsi);
3630 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
3632 tree fndecl = gimple_call_fndecl (stmt);
3633 if (fndecl)
3635 struct tm_ipa_cg_data *d;
3636 unsigned *pcallers;
3637 struct cgraph_node *node;
3639 if (is_tm_ending_fndecl (fndecl))
3640 continue;
3641 if (find_tm_replacement_function (fndecl))
3642 continue;
3644 node = cgraph_get_node (fndecl);
3645 gcc_assert (node != NULL);
3646 d = get_cg_data (&node, true);
3648 pcallers = (for_clone ? &d->tm_callers_clone
3649 : &d->tm_callers_normal);
3650 *pcallers += 1;
3652 maybe_push_queue (node, callees_p, &d->in_callee_queue);
3658 /* Scan all calls in NODE that are within a transaction region,
3659 and push the resulting nodes into the callee queue. */
3661 static void
3662 ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
3663 cgraph_node_queue *callees_p)
3665 struct tm_region *r;
3667 d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
3668 d->all_tm_regions = all_tm_regions;
3670 for (r = all_tm_regions; r; r = r->next)
3672 VEC (basic_block, heap) *bbs;
3673 basic_block bb;
3674 unsigned i;
3676 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
3677 d->transaction_blocks_normal, false);
3679 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
3680 ipa_tm_scan_calls_block (callees_p, bb, false);
3682 VEC_free (basic_block, heap, bbs);
3686 /* Scan all calls in NODE as if this is the transactional clone,
3687 and push the destinations into the callee queue. */
3689 static void
3690 ipa_tm_scan_calls_clone (struct cgraph_node *node,
3691 cgraph_node_queue *callees_p)
3693 struct function *fn = DECL_STRUCT_FUNCTION (node->symbol.decl);
3694 basic_block bb;
3696 FOR_EACH_BB_FN (bb, fn)
3697 ipa_tm_scan_calls_block (callees_p, bb, true);
3700 /* The function NODE has been detected to be irrevocable. Push all
3701 of its callers onto WORKLIST for the purpose of re-scanning them. */
3703 static void
3704 ipa_tm_note_irrevocable (struct cgraph_node *node,
3705 cgraph_node_queue *worklist_p)
3707 struct tm_ipa_cg_data *d = get_cg_data (&node, true);
3708 struct cgraph_edge *e;
3710 d->is_irrevocable = true;
3712 for (e = node->callers; e ; e = e->next_caller)
3714 basic_block bb;
3715 struct cgraph_node *caller;
3717 /* Don't examine recursive calls. */
3718 if (e->caller == node)
3719 continue;
3720 /* Even if we think we can go irrevocable, believe the user
3721 above all. */
3722 if (is_tm_safe_or_pure (e->caller->symbol.decl))
3723 continue;
3725 caller = e->caller;
3726 d = get_cg_data (&caller, true);
3728 /* Check if the call into NODE occurs within a transactional region of
3729 the caller. If so, schedule the caller for normal re-scan as well. */
3730 bb = gimple_bb (e->call_stmt);
3731 gcc_assert (bb != NULL);
3732 if (d->transaction_blocks_normal
3733 && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
3734 d->want_irr_scan_normal = true;
3736 maybe_push_queue (caller, worklist_p, &d->in_worklist);
3740 /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
3741 within the block is irrevocable. */
3743 static bool
3744 ipa_tm_scan_irr_block (basic_block bb)
3746 gimple_stmt_iterator gsi;
3747 tree fn;
3749 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3751 gimple stmt = gsi_stmt (gsi);
3752 switch (gimple_code (stmt))
3754 case GIMPLE_CALL:
3755 if (is_tm_pure_call (stmt))
3756 break;
3758 fn = gimple_call_fn (stmt);
3760 /* Functions with the attribute are by definition irrevocable. */
3761 if (is_tm_irrevocable (fn))
3762 return true;
3764 /* For direct function calls, go ahead and check for replacement
3765 functions, or transitive irrevocable functions. For indirect
3766 functions, we'll ask the runtime. */
3767 if (TREE_CODE (fn) == ADDR_EXPR)
3769 struct tm_ipa_cg_data *d;
3770 struct cgraph_node *node;
3772 fn = TREE_OPERAND (fn, 0);
3773 if (is_tm_ending_fndecl (fn))
3774 break;
3775 if (find_tm_replacement_function (fn))
3776 break;
3778 node = cgraph_get_node (fn);
3779 d = get_cg_data (&node, true);
3781 /* Return true if irrevocable, but above all, believe
3782 the user. */
3783 if (d->is_irrevocable
3784 && !is_tm_safe_or_pure (fn))
3785 return true;
3787 break;
3789 case GIMPLE_ASM:
3790 /* ??? The Approved Method of indicating that an inline
3791 assembly statement is not relevant to the transaction
3792 is to wrap it in a __tm_waiver block. This is not
3793 yet implemented, so we can't check for it. */
3794 if (is_tm_safe (current_function_decl))
3796 tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
3797 SET_EXPR_LOCATION (t, gimple_location (stmt));
3798 TREE_BLOCK (t) = gimple_block (stmt);
3799 error ("%Kasm not allowed in %<transaction_safe%> function", t);
3801 return true;
3803 default:
3804 break;
3808 return false;
3811 /* For each of the blocks seeded within PQUEUE, walk the CFG looking
3812 for new irrevocable blocks, marking them in NEW_IRR. Don't bother
3813 scanning past OLD_IRR or EXIT_BLOCKS. */
3815 static bool
3816 ipa_tm_scan_irr_blocks (VEC (basic_block, heap) **pqueue, bitmap new_irr,
3817 bitmap old_irr, bitmap exit_blocks)
3819 bool any_new_irr = false;
3820 edge e;
3821 edge_iterator ei;
3822 bitmap visited_blocks = BITMAP_ALLOC (NULL);
3826 basic_block bb = VEC_pop (basic_block, *pqueue);
3828 /* Don't re-scan blocks we know already are irrevocable. */
3829 if (old_irr && bitmap_bit_p (old_irr, bb->index))
3830 continue;
3832 if (ipa_tm_scan_irr_block (bb))
3834 bitmap_set_bit (new_irr, bb->index);
3835 any_new_irr = true;
3837 else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
3839 FOR_EACH_EDGE (e, ei, bb->succs)
3840 if (!bitmap_bit_p (visited_blocks, e->dest->index))
3842 bitmap_set_bit (visited_blocks, e->dest->index);
3843 VEC_safe_push (basic_block, heap, *pqueue, e->dest);
3847 while (!VEC_empty (basic_block, *pqueue));
3849 BITMAP_FREE (visited_blocks);
3851 return any_new_irr;
3854 /* Propagate the irrevocable property both up and down the dominator tree.
3855 ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
3856 are the exit blocks of the TM regions; OLD_IRR are the results of a previous
3857 scan of the dominator tree which has been fully propagated; NEW_IRR is the
3858 set of new blocks which are gaining the irrevocable property during the current scan. */
3860 static void
3861 ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
3862 bitmap old_irr, bitmap exit_blocks)
3864 VEC (basic_block, heap) *bbs;
3865 bitmap all_region_blocks;
3867 /* If this block is in the old set, no need to rescan. */
3868 if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
3869 return;
3871 all_region_blocks = BITMAP_ALLOC (&tm_obstack);
3872 bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
3873 all_region_blocks, false);
3876 basic_block bb = VEC_pop (basic_block, bbs);
3877 bool this_irr = bitmap_bit_p (new_irr, bb->index);
3878 bool all_son_irr = false;
3879 edge_iterator ei;
3880 edge e;
3882 /* Propagate up. If my children are, I am too, but we must have
3883 at least one child that is. */
3884 if (!this_irr)
3886 FOR_EACH_EDGE (e, ei, bb->succs)
3888 if (!bitmap_bit_p (new_irr, e->dest->index))
3890 all_son_irr = false;
3891 break;
3893 else
3894 all_son_irr = true;
3896 if (all_son_irr)
3898 /* Add block to new_irr if it hasn't already been processed. */
3899 if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
3901 bitmap_set_bit (new_irr, bb->index);
3902 this_irr = true;
3907 /* Propagate down to everyone we immediately dominate. */
3908 if (this_irr)
3910 basic_block son;
3911 for (son = first_dom_son (CDI_DOMINATORS, bb);
3912 son;
3913 son = next_dom_son (CDI_DOMINATORS, son))
3915 /* Make sure block is actually in a TM region, and it
3916 isn't already in old_irr. */
3917 if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
3918 && bitmap_bit_p (all_region_blocks, son->index))
3919 bitmap_set_bit (new_irr, son->index);
3923 while (!VEC_empty (basic_block, bbs));
3925 BITMAP_FREE (all_region_blocks);
3926 VEC_free (basic_block, heap, bbs);
3929 static void
3930 ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
3932 gimple_stmt_iterator gsi;
3934 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3936 gimple stmt = gsi_stmt (gsi);
3937 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
3939 tree fndecl = gimple_call_fndecl (stmt);
3940 if (fndecl)
3942 struct tm_ipa_cg_data *d;
3943 unsigned *pcallers;
3944 struct cgraph_node *tnode;
3946 if (is_tm_ending_fndecl (fndecl))
3947 continue;
3948 if (find_tm_replacement_function (fndecl))
3949 continue;
3951 tnode = cgraph_get_node (fndecl);
3952 d = get_cg_data (&tnode, true);
3954 pcallers = (for_clone ? &d->tm_callers_clone
3955 : &d->tm_callers_normal);
3957 gcc_assert (*pcallers > 0);
3958 *pcallers -= 1;
3964 /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
3965 as well as other irrevocable actions such as inline assembly. Mark all
3966 such blocks as irrevocable and decrement the number of calls to
3967 transactional clones. Return true if, for the transactional clone, the
3968 entire function is irrevocable. */
3970 static bool
3971 ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
3973 struct tm_ipa_cg_data *d;
3974 bitmap new_irr, old_irr;
3975 VEC (basic_block, heap) *queue;
3976 bool ret = false;
3978 /* Builtin operators (operator new, and such). */
3979 if (DECL_STRUCT_FUNCTION (node->symbol.decl) == NULL
3980 || DECL_STRUCT_FUNCTION (node->symbol.decl)->cfg == NULL)
3981 return false;
3983 current_function_decl = node->symbol.decl;
3984 push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));
3985 calculate_dominance_info (CDI_DOMINATORS);
3987 d = get_cg_data (&node, true);
3988 queue = VEC_alloc (basic_block, heap, 10);
3989 new_irr = BITMAP_ALLOC (&tm_obstack);
3991 /* Scan each tm region, propagating irrevocable status through the tree. */
3992 if (for_clone)
3994 old_irr = d->irrevocable_blocks_clone;
3995 VEC_quick_push (basic_block, queue, single_succ (ENTRY_BLOCK_PTR));
3996 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
3998 ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR), new_irr,
3999 old_irr, NULL);
4000 ret = bitmap_bit_p (new_irr, single_succ (ENTRY_BLOCK_PTR)->index);
4003 else
4005 struct tm_region *region;
4007 old_irr = d->irrevocable_blocks_normal;
4008 for (region = d->all_tm_regions; region; region = region->next)
4010 VEC_quick_push (basic_block, queue, region->entry_block);
4011 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
4012 region->exit_blocks))
4013 ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
4014 region->exit_blocks);
4018 /* If we found any new irrevocable blocks, reduce the call count for
4019 transactional clones within the irrevocable blocks. Save the new
4020 set of irrevocable blocks for next time. */
4021 if (!bitmap_empty_p (new_irr))
4023 bitmap_iterator bmi;
4024 unsigned i;
4026 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4027 ipa_tm_decrement_clone_counts (BASIC_BLOCK (i), for_clone);
4029 if (old_irr)
4031 bitmap_ior_into (old_irr, new_irr);
4032 BITMAP_FREE (new_irr);
4034 else if (for_clone)
4035 d->irrevocable_blocks_clone = new_irr;
4036 else
4037 d->irrevocable_blocks_normal = new_irr;
4039 if (dump_file && new_irr)
4041 const char *dname;
4042 bitmap_iterator bmi;
4043 unsigned i;
4045 dname = lang_hooks.decl_printable_name (current_function_decl, 2);
4046 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4047 fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
4050 else
4051 BITMAP_FREE (new_irr);
4053 VEC_free (basic_block, heap, queue);
4054 pop_cfun ();
4055 current_function_decl = NULL;
4057 return ret;
4060 /* Return true if, for the transactional clone of NODE, any call
4061 may enter irrevocable mode. */
4063 static bool
4064 ipa_tm_mayenterirr_function (struct cgraph_node *node)
4066 struct tm_ipa_cg_data *d;
4067 tree decl;
4068 unsigned flags;
4070 d = get_cg_data (&node, true);
4071 decl = node->symbol.decl;
4072 flags = flags_from_decl_or_type (decl);
4074 /* Handle some TM builtins. Ordinarily these aren't actually generated
4075 at this point, but handling them when written directly by the
4076 user makes it easier to build unit tests. */
4077 if (flags & ECF_TM_BUILTIN)
4078 return false;
4080 /* Filter out all functions that are marked. */
4081 if (flags & ECF_TM_PURE)
4082 return false;
4083 if (is_tm_safe (decl))
4084 return false;
4085 if (is_tm_irrevocable (decl))
4086 return true;
4087 if (is_tm_callable (decl))
4088 return true;
4089 if (find_tm_replacement_function (decl))
4090 return true;
4092 /* If we aren't seeing the final version of the function, we don't
4093 know what it will contain at runtime. */
4094 if (cgraph_function_body_availability (node) < AVAIL_AVAILABLE)
4095 return true;
4097 /* If the function must go irrevocable, then of course true. */
4098 if (d->is_irrevocable)
4099 return true;
4101 /* If there are any blocks marked irrevocable, then the function
4102 as a whole may enter irrevocable. */
4103 if (d->irrevocable_blocks_clone)
4104 return true;
4106 /* We may have previously marked this function as tm_may_enter_irr;
4107 see pass_diagnose_tm_blocks. */
4108 if (node->local.tm_may_enter_irr)
4109 return true;
4111 /* Recurse on the main body for aliases. In general, this will
4112 result in one of the bits above being set so that we will not
4113 have to recurse next time. */
4114 if (node->alias)
4115 return ipa_tm_mayenterirr_function (cgraph_get_node (node->thunk.alias));
4117 /* What remains are unmarked local functions with nothing that forces
4118 them to go irrevocable. */
4119 return false;
4122 /* Diagnose calls from transaction_safe functions to unmarked
4123 functions that are determined not to be safe. */
4125 static void
4126 ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
4128 struct cgraph_edge *e;
4130 for (e = node->callees; e ; e = e->next_callee)
4131 if (!is_tm_callable (e->callee->symbol.decl)
4132 && e->callee->local.tm_may_enter_irr)
4133 error_at (gimple_location (e->call_stmt),
4134 "unsafe function call %qD within "
4135 "%<transaction_safe%> function", e->callee->symbol.decl);
4138 /* Diagnose calls from atomic transactions to unmarked functions
4139 that are determined not to be safe. */
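/* For instance, given a hypothetical unannotated function unmarked_fn
   that may enter irrevocable mode, user code such as

     __transaction_atomic { asm (""); unmarked_fn (); }

   is diagnosed below with "asm not allowed in atomic transaction" and
   "unsafe function call 'unmarked_fn' within atomic transaction".  */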
4141 static void
4142 ipa_tm_diagnose_transaction (struct cgraph_node *node,
4143 struct tm_region *all_tm_regions)
4145 struct tm_region *r;
4147 for (r = all_tm_regions; r ; r = r->next)
4148 if (gimple_transaction_subcode (r->transaction_stmt) & GTMA_IS_RELAXED)
4150 /* Atomic transactions can be nested inside relaxed ones. */
4151 if (r->inner)
4152 ipa_tm_diagnose_transaction (node, r->inner);
4154 else
4156 VEC (basic_block, heap) *bbs;
4157 gimple_stmt_iterator gsi;
4158 basic_block bb;
4159 size_t i;
4161 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
4162 r->irr_blocks, NULL, false);
4164 for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
4165 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4167 gimple stmt = gsi_stmt (gsi);
4168 tree fndecl;
4170 if (gimple_code (stmt) == GIMPLE_ASM)
4172 error_at (gimple_location (stmt),
4173 "asm not allowed in atomic transaction");
4174 continue;
4177 if (!is_gimple_call (stmt))
4178 continue;
4179 fndecl = gimple_call_fndecl (stmt);
4181 /* Indirect function calls have been diagnosed already. */
4182 if (!fndecl)
4183 continue;
4185 /* Stop at the end of the transaction. */
4186 if (is_tm_ending_fndecl (fndecl))
4188 if (bitmap_bit_p (r->exit_blocks, bb->index))
4189 break;
4190 continue;
4193 /* Marked functions have been diagnosed already. */
4194 if (is_tm_pure_call (stmt))
4195 continue;
4196 if (is_tm_callable (fndecl))
4197 continue;
4199 if (cgraph_local_info (fndecl)->tm_may_enter_irr)
4200 error_at (gimple_location (stmt),
4201 "unsafe function call %qD within "
4202 "atomic transaction", fndecl);
4205 VEC_free (basic_block, heap, bbs);
4209 /* Return a transactional mangled name for the identifier OLD_ASM_ID
4210 (typically a DECL_ASSEMBLER_NAME). The returned value is a
4211 GC-allocated IDENTIFIER_NODE; the caller need not free it. */
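/* For example, a C++ mangled name "_Z3foov" becomes "_ZGTt3foov",
   while an unencoded C symbol such as "bar" is prefixed with its
   length, giving "_ZGTt3bar".  */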
4213 static tree
4214 tm_mangle (tree old_asm_id)
4216 const char *old_asm_name;
4217 char *tm_name;
4218 void *alloc = NULL;
4219 struct demangle_component *dc;
4220 tree new_asm_id;
4222 /* Determine if the symbol is already a valid C++ mangled name. Do this
4223 even for C, which might be interfacing with C++ code via appropriately
4224 ugly identifiers. */
4225 /* ??? We could probably do just as well checking for "_Z" and be done. */
4226 old_asm_name = IDENTIFIER_POINTER (old_asm_id);
4227 dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
4229 if (dc == NULL)
4231 char length[8];
4233 do_unencoded:
4234 sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
4235 tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
4237 else
4239 old_asm_name += 2; /* Skip _Z */
4241 switch (dc->type)
4243 case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
4244 case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
4245 /* Don't play silly games, you! */
4246 goto do_unencoded;
4248 case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
4249 /* I'd really like to know if we can ever be passed one of
4250 these from the C++ front end. The Logical Thing would seem
4251 to be that hidden-alias should be outermost, so that we get
4252 hidden-alias of a transaction-clone and not vice-versa. */
4253 old_asm_name += 2;
4254 break;
4256 default:
4257 break;
4260 tm_name = concat ("_ZGTt", old_asm_name, NULL);
4262 free (alloc);
4264 new_asm_id = get_identifier (tm_name);
4265 free (tm_name);
4267 return new_asm_id;
4270 static inline void
4271 ipa_tm_mark_force_output_node (struct cgraph_node *node)
4273 cgraph_mark_force_output_node (node);
4274 /* ??? function_and_variable_visibility will reset
4275 the needed bit, without actually checking. */
4276 node->analyzed = 1;
4279 /* Callback data for ipa_tm_create_version_alias. */
4280 struct create_version_alias_info
4282 struct cgraph_node *old_node;
4283 tree new_decl;
4286 /* A subroutine of ipa_tm_create_version, called via
4287 cgraph_for_node_and_aliases. Create new tm clones for each of
4288 the existing aliases. */
4289 static bool
4290 ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
4292 struct create_version_alias_info *info
4293 = (struct create_version_alias_info *)data;
4294 tree old_decl, new_decl, tm_name;
4295 struct cgraph_node *new_node;
4297 if (!node->same_body_alias)
4298 return false;
4300 old_decl = node->symbol.decl;
4301 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4302 new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
4303 TREE_CODE (old_decl), tm_name,
4304 TREE_TYPE (old_decl));
4306 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4307 SET_DECL_RTL (new_decl, NULL);
4309 /* Based loosely on C++'s make_alias_for(). */
4310 TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
4311 DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
4312 DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
4313 TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
4314 DECL_EXTERNAL (new_decl) = 0;
4315 DECL_ARTIFICIAL (new_decl) = 1;
4316 TREE_ADDRESSABLE (new_decl) = 1;
4317 TREE_USED (new_decl) = 1;
4318 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4320 /* Perform the same remapping to the comdat group. */
4321 if (DECL_ONE_ONLY (new_decl))
4322 DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
4324 new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl);
4325 new_node->tm_clone = true;
4326 new_node->symbol.externally_visible = info->old_node->symbol.externally_visible;
4327 /* ?? Do not traverse aliases here. */
4328 get_cg_data (&node, false)->clone = new_node;
4330 record_tm_clone_pair (old_decl, new_decl);
4332 if (info->old_node->symbol.force_output
4333 || ipa_ref_list_first_referring (&info->old_node->symbol.ref_list))
4334 ipa_tm_mark_force_output_node (new_node);
4335 return false;
4338 /* Create a copy of the function (possibly declaration only) of OLD_NODE,
4339 appropriate for the transactional clone. */
4341 static void
4342 ipa_tm_create_version (struct cgraph_node *old_node)
4344 tree new_decl, old_decl, tm_name;
4345 struct cgraph_node *new_node;
4347 old_decl = old_node->symbol.decl;
4348 new_decl = copy_node (old_decl);
4350 /* DECL_ASSEMBLER_NAME needs to be set before we call
4351 cgraph_copy_node_for_versioning below, because cgraph_node will
4352 fill the assembler_name_hash. */
4353 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4354 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4355 SET_DECL_RTL (new_decl, NULL);
4356 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4358 /* Perform the same remapping to the comdat group. */
4359 if (DECL_ONE_ONLY (new_decl))
4360 DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
4362 new_node = cgraph_copy_node_for_versioning (old_node, new_decl, NULL, NULL);
4363 new_node->symbol.externally_visible = old_node->symbol.externally_visible;
4364 new_node->lowered = true;
4365 new_node->tm_clone = 1;
4366 get_cg_data (&old_node, true)->clone = new_node;
4368 if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE)
4370 /* Remap extern inline to static inline. */
4371 /* ??? Is it worth trying to use make_decl_one_only? */
4372 if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
4374 DECL_EXTERNAL (new_decl) = 0;
4375 TREE_PUBLIC (new_decl) = 0;
4376 DECL_WEAK (new_decl) = 0;
4379 tree_function_versioning (old_decl, new_decl, NULL, false, NULL, false,
4380 NULL, NULL);
4383 record_tm_clone_pair (old_decl, new_decl);
4385 cgraph_call_function_insertion_hooks (new_node);
4386 if (old_node->symbol.force_output
4387 || ipa_ref_list_first_referring (&old_node->symbol.ref_list))
4388 ipa_tm_mark_force_output_node (new_node);
4390 /* Do the same thing, but for any aliases of the original node. */
4392 struct create_version_alias_info data;
4393 data.old_node = old_node;
4394 data.new_decl = new_decl;
4395 cgraph_for_node_and_aliases (old_node, ipa_tm_create_version_alias,
4396 &data, true);
4400 /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */
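/* Schematically, the statement inserted at the head of BB is

     _ITM_changeTransactionMode (MODE_SERIALIRREVOCABLE);

   assuming BUILT_IN_TM_IRREVOCABLE maps to the usual
   _ITM_changeTransactionMode runtime entry point.  */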
4402 static void
4403 ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
4404 basic_block bb)
4406 gimple_stmt_iterator gsi;
4407 gimple g;
4409 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
4411 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
4412 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
4414 split_block_after_labels (bb);
4415 gsi = gsi_after_labels (bb);
4416 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
4418 cgraph_create_edge (node,
4419 cgraph_get_create_node
4420 (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
4421 g, 0,
4422 compute_call_stmt_bb_frequency (node->symbol.decl,
4423 gimple_bb (g)));
4426 /* Construct a call to TM_GETTMCLONE and insert it before GSI. */
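/* Schematically, an indirect call

     x = fn (args);

   is rewritten as something like

     ret = _ITM_getTMCloneSafe (fn);
     callfn = (fntype) ret;
     x = callfn (args);

   with _ITM_getTMCloneOrIrrevocable used instead when FN's type is not
   transaction_safe; those runtime entry-point names are the ones assumed
   for BUILT_IN_TM_GETTMCLONE_SAFE and BUILT_IN_TM_GETTMCLONE_IRR.  */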
4428 static bool
4429 ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
4430 struct tm_region *region,
4431 gimple_stmt_iterator *gsi, gimple stmt)
4433 tree gettm_fn, ret, old_fn, callfn;
4434 gimple g, g2;
4435 bool safe;
4437 old_fn = gimple_call_fn (stmt);
4439 if (TREE_CODE (old_fn) == ADDR_EXPR)
4441 tree fndecl = TREE_OPERAND (old_fn, 0);
4442 tree clone = get_tm_clone_pair (fndecl);
4444 /* By transforming the call into a TM_GETTMCLONE, we are
4445 technically taking the address of the original function and
4446 its clone. Explain this so inlining will know this function
4447 is needed. */
4448 cgraph_mark_address_taken_node (cgraph_get_node (fndecl));
4449 if (clone)
4450 cgraph_mark_address_taken_node (cgraph_get_node (clone));
4453 safe = is_tm_safe (TREE_TYPE (old_fn));
4454 gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
4455 : BUILT_IN_TM_GETTMCLONE_IRR);
4456 ret = create_tmp_var (ptr_type_node, NULL);
4458 if (!safe)
4459 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
4461 /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */
4462 if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
4463 old_fn = OBJ_TYPE_REF_EXPR (old_fn);
4465 g = gimple_build_call (gettm_fn, 1, old_fn);
4466 ret = make_ssa_name (ret, g);
4467 gimple_call_set_lhs (g, ret);
4469 gsi_insert_before (gsi, g, GSI_SAME_STMT);
4471 cgraph_create_edge (node, cgraph_get_create_node (gettm_fn), g, 0,
4472 compute_call_stmt_bb_frequency (node->symbol.decl,
4473 gimple_bb (g)));
4475 /* Cast the return value from tm_gettmclone* into the appropriate
4476 function pointer. */
4477 callfn = create_tmp_var (TREE_TYPE (old_fn), NULL);
4478 g2 = gimple_build_assign (callfn,
4479 fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
4480 callfn = make_ssa_name (callfn, g2);
4481 gimple_assign_set_lhs (g2, callfn);
4482 gsi_insert_before (gsi, g2, GSI_SAME_STMT);
4484 /* ??? This is a hack to preserve the NOTHROW bit on the call,
4485 which we would have derived from the decl. Failure to save
4486 this bit means we might have to split the basic block. */
4487 if (gimple_call_nothrow_p (stmt))
4488 gimple_call_set_nothrow (stmt, true);
4490 gimple_call_set_fn (stmt, callfn);
4492 /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
4493 for a call statement. Fix it. */
4495 tree lhs = gimple_call_lhs (stmt);
4496 tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
4497 if (lhs
4498 && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
4500 tree temp;
4502 temp = create_tmp_reg (rettype, 0);
4503 gimple_call_set_lhs (stmt, temp);
4505 g2 = gimple_build_assign (lhs,
4506 fold_build1 (VIEW_CONVERT_EXPR,
4507 TREE_TYPE (lhs), temp));
4508 gsi_insert_after (gsi, g2, GSI_SAME_STMT);
4512 update_stmt (stmt);
4514 return true;
4517 /* Helper function for ipa_tm_transform_calls*. Given a call
4518 statement in GSI which resides inside transaction REGION, redirect
4519 the call to either its wrapper function or its clone. */
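/* For example, a call to memcpy inside a transaction resolves through
   find_tm_replacement_function to its TM runtime equivalent, while a
   call to an ordinary user function is redirected to its transactional
   clone when one exists, and otherwise goes through the runtime via
   ipa_tm_insert_gettmclone_call.  */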
4521 static void
4522 ipa_tm_transform_calls_redirect (struct cgraph_node *node,
4523 struct tm_region *region,
4524 gimple_stmt_iterator *gsi,
4525 bool *need_ssa_rename_p)
4527 gimple stmt = gsi_stmt (*gsi);
4528 struct cgraph_node *new_node;
4529 struct cgraph_edge *e = cgraph_edge (node, stmt);
4530 tree fndecl = gimple_call_fndecl (stmt);
4532 /* For indirect calls, pass the address through the runtime. */
4533 if (fndecl == NULL)
4535 *need_ssa_rename_p |=
4536 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
4537 return;
4540 /* Handle some TM builtins. Ordinarily these aren't actually generated
4541 at this point, but handling them when written directly by the
4542 user makes it easier to build unit tests. */
4543 if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
4544 return;
4546 /* Fixup recursive calls inside clones. */
4547 /* ??? Why did cgraph_copy_node_for_versioning update the call edges
4548 for recursion but not update the call statements themselves? */
4549 if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
4551 gimple_call_set_fndecl (stmt, current_function_decl);
4552 return;
4555 /* If there is a replacement, use it. */
4556 fndecl = find_tm_replacement_function (fndecl);
4557 if (fndecl)
4559 new_node = cgraph_get_create_node (fndecl);
4561 /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
4563 We can't do this earlier in record_tm_replacement because
4564 cgraph_remove_unreachable_nodes is called before we inject
4565 references to the node. Further, we can't do this in some
4566 nice central place in ipa_tm_execute because we don't have
4567 the exact list of wrapper functions that would be used.
4568 Marking more wrappers than necessary results in the creation
4569 of unnecessary cgraph_nodes, which can cause some of the
4570 other IPA passes to crash.
4572 We do need to mark these nodes so that we get the proper
4573 result in expand_call_tm. */
4574 /* ??? This seems broken. How is it that we're marking the
4575 CALLEE as may_enter_irr? Surely we should be marking the
4576 CALLER. Also note that find_tm_replacement_function also
4577 contains mappings into the TM runtime, e.g. memcpy. These
4578 we know won't go irrevocable. */
4579 new_node->local.tm_may_enter_irr = 1;
4581 else
4583 struct tm_ipa_cg_data *d;
4584 struct cgraph_node *tnode = e->callee;
4586 d = get_cg_data (&tnode, true);
4587 new_node = d->clone;
4589 /* As we've already skipped pure calls and appropriate builtins,
4590 and we've already marked irrevocable blocks, if we can't come
4591 up with a static replacement, then ask the runtime. */
4592 if (new_node == NULL)
4594 *need_ssa_rename_p |=
4595 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
4596 return;
4599 fndecl = new_node->symbol.decl;
4602 cgraph_redirect_edge_callee (e, new_node);
4603 gimple_call_set_fndecl (stmt, fndecl);
4606 /* Helper function for ipa_tm_transform_calls. For a given BB,
4607 install calls to tm_irrevocable when IRR_BLOCKS are reached, and
4608 redirect other calls to the generated transactional clone. */
4610 static bool
4611 ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
4612 basic_block bb, bitmap irr_blocks)
4614 gimple_stmt_iterator gsi;
4615 bool need_ssa_rename = false;
4617 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
4619 ipa_tm_insert_irr_call (node, region, bb);
4620 return true;
4623 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4625 gimple stmt = gsi_stmt (gsi);
4627 if (!is_gimple_call (stmt))
4628 continue;
4629 if (is_tm_pure_call (stmt))
4630 continue;
4632 /* Redirect edges to the appropriate replacement or clone. */
4633 ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
4636 return need_ssa_rename;
4639 /* Walk the CFG for REGION, beginning at BB. Install calls to
4640 tm_irrevocable when IRR_BLOCKS are reached, and redirect other calls
4641 to the generated transactional clone. */
4643 static bool
4644 ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
4645 basic_block bb, bitmap irr_blocks)
4647 bool need_ssa_rename = false;
4648 edge e;
4649 edge_iterator ei;
4650 VEC(basic_block, heap) *queue = NULL;
4651 bitmap visited_blocks = BITMAP_ALLOC (NULL);
4653 VEC_safe_push (basic_block, heap, queue, bb);
4656 bb = VEC_pop (basic_block, queue);
4658 need_ssa_rename |=
4659 ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
4661 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
4662 continue;
4664 if (region && bitmap_bit_p (region->exit_blocks, bb->index))
4665 continue;
4667 FOR_EACH_EDGE (e, ei, bb->succs)
4668 if (!bitmap_bit_p (visited_blocks, e->dest->index))
4670 bitmap_set_bit (visited_blocks, e->dest->index);
4671 VEC_safe_push (basic_block, heap, queue, e->dest);
4674 while (!VEC_empty (basic_block, queue));
4676 VEC_free (basic_block, heap, queue);
4677 BITMAP_FREE (visited_blocks);
4679 return need_ssa_rename;
4682 /* Transform the calls within the TM regions of NODE. */
4684 static void
4685 ipa_tm_transform_transaction (struct cgraph_node *node)
4687 struct tm_ipa_cg_data *d;
4688 struct tm_region *region;
4689 bool need_ssa_rename = false;
4691 d = get_cg_data (&node, true);
4693 current_function_decl = node->symbol.decl;
4694 push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));
4695 calculate_dominance_info (CDI_DOMINATORS);
4697 for (region = d->all_tm_regions; region; region = region->next)
4699 /* If we're sure to go irrevocable, don't transform anything. */
4700 if (d->irrevocable_blocks_normal
4701 && bitmap_bit_p (d->irrevocable_blocks_normal,
4702 region->entry_block->index))
4704 transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE);
4705 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
4706 continue;
4709 need_ssa_rename |=
4710 ipa_tm_transform_calls (node, region, region->entry_block,
4711 d->irrevocable_blocks_normal);
4714 if (need_ssa_rename)
4715 update_ssa (TODO_update_ssa_only_virtuals);
4717 pop_cfun ();
4718 current_function_decl = NULL;
4721 /* Transform the calls within the transactional clone of NODE. */
4723 static void
4724 ipa_tm_transform_clone (struct cgraph_node *node)
4726 struct tm_ipa_cg_data *d;
4727 bool need_ssa_rename;
4729 d = get_cg_data (&node, true);
4731 /* If this function makes no calls and has no irrevocable blocks,
4732 then there's nothing to do. */
4733 /* ??? Remove non-aborting top-level transactions. */
4734 if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
4735 return;
4737 current_function_decl = d->clone->symbol.decl;
4738 push_cfun (DECL_STRUCT_FUNCTION (current_function_decl));
4739 calculate_dominance_info (CDI_DOMINATORS);
4741 need_ssa_rename =
4742 ipa_tm_transform_calls (d->clone, NULL, single_succ (ENTRY_BLOCK_PTR),
4743 d->irrevocable_blocks_clone);
4745 if (need_ssa_rename)
4746 update_ssa (TODO_update_ssa_only_virtuals);
4748 pop_cfun ();
4749 current_function_decl = NULL;
4752 /* Main entry point for the transactional memory IPA pass. */
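/* Roughly, the steps below are: queue all tm_callable and
   transaction-containing functions; scan for irrevocable blocks and
   propagate the results to a fixed point; propagate the
   tm_may_enter_irr bit back through the callgraph; diagnose tm_safe
   functions and atomic regions; create the transactional clones; and
   finally redirect calls and insert irrevocable marks.  */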
4754 static unsigned int
4755 ipa_tm_execute (void)
4757 cgraph_node_queue tm_callees = NULL;
4758 /* List of functions that will go irrevocable. */
4759 cgraph_node_queue irr_worklist = NULL;
4761 struct cgraph_node *node;
4762 struct tm_ipa_cg_data *d;
4763 enum availability a;
4764 unsigned int i;
4766 #ifdef ENABLE_CHECKING
4767 verify_cgraph ();
4768 #endif
4770 bitmap_obstack_initialize (&tm_obstack);
4772 /* For all local functions marked tm_callable, queue them. */
4773 FOR_EACH_DEFINED_FUNCTION (node)
4774 if (is_tm_callable (node->symbol.decl)
4775 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
4777 d = get_cg_data (&node, true);
4778 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
4781 /* For all local, reachable functions... */
4782 FOR_EACH_DEFINED_FUNCTION (node)
4783 if (node->lowered
4784 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
4786 /* ... marked tm_pure, record that fact for the runtime by
4787 indicating that the pure function is its own tm_callable.
4788 No need to do this if the function's address can't be taken. */
4789 if (is_tm_pure (node->symbol.decl))
4791 if (!node->local.local)
4792 record_tm_clone_pair (node->symbol.decl, node->symbol.decl);
4793 continue;
4796 current_function_decl = node->symbol.decl;
4797 push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));
4798 calculate_dominance_info (CDI_DOMINATORS);
4800 tm_region_init (NULL);
4801 if (all_tm_regions)
4803 d = get_cg_data (&node, true);
4805 /* Scan for calls that are in each transaction. */
4806 ipa_tm_scan_calls_transaction (d, &tm_callees);
4808 /* Put it in the worklist so we can scan the function
4809 later (ipa_tm_scan_irr_function) and mark the
4810 irrevocable blocks. */
4811 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
4812 d->want_irr_scan_normal = true;
4815 pop_cfun ();
4816 current_function_decl = NULL;
4819 /* For every local function on the callee list, scan as if we will be
4820 creating a transactional clone, queueing all new functions we find
4821 along the way. */
4822 for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
4824 node = VEC_index (cgraph_node_p, tm_callees, i);
4825 a = cgraph_function_body_availability (node);
4826 d = get_cg_data (&node, true);
4828 /* Put it in the worklist so we can scan the function later
4829 (ipa_tm_scan_irr_function) and mark the irrevocable
4830 blocks. */
4831 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
4833 /* Some callees cannot be arbitrarily cloned. These will always be
4834 irrevocable. Mark these now, so that we need not scan them. */
4835 if (is_tm_irrevocable (node->symbol.decl))
4836 ipa_tm_note_irrevocable (node, &irr_worklist);
4837 else if (a <= AVAIL_NOT_AVAILABLE
4838 && !is_tm_safe_or_pure (node->symbol.decl))
4839 ipa_tm_note_irrevocable (node, &irr_worklist);
4840 else if (a >= AVAIL_OVERWRITABLE)
4842 if (!tree_versionable_function_p (node->symbol.decl))
4843 ipa_tm_note_irrevocable (node, &irr_worklist);
4844 else if (!d->is_irrevocable)
4846 /* If this is an alias, make sure its base is queued as well.
4847 We need not scan the callees now, as the base will do. */
4848 if (node->alias)
4850 node = cgraph_get_node (node->thunk.alias);
4851 d = get_cg_data (&node, true);
4852 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
4853 continue;
4856 /* Add all nodes called by this function into
4857 tm_callees as well. */
4858 ipa_tm_scan_calls_clone (node, &tm_callees);
4863 /* Iterate scans until there is no more work to be done. Prefer not to
4864 use VEC_pop because the worklist tends to follow a breadth-first
4865 search of the callgraph, which should allow convergence with a
4866 minimum number of scans. But we also don't want the worklist
4867 array to grow without bound, so we shift the array up periodically. */
4868 for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
4870 if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
4872 VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
4873 i = 0;
4876 node = VEC_index (cgraph_node_p, irr_worklist, i);
4877 d = get_cg_data (&node, true);
4878 d->in_worklist = false;
4880 if (d->want_irr_scan_normal)
4882 d->want_irr_scan_normal = false;
4883 ipa_tm_scan_irr_function (node, false);
4885 if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
4886 ipa_tm_note_irrevocable (node, &irr_worklist);
4889 /* For every function on the callee list, collect the tm_may_enter_irr
4890 bit on the node. */
4891 VEC_truncate (cgraph_node_p, irr_worklist, 0);
4892 for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
4894 node = VEC_index (cgraph_node_p, tm_callees, i);
4895 if (ipa_tm_mayenterirr_function (node))
4897 d = get_cg_data (&node, true);
4898 gcc_assert (d->in_worklist == false);
4899 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
4903 /* Propagate the tm_may_enter_irr bit to callers until stable. */
4904 for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
4906 struct cgraph_node *caller;
4907 struct cgraph_edge *e;
4908 struct ipa_ref *ref;
4909 unsigned j;
4911 if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
4913 VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
4914 i = 0;
4917 node = VEC_index (cgraph_node_p, irr_worklist, i);
4918 d = get_cg_data (&node, true);
4919 d->in_worklist = false;
4920 node->local.tm_may_enter_irr = true;
4922 /* Propagate back to normal callers. */
4923 for (e = node->callers; e ; e = e->next_caller)
4925 caller = e->caller;
4926 if (!is_tm_safe_or_pure (caller->symbol.decl)
4927 && !caller->local.tm_may_enter_irr)
4929 d = get_cg_data (&caller, true);
4930 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
4934 /* Propagate back to referring aliases as well. */
4935 for (j = 0; ipa_ref_list_referring_iterate (&node->symbol.ref_list, j, ref); j++)
4937 caller = cgraph (ref->referring);
4938 if (ref->use == IPA_REF_ALIAS
4939 && !caller->local.tm_may_enter_irr)
4941 /* ?? Do not traverse aliases here. */
4942 d = get_cg_data (&caller, false);
4943 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
4948 /* Now validate all tm_safe functions, and all atomic regions in
4949 other functions. */
4950 FOR_EACH_DEFINED_FUNCTION (node)
4951 if (node->lowered
4952 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
4954 d = get_cg_data (&node, true);
4955 if (is_tm_safe (node->symbol.decl))
4956 ipa_tm_diagnose_tm_safe (node);
4957 else if (d->all_tm_regions)
4958 ipa_tm_diagnose_transaction (node, d->all_tm_regions);
4961 /* Create clones. Do those that are not irrevocable and have a
4962 positive call count. Do those publicly visible functions that
4963 the user directed us to clone. */
4964 for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
4966 bool doit = false;
4968 node = VEC_index (cgraph_node_p, tm_callees, i);
4969 if (node->same_body_alias)
4970 continue;
4972 a = cgraph_function_body_availability (node);
4973 d = get_cg_data (&node, true);
4975 if (a <= AVAIL_NOT_AVAILABLE)
4976 doit = is_tm_callable (node->symbol.decl);
4977 else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->symbol.decl))
4978 doit = true;
4979 else if (!d->is_irrevocable
4980 && d->tm_callers_normal + d->tm_callers_clone > 0)
4981 doit = true;
4983 if (doit)
4984 ipa_tm_create_version (node);
4987 /* Redirect calls to the new clones, and insert irrevocable marks. */
4988 for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
4990 node = VEC_index (cgraph_node_p, tm_callees, i);
4991 if (node->analyzed)
4993 d = get_cg_data (&node, true);
4994 if (d->clone)
4995 ipa_tm_transform_clone (node);
4998 FOR_EACH_DEFINED_FUNCTION (node)
4999 if (node->lowered
5000 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
5002 d = get_cg_data (&node, true);
5003 if (d->all_tm_regions)
5004 ipa_tm_transform_transaction (node);
5007 /* Free and clear all data structures. */
5008 VEC_free (cgraph_node_p, heap, tm_callees);
5009 VEC_free (cgraph_node_p, heap, irr_worklist);
5010 bitmap_obstack_release (&tm_obstack);
5012 FOR_EACH_FUNCTION (node)
5013 node->symbol.aux = NULL;
5015 #ifdef ENABLE_CHECKING
5016 verify_cgraph ();
5017 #endif
5019 return 0;
5022 struct simple_ipa_opt_pass pass_ipa_tm =
5025 SIMPLE_IPA_PASS,
5026 "tmipa", /* name */
5027 gate_tm, /* gate */
5028 ipa_tm_execute, /* execute */
5029 NULL, /* sub */
5030 NULL, /* next */
5031 0, /* static_pass_number */
5032 TV_TRANS_MEM, /* tv_id */
5033 PROP_ssa | PROP_cfg, /* properties_required */
5034 0, /* properties_provided */
5035 0, /* properties_destroyed */
5036 0, /* todo_flags_start */
5037 0, /* todo_flags_finish */
5041 #include "gt-trans-mem.h"