/* Passes for transactional memory support.
   Copyright (C) 2008-2014 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "hash-table.h"
#include "hard-reg-set.h"
#include "dominance.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "gimple-ssa.h"
#include "plugin-api.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
#include "tree-inline.h"
#include "diagnostic-core.h"
#include "trans-mem.h"
#include "langhooks.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-address.h"

#define A_RUNINSTRUMENTEDCODE	0x0001
#define A_RUNUNINSTRUMENTEDCODE	0x0002
#define A_SAVELIVEVARIABLES	0x0004
#define A_RESTORELIVEVARIABLES	0x0008
#define A_ABORTTRANSACTION	0x0010

#define AR_USERABORT		0x0001
#define AR_USERRETRY		0x0002
#define AR_TMCONFLICT		0x0004
#define AR_EXCEPTIONBLOCKABORT	0x0008
#define AR_OUTERABORT		0x0010

#define MODE_SERIALIRREVOCABLE	0x0000

/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction_atomic {
	  ...
	}

   During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
   trivially replaced with a GIMPLE_TRANSACTION node.

   During pass_lower_tm, we examine the body of transactions looking
   for aborts.  Transactions that do not contain an abort may be
   merged into an outer transaction.  We also add a TRY-FINALLY node
   to arrange for the transaction to be committed on any exit.

   [??? Think about how this arrangement affects throw-with-commit
   and throw-with-abort operations.  In this case we want the TRY to
   handle gotos, but not to catch any exceptions because the transaction
   will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    ...
	    __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

   During pass_lower_eh, we create EH regions for the transactions,
   intermixed with the regular EH stuff.  This gives us a nice persistent
   mapping (all the way through rtl) from transactional memory operation
   back to the transaction, which allows us to get the abnormal edges
   correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	...
	__builtin___tm_abort ();
	__builtin___tm_commit ();
	over:

   This is the end of all_lowering_passes, and so is what is present
   during the IPA passes, and through all of the optimization passes.

   During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
   functions and mark functions for cloning.

   At the end of gimple optimization, before exiting SSA form,
   pass_tm_edges replaces statements that perform transactional
   memory operations with the appropriate TM builtins, and swaps
   out function calls with their transactional clones.  At this
   point we introduce the abnormal transaction restart edges and
   complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	if (x & abort_transaction)
	  goto over;
	...
	t0 = __builtin___tm_load (global);
	...
	__builtin___tm_store (&global, t1);
	...
	__builtin___tm_abort ();
	__builtin___tm_commit ();
	over:
*/
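
/* Illustrative example (an addition, not part of the original file): user
   code compiled with -fgnu-tm that exercises the lowering described above
   might look like

	__transaction_atomic
	  {
	    global++;
	    if (global == 10)
	      __transaction_cancel;
	  }

   The __transaction_atomic block is what becomes the TRANSACTION_EXPR and
   later the GIMPLE_TRANSACTION node, and __transaction_cancel is what
   becomes the __builtin___tm_abort () call sketched in the dumps above.  */
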
static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);

/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));

    default:
      if (TYPE_P (x))
	return NULL;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}

/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}

/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}

/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}

/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}

/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}

/* Return true for built in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}

/* Return true if STMT is a built in function call that "ends" a
   transaction.  */

bool
is_tm_ending (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl != NULL_TREE
	  && is_tm_ending_fndecl (fndecl));
}

/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}

/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}

/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}

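
/* Illustrative note (an assumption, not from the original sources): for a
   user-level "__transaction_cancel [[outer]]" at location LOC, the front
   end would call build_tm_abort_call (loc, true), producing a GENERIC call
   roughly equivalent to

	_ITM_abortTransaction (AR_USERABORT | AR_OUTERABORT);

   assuming BUILT_IN_TM_ABORT maps to the libitm entry point
   _ITM_abortTransaction.  */
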
/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

struct tm_wrapper_hasher : ggc_cache_hasher<tree_map *>
{
  static inline hashval_t hash (tree_map *m) { return m->hash; }

  static inline bool
  equal (tree_map *a, tree_map *b)
  {
    return a->base.from == b->base.from;
  }

  static void
  handle_cache_entry (tree_map *&m)
  {
    extern void gt_ggc_mx (tree_map *&);
    if (m == HTAB_EMPTY_ENTRY || m == HTAB_DELETED_ENTRY)
      return;
    else if (ggc_marked_p (m->base.from))
      gt_ggc_mx (m);
    else
      m = static_cast<tree_map *> (HTAB_DELETED_ENTRY);
  }
};

static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;

void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);

  h = ggc_alloc<tree_map> ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
  *slot = h;
}

/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = tm_wrap_map->find_with_hash (&in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we only have these defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here:  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	break;
      }

  return NULL_TREE;
}

/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */

void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing...  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}

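
/* Illustrative example (an addition, not from the original sources): with
   the wrappers above recorded, an allocation inside a transaction such as

	__transaction_atomic { p = malloc (64); }

   is redirected through find_tm_replacement_function to BUILT_IN_TM_MALLOC,
   assumed here to correspond to the libitm allocator entry point
   (_ITM_malloc), so that a transaction restart can release the memory.  */
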
/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple stmt;
};

/* Return true if T is a volatile variable of some kind.  */

static bool
volatile_var_p (tree t)
{
  return (SSA_VAR_P (t)
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}

/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (volatile_var_p (*tp)
      && d->block_flags & DIAG_TM_SAFE
      && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      error_at (gimple_location (d->stmt),
		"invalid volatile use of %qD inside transaction",
		*tp);
    }

  return NULL_TREE;
}

static inline bool
is_tm_safe_or_pure (const_tree x)
{
  return is_tm_safe (x) || is_tm_pure (x);
}

652 diagnose_tm_1 (gimple_stmt_iterator
*gsi
, bool *handled_ops_p
,
653 struct walk_stmt_info
*wi
)
655 gimple stmt
= gsi_stmt (*gsi
);
656 struct diagnose_tm
*d
= (struct diagnose_tm
*) wi
->info
;
658 /* Save stmt for use in leaf analysis. */
661 switch (gimple_code (stmt
))
665 tree fn
= gimple_call_fn (stmt
);
667 if ((d
->summary_flags
& DIAG_TM_OUTER
) == 0
668 && is_tm_may_cancel_outer (fn
))
669 error_at (gimple_location (stmt
),
670 "%<transaction_may_cancel_outer%> function call not within"
671 " outer transaction or %<transaction_may_cancel_outer%>");
673 if (d
->summary_flags
& DIAG_TM_SAFE
)
675 bool is_safe
, direct_call_p
;
678 if (TREE_CODE (fn
) == ADDR_EXPR
679 && TREE_CODE (TREE_OPERAND (fn
, 0)) == FUNCTION_DECL
)
681 direct_call_p
= true;
682 replacement
= TREE_OPERAND (fn
, 0);
683 replacement
= find_tm_replacement_function (replacement
);
689 direct_call_p
= false;
690 replacement
= NULL_TREE
;
693 if (is_tm_safe_or_pure (fn
))
695 else if (is_tm_callable (fn
) || is_tm_irrevocable (fn
))
697 /* A function explicitly marked transaction_callable as
698 opposed to transaction_safe is being defined to be
699 unsafe as part of its ABI, regardless of its contents. */
702 else if (direct_call_p
)
704 if (IS_TYPE_OR_DECL_P (fn
)
705 && flags_from_decl_or_type (fn
) & ECF_TM_BUILTIN
)
707 else if (replacement
)
709 /* ??? At present we've been considering replacements
710 merely transaction_callable, and therefore might
711 enter irrevocable. The tm_wrap attribute has not
712 yet made it into the new language spec. */
717 /* ??? Diagnostics for unmarked direct calls moved into
718 the IPA pass. Section 3.2 of the spec details how
719 functions not marked should be considered "implicitly
720 safe" based on having examined the function body. */
726 /* An unmarked indirect call. Consider it unsafe even
727 though optimization may yet figure out how to inline. */
733 if (TREE_CODE (fn
) == ADDR_EXPR
)
734 fn
= TREE_OPERAND (fn
, 0);
735 if (d
->block_flags
& DIAG_TM_SAFE
)
738 error_at (gimple_location (stmt
),
739 "unsafe function call %qD within "
740 "atomic transaction", fn
);
743 if (!DECL_P (fn
) || DECL_NAME (fn
))
744 error_at (gimple_location (stmt
),
745 "unsafe function call %qE within "
746 "atomic transaction", fn
);
748 error_at (gimple_location (stmt
),
749 "unsafe indirect function call within "
750 "atomic transaction");
756 error_at (gimple_location (stmt
),
757 "unsafe function call %qD within "
758 "%<transaction_safe%> function", fn
);
761 if (!DECL_P (fn
) || DECL_NAME (fn
))
762 error_at (gimple_location (stmt
),
763 "unsafe function call %qE within "
764 "%<transaction_safe%> function", fn
);
766 error_at (gimple_location (stmt
),
767 "unsafe indirect function call within "
768 "%<transaction_safe%> function");
777 /* ??? We ought to come up with a way to add attributes to
778 asm statements, and then add "transaction_safe" to it.
779 Either that or get the language spec to resurrect __tm_waiver. */
780 if (d
->block_flags
& DIAG_TM_SAFE
)
781 error_at (gimple_location (stmt
),
782 "asm not allowed in atomic transaction");
783 else if (d
->func_flags
& DIAG_TM_SAFE
)
784 error_at (gimple_location (stmt
),
785 "asm not allowed in %<transaction_safe%> function");
788 case GIMPLE_TRANSACTION
:
790 gtransaction
*trans_stmt
= as_a
<gtransaction
*> (stmt
);
791 unsigned char inner_flags
= DIAG_TM_SAFE
;
793 if (gimple_transaction_subcode (trans_stmt
) & GTMA_IS_RELAXED
)
795 if (d
->block_flags
& DIAG_TM_SAFE
)
796 error_at (gimple_location (stmt
),
797 "relaxed transaction in atomic transaction");
798 else if (d
->func_flags
& DIAG_TM_SAFE
)
799 error_at (gimple_location (stmt
),
800 "relaxed transaction in %<transaction_safe%> function");
801 inner_flags
= DIAG_TM_RELAXED
;
803 else if (gimple_transaction_subcode (trans_stmt
) & GTMA_IS_OUTER
)
806 error_at (gimple_location (stmt
),
807 "outer transaction in transaction");
808 else if (d
->func_flags
& DIAG_TM_OUTER
)
809 error_at (gimple_location (stmt
),
810 "outer transaction in "
811 "%<transaction_may_cancel_outer%> function");
812 else if (d
->func_flags
& DIAG_TM_SAFE
)
813 error_at (gimple_location (stmt
),
814 "outer transaction in %<transaction_safe%> function");
815 inner_flags
|= DIAG_TM_OUTER
;
818 *handled_ops_p
= true;
819 if (gimple_transaction_body (trans_stmt
))
821 struct walk_stmt_info wi_inner
;
822 struct diagnose_tm d_inner
;
824 memset (&d_inner
, 0, sizeof (d_inner
));
825 d_inner
.func_flags
= d
->func_flags
;
826 d_inner
.block_flags
= d
->block_flags
| inner_flags
;
827 d_inner
.summary_flags
= d_inner
.func_flags
| d_inner
.block_flags
;
829 memset (&wi_inner
, 0, sizeof (wi_inner
));
830 wi_inner
.info
= &d_inner
;
832 walk_gimple_seq (gimple_transaction_body (trans_stmt
),
833 diagnose_tm_1
, diagnose_tm_1_op
, &wi_inner
);
static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
		   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}

const pass_data pass_data_diagnose_tm_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_tm_blocks", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_tm_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_tm_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }

}; // class pass_diagnose_tm_blocks

gimple_opt_pass *
make_pass_diagnose_tm_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_tm_blocks (ctxt);
}

/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

	struct large { int x[1000]; };
	struct large lala = { 0 };
	__transaction {
	  lala.x[i] = 123;
	  ...
	}

   We can either save/restore:

	trxn = _ITM_startTransaction ();
	if (trxn & a_saveLiveVariables)
	  tmp_lala1 = lala.x[i];
	else if (trxn & a_restoreLiveVariables)
	  lala.x[i] = tmp_lala1;

   or use the logging functions:

	trxn = _ITM_startTransaction ();
	_ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree to shadow all of the writes to a given
   location (thus reducing the total number of logging calls), but not
   so high as to be called on a path that does not perform a
   write.  */
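
/* Illustrative sketch (an addition, not original text) of the dominator
   placement mentioned above.  Assuming both code paths write lala.x[i]:

	__transaction {
	  _ITM_LU4 (&lala.x[i]);      <-- single call in the dominating block
	  if (cond)
	    lala.x[i] = 1;
	  else
	    lala.x[i] = 2;
	}

   One logging call in the common dominator shadows both stores, while
   placing it any higher could log on a path that performs no write.  */
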
/* One individual log entry.  We may have multiple statements for the
   same location if neither dominate each other (on different
   execution paths).  */
typedef struct tm_log_entry
{
  /* Address to save.  */
  tree addr;

  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;

  /* Dominating statements the store occurs in.  */
  vec<gimple> stmts;

  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
} *tm_log_entry_t;


/* Log entry hashtable helpers.  */

struct log_entry_hasher
{
  typedef tm_log_entry value_type;
  typedef tm_log_entry compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};

974 /* Htab support. Return hash value for a `tm_log_entry'. */
976 log_entry_hasher::hash (const value_type
*log
)
978 return iterative_hash_expr (log
->addr
, 0);
981 /* Htab support. Return true if two log entries are the same. */
983 log_entry_hasher::equal (const value_type
*log1
, const compare_type
*log2
)
987 rth: I suggest that we get rid of the component refs etc.
988 I.e. resolve the reference to base + offset.
990 We may need to actually finish a merge with mainline for this,
991 since we'd like to be presented with Richi's MEM_REF_EXPRs more
992 often than not. But in the meantime your tm_log_entry could save
993 the results of get_inner_reference.
995 See: g++.dg/tm/pr46653.C
998 /* Special case plain equality because operand_equal_p() below will
999 return FALSE if the addresses are equal but they have
1000 side-effects (e.g. a volatile address). */
1001 if (log1
->addr
== log2
->addr
)
1004 return operand_equal_p (log1
->addr
, log2
->addr
, 0);
1007 /* Htab support. Free one tm_log_entry. */
1009 log_entry_hasher::remove (value_type
*lp
)
1011 lp
->stmts
.release ();
1016 /* The actual log. */
1017 static hash_table
<log_entry_hasher
> *tm_log
;
1019 /* Addresses to log with a save/restore sequence. These should be in
1021 static vec
<tree
> tm_log_save_addresses
;
1023 enum thread_memory_type
1027 mem_transaction_local
,
1031 typedef struct tm_new_mem_map
1033 /* SSA_NAME being dereferenced. */
1035 enum thread_memory_type local_new_memory
;
1038 /* Hashtable helpers. */
1040 struct tm_mem_map_hasher
: typed_free_remove
<tm_new_mem_map_t
>
1042 typedef tm_new_mem_map_t value_type
;
1043 typedef tm_new_mem_map_t compare_type
;
1044 static inline hashval_t
hash (const value_type
*);
1045 static inline bool equal (const value_type
*, const compare_type
*);
1049 tm_mem_map_hasher::hash (const value_type
*v
)
1051 return (intptr_t)v
->val
>> 4;
1055 tm_mem_map_hasher::equal (const value_type
*v
, const compare_type
*c
)
1057 return v
->val
== c
->val
;
1060 /* Map for an SSA_NAME originally pointing to a non aliased new piece
1061 of memory (malloc, alloc, etc). */
1062 static hash_table
<tm_mem_map_hasher
> *tm_new_mem_hash
;
1064 /* Initialize logging data structures. */
1068 tm_log
= new hash_table
<log_entry_hasher
> (10);
1069 tm_new_mem_hash
= new hash_table
<tm_mem_map_hasher
> (5);
1070 tm_log_save_addresses
.create (5);
1073 /* Free logging data structures. */
1075 tm_log_delete (void)
1079 delete tm_new_mem_hash
;
1080 tm_new_mem_hash
= NULL
;
1081 tm_log_save_addresses
.release ();
/* Return true if MEM is a transaction invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */
1087 transaction_invariant_address_p (const_tree mem
, basic_block region_entry_block
)
1089 if ((TREE_CODE (mem
) == INDIRECT_REF
|| TREE_CODE (mem
) == MEM_REF
)
1090 && TREE_CODE (TREE_OPERAND (mem
, 0)) == SSA_NAME
)
1094 def_bb
= gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem
, 0)));
1095 return def_bb
!= region_entry_block
1096 && dominated_by_p (CDI_DOMINATORS
, region_entry_block
, def_bb
);
1099 mem
= strip_invariant_refs (mem
);
1100 return mem
&& (CONSTANT_CLASS_P (mem
) || decl_address_invariant_p (mem
));
/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry_block for the transaction.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.

   If known, ENTRY_BLOCK is the entry block for the region, otherwise
   NULL.  */
1119 tm_log_add (basic_block entry_block
, tree addr
, gimple stmt
)
1121 tm_log_entry
**slot
;
1122 struct tm_log_entry l
, *lp
;
1125 slot
= tm_log
->find_slot (&l
, INSERT
);
1128 tree type
= TREE_TYPE (addr
);
1130 lp
= XNEW (struct tm_log_entry
);
1134 /* Small invariant addresses can be handled as save/restores. */
1136 && transaction_invariant_address_p (lp
->addr
, entry_block
)
1137 && TYPE_SIZE_UNIT (type
) != NULL
1138 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
))
1139 && ((HOST_WIDE_INT
) tree_to_uhwi (TYPE_SIZE_UNIT (type
))
1140 < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE
))
1141 /* We must be able to copy this type normally. I.e., no
1142 special constructors and the like. */
1143 && !TREE_ADDRESSABLE (type
))
1145 lp
->save_var
= create_tmp_reg (TREE_TYPE (lp
->addr
), "tm_save");
1146 lp
->stmts
.create (0);
1147 lp
->entry_block
= entry_block
;
1148 /* Save addresses separately in dominator order so we don't
1149 get confused by overlapping addresses in the save/restore
1151 tm_log_save_addresses
.safe_push (lp
->addr
);
1155 /* Use the logging functions. */
1156 lp
->stmts
.create (5);
1157 lp
->stmts
.quick_push (stmt
);
1158 lp
->save_var
= NULL
;
1168 /* If we're generating a save/restore sequence, we don't care
1169 about statements. */
1173 for (i
= 0; lp
->stmts
.iterate (i
, &oldstmt
); ++i
)
1175 if (stmt
== oldstmt
)
1177 /* We already have a store to the same address, higher up the
1178 dominator tree. Nothing to do. */
1179 if (dominated_by_p (CDI_DOMINATORS
,
1180 gimple_bb (stmt
), gimple_bb (oldstmt
)))
1182 /* We should be processing blocks in dominator tree order. */
1183 gcc_assert (!dominated_by_p (CDI_DOMINATORS
,
1184 gimple_bb (oldstmt
), gimple_bb (stmt
)));
1186 /* Store is on a different code path. */
1187 lp
->stmts
.safe_push (stmt
);
/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */
1195 gimplify_addr (gimple_stmt_iterator
*gsi
, tree x
)
1197 if (TREE_CODE (x
) == TARGET_MEM_REF
)
1198 x
= tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x
)), x
);
1200 x
= build_fold_addr_expr (x
);
1201 return force_gimple_operand_gsi (gsi
, x
, true, NULL
, true, GSI_SAME_STMT
);
/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
1208 tm_log_emit_stmt (tree addr
, gimple stmt
)
1210 tree type
= TREE_TYPE (addr
);
1211 tree size
= TYPE_SIZE_UNIT (type
);
1212 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt
);
1214 enum built_in_function code
= BUILT_IN_TM_LOG
;
1216 if (type
== float_type_node
)
1217 code
= BUILT_IN_TM_LOG_FLOAT
;
1218 else if (type
== double_type_node
)
1219 code
= BUILT_IN_TM_LOG_DOUBLE
;
1220 else if (type
== long_double_type_node
)
1221 code
= BUILT_IN_TM_LOG_LDOUBLE
;
1222 else if (tree_fits_uhwi_p (size
))
1224 unsigned int n
= tree_to_uhwi (size
);
1228 code
= BUILT_IN_TM_LOG_1
;
1231 code
= BUILT_IN_TM_LOG_2
;
1234 code
= BUILT_IN_TM_LOG_4
;
1237 code
= BUILT_IN_TM_LOG_8
;
1240 code
= BUILT_IN_TM_LOG
;
1241 if (TREE_CODE (type
) == VECTOR_TYPE
)
1243 if (n
== 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64
))
1244 code
= BUILT_IN_TM_LOG_M64
;
1245 else if (n
== 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128
))
1246 code
= BUILT_IN_TM_LOG_M128
;
1247 else if (n
== 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256
))
1248 code
= BUILT_IN_TM_LOG_M256
;
1254 addr
= gimplify_addr (&gsi
, addr
);
1255 if (code
== BUILT_IN_TM_LOG
)
1256 log
= gimple_build_call (builtin_decl_explicit (code
), 2, addr
, size
);
1258 log
= gimple_build_call (builtin_decl_explicit (code
), 1, addr
);
1259 gsi_insert_before (&gsi
, log
, GSI_SAME_STMT
);
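
/* Illustrative example (an assumption, not from the original sources): for
   a 4-byte scalar, the size-based selection above picks BUILT_IN_TM_LOG_4
   and a one-argument call is emitted, e.g.

	_ITM_LU4 (&lala.x[i]);

   while an aggregate with no size-specific variant falls back to
   BUILT_IN_TM_LOG, emitted as a two-argument (address, size) call.  */
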
/* Go through the log and instrument the addresses that must be
   instrumented with the logging functions.  Leave the save/restore
   addresses for later.  */
1268 hash_table
<log_entry_hasher
>::iterator hi
;
1269 struct tm_log_entry
*lp
;
1271 FOR_EACH_HASH_TABLE_ELEMENT (*tm_log
, lp
, tm_log_entry_t
, hi
)
1278 fprintf (dump_file
, "TM thread private mem logging: ");
1279 print_generic_expr (dump_file
, lp
->addr
, 0);
1280 fprintf (dump_file
, "\n");
1286 fprintf (dump_file
, "DUMPING to variable\n");
1292 fprintf (dump_file
, "DUMPING with logging functions\n");
1293 for (i
= 0; lp
->stmts
.iterate (i
, &stmt
); ++i
)
1294 tm_log_emit_stmt (lp
->addr
, stmt
);
/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
1303 tm_log_emit_saves (basic_block entry_block
, basic_block bb
)
1306 gimple_stmt_iterator gsi
= gsi_last_bb (bb
);
1308 struct tm_log_entry l
, *lp
;
1310 for (i
= 0; i
< tm_log_save_addresses
.length (); ++i
)
1312 l
.addr
= tm_log_save_addresses
[i
];
1313 lp
= *(tm_log
->find_slot (&l
, NO_INSERT
));
1314 gcc_assert (lp
->save_var
!= NULL
);
1316 /* We only care about variables in the current transaction. */
1317 if (lp
->entry_block
!= entry_block
)
1320 stmt
= gimple_build_assign (lp
->save_var
, unshare_expr (lp
->addr
));
1322 /* Make sure we can create an SSA_NAME for this type. For
1323 instance, aggregates aren't allowed, in which case the system
1324 will create a VOP for us and everything will just work. */
1325 if (is_gimple_reg_type (TREE_TYPE (lp
->save_var
)))
1327 lp
->save_var
= make_ssa_name (lp
->save_var
, stmt
);
1328 gimple_assign_set_lhs (stmt
, lp
->save_var
);
1331 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
1339 tm_log_emit_restores (basic_block entry_block
, basic_block bb
)
1342 struct tm_log_entry l
, *lp
;
1343 gimple_stmt_iterator gsi
;
1346 for (i
= tm_log_save_addresses
.length () - 1; i
>= 0; i
--)
1348 l
.addr
= tm_log_save_addresses
[i
];
1349 lp
= *(tm_log
->find_slot (&l
, NO_INSERT
));
1350 gcc_assert (lp
->save_var
!= NULL
);
1352 /* We only care about variables in the current transaction. */
1353 if (lp
->entry_block
!= entry_block
)
1356 /* Restores are in LIFO order from the saves in case we have
1358 gsi
= gsi_start_bb (bb
);
1360 stmt
= gimple_build_assign (unshare_expr (lp
->addr
), lp
->save_var
);
1361 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
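
/* Illustrative sketch (an addition, not original text) of how the save and
   restore sequences emitted above surround the transaction start:

	tm_save.1 = lala;			<-- tm_log_emit_saves
	trxn = _ITM_startTransaction (...);
	if (trxn & a_restoreLiveVariables)
	  lala = tm_save.1;			<-- tm_log_emit_restores

   Restores are emitted in the reverse (LIFO) order of the saves so that
   overlapping addresses are undone correctly on a restart.  */
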
1366 static tree
lower_sequence_tm (gimple_stmt_iterator
*, bool *,
1367 struct walk_stmt_info
*);
1368 static tree
lower_sequence_no_tm (gimple_stmt_iterator
*, bool *,
1369 struct walk_stmt_info
*);
/* Evaluate an address X being dereferenced and determine if it
   originally points to a non aliased new chunk of memory (malloc,
   calloc or alloca).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
1381 static enum thread_memory_type
1382 thread_private_new_memory (basic_block entry_block
, tree x
)
1385 enum tree_code code
;
1386 tm_new_mem_map_t
**slot
;
1387 tm_new_mem_map_t elt
, *elt_p
;
1389 enum thread_memory_type retval
= mem_transaction_local
;
1392 || TREE_CODE (x
) != SSA_NAME
1393 /* Possible uninitialized use, or a function argument. In
1394 either case, we don't care. */
1395 || SSA_NAME_IS_DEFAULT_DEF (x
))
1396 return mem_non_local
;
1398 /* Look in cache first. */
1400 slot
= tm_new_mem_hash
->find_slot (&elt
, INSERT
);
1403 return elt_p
->local_new_memory
;
1405 /* Optimistically assume the memory is transaction local during
1406 processing. This catches recursion into this variable. */
1407 *slot
= elt_p
= XNEW (tm_new_mem_map_t
);
1409 elt_p
->local_new_memory
= mem_transaction_local
;
1411 /* Search DEF chain to find the original definition of this address. */
1414 if (ptr_deref_may_alias_global_p (x
))
1416 /* Address escapes. This is not thread-private. */
1417 retval
= mem_non_local
;
1418 goto new_memory_ret
;
1421 stmt
= SSA_NAME_DEF_STMT (x
);
1423 /* If the malloc call is outside the transaction, this is
1425 if (retval
!= mem_thread_local
1426 && !dominated_by_p (CDI_DOMINATORS
, gimple_bb (stmt
), entry_block
))
1427 retval
= mem_thread_local
;
1429 if (is_gimple_assign (stmt
))
1431 code
= gimple_assign_rhs_code (stmt
);
1432 /* x = foo ==> foo */
1433 if (code
== SSA_NAME
)
1434 x
= gimple_assign_rhs1 (stmt
);
1435 /* x = foo + n ==> foo */
1436 else if (code
== POINTER_PLUS_EXPR
)
1437 x
= gimple_assign_rhs1 (stmt
);
1438 /* x = (cast*) foo ==> foo */
1439 else if (code
== VIEW_CONVERT_EXPR
|| CONVERT_EXPR_CODE_P (code
))
1440 x
= gimple_assign_rhs1 (stmt
);
1441 /* x = c ? op1 : op2 == > op1 or op2 just like a PHI */
1442 else if (code
== COND_EXPR
)
1444 tree op1
= gimple_assign_rhs2 (stmt
);
1445 tree op2
= gimple_assign_rhs3 (stmt
);
1446 enum thread_memory_type mem
;
1447 retval
= thread_private_new_memory (entry_block
, op1
);
1448 if (retval
== mem_non_local
)
1449 goto new_memory_ret
;
1450 mem
= thread_private_new_memory (entry_block
, op2
);
1451 retval
= MIN (retval
, mem
);
1452 goto new_memory_ret
;
1456 retval
= mem_non_local
;
1457 goto new_memory_ret
;
1462 if (gimple_code (stmt
) == GIMPLE_PHI
)
1465 enum thread_memory_type mem
;
1466 tree phi_result
= gimple_phi_result (stmt
);
1468 /* If any of the ancestors are non-local, we are sure to
1469 be non-local. Otherwise we can avoid doing anything
1470 and inherit what has already been generated. */
1472 for (i
= 0; i
< gimple_phi_num_args (stmt
); ++i
)
1474 tree op
= PHI_ARG_DEF (stmt
, i
);
1476 /* Exclude self-assignment. */
1477 if (phi_result
== op
)
1480 mem
= thread_private_new_memory (entry_block
, op
);
1481 if (mem
== mem_non_local
)
1484 goto new_memory_ret
;
1486 retval
= MIN (retval
, mem
);
1488 goto new_memory_ret
;
1493 while (TREE_CODE (x
) == SSA_NAME
);
1495 if (stmt
&& is_gimple_call (stmt
) && gimple_call_flags (stmt
) & ECF_MALLOC
)
1496 /* Thread-local or transaction-local. */
1499 retval
= mem_non_local
;
1502 elt_p
->local_new_memory
= retval
;
/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region where STMT resides.
   NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
1516 requires_barrier (basic_block entry_block
, tree x
, gimple stmt
)
1519 while (handled_component_p (x
))
1520 x
= TREE_OPERAND (x
, 0);
1522 switch (TREE_CODE (x
))
1527 enum thread_memory_type ret
;
1529 ret
= thread_private_new_memory (entry_block
, TREE_OPERAND (x
, 0));
1530 if (ret
== mem_non_local
)
1532 if (stmt
&& ret
== mem_thread_local
)
1533 /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */
1534 tm_log_add (entry_block
, orig
, stmt
);
1536 /* Transaction-locals require nothing at all. For malloc, a
1537 transaction restart frees the memory and we reallocate.
1538 For alloca, the stack pointer gets reset by the retry and
1543 case TARGET_MEM_REF
:
1544 if (TREE_CODE (TMR_BASE (x
)) != ADDR_EXPR
)
1546 x
= TREE_OPERAND (TMR_BASE (x
), 0);
1547 if (TREE_CODE (x
) == PARM_DECL
)
1549 gcc_assert (TREE_CODE (x
) == VAR_DECL
);
1555 if (DECL_BY_REFERENCE (x
))
1557 /* ??? This value is a pointer, but aggregate_value_p has been
1558 jigged to return true which confuses needs_to_live_in_memory.
1559 This ought to be cleaned up generically.
1561 FIXME: Verify this still happens after the next mainline
1562 merge. Testcase ie g++.dg/tm/pr47554.C.
1567 if (is_global_var (x
))
1568 return !TREE_READONLY (x
);
1569 if (/* FIXME: This condition should actually go below in the
1570 tm_log_add() call, however is_call_clobbered() depends on
1571 aliasing info which is not available during
1572 gimplification. Since requires_barrier() gets called
1573 during lower_sequence_tm/gimplification, leave the call
1574 to needs_to_live_in_memory until we eliminate
1575 lower_sequence_tm altogether. */
1576 needs_to_live_in_memory (x
))
1580 /* For local memory that doesn't escape (aka thread private
1581 memory), we can either save the value at the beginning of
1582 the transaction and restore on restart, or call a tm
1583 function to dynamically save and restore on restart
1586 tm_log_add (entry_block
, orig
, stmt
);
/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */
1599 examine_assign_tm (unsigned *state
, gimple_stmt_iterator
*gsi
)
1601 gimple stmt
= gsi_stmt (*gsi
);
1603 if (requires_barrier (/*entry_block=*/NULL
, gimple_assign_rhs1 (stmt
), NULL
))
1604 *state
|= GTMA_HAVE_LOAD
;
1605 if (requires_barrier (/*entry_block=*/NULL
, gimple_assign_lhs (stmt
), NULL
))
1606 *state
|= GTMA_HAVE_STORE
;
1609 /* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */
1612 examine_call_tm (unsigned *state
, gimple_stmt_iterator
*gsi
)
1614 gimple stmt
= gsi_stmt (*gsi
);
1617 if (is_tm_pure_call (stmt
))
1620 /* Check if this call is a transaction abort. */
1621 fn
= gimple_call_fndecl (stmt
);
1622 if (is_tm_abort (fn
))
1623 *state
|= GTMA_HAVE_ABORT
;
1625 /* Note that something may happen. */
1626 *state
|= GTMA_HAVE_LOAD
| GTMA_HAVE_STORE
;
1629 /* Lower a GIMPLE_TRANSACTION statement. */
1632 lower_transaction (gimple_stmt_iterator
*gsi
, struct walk_stmt_info
*wi
)
1635 gtransaction
*stmt
= as_a
<gtransaction
*> (gsi_stmt (*gsi
));
1636 unsigned int *outer_state
= (unsigned int *) wi
->info
;
1637 unsigned int this_state
= 0;
1638 struct walk_stmt_info this_wi
;
1640 /* First, lower the body. The scanning that we do inside gives
1641 us some idea of what we're dealing with. */
1642 memset (&this_wi
, 0, sizeof (this_wi
));
1643 this_wi
.info
= (void *) &this_state
;
1644 walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt
),
1645 lower_sequence_tm
, NULL
, &this_wi
);
1647 /* If there was absolutely nothing transaction related inside the
1648 transaction, we may elide it. Likewise if this is a nested
1649 transaction and does not contain an abort. */
1651 || (!(this_state
& GTMA_HAVE_ABORT
) && outer_state
!= NULL
))
1654 *outer_state
|= this_state
;
1656 gsi_insert_seq_before (gsi
, gimple_transaction_body (stmt
),
1658 gimple_transaction_set_body (stmt
, NULL
);
1660 gsi_remove (gsi
, true);
1661 wi
->removed_stmt
= true;
1665 /* Wrap the body of the transaction in a try-finally node so that
1666 the commit call is always properly called. */
1667 g
= gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT
), 0);
1668 if (flag_exceptions
)
1671 gimple_seq n_seq
, e_seq
;
1673 n_seq
= gimple_seq_alloc_with_stmt (g
);
1676 g
= gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER
),
1677 1, integer_zero_node
);
1678 ptr
= create_tmp_var (ptr_type_node
);
1679 gimple_call_set_lhs (g
, ptr
);
1680 gimple_seq_add_stmt (&e_seq
, g
);
1682 g
= gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH
),
1684 gimple_seq_add_stmt (&e_seq
, g
);
1686 g
= gimple_build_eh_else (n_seq
, e_seq
);
1689 g
= gimple_build_try (gimple_transaction_body (stmt
),
1690 gimple_seq_alloc_with_stmt (g
), GIMPLE_TRY_FINALLY
);
1691 gsi_insert_after (gsi
, g
, GSI_CONTINUE_LINKING
);
1693 gimple_transaction_set_body (stmt
, NULL
);
1695 /* If the transaction calls abort or if this is an outer transaction,
1696 add an "over" label afterwards. */
1697 if ((this_state
& (GTMA_HAVE_ABORT
))
1698 || (gimple_transaction_subcode (stmt
) & GTMA_IS_OUTER
))
1700 tree label
= create_artificial_label (UNKNOWN_LOCATION
);
1701 gimple_transaction_set_label (stmt
, label
);
1702 gsi_insert_after (gsi
, gimple_build_label (label
), GSI_CONTINUE_LINKING
);
1705 /* Record the set of operations found for use later. */
1706 this_state
|= gimple_transaction_subcode (stmt
) & GTMA_DECLARATION_MASK
;
1707 gimple_transaction_set_subcode (stmt
, this_state
);
1710 /* Iterate through the statements in the sequence, lowering them all
1711 as appropriate for being in a transaction. */
1714 lower_sequence_tm (gimple_stmt_iterator
*gsi
, bool *handled_ops_p
,
1715 struct walk_stmt_info
*wi
)
1717 unsigned int *state
= (unsigned int *) wi
->info
;
1718 gimple stmt
= gsi_stmt (*gsi
);
1720 *handled_ops_p
= true;
1721 switch (gimple_code (stmt
))
1724 /* Only memory reads/writes need to be instrumented. */
1725 if (gimple_assign_single_p (stmt
))
1726 examine_assign_tm (state
, gsi
);
1730 examine_call_tm (state
, gsi
);
1734 *state
|= GTMA_MAY_ENTER_IRREVOCABLE
;
1737 case GIMPLE_TRANSACTION
:
1738 lower_transaction (gsi
, wi
);
1742 *handled_ops_p
= !gimple_has_substatements (stmt
);
1749 /* Iterate through the statements in the sequence, lowering them all
1750 as appropriate for being outside of a transaction. */
1753 lower_sequence_no_tm (gimple_stmt_iterator
*gsi
, bool *handled_ops_p
,
1754 struct walk_stmt_info
* wi
)
1756 gimple stmt
= gsi_stmt (*gsi
);
1758 if (gimple_code (stmt
) == GIMPLE_TRANSACTION
)
1760 *handled_ops_p
= true;
1761 lower_transaction (gsi
, wi
);
1764 *handled_ops_p
= !gimple_has_substatements (stmt
);
/* Main entry point for flattening GIMPLE_TRANSACTION constructs.  After
   this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
   been moved out, and all the data required for constructing a proper
   CFG has been recorded.  */

static unsigned int
execute_lower_tm (void)
{
  struct walk_stmt_info wi;
  gimple_seq body;

  /* Transactional clones aren't created until a later pass.  */
  gcc_assert (!decl_is_tm_clone (current_function_decl));

  body = gimple_body (current_function_decl);
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
  gimple_set_body (current_function_decl, body);

  return 0;
}

const pass_data pass_data_lower_tm =
{
  GIMPLE_PASS, /* type */
  "tmlower", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_tm : public gimple_opt_pass
{
public:
  pass_lower_tm (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return execute_lower_tm (); }

}; // class pass_lower_tm

gimple_opt_pass *
make_pass_lower_tm (gcc::context *ctxt)
{
  return new pass_lower_tm (ctxt);
}

/* Collect region information for each transaction.  */

struct tm_region
{
public:

  /* The field "transaction_stmt" is initially a gtransaction *,
     but eventually gets lowered to a gcall * (to BUILT_IN_TM_START).

     Helper method to get it as a gtransaction *, with code-checking
     in a checked-build.  */

  gtransaction *
  get_transaction_stmt () const
  {
    return as_a <gtransaction *> (transaction_stmt);
  }

  /* Link to the next unnested transaction.  */
  struct tm_region *next;

  /* Link to the next inner transaction.  */
  struct tm_region *inner;

  /* Link to the next outer transaction.  */
  struct tm_region *outer;

  /* The GIMPLE_TRANSACTION statement beginning this transaction.
     After TM_MARK, this gets replaced by a call to BUILT_IN_TM_START.
     Hence this will be either a gtransaction * or a gcall *.  */
  gimple transaction_stmt;

  /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
     BUILT_IN_TM_START, this field is true if the transaction is an
     outer transaction.  */
  bool original_transaction_was_outer;

  /* Return value from BUILT_IN_TM_START.  */
  tree tm_state;

  /* The entry block to this region.  This will always be the first
     block of the body of the transaction.  */
  basic_block entry_block;

  /* The first block after an expanded call to _ITM_beginTransaction.  */
  basic_block restart_block;

  /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
     These blocks are still a part of the region (i.e., the border is
     inclusive).  Note that this set is only complete for paths in the CFG
     starting at ENTRY_BLOCK, and that there is no exit block recorded for
     the edge to the "over" label.  */
  bitmap exit_blocks;

  /* The set of all blocks that have a TM_IRREVOCABLE call.  */
  bitmap irr_blocks;
};

typedef struct tm_region *tm_region_p;

/* True if there are pending edge statements to be committed for the
   current function being scanned in the tmmark pass.  */
bool pending_edge_inserts_p;

static struct tm_region *all_tm_regions;
static bitmap_obstack tm_obstack;

/* A subroutine of tm_region_init.  Record the existence of the
   GIMPLE_TRANSACTION statement in a tree of tm_region elements.  */
1901 static struct tm_region
*
1902 tm_region_init_0 (struct tm_region
*outer
, basic_block bb
,
1905 struct tm_region
*region
;
1907 region
= (struct tm_region
*)
1908 obstack_alloc (&tm_obstack
.obstack
, sizeof (struct tm_region
));
1912 region
->next
= outer
->inner
;
1913 outer
->inner
= region
;
1917 region
->next
= all_tm_regions
;
1918 all_tm_regions
= region
;
1920 region
->inner
= NULL
;
1921 region
->outer
= outer
;
1923 region
->transaction_stmt
= stmt
;
1924 region
->original_transaction_was_outer
= false;
1925 region
->tm_state
= NULL
;
1927 /* There are either one or two edges out of the block containing
1928 the GIMPLE_TRANSACTION, one to the actual region and one to the
1929 "over" label if the region contains an abort. The former will
1930 always be the one marked FALLTHRU. */
1931 region
->entry_block
= FALLTHRU_EDGE (bb
)->dest
;
1933 region
->exit_blocks
= BITMAP_ALLOC (&tm_obstack
);
1934 region
->irr_blocks
= BITMAP_ALLOC (&tm_obstack
);
/* A subroutine of tm_region_init.  Record all the exit and
   irrevocable blocks in BB into the region's exit_blocks and
   irr_blocks bitmaps.  Returns the new region being scanned.  */
1943 static struct tm_region
*
1944 tm_region_init_1 (struct tm_region
*region
, basic_block bb
)
1946 gimple_stmt_iterator gsi
;
1950 || (!region
->irr_blocks
&& !region
->exit_blocks
))
1953 /* Check to see if this is the end of a region by seeing if it
1954 contains a call to __builtin_tm_commit{,_eh}. Note that the
1955 outermost region for DECL_IS_TM_CLONE need not collect this. */
1956 for (gsi
= gsi_last_bb (bb
); !gsi_end_p (gsi
); gsi_prev (&gsi
))
1959 if (gimple_code (g
) == GIMPLE_CALL
)
1961 tree fn
= gimple_call_fndecl (g
);
1962 if (fn
&& DECL_BUILT_IN_CLASS (fn
) == BUILT_IN_NORMAL
)
1964 if ((DECL_FUNCTION_CODE (fn
) == BUILT_IN_TM_COMMIT
1965 || DECL_FUNCTION_CODE (fn
) == BUILT_IN_TM_COMMIT_EH
)
1966 && region
->exit_blocks
)
1968 bitmap_set_bit (region
->exit_blocks
, bb
->index
);
1969 region
= region
->outer
;
1972 if (DECL_FUNCTION_CODE (fn
) == BUILT_IN_TM_IRREVOCABLE
)
1973 bitmap_set_bit (region
->irr_blocks
, bb
->index
);
/* Collect all of the transaction regions within the current function
   and record them in ALL_TM_REGIONS.  The REGION parameter may specify
   an "outermost" region for use by tm clones.  */
1985 tm_region_init (struct tm_region
*region
)
1991 auto_vec
<basic_block
> queue
;
1992 bitmap visited_blocks
= BITMAP_ALLOC (NULL
);
1993 struct tm_region
*old_region
;
1994 auto_vec
<tm_region_p
> bb_regions
;
1996 all_tm_regions
= region
;
1997 bb
= single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
));
1999 /* We could store this information in bb->aux, but we may get called
2000 through get_all_tm_blocks() from another pass that may be already
2002 bb_regions
.safe_grow_cleared (last_basic_block_for_fn (cfun
));
2004 queue
.safe_push (bb
);
2005 bb_regions
[bb
->index
] = region
;
2009 region
= bb_regions
[bb
->index
];
2010 bb_regions
[bb
->index
] = NULL
;
2012 /* Record exit and irrevocable blocks. */
2013 region
= tm_region_init_1 (region
, bb
);
2015 /* Check for the last statement in the block beginning a new region. */
2017 old_region
= region
;
2019 if (gtransaction
*trans_stmt
= dyn_cast
<gtransaction
*> (g
))
2020 region
= tm_region_init_0 (region
, bb
, trans_stmt
);
2022 /* Process subsequent blocks. */
2023 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2024 if (!bitmap_bit_p (visited_blocks
, e
->dest
->index
))
2026 bitmap_set_bit (visited_blocks
, e
->dest
->index
);
2027 queue
.safe_push (e
->dest
);
2029 /* If the current block started a new region, make sure that only
2030 the entry block of the new region is associated with this region.
2031 Other successors are still part of the old region. */
2032 if (old_region
!= region
&& e
->dest
!= region
->entry_block
)
2033 bb_regions
[e
->dest
->index
] = old_region
;
2035 bb_regions
[e
->dest
->index
] = region
;
2038 while (!queue
.is_empty ());
2039 BITMAP_FREE (visited_blocks
);
/* The "gate" function for all transactional memory expansion and optimization
   passes.  We collect region information for each top-level transaction, and
   if we don't find any, we skip all of the TM passes.  Each region will have
   all of the exit blocks recorded, and the originating statement.  */
2053 calculate_dominance_info (CDI_DOMINATORS
);
2054 bitmap_obstack_initialize (&tm_obstack
);
2056 /* If the function is a TM_CLONE, then the entire function is the region. */
2057 if (decl_is_tm_clone (current_function_decl
))
2059 struct tm_region
*region
= (struct tm_region
*)
2060 obstack_alloc (&tm_obstack
.obstack
, sizeof (struct tm_region
));
2061 memset (region
, 0, sizeof (*region
));
2062 region
->entry_block
= single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
));
2063 /* For a clone, the entire function is the region. But even if
2064 we don't need to record any exit blocks, we may need to
2065 record irrevocable blocks. */
2066 region
->irr_blocks
= BITMAP_ALLOC (&tm_obstack
);
2068 tm_region_init (region
);
2072 tm_region_init (NULL
);
2074 /* If we didn't find any regions, cleanup and skip the whole tree
2075 of tm-related optimizations. */
2076 if (all_tm_regions
== NULL
)
2078 bitmap_obstack_release (&tm_obstack
);
const pass_data pass_data_tm_init =
{
  GIMPLE_PASS, /* type */
  "*tminit", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_tm_init : public gimple_opt_pass
{
public:
  pass_tm_init (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_init, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return gate_tm_init (); }

}; // class pass_tm_init

gimple_opt_pass *
make_pass_tm_init (gcc::context *ctxt)
{
  return new pass_tm_init (ctxt);
}

/* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
   represented by REGION.  */

static inline void
transaction_subcode_ior (struct tm_region *region, unsigned flags)
{
  if (region && region->transaction_stmt)
    {
      gtransaction *transaction_stmt = region->get_transaction_stmt ();
      flags |= gimple_transaction_subcode (transaction_stmt);
      gimple_transaction_set_subcode (transaction_stmt, flags);
    }
}

/* Construct a memory load in a transactional context.  Return the
   gimple statement performing the load, or NULL if there is no
   TM_LOAD builtin of the appropriate size to do the load.

   LOC is the location to use for the new statement(s).  */
2142 build_tm_load (location_t loc
, tree lhs
, tree rhs
, gimple_stmt_iterator
*gsi
)
2144 enum built_in_function code
= END_BUILTINS
;
2145 tree t
, type
= TREE_TYPE (rhs
), decl
;
2148 if (type
== float_type_node
)
2149 code
= BUILT_IN_TM_LOAD_FLOAT
;
2150 else if (type
== double_type_node
)
2151 code
= BUILT_IN_TM_LOAD_DOUBLE
;
2152 else if (type
== long_double_type_node
)
2153 code
= BUILT_IN_TM_LOAD_LDOUBLE
;
2154 else if (TYPE_SIZE_UNIT (type
) != NULL
2155 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
)))
2157 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type
)))
2160 code
= BUILT_IN_TM_LOAD_1
;
2163 code
= BUILT_IN_TM_LOAD_2
;
2166 code
= BUILT_IN_TM_LOAD_4
;
2169 code
= BUILT_IN_TM_LOAD_8
;
2174 if (code
== END_BUILTINS
)
2176 decl
= targetm
.vectorize
.builtin_tm_load (type
);
2181 decl
= builtin_decl_explicit (code
);
2183 t
= gimplify_addr (gsi
, rhs
);
2184 gcall
= gimple_build_call (decl
, 1, t
);
2185 gimple_set_location (gcall
, loc
);
2187 t
= TREE_TYPE (TREE_TYPE (decl
));
2188 if (useless_type_conversion_p (type
, t
))
2190 gimple_call_set_lhs (gcall
, lhs
);
2191 gsi_insert_before (gsi
, gcall
, GSI_SAME_STMT
);
2198 temp
= create_tmp_reg (t
);
2199 gimple_call_set_lhs (gcall
, temp
);
2200 gsi_insert_before (gsi
, gcall
, GSI_SAME_STMT
);
2202 t
= fold_build1 (VIEW_CONVERT_EXPR
, type
, temp
);
2203 g
= gimple_build_assign (lhs
, t
);
2204 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
2211 /* Similarly for storing TYPE in a transactional context. */
2214 build_tm_store (location_t loc
, tree lhs
, tree rhs
, gimple_stmt_iterator
*gsi
)
2216 enum built_in_function code
= END_BUILTINS
;
2217 tree t
, fn
, type
= TREE_TYPE (rhs
), simple_type
;
2220 if (type
== float_type_node
)
2221 code
= BUILT_IN_TM_STORE_FLOAT
;
2222 else if (type
== double_type_node
)
2223 code
= BUILT_IN_TM_STORE_DOUBLE
;
2224 else if (type
== long_double_type_node
)
2225 code
= BUILT_IN_TM_STORE_LDOUBLE
;
2226 else if (TYPE_SIZE_UNIT (type
) != NULL
2227 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
)))
2229 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type
)))
2232 code
= BUILT_IN_TM_STORE_1
;
2235 code
= BUILT_IN_TM_STORE_2
;
2238 code
= BUILT_IN_TM_STORE_4
;
2241 code
= BUILT_IN_TM_STORE_8
;
2246 if (code
== END_BUILTINS
)
2248 fn
= targetm
.vectorize
.builtin_tm_store (type
);
2253 fn
= builtin_decl_explicit (code
);
2255 simple_type
= TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn
))));
2257 if (TREE_CODE (rhs
) == CONSTRUCTOR
)
2259 /* Handle the easy initialization to zero. */
2260 if (!CONSTRUCTOR_ELTS (rhs
))
2261 rhs
= build_int_cst (simple_type
, 0);
2264 /* ...otherwise punt to the caller and probably use
2265 BUILT_IN_TM_MEMMOVE, because we can't wrap a
2266 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
2271 else if (!useless_type_conversion_p (simple_type
, type
))
2276 temp
= create_tmp_reg (simple_type
);
2277 t
= fold_build1 (VIEW_CONVERT_EXPR
, simple_type
, rhs
);
2278 g
= gimple_build_assign (temp
, t
);
2279 gimple_set_location (g
, loc
);
2280 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
2285 t
= gimplify_addr (gsi
, lhs
);
2286 gcall
= gimple_build_call (fn
, 2, t
, rhs
);
2287 gimple_set_location (gcall
, loc
);
2288 gsi_insert_before (gsi
, gcall
, GSI_SAME_STMT
);
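
/* Illustrative example (an assumption, not from the original sources): for
   a 4-byte global inside a transaction, a store such as

	global = t1;

   is rewritten by the code above into a call to BUILT_IN_TM_STORE_4,
   roughly _ITM_WU4 (&global, t1), with a VIEW_CONVERT_EXPR emitted first
   when the stored value's type does not match the builtin's argument
   type.  */
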
/* Expand an assignment statement into transactional builtins.  */

static void
expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  bool store_p = requires_barrier (region->entry_block, lhs, NULL);
  bool load_p = requires_barrier (region->entry_block, rhs, NULL);
  gimple gcall = NULL;

  if (!load_p && !store_p)
    {
      /* Add thread private addresses to log if applicable.  */
      requires_barrier (region->entry_block, lhs, stmt);
      gsi_next (gsi);
      return;
    }

  // Remove original load/store statement.
  gsi_remove (gsi, true);

  if (load_p && !store_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      gcall = build_tm_load (loc, lhs, rhs, gsi);
    }
  else if (store_p && !load_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
      gcall = build_tm_store (loc, lhs, rhs, gsi);
    }
  if (!gcall)
    {
      tree lhs_addr, rhs_addr, tmp;

      if (load_p)
	transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      if (store_p)
	transaction_subcode_ior (region, GTMA_HAVE_STORE);

      /* ??? Figure out if there's any possible overlap between the LHS
	 and the RHS and if not, use MEMCPY.  */

      if (load_p && is_gimple_reg (lhs))
	{
	  tmp = create_tmp_var (TREE_TYPE (lhs));
	  lhs_addr = build_fold_addr_expr (tmp);
	}
      else
	{
	  tmp = NULL_TREE;
	  lhs_addr = gimplify_addr (gsi, lhs);
	}
      rhs_addr = gimplify_addr (gsi, rhs);
      gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
				 3, lhs_addr, rhs_addr,
				 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
      gimple_set_location (gcall, loc);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

      if (tmp)
	{
	  gcall = gimple_build_assign (lhs, tmp);
	  gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
	}
    }

  /* Now that we have the load/store in its instrumented form, add
     thread private addresses to the log if applicable.  */
  if (!store_p)
    requires_barrier (region->entry_block, lhs, gcall);

  // The calls to build_tm_{store,load} above inserted the instrumented
  // call into the stream.
  // gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
}

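/* For illustration only: an aggregate copy inside a transaction, e.g.

       struct S { int a[16]; } dst, src;
       __transaction_atomic { dst = src; }

   has no sized load/store builtin of its own, so the fallback path above
   emits a single runtime call of the form

       _ITM_memmoveRtWt (&dst, &src, sizeof (struct S));

   where the actual callee is whatever BUILT_IN_TM_MEMMOVE resolves to
   for the target.  */
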
/* Expand a call statement as appropriate for a transaction.  That is,
   either verify that the call does not affect the transaction, or
   redirect the call to a clone that handles transactions, or change
   the transaction state to IRREVOCABLE.  Return true if the call is
   one of the builtins that end a transaction.  */

static bool
expand_call_tm (struct tm_region *region,
		gimple_stmt_iterator *gsi)
{
  gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
  tree lhs = gimple_call_lhs (stmt);
  tree fn_decl;
  struct cgraph_node *node;
  bool retval = false;

  fn_decl = gimple_call_fndecl (stmt);

  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
      || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
    transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
    transaction_subcode_ior (region, GTMA_HAVE_STORE);

  if (is_tm_pure_call (stmt))
    return false;

  if (fn_decl)
    retval = is_tm_ending_fndecl (fn_decl);
  if (!retval)
    {
      /* Assume all non-const/pure calls write to memory, except
	 transaction ending builtins.  */
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
    }

  /* For indirect calls, we already generated a call into the runtime.  */
  if (!fn_decl)
    {
      tree fn = gimple_call_fn (stmt);

      /* We are guaranteed never to go irrevocable on a safe or pure
	 call, and the pure call was handled above.  */
      if (is_tm_safe (fn))
	return false;
      else
	transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

      return false;
    }

  node = cgraph_node::get (fn_decl);
  /* All calls should have cgraph here.  */
  if (!node)
    {
      /* We can have a nodeless call here if some pass after IPA-tm
	 added uninstrumented calls.  For example, loop distribution
	 can transform certain loop constructs into __builtin_mem*
	 calls.  In this case, see if we have a suitable TM
	 replacement and fill in the gaps.  */
      gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
      enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
      gcc_assert (code == BUILT_IN_MEMCPY
		  || code == BUILT_IN_MEMMOVE
		  || code == BUILT_IN_MEMSET);

      tree repl = find_tm_replacement_function (fn_decl);
      if (repl)
	{
	  gimple_call_set_fndecl (stmt, repl);
	  node = cgraph_node::create (repl);
	  node->local.tm_may_enter_irr = false;
	  return expand_call_tm (region, gsi);
	}
      gcc_unreachable ();
    }
  if (node->local.tm_may_enter_irr)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  if (is_tm_abort (fn_decl))
    {
      transaction_subcode_ior (region, GTMA_HAVE_ABORT);
      return true;
    }

  /* Instrument the store if needed.

     If the assignment happens inside the function call (return slot
     optimization), there is no instrumentation to be done, since
     the callee should have done the right thing.  */
  if (lhs && requires_barrier (region->entry_block, lhs, stmt)
      && !gimple_call_return_slot_opt_p (stmt))
    {
      tree tmp = create_tmp_reg (TREE_TYPE (lhs));
      location_t loc = gimple_location (stmt);
      edge fallthru_edge = NULL;
      gassign *assign_stmt;

      /* Remember if the call was going to throw.  */
      if (stmt_can_throw_internal (stmt))
	{
	  edge_iterator ei;
	  edge e;
	  basic_block bb = gimple_bb (stmt);

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (e->flags & EDGE_FALLTHRU)
	      {
		fallthru_edge = e;
		break;
	      }
	}

      gimple_call_set_lhs (stmt, tmp);
      assign_stmt = gimple_build_assign (lhs, tmp);
      gimple_set_location (assign_stmt, loc);

      /* We cannot throw in the middle of a BB.  If the call was going
	 to throw, place the instrumentation on the fallthru edge, so
	 the call remains the last statement in the block.  */
      if (fallthru_edge)
	{
	  gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt);
	  gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
	  expand_assign_tm (region, &fallthru_gsi);
	  gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
	  pending_edge_inserts_p = true;
	}
      else
	{
	  gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING);
	  expand_assign_tm (region, gsi);
	}

      transaction_subcode_ior (region, GTMA_HAVE_STORE);
    }

  return retval;
}

/* Expand all statements in BB as appropriate for being inside
   a transaction.  */

static void
expand_block_tm (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
	{
	case GIMPLE_ASSIGN:
	  /* Only memory reads/writes need to be instrumented.  */
	  if (gimple_assign_single_p (stmt)
	      && !gimple_clobber_p (stmt))
	    {
	      expand_assign_tm (region, &gsi);
	      continue;
	    }
	  break;

	case GIMPLE_CALL:
	  if (expand_call_tm (region, &gsi))
	    return;
	  break;

	default:
	  break;
	}
      if (!gsi_end_p (gsi))
	gsi_next (&gsi);
    }
}

/* Return the list of basic-blocks in REGION.

   STOP_AT_IRREVOCABLE_P is true if caller is uninterested in blocks
   following a TM_IRREVOCABLE call.

   INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the
   uninstrumented code path blocks in the list of basic blocks
   returned, false otherwise.  */

static vec<basic_block>
get_tm_region_blocks (basic_block entry_block,
		      bitmap exit_blocks,
		      bitmap irr_blocks,
		      bitmap all_region_blocks,
		      bool stop_at_irrevocable_p,
		      bool include_uninstrumented_p = true)
{
  vec<basic_block> bbs = vNULL;
  unsigned i;
  edge e;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  i = 0;
  bbs.safe_push (entry_block);
  bitmap_set_bit (visited_blocks, entry_block->index);

  do
    {
      basic_block bb = bbs[i++];

      if (exit_blocks
	  && bitmap_bit_p (exit_blocks, bb->index))
	continue;

      if (stop_at_irrevocable_p
	  && irr_blocks
	  && bitmap_bit_p (irr_blocks, bb->index))
	continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if ((include_uninstrumented_p
	     || !(e->flags & EDGE_TM_UNINSTRUMENTED))
	    && !bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    bbs.safe_push (e->dest);
	  }
    }
  while (i < bbs.length ());

  if (all_region_blocks)
    bitmap_ior_into (all_region_blocks, visited_blocks);

  BITMAP_FREE (visited_blocks);
  return bbs;
}

// Callback data for collect_bb2reg.
struct bb2reg_stuff
{
  vec<tm_region_p> *bb2reg;
  bool include_uninstrumented_p;
};

// Callback for expand_regions, collect innermost region data for each bb.
static void *
collect_bb2reg (struct tm_region *region, void *data)
{
  struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
  vec<tm_region_p> *bb2reg = stuff->bb2reg;
  vec<basic_block> queue;
  unsigned int i;
  basic_block bb;

  queue = get_tm_region_blocks (region->entry_block,
				region->exit_blocks,
				region->irr_blocks,
				NULL,
				/*stop_at_irr_p=*/true,
				stuff->include_uninstrumented_p);

  // We expect expand_region to perform a post-order traversal of the region
  // tree.  Therefore the last region seen for any bb is the innermost.
  FOR_EACH_VEC_ELT (queue, i, bb)
    (*bb2reg)[bb->index] = region;

  queue.release ();
  return NULL;
}

// Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
// which a basic block belongs.  Note that we only consider the instrumented
// code paths for the region; the uninstrumented code paths are ignored if
// INCLUDE_UNINSTRUMENTED_P is false.
//
// ??? This data is very similar to the bb_regions array that is collected
// during tm_region_init.  Or, rather, this data is similar to what could
// be used within tm_region_init.  The actual computation in tm_region_init
// begins and ends with bb_regions entirely full of NULL pointers, due to
// the way in which pointers are swapped in and out of the array.
//
// ??? Our callers expect that blocks are not shared between transactions.
// When the optimizers get too smart, and blocks are shared, then during
// the tm_mark phase we'll add log entries to only one of the two transactions,
// and in the tm_edge phase we'll add edges to the CFG that create invalid
// cycles.  The symptom being SSA defs that do not dominate their uses.
// Note that the optimizers were locally correct with their transformation,
// as we have no info within the program that suggests that the blocks cannot
// be shared.
//
// ??? There is currently a hack inside tree-ssa-pre.c to work around the
// only known instance of this block sharing.

static vec<tm_region_p>
get_bb_regions_instrumented (bool traverse_clones,
			     bool include_uninstrumented_p)
{
  unsigned n = last_basic_block_for_fn (cfun);
  struct bb2reg_stuff stuff;
  vec<tm_region_p> ret;

  ret.create (n);
  ret.safe_grow_cleared (n);
  stuff.bb2reg = &ret;
  stuff.include_uninstrumented_p = include_uninstrumented_p;
  expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);

  return ret;
}

/* Set the IN_TRANSACTION for all gimple statements that appear in a
   transaction.  */

void
compute_transaction_bits (void)
{
  struct tm_region *region;
  vec<basic_block> queue;
  unsigned int i;
  basic_block bb;

  /* ?? Perhaps we need to abstract gate_tm_init further, because we
     certainly don't need it to calculate CDI_DOMINATOR info.  */
  gate_tm_init ();

  FOR_EACH_BB_FN (bb, cfun)
    bb->flags &= ~BB_IN_TRANSACTION;

  for (region = all_tm_regions; region; region = region->next)
    {
      queue = get_tm_region_blocks (region->entry_block,
				    region->exit_blocks,
				    region->irr_blocks,
				    NULL,
				    /*stop_at_irr_p=*/true);
      for (i = 0; queue.iterate (i, &bb); ++i)
	bb->flags |= BB_IN_TRANSACTION;
      queue.release ();
    }

  if (all_tm_regions)
    bitmap_obstack_release (&tm_obstack);
}

/* Replace the GIMPLE_TRANSACTION in this region with the corresponding
   call to BUILT_IN_TM_START.  */

static void *
expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
{
  tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
  basic_block transaction_bb = gimple_bb (region->transaction_stmt);
  tree tm_state = region->tm_state;
  tree tm_state_type = TREE_TYPE (tm_state);
  edge abort_edge = NULL;
  edge inst_edge = NULL;
  edge uninst_edge = NULL;
  edge fallthru_edge = NULL;

  // Identify the various successors of the transaction start.
  {
    edge_iterator i;
    edge e;
    FOR_EACH_EDGE (e, i, transaction_bb->succs)
      {
	if (e->flags & EDGE_TM_ABORT)
	  abort_edge = e;
	else if (e->flags & EDGE_TM_UNINSTRUMENTED)
	  uninst_edge = e;
	else
	  inst_edge = e;
	if (e->flags & EDGE_FALLTHRU)
	  fallthru_edge = e;
      }
  }

  /* ??? There are plenty of bits here we're not computing.  */
  {
    int subcode = gimple_transaction_subcode (region->get_transaction_stmt ());
    int flags = 0;
    if (subcode & GTMA_DOES_GO_IRREVOCABLE)
      flags |= PR_DOESGOIRREVOCABLE;
    if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
      flags |= PR_HASNOIRREVOCABLE;
    /* If the transaction does not have an abort in lexical scope and is not
       marked as an outer transaction, then it will never abort.  */
    if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
      flags |= PR_HASNOABORT;
    if ((subcode & GTMA_HAVE_STORE) == 0)
      flags |= PR_READONLY;
    if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
      flags |= PR_INSTRUMENTEDCODE;
    if (uninst_edge)
      flags |= PR_UNINSTRUMENTEDCODE;
    if (subcode & GTMA_IS_OUTER)
      region->original_transaction_was_outer = true;
    tree t = build_int_cst (tm_state_type, flags);
    gcall *call = gimple_build_call (tm_start, 1, t);
    gimple_call_set_lhs (call, tm_state);
    gimple_set_location (call, gimple_location (region->transaction_stmt));

    // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
    gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
    gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
    gsi_insert_before (&gsi, call, GSI_SAME_STMT);
    gsi_remove (&gsi, true);
    region->transaction_stmt = call;
  }

  // Generate log saves.
  if (!tm_log_save_addresses.is_empty ())
    tm_log_emit_saves (region->entry_block, transaction_bb);

  // In the beginning, we've no tests to perform on transaction restart.
  // Note that after this point, transaction_bb becomes the "most recent
  // block containing tests for the transaction".
  region->restart_block = region->entry_block;

  // Generate log restores.
  if (!tm_log_save_addresses.is_empty ())
    {
      basic_block test_bb = create_empty_bb (transaction_bb);
      basic_block code_bb = create_empty_bb (test_bb);
      basic_block join_bb = create_empty_bb (code_bb);
      add_bb_to_loop (test_bb, transaction_bb->loop_father);
      add_bb_to_loop (code_bb, transaction_bb->loop_father);
      add_bb_to_loop (join_bb, transaction_bb->loop_father);
      if (region->restart_block == region->entry_block)
	region->restart_block = test_bb;

      tree t1 = create_tmp_reg (tm_state_type);
      tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
      gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
      gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (tm_state_type, 0);
      stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      tm_log_emit_restores (region->entry_block, code_bb);

      edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
      edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
      edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
      redirect_edge_pred (fallthru_edge, join_bb);

      join_bb->frequency = test_bb->frequency = transaction_bb->frequency;
      join_bb->count = test_bb->count = transaction_bb->count;

      ei->probability = PROB_ALWAYS;
      et->probability = PROB_LIKELY;
      ef->probability = PROB_UNLIKELY;
      et->count = apply_probability (test_bb->count, et->probability);
      ef->count = apply_probability (test_bb->count, ef->probability);

      code_bb->count = et->count;
      code_bb->frequency = EDGE_FREQUENCY (et);

      transaction_bb = join_bb;
    }

  // If we have an ABORT edge, create a test to perform the abort.
  if (abort_edge)
    {
      basic_block test_bb = create_empty_bb (transaction_bb);
      add_bb_to_loop (test_bb, transaction_bb->loop_father);
      if (region->restart_block == region->entry_block)
	region->restart_block = test_bb;

      tree t1 = create_tmp_reg (tm_state_type);
      tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
      gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
      gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (tm_state_type, 0);
      stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
      test_bb->frequency = transaction_bb->frequency;
      test_bb->count = transaction_bb->count;
      ei->probability = PROB_ALWAYS;

      // Not abort edge.  If both are live, choose one at random as
      // we'll be fixing that up below.
      redirect_edge_pred (fallthru_edge, test_bb);
      fallthru_edge->flags = EDGE_FALSE_VALUE;
      fallthru_edge->probability = PROB_VERY_LIKELY;
      fallthru_edge->count
	= apply_probability (test_bb->count, fallthru_edge->probability);

      // Abort/over edge.
      redirect_edge_pred (abort_edge, test_bb);
      abort_edge->flags = EDGE_TRUE_VALUE;
      abort_edge->probability = PROB_VERY_UNLIKELY;
      abort_edge->count
	= apply_probability (test_bb->count, abort_edge->probability);

      transaction_bb = test_bb;
    }

  // If we have both instrumented and uninstrumented code paths, select one.
  if (inst_edge && uninst_edge)
    {
      basic_block test_bb = create_empty_bb (transaction_bb);
      add_bb_to_loop (test_bb, transaction_bb->loop_father);
      if (region->restart_block == region->entry_block)
	region->restart_block = test_bb;

      tree t1 = create_tmp_reg (tm_state_type);
      tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);

      gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
      gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (tm_state_type, 0);
      stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      // Create the edge into test_bb first, as we want to copy values
      // out of the fallthru edge.
      edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
      e->probability = fallthru_edge->probability;
      test_bb->count = e->count = fallthru_edge->count;
      test_bb->frequency = EDGE_FREQUENCY (e);

      // Now update the edges to the inst/uninst implementations.
      // For now assume that the paths are equally likely.  When using HTM,
      // we'll try the uninst path first and fallback to inst path if htm
      // buffers are exceeded.  Without HTM we start with the inst path and
      // use the uninst path when falling back to serial mode.
      redirect_edge_pred (inst_edge, test_bb);
      inst_edge->flags = EDGE_FALSE_VALUE;
      inst_edge->probability = REG_BR_PROB_BASE / 2;
      inst_edge->count
	= apply_probability (test_bb->count, inst_edge->probability);

      redirect_edge_pred (uninst_edge, test_bb);
      uninst_edge->flags = EDGE_TRUE_VALUE;
      uninst_edge->probability = REG_BR_PROB_BASE / 2;
      uninst_edge->count
	= apply_probability (test_bb->count, uninst_edge->probability);
    }

  // If we have no previous special cases, and we have PHIs at the beginning
  // of the atomic region, this means we have a loop at the beginning of the
  // atomic region that shares the first block.  This can cause problems with
  // the transaction restart abnormal edges to be added in the tm_edges pass.
  // Solve this by adding a new empty block to receive the abnormal edges.
  if (region->restart_block == region->entry_block
      && phi_nodes (region->entry_block))
    {
      basic_block empty_bb = create_empty_bb (transaction_bb);
      region->restart_block = empty_bb;
      add_bb_to_loop (empty_bb, transaction_bb->loop_father);

      redirect_edge_pred (fallthru_edge, empty_bb);
      make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
    }

  return NULL;
}

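/* For illustration only: after this function runs, a transaction that has
   an abort and both code paths looks roughly like

       tm_state = _ITM_beginTransaction (flags);
       if (tm_state & A_ABORTTRANSACTION) goto <abort handler>;
       if (tm_state & A_RUNUNINSTRUMENTEDCODE) goto <uninstrumented body>;
       <instrumented body>

   with region->restart_block recorded so that the tm_edges pass can later
   wire the abnormal restart edges to it.  The exact runtime entry point is
   whatever BUILT_IN_TM_START resolves to, and the precise ordering of the
   tests follows the code above.  */
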
/* Generate the temporary to be used for the return value of
   BUILT_IN_TM_START.  */

static void *
generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
{
  tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
  region->tm_state =
    create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");

  // Reset the subcode, post optimizations.  We'll fill this in
  // again as we process blocks.
  if (region->exit_blocks)
    {
      gtransaction *transaction_stmt = region->get_transaction_stmt ();
      unsigned int subcode = gimple_transaction_subcode (transaction_stmt);

      if (subcode & GTMA_DOES_GO_IRREVOCABLE)
	subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
		    | GTMA_MAY_ENTER_IRREVOCABLE
		    | GTMA_HAS_NO_INSTRUMENTATION);
      else
	subcode &= GTMA_DECLARATION_MASK;
      gimple_transaction_set_subcode (transaction_stmt, subcode);
    }

  return NULL;
}

// Propagate flags from inner transactions outwards.
static void
propagate_tm_flags_out (struct tm_region *region)
{
  if (region == NULL)
    return;
  propagate_tm_flags_out (region->inner);

  if (region->outer && region->outer->transaction_stmt)
    {
      unsigned s
	= gimple_transaction_subcode (region->get_transaction_stmt ());
      s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
	    | GTMA_MAY_ENTER_IRREVOCABLE);
      s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ());
      gimple_transaction_set_subcode (region->outer->get_transaction_stmt (),
				      s);
    }

  propagate_tm_flags_out (region->next);
}

/* Entry point to the MARK phase of TM expansion.  Here we replace
   transactional memory statements with calls to builtins, and function
   calls with their transactional clones (if available).  But we don't
   yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges.  */

static unsigned int
execute_tm_mark (void)
{
  pending_edge_inserts_p = false;

  expand_regions (all_tm_regions, generate_tm_state, NULL,
		  /*traverse_clones=*/true);

  tm_log_init ();

  vec<tm_region_p> bb_regions
    = get_bb_regions_instrumented (/*traverse_clones=*/true,
				   /*include_uninstrumented_p=*/false);
  struct tm_region *r;
  unsigned i;

  // Expand memory operations into calls into the runtime.
  // This collects log entries as well.
  FOR_EACH_VEC_ELT (bb_regions, i, r)
    if (r != NULL)
      {
	if (r->transaction_stmt)
	  {
	    unsigned sub
	      = gimple_transaction_subcode (r->get_transaction_stmt ());

	    /* If we're sure to go irrevocable, there won't be
	       anything to expand, since the run-time will go
	       irrevocable right away.  */
	    if (sub & GTMA_DOES_GO_IRREVOCABLE
		&& sub & GTMA_MAY_ENTER_IRREVOCABLE)
	      continue;
	  }
	expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i));
      }

  bb_regions.release ();

  // Propagate flags from inner transactions outwards.
  propagate_tm_flags_out (all_tm_regions);

  // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
  expand_regions (all_tm_regions, expand_transaction, NULL,
		  /*traverse_clones=*/false);

  tm_log_emit ();

  if (pending_edge_inserts_p)
    gsi_commit_edge_inserts ();
  free_dominance_info (CDI_DOMINATORS);
  return 0;
}

namespace {

const pass_data pass_data_tm_mark =
{
  GIMPLE_PASS, /* type */
  "tmmark", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_tm_mark : public gimple_opt_pass
{
public:
  pass_tm_mark (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_mark, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return execute_tm_mark (); }

}; // class pass_tm_mark

} // anon namespace

gimple_opt_pass *
make_pass_tm_mark (gcc::context *ctxt)
{
  return new pass_tm_mark (ctxt);
}

/* Create an abnormal edge from STMT at iter, splitting the block
   as necessary.  Adjust *PNEXT as needed for the split block.  */

static void
split_bb_make_tm_edge (gimple stmt, basic_block dest_bb,
		       gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
{
  basic_block bb = gimple_bb (stmt);
  if (!gsi_one_before_end_p (iter))
    {
      edge e = split_block (bb, stmt);
      *pnext = gsi_start_bb (e->dest);
    }
  make_edge (bb, dest_bb, EDGE_ABNORMAL);

  // Record the need for the edge for the benefit of the rtl passes.
  if (cfun->gimple_df->tm_restart == NULL)
    cfun->gimple_df->tm_restart
      = hash_table<tm_restart_hasher>::create_ggc (31);

  struct tm_restart_node dummy;
  dummy.stmt = stmt;
  dummy.label_or_list = gimple_block_label (dest_bb);

  tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy,
								   INSERT);
  struct tm_restart_node *n = *slot;
  if (n == NULL)
    {
      n = ggc_alloc<tm_restart_node> ();
      *n = dummy;
    }
  else
    {
      tree old = n->label_or_list;
      if (TREE_CODE (old) == LABEL_DECL)
	old = tree_cons (NULL, old, NULL);
      n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
    }
  *slot = n;
}

/* Split block BB as necessary for every builtin function we added, and
   wire up the abnormal back edges implied by the transaction restart.  */

static void
expand_block_edges (struct tm_region *const region, basic_block bb)
{
  gimple_stmt_iterator gsi, next_gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
    {
      gimple stmt = gsi_stmt (gsi);
      gcall *call_stmt;

      next_gsi = gsi;
      gsi_next (&next_gsi);

      // ??? Shouldn't we split for any non-pure, non-irrevocable function?
      call_stmt = dyn_cast <gcall *> (stmt);
      if ((!call_stmt)
	  || (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0)
	continue;

      if (DECL_FUNCTION_CODE (gimple_call_fndecl (call_stmt))
	  == BUILT_IN_TM_ABORT)
	{
	  // If we have a ``_transaction_cancel [[outer]]'', there is only
	  // one abnormal edge: to the transaction marked OUTER.
	  // All compiler-generated instances of BUILT_IN_TM_ABORT have a
	  // constant argument, which we can examine here.  Users invoking
	  // TM_ABORT directly get what they deserve.
	  tree arg = gimple_call_arg (call_stmt, 0);
	  if (TREE_CODE (arg) == INTEGER_CST
	      && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
	      && !decl_is_tm_clone (current_function_decl))
	    {
	      // Find the GTMA_IS_OUTER transaction.
	      for (struct tm_region *o = region; o; o = o->outer)
		if (o->original_transaction_was_outer)
		  {
		    split_bb_make_tm_edge (call_stmt, o->restart_block,
					   gsi, &next_gsi);
		    break;
		  }

	      // Otherwise, the front-end should have semantically checked
	      // outer aborts, but in either case the target region is not
	      // within this function.
	      continue;
	    }

	  // Non-outer, TM aborts have an abnormal edge to the inner-most
	  // transaction, the one being aborted;
	  split_bb_make_tm_edge (call_stmt, region->restart_block, gsi,
				 &next_gsi);
	}

      // All TM builtins have an abnormal edge to the outer-most transaction.
      // We never restart inner transactions.  For tm clones, we know a-priori
      // that the outer-most transaction is outside the function.
      if (decl_is_tm_clone (current_function_decl))
	continue;

      if (cfun->gimple_df->tm_restart == NULL)
	cfun->gimple_df->tm_restart
	  = hash_table<tm_restart_hasher>::create_ggc (31);

      // All TM builtins have an abnormal edge to the outer-most transaction.
      // We never restart inner transactions.
      for (struct tm_region *o = region; o; o = o->outer)
	if (!o->outer)
	  {
	    split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi);
	    break;
	  }

      // Delete any tail-call annotation that may have been added.
      // The tail-call pass may have mis-identified the commit as being
      // a candidate because we had not yet added this restart edge.
      gimple_call_set_tail (call_stmt, false);
    }
}

/* Entry point to the final expansion of transactional nodes. */

namespace {

const pass_data pass_data_tm_edges =
{
  GIMPLE_PASS, /* type */
  "tmedge", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_tm_edges : public gimple_opt_pass
{
public:
  pass_tm_edges (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_edges, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_tm_edges

unsigned int
pass_tm_edges::execute (function *fun)
{
  vec<tm_region_p> bb_regions
    = get_bb_regions_instrumented (/*traverse_clones=*/false,
				   /*include_uninstrumented_p=*/true);
  struct tm_region *r;
  unsigned i;

  FOR_EACH_VEC_ELT (bb_regions, i, r)
    if (r != NULL)
      expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i));

  bb_regions.release ();

  /* We've got to release the dominance info now, to indicate that it
     must be rebuilt completely.  Otherwise we'll crash trying to update
     the SSA web in the TODO section following this pass.  */
  free_dominance_info (CDI_DOMINATORS);
  bitmap_obstack_release (&tm_obstack);
  all_tm_regions = NULL;

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_tm_edges (gcc::context *ctxt)
{
  return new pass_tm_edges (ctxt);
}

/* Helper function for expand_regions.  Expand REGION and recurse to
   the inner region.  Call CALLBACK on each region.  CALLBACK returns
   NULL to continue the traversal, otherwise a non-null value which
   this function will return as well.  TRAVERSE_CLONES is true if we
   should traverse transactional clones.  */

static void *
expand_regions_1 (struct tm_region *region,
		  void *(*callback)(struct tm_region *, void *),
		  void *data,
		  bool traverse_clones)
{
  void *retval = NULL;
  if (region->exit_blocks
      || (traverse_clones && decl_is_tm_clone (current_function_decl)))
    {
      retval = callback (region, data);
      if (retval)
	return retval;
    }
  if (region->inner)
    {
      retval = expand_regions (region->inner, callback, data, traverse_clones);
      if (retval)
	return retval;
    }
  return retval;
}

/* Traverse the regions enclosed and including REGION.  Execute
   CALLBACK for each region, passing DATA.  CALLBACK returns NULL to
   continue the traversal, otherwise a non-null value which this
   function will return as well.  TRAVERSE_CLONES is true if we should
   traverse transactional clones.  */

static void *
expand_regions (struct tm_region *region,
		void *(*callback)(struct tm_region *, void *),
		void *data,
		bool traverse_clones)
{
  void *retval = NULL;
  while (region)
    {
      retval = expand_regions_1 (region, callback, data, traverse_clones);
      if (retval)
	return retval;

      region = region->next;
    }
  return retval;
}

/* A unique TM memory operation.  */
typedef struct tm_memop
{
  /* Unique ID that all memory operations to the same location have.  */
  unsigned int value_id;
  /* Address of load/store.  */
  tree addr;
} tm_memop, *tm_memop_t;

/* TM memory operation hashtable helpers.  */

struct tm_memop_hasher : typed_free_remove <tm_memop>
{
  typedef tm_memop value_type;
  typedef tm_memop compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Htab support.  Return a hash value for a `tm_memop'.  */
inline hashval_t
tm_memop_hasher::hash (const value_type *mem)
{
  tree addr = mem->addr;
  /* We drill down to the SSA_NAME/DECL for the hash, but equality is
     actually done with operand_equal_p (see tm_memop_eq).  */
  if (TREE_CODE (addr) == ADDR_EXPR)
    addr = TREE_OPERAND (addr, 0);
  return iterative_hash_expr (addr, 0);
}

/* Htab support.  Return true if two tm_memop's are the same.  */
inline bool
tm_memop_hasher::equal (const value_type *mem1, const compare_type *mem2)
{
  return operand_equal_p (mem1->addr, mem2->addr, 0);
}

/* Sets for solving data flow equations in the memory optimization pass.  */
struct tm_memopt_bitmaps
{
  /* Stores available to this BB upon entry.  Basically, stores that
     dominate this BB.  */
  bitmap store_avail_in;
  /* Stores available at the end of this BB.  */
  bitmap store_avail_out;
  bitmap store_antic_in;
  bitmap store_antic_out;
  /* Reads available to this BB upon entry.  Basically, reads that
     dominate this BB.  */
  bitmap read_avail_in;
  /* Reads available at the end of this BB.  */
  bitmap read_avail_out;
  /* Reads performed in this BB.  */
  bitmap read_local;
  /* Writes performed in this BB.  */
  bitmap store_local;

  /* Temporary storage for pass.  */
  /* Is the current BB in the worklist?  */
  bool avail_in_worklist_p;
  /* Have we visited this BB?  */
  bool visited_p;
};

static bitmap_obstack tm_memopt_obstack;

/* Unique counter for TM loads and stores.  Loads and stores of the
   same address get the same ID.  */
static unsigned int tm_memopt_value_id;
static hash_table<tm_memop_hasher> *tm_memopt_value_numbers;

#define STORE_AVAIL_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
#define STORE_AVAIL_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
#define STORE_ANTIC_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
#define STORE_ANTIC_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
#define READ_AVAIL_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
#define READ_AVAIL_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
#define READ_LOCAL(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
#define STORE_LOCAL(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
#define AVAIL_IN_WORKLIST_P(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
#define BB_VISITED_P(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p

/* Given a TM load/store in STMT, return the value number for the address
   it accesses.  */

static unsigned int
tm_memopt_value_number (gimple stmt, enum insert_option op)
{
  struct tm_memop tmpmem, *mem;
  tm_memop **slot;

  gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
  tmpmem.addr = gimple_call_arg (stmt, 0);
  slot = tm_memopt_value_numbers->find_slot (&tmpmem, op);
  if (*slot)
    mem = *slot;
  else if (op == INSERT)
    {
      mem = XNEW (struct tm_memop);
      *slot = mem;
      mem->value_id = tm_memopt_value_id++;
      mem->addr = tmpmem.addr;
    }
  else
    gcc_unreachable ();
  return mem->value_id;
}

/* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL.  */

static void
tm_memopt_accumulate_memops (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      bitmap bits;
      unsigned int loc;

      if (is_tm_store (stmt))
	bits = STORE_LOCAL (bb);
      else if (is_tm_load (stmt))
	bits = READ_LOCAL (bb);
      else
	continue;

      loc = tm_memopt_value_number (stmt, INSERT);
      bitmap_set_bit (bits, loc);
      if (dump_file)
	{
	  fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
		   is_tm_load (stmt) ? "LOAD" : "STORE", loc,
		   gimple_bb (stmt)->index);
	  print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
	  fprintf (dump_file, "\n");
	}
    }
}

/* Prettily dump one of the memopt sets.  BITS is the bitmap to dump.  */

static void
dump_tm_memopt_set (const char *set_name, bitmap bits)
{
  unsigned i;
  bitmap_iterator bi;
  const char *comma = "";

  fprintf (dump_file, "TM memopt: %s: [", set_name);
  EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
    {
      hash_table<tm_memop_hasher>::iterator hi;
      struct tm_memop *mem = NULL;

      /* Yeah, yeah, yeah.  Whatever.  This is just for debugging.  */
      FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi)
	if (mem->value_id == i)
	  break;
      gcc_assert (mem->value_id == i);
      fprintf (dump_file, "%s", comma);
      comma = ", ";
      print_generic_expr (dump_file, mem->addr, 0);
    }
  fprintf (dump_file, "]\n");
}

/* Prettily dump all of the memopt sets in BLOCKS.  */

static void
dump_tm_memopt_sets (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    {
      fprintf (dump_file, "------------BB %d---------\n", bb->index);
      dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
      dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
      dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
      dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
      dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
      dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
    }
}

/* Compute {STORE,READ}_AVAIL_IN for the basic block BB.  */

static void
tm_memopt_compute_avin (basic_block bb)
{
  edge e;
  unsigned ix;

  /* Seed with the AVOUT of any predecessor.  */
  for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
    {
      e = EDGE_PRED (bb, ix);
      /* Make sure we have already visited this BB, and is thus
	 initialized.

	  If e->src->aux is NULL, this predecessor is actually on an
	  enclosing transaction.  We only care about the current
	  transaction, so ignore it.  */
      if (e->src->aux && BB_VISITED_P (e->src))
	{
	  bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
	  bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
	  break;
	}
    }

  for (; ix < EDGE_COUNT (bb->preds); ix++)
    {
      e = EDGE_PRED (bb, ix);
      if (e->src->aux && BB_VISITED_P (e->src))
	{
	  bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
	  bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
	}
    }

  BB_VISITED_P (bb) = true;
}

/* Compute the STORE_ANTIC_IN for the basic block BB.  */

static void
tm_memopt_compute_antin (basic_block bb)
{
  edge e;
  unsigned ix;

  /* Seed with the ANTIC_OUT of any successor.  */
  for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
    {
      e = EDGE_SUCC (bb, ix);
      /* Make sure we have already visited this BB, and is thus
	 initialized.  */
      if (BB_VISITED_P (e->dest))
	{
	  bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
	  break;
	}
    }

  for (; ix < EDGE_COUNT (bb->succs); ix++)
    {
      e = EDGE_SUCC (bb, ix);
      if (BB_VISITED_P (e->dest))
	bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
    }

  BB_VISITED_P (bb) = true;
}

/* Compute the AVAIL sets for every basic block in BLOCKS.

   We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:

     AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
     AVAIL_IN[bb]  = intersect (AVAIL_OUT[predecessors])

   This is basically what we do in lcm's compute_available(), but here
   we calculate two sets of sets (one for STOREs and one for READs),
   and we work on a region instead of the entire CFG.

   REGION is the TM region.
   BLOCKS are the basic blocks in the region.  */

static void
tm_memopt_compute_available (struct tm_region *region,
			     vec<basic_block> blocks)
{
  edge e;
  basic_block *worklist, *qin, *qout, *qend, bb;
  unsigned int qlen, i;
  edge_iterator ei;
  bool changed;

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks in the region.  */
  qlen = blocks.length () - 1;
  qin = qout = worklist =
    XNEWVEC (basic_block, qlen);

  /* Put every block in the region on the worklist.  */
  for (i = 0; blocks.iterate (i, &bb); ++i)
    {
      /* Seed AVAIL_OUT with the LOCAL set.  */
      bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
      bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));

      AVAIL_IN_WORKLIST_P (bb) = true;
      /* No need to insert the entry block, since it has an AVIN of
	 null, and an AVOUT that has already been seeded in.  */
      if (bb != region->entry_block)
	*qin++ = bb;
    }

  /* The entry block has been initialized with the local sets.  */
  BB_VISITED_P (region->entry_block) = true;

  qin = worklist;
  qend = &worklist[qlen];

  /* Iterate until the worklist is empty.  */
  while (qlen)
    {
      /* Take the first entry off the worklist.  */
      bb = *qout++;
      qlen--;

      if (qout >= qend)
	qout = worklist;

      /* This block can be added to the worklist again if necessary.  */
      AVAIL_IN_WORKLIST_P (bb) = false;
      tm_memopt_compute_avin (bb);

      /* Note: We do not add the LOCAL sets here because we already
	 seeded the AVAIL_OUT sets with them.  */
      changed  = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
      changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
      if (changed
	  && (region->exit_blocks == NULL
	      || !bitmap_bit_p (region->exit_blocks, bb->index)))
	/* If the out state of this block changed, then we need to add
	   its successors to the worklist if they are not already in.  */
	FOR_EACH_EDGE (e, ei, bb->succs)
	  if (!AVAIL_IN_WORKLIST_P (e->dest)
	      && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      *qin++ = e->dest;
	      AVAIL_IN_WORKLIST_P (e->dest) = true;
	      qlen++;

	      if (qin >= qend)
		qin = worklist;
	    }
    }

  free (worklist);

  if (dump_file)
    dump_tm_memopt_sets (blocks);
}

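/* For illustration only: on a diamond-shaped region

	    BB1  (TM store to *p)
	   /   \
	 BB2   BB3
	   \   /
	    BB4

   the equations above give STORE_AVAIL_OUT(BB1) = {p}; then
   STORE_AVAIL_IN(BB2) = STORE_AVAIL_IN(BB3) = {p}, and since
   AVAIL_OUT = AVAIL_IN U LOCAL, both branches keep p available, so
   STORE_AVAIL_IN(BB4) = {p} and a later store to *p in BB4 can be
   rewritten as a write-after-write below.  */
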
/* Compute ANTIC sets for every basic block in BLOCKS.

   We compute STORE_ANTIC_OUT as follows:

	STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
	STORE_ANTIC_IN[bb]  = intersect(STORE_ANTIC_OUT[successors])

   REGION is the TM region.
   BLOCKS are the basic blocks in the region.  */

static void
tm_memopt_compute_antic (struct tm_region *region,
			 vec<basic_block> blocks)
{
  edge e;
  basic_block *worklist, *qin, *qout, *qend, bb;
  unsigned int qlen;
  int i;
  edge_iterator ei;

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks in the region.  */
  qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());

  for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
    {
      bb = blocks[i];

      /* Seed ANTIC_OUT with the LOCAL set.  */
      bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));

      /* Put every block in the region on the worklist.  */
      AVAIL_IN_WORKLIST_P (bb) = true;
      /* No need to insert exit blocks, since their ANTIC_IN is NULL,
	 and their ANTIC_OUT has already been seeded in.  */
      if (region->exit_blocks
	  && !bitmap_bit_p (region->exit_blocks, bb->index))
	{
	  qlen++;
	  *qin++ = bb;
	}
    }

  /* The exit blocks have been initialized with the local sets.  */
  if (region->exit_blocks)
    {
      unsigned int i;
      bitmap_iterator bi;
      EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
	BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true;
    }

  qin = worklist;
  qend = &worklist[qlen];

  /* Iterate until the worklist is empty.  */
  while (qlen)
    {
      /* Take the first entry off the worklist.  */
      bb = *qout++;
      qlen--;

      if (qout >= qend)
	qout = worklist;

      /* This block can be added to the worklist again if necessary.  */
      AVAIL_IN_WORKLIST_P (bb) = false;
      tm_memopt_compute_antin (bb);

      /* Note: We do not add the LOCAL sets here because we already
	 seeded the ANTIC_OUT sets with them.  */
      if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
	  && bb != region->entry_block)
	/* If the out state of this block changed, then we need to add
	   its predecessors to the worklist if they are not already in.  */
	FOR_EACH_EDGE (e, ei, bb->preds)
	  if (!AVAIL_IN_WORKLIST_P (e->src))
	    {
	      *qin++ = e->src;
	      AVAIL_IN_WORKLIST_P (e->src) = true;
	      qlen++;

	      if (qin >= qend)
		qin = worklist;
	    }
    }

  free (worklist);

  if (dump_file)
    dump_tm_memopt_sets (blocks);
}

/* Offsets of load variants from TM_LOAD.  For example,
   BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
   See gtm-builtins.def.  */
#define TRANSFORM_RAR 1
#define TRANSFORM_RAW 2
#define TRANSFORM_RFW 3
/* Offsets of store variants from TM_STORE.  */
#define TRANSFORM_WAR 1
#define TRANSFORM_WAW 2

/* Inform about a load/store optimization.  */

static void
dump_tm_memopt_transform (gimple stmt)
{
  if (dump_file)
    {
      fprintf (dump_file, "TM memopt: transforming: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
      fprintf (dump_file, "\n");
    }
}

/* Perform a read/write optimization.  Replaces the TM builtin in STMT
   by a builtin that is OFFSET entries down in the builtins table in
   gtm-builtins.def.  */

static void
tm_memopt_transform_stmt (unsigned int offset,
			  gcall *stmt,
			  gimple_stmt_iterator *gsi)
{
  tree fn = gimple_call_fn (stmt);
  gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
  TREE_OPERAND (fn, 0)
    = builtin_decl_explicit ((enum built_in_function)
			     (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
			      + offset));
  gimple_call_set_fn (stmt, fn);
  gsi_replace (gsi, stmt, true);
  dump_tm_memopt_transform (stmt);
}

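/* For illustration only: the rewrite above just bumps the builtin's
   function code by the offset, so, assuming gtm-builtins.def lays the
   variants out at the offsets defined above,

       BUILT_IN_TM_LOAD_4  + TRANSFORM_RAW -> the read-after-write load
       BUILT_IN_TM_STORE_4 + TRANSFORM_WAW -> the write-after-write store

   which lets the runtime skip redundant logging or conflict checks for
   an address it has already written or read in this transaction.  */
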
/* Perform the actual TM memory optimization transformations in the
   basic blocks in BLOCKS.  */

static void
tm_memopt_transform_blocks (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;
  gimple_stmt_iterator gsi;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  bitmap read_avail = READ_AVAIL_IN (bb);
	  bitmap store_avail = STORE_AVAIL_IN (bb);
	  bitmap store_antic = STORE_ANTIC_OUT (bb);
	  unsigned int loc;

	  if (is_tm_simple_load (stmt))
	    {
	      gcall *call_stmt = as_a <gcall *> (stmt);
	      loc = tm_memopt_value_number (stmt, NO_INSERT);
	      if (store_avail && bitmap_bit_p (store_avail, loc))
		tm_memopt_transform_stmt (TRANSFORM_RAW, call_stmt, &gsi);
	      else if (store_antic && bitmap_bit_p (store_antic, loc))
		{
		  tm_memopt_transform_stmt (TRANSFORM_RFW, call_stmt, &gsi);
		  bitmap_set_bit (store_avail, loc);
		}
	      else if (read_avail && bitmap_bit_p (read_avail, loc))
		tm_memopt_transform_stmt (TRANSFORM_RAR, call_stmt, &gsi);
	      else
		bitmap_set_bit (read_avail, loc);
	    }
	  else if (is_tm_simple_store (stmt))
	    {
	      gcall *call_stmt = as_a <gcall *> (stmt);
	      loc = tm_memopt_value_number (stmt, NO_INSERT);
	      if (store_avail && bitmap_bit_p (store_avail, loc))
		tm_memopt_transform_stmt (TRANSFORM_WAW, call_stmt, &gsi);
	      else
		{
		  if (read_avail && bitmap_bit_p (read_avail, loc))
		    tm_memopt_transform_stmt (TRANSFORM_WAR, call_stmt, &gsi);
		  bitmap_set_bit (store_avail, loc);
		}
	    }
	}
    }
}

/* Return a new set of bitmaps for a BB.  */

static struct tm_memopt_bitmaps *
tm_memopt_init_sets (void)
{
  struct tm_memopt_bitmaps *b
    = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
  b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
  return b;
}

/* Free sets computed for each BB.  */

static void
tm_memopt_free_sets (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    bb->aux = NULL;
}

/* Clear the visited bit for every basic block in BLOCKS.  */

static void
tm_memopt_clear_visited (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    BB_VISITED_P (bb) = false;
}

/* Replace TM load/stores with hints for the runtime.  We handle
   things like read-after-write, write-after-read, read-after-read,
   read-for-write, etc.  */

static unsigned int
execute_tm_memopt (void)
{
  struct tm_region *region;
  vec<basic_block> bbs;

  tm_memopt_value_id = 0;
  tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10);

  for (region = all_tm_regions; region; region = region->next)
    {
      /* All the TM stores/loads in the current region.  */
      size_t i;
      basic_block bb;

      bitmap_obstack_initialize (&tm_memopt_obstack);

      /* Save all BBs for the current region.  */
      bbs = get_tm_region_blocks (region->entry_block,
				  region->exit_blocks,
				  region->irr_blocks,
				  NULL,
				  false);

      /* Collect all the memory operations.  */
      for (i = 0; bbs.iterate (i, &bb); ++i)
	{
	  bb->aux = tm_memopt_init_sets ();
	  tm_memopt_accumulate_memops (bb);
	}

      /* Solve data flow equations and transform each block accordingly.  */
      tm_memopt_clear_visited (bbs);
      tm_memopt_compute_available (region, bbs);
      tm_memopt_clear_visited (bbs);
      tm_memopt_compute_antic (region, bbs);
      tm_memopt_transform_blocks (bbs);

      tm_memopt_free_sets (bbs);
      bbs.release ();
      bitmap_obstack_release (&tm_memopt_obstack);
      tm_memopt_value_numbers->empty ();
    }

  delete tm_memopt_value_numbers;
  tm_memopt_value_numbers = NULL;
  return 0;
}

namespace {

const pass_data pass_data_tm_memopt =
{
  GIMPLE_PASS, /* type */
  "tmmemopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_tm_memopt : public gimple_opt_pass
{
public:
  pass_tm_memopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_memopt, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm && optimize > 0; }
  virtual unsigned int execute (function *) { return execute_tm_memopt (); }

}; // class pass_tm_memopt

} // anon namespace

gimple_opt_pass *
make_pass_tm_memopt (gcc::context *ctxt)
{
  return new pass_tm_memopt (ctxt);
}

/* Interprocedural analysis for the creation of transactional clones.
   The aim of this pass is to find which functions are referenced in
   a non-irrevocable transaction context, and for those over which
   we have control (or user directive), create a version of the
   function which uses only the transactional interface to reference
   protected memories.  This analysis proceeds in several steps:

     (1) Collect the set of all possible transactional clones:

	(a) For all local public functions marked tm_callable, push
	    it onto the tm_callee queue.

	(b) For all local functions, scan for calls in transaction blocks.
	    Push the caller and callee onto the tm_caller and tm_callee
	    queues.  Count the number of callers for each callee.

	(c) For each local function on the callee list, assume we will
	    create a transactional clone.  Push *all* calls onto the
	    callee queues; count the number of clone callers separately
	    to the number of original callers.

     (2) Propagate irrevocable status up the dominator tree:

	(a) Any external function on the callee list that is not marked
	    tm_callable is irrevocable.  Push all callers of such onto
	    the worklist.

	(b) For each function on the worklist, mark each block that
	    contains an irrevocable call.  Use the AND operator to
	    propagate that mark up the dominator tree.

	(c) If we reach the entry block for a possible transactional
	    clone, then the transactional clone is irrevocable, and
	    we should not create the clone after all.  Push all
	    callers onto the worklist.

	(d) Place tm_irrevocable calls at the beginning of the relevant
	    blocks.  Special case here is the entry block for the entire
	    transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
	    the library to begin the region in serial mode.  Decrement
	    the call count for all callees in the irrevocable region.

     (3) Create the transactional clones:

	Any tm_callee that still has a non-zero call count is cloned.  */

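/* For illustration only: for a translation unit like

       int counter;
       static void bump (void) { counter++; }
       extern void ext_fn (void);               // not marked tm_callable

       void f (void) { __transaction_atomic { bump (); } }
       void g (void) { __transaction_atomic { ext_fn (); } }

   step (1) pushes "bump" onto the callee queue with one caller; step (2)
   finds that "ext_fn" is external and not tm_callable, so the transaction
   in "g" is entered in serial/irrevocable mode; step (3) creates a
   transactional clone only for "bump", the one callee left with a
   non-zero call count.  */
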
/* This structure is stored in the AUX field of each cgraph_node.  */
struct tm_ipa_cg_data
{
  /* The clone of the function that got created.  */
  struct cgraph_node *clone;

  /* The tm regions in the normal function.  */
  struct tm_region *all_tm_regions;

  /* The blocks of the normal/clone functions that contain irrevocable
     calls, or blocks that are post-dominated by irrevocable calls.  */
  bitmap irrevocable_blocks_normal;
  bitmap irrevocable_blocks_clone;

  /* The blocks of the normal function that are involved in transactions.  */
  bitmap transaction_blocks_normal;

  /* The number of callers to the transactional clone of this function
     from normal and transactional clones respectively.  */
  unsigned tm_callers_normal;
  unsigned tm_callers_clone;

  /* True if all calls to this function's transactional clone
     are irrevocable.  Also automatically true if the function
     has no transactional clone.  */
  bool is_irrevocable;

  /* Flags indicating the presence of this function in various queues.  */
  bool in_callee_queue;
  bool in_worklist;

  /* Flags indicating the kind of scan desired while in the worklist.  */
  bool want_irr_scan_normal;
};

typedef vec<cgraph_node *> cgraph_node_queue;

/* Return the ipa data associated with NODE, allocating zeroed memory
   if necessary.  TRAVERSE_ALIASES is true if we must traverse aliases
   and set *NODE accordingly.  */

static struct tm_ipa_cg_data *
get_cg_data (struct cgraph_node **node, bool traverse_aliases)
{
  struct tm_ipa_cg_data *d;

  if (traverse_aliases && (*node)->alias)
    *node = (*node)->get_alias_target ();

  d = (struct tm_ipa_cg_data *) (*node)->aux;

  if (d == NULL)
    {
      d = (struct tm_ipa_cg_data *)
	obstack_alloc (&tm_obstack.obstack, sizeof (*d));
      (*node)->aux = (void *) d;
      memset (d, 0, sizeof (*d));
    }

  return d;
}

/* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
   it is already present.  */

static void
maybe_push_queue (struct cgraph_node *node,
		  cgraph_node_queue *queue_p, bool *in_queue_p)
{
  if (!*in_queue_p)
    {
      *in_queue_p = true;
      queue_p->safe_push (node);
    }
}

/* Duplicate the basic blocks in QUEUE for use in the uninstrumented
   code path.  QUEUE are the basic blocks inside the transaction
   represented in REGION.

   Later in split_code_paths() we will add the conditional to choose
   between the two alternatives.  */

static void
ipa_uninstrument_transaction (struct tm_region *region,
			      vec<basic_block> queue)
{
  gimple transaction = region->transaction_stmt;
  basic_block transaction_bb = gimple_bb (transaction);
  int n = queue.length ();
  basic_block *new_bbs = XNEWVEC (basic_block, n);

  copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb,
	    true);
  edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED);
  add_phi_args_after_copy (new_bbs, n, e);

  // Now we will have a GIMPLE_ATOMIC with 3 possible edges out of it.
  //   a) EDGE_FALLTHRU into the transaction
  //   b) EDGE_TM_ABORT out of the transaction
  //   c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks.

  free (new_bbs);
}

/* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
   Queue all callees within block BB.  */

static void
ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
			 basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl)
	    {
	      struct tm_ipa_cg_data *d;
	      unsigned *pcallers;
	      struct cgraph_node *node;

	      if (is_tm_ending_fndecl (fndecl))
		continue;
	      if (find_tm_replacement_function (fndecl))
		continue;

	      node = cgraph_node::get (fndecl);
	      gcc_assert (node != NULL);
	      d = get_cg_data (&node, true);

	      pcallers = (for_clone ? &d->tm_callers_clone
			  : &d->tm_callers_normal);
	      *pcallers += 1;

	      maybe_push_queue (node, callees_p, &d->in_callee_queue);
	    }
	}
    }
}

/* Scan all calls in NODE that are within a transaction region,
   and push the resulting nodes into the callee queue.  */

static void
ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
			       cgraph_node_queue *callees_p)
{
  struct tm_region *r;

  d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
  d->all_tm_regions = all_tm_regions;

  for (r = all_tm_regions; r; r = r->next)
    {
      vec<basic_block> bbs;
      basic_block bb;
      unsigned i;

      bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
				  d->transaction_blocks_normal, false);

      // Generate the uninstrumented code path for this transaction.
      ipa_uninstrument_transaction (r, bbs);

      FOR_EACH_VEC_ELT (bbs, i, bb)
	ipa_tm_scan_calls_block (callees_p, bb, false);

      bbs.release ();
    }

  // ??? copy_bbs should maintain cgraph edges for the blocks as it is
  // copying them, rather than forcing us to do this externally.
  cgraph_edge::rebuild_edges ();

  // ??? In ipa_uninstrument_transaction we don't try to update dominators
  // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects.
  // Instead, just release dominators here so update_ssa recomputes them.
  free_dominance_info (CDI_DOMINATORS);

  // When building the uninstrumented code path, copy_bbs will have invoked
  // create_new_def_for starting an "ssa update context".  There is only one
  // instance of this context, so resolve ssa updates before moving on to
  // the next function.
  update_ssa (TODO_update_ssa);
}

/* Scan all calls in NODE as if this is the transactional clone,
   and push the destinations into the callee queue.  */

static void
ipa_tm_scan_calls_clone (struct cgraph_node *node,
			 cgraph_node_queue *callees_p)
{
  struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
  basic_block bb;

  FOR_EACH_BB_FN (bb, fn)
    ipa_tm_scan_calls_block (callees_p, bb, true);
}

/* The function NODE has been detected to be irrevocable.  Push all
   of its callers onto WORKLIST for the purpose of re-scanning them.  */

static void
ipa_tm_note_irrevocable (struct cgraph_node *node,
			 cgraph_node_queue *worklist_p)
{
  struct tm_ipa_cg_data *d = get_cg_data (&node, true);
  struct cgraph_edge *e;

  d->is_irrevocable = true;

  for (e = node->callers; e; e = e->next_caller)
    {
      basic_block bb;
      struct cgraph_node *caller;

      /* Don't examine recursive calls.  */
      if (e->caller == node)
	continue;
      /* Even if we think we can go irrevocable, believe the user
	 above all.  */
      if (is_tm_safe_or_pure (e->caller->decl))
	continue;

      caller = e->caller;
      d = get_cg_data (&caller, true);

      /* Check if the callee is in a transactional region.  If so,
	 schedule the function for normal re-scan as well.  */
      bb = gimple_bb (e->call_stmt);
      gcc_assert (bb != NULL);
      if (d->transaction_blocks_normal
	  && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
	d->want_irr_scan_normal = true;

      maybe_push_queue (caller, worklist_p, &d->in_worklist);
    }
}

/* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
   within the block is irrevocable.  */

static bool
ipa_tm_scan_irr_block (basic_block bb)
{
  gimple_stmt_iterator gsi;
  tree fn;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
        {
        case GIMPLE_ASSIGN:
          if (gimple_assign_single_p (stmt))
            {
              tree lhs = gimple_assign_lhs (stmt);
              tree rhs = gimple_assign_rhs1 (stmt);
              if (volatile_var_p (lhs) || volatile_var_p (rhs))
                return true;
            }
          break;

        case GIMPLE_CALL:
          {
            tree lhs = gimple_call_lhs (stmt);
            if (lhs && volatile_var_p (lhs))
              return true;

            if (is_tm_pure_call (stmt))
              break;

            fn = gimple_call_fn (stmt);

            /* Functions with the attribute are by definition irrevocable.  */
            if (is_tm_irrevocable (fn))
              return true;

            /* For direct function calls, go ahead and check for replacement
               functions, or transitive irrevocable functions.  For indirect
               functions, we'll ask the runtime.  */
            if (TREE_CODE (fn) == ADDR_EXPR)
              {
                struct tm_ipa_cg_data *d;
                struct cgraph_node *node;

                fn = TREE_OPERAND (fn, 0);
                if (is_tm_ending_fndecl (fn))
                  break;
                if (find_tm_replacement_function (fn))
                  break;

                node = cgraph_node::get (fn);
                d = get_cg_data (&node, true);

                /* Return true if irrevocable, but above all, believe
                   the user.  */
                if (d->is_irrevocable
                    && !is_tm_safe_or_pure (fn))
                  return true;
              }
            break;
          }

        case GIMPLE_ASM:
          /* ??? The Approved Method of indicating that an inline
             assembly statement is not relevant to the transaction
             is to wrap it in a __tm_waiver block.  This is not
             yet implemented, so we can't check for it.  */
          if (is_tm_safe (current_function_decl))
            {
              tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
              SET_EXPR_LOCATION (t, gimple_location (stmt));
              error ("%Kasm not allowed in %<transaction_safe%> function", t);
            }
          return true;

        default:
          break;
        }
    }

  return false;
}
/* For each of the blocks seeded within PQUEUE, walk the CFG looking
   for new irrevocable blocks, marking them in NEW_IRR.  Don't bother
   scanning past OLD_IRR or EXIT_BLOCKS.  */

static bool
ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
                        bitmap old_irr, bitmap exit_blocks)
{
  bool any_new_irr = false;
  edge e;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  do
    {
      basic_block bb = pqueue->pop ();

      /* Don't re-scan blocks we know already are irrevocable.  */
      if (old_irr && bitmap_bit_p (old_irr, bb->index))
        continue;

      if (ipa_tm_scan_irr_block (bb))
        {
          bitmap_set_bit (new_irr, bb->index);
          any_new_irr = true;
        }
      else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
            if (!bitmap_bit_p (visited_blocks, e->dest->index))
              {
                bitmap_set_bit (visited_blocks, e->dest->index);
                pqueue->safe_push (e->dest);
              }
        }
    }
  while (!pqueue->is_empty ());

  BITMAP_FREE (visited_blocks);

  return any_new_irr;
}
/* Propagate the irrevocable property both up and down the dominator tree.
   BB is the current block being scanned; EXIT_BLOCKS are the edges of the
   TM regions; OLD_IRR are the results of a previous scan of the dominator
   tree which has been fully propagated; NEW_IRR is the set of new blocks
   which are gaining the irrevocable property during the current scan.  */

static void
ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
                      bitmap old_irr, bitmap exit_blocks)
{
  vec<basic_block> bbs;
  bitmap all_region_blocks;

  /* If this block is in the old set, no need to rescan.  */
  if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
    return;

  all_region_blocks = BITMAP_ALLOC (&tm_obstack);
  bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
                              all_region_blocks, false);
  do
    {
      basic_block bb = bbs.pop ();
      bool this_irr = bitmap_bit_p (new_irr, bb->index);
      bool all_son_irr = false;
      edge_iterator ei;
      edge e;

      /* Propagate up.  If my children are, I am too, but we must have
         at least one child that is.  */
      if (!this_irr)
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if (!bitmap_bit_p (new_irr, e->dest->index))
                {
                  all_son_irr = false;
                  break;
                }
              else
                all_son_irr = true;
            }
          if (all_son_irr)
            {
              /* Add block to new_irr if it hasn't already been processed. */
              if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
                {
                  bitmap_set_bit (new_irr, bb->index);
                  this_irr = true;
                }
            }
        }

      /* Propagate down to everyone we immediately dominate.  */
      if (this_irr)
        {
          basic_block son;
          for (son = first_dom_son (CDI_DOMINATORS, bb);
               son;
               son = next_dom_son (CDI_DOMINATORS, son))
            {
              /* Make sure block is actually in a TM region, and it
                 isn't already in old_irr.  */
              if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
                  && bitmap_bit_p (all_region_blocks, son->index))
                bitmap_set_bit (new_irr, son->index);
            }
        }
    }
  while (!bbs.is_empty ());

  BITMAP_FREE (all_region_blocks);
  bbs.release ();
}
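
/* Illustrative example (not from the sources): given a diamond inside a
   transaction,

       BB1 -> BB2, BB1 -> BB3, {BB2, BB3} -> BB4

   if BB2 and BB3 both end up in NEW_IRR, the upward propagation above also
   marks BB1 (all of its successors are irrevocable), and the downward
   propagation then marks the region blocks BB1 immediately dominates,
   e.g. BB4, unless they are already in OLD_IRR.  */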
static void
ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl)
            {
              struct tm_ipa_cg_data *d;
              unsigned *pcallers;
              struct cgraph_node *tnode;

              if (is_tm_ending_fndecl (fndecl))
                continue;
              if (find_tm_replacement_function (fndecl))
                continue;

              tnode = cgraph_node::get (fndecl);
              d = get_cg_data (&tnode, true);

              pcallers = (for_clone ? &d->tm_callers_clone
                          : &d->tm_callers_normal);

              gcc_assert (*pcallers > 0);
              *pcallers -= 1;
            }
        }
    }
}
/* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
   as well as other irrevocable actions such as inline assembly.  Mark all
   such blocks as irrevocable and decrement the number of calls to
   transactional clones.  Return true if, for the transactional clone, the
   entire function is irrevocable.  */

static bool
ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
{
  struct tm_ipa_cg_data *d;
  bitmap new_irr, old_irr;
  bool ret = false;

  /* Builtin operators (operator new, and such).  */
  if (DECL_STRUCT_FUNCTION (node->decl) == NULL
      || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
    return false;

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  d = get_cg_data (&node, true);
  auto_vec<basic_block, 10> queue;
  new_irr = BITMAP_ALLOC (&tm_obstack);

  /* Scan each tm region, propagating irrevocable status through the tree.  */
  if (for_clone)
    {
      old_irr = d->irrevocable_blocks_clone;
      queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
        {
          ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
                                new_irr, old_irr, NULL);
          ret = bitmap_bit_p (new_irr,
                              single_succ
                                (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
        }
    }
  else
    {
      struct tm_region *region;

      old_irr = d->irrevocable_blocks_normal;
      for (region = d->all_tm_regions; region; region = region->next)
        {
          queue.quick_push (region->entry_block);
          if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
                                      region->exit_blocks))
            ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
                                  region->exit_blocks);
        }
    }

  /* If we found any new irrevocable blocks, reduce the call count for
     transactional clones within the irrevocable blocks.  Save the new
     set of irrevocable blocks for next time.  */
  if (!bitmap_empty_p (new_irr))
    {
      bitmap_iterator bmi;
      unsigned i;

      EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
        ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
                                       for_clone);

      if (old_irr)
        {
          bitmap_ior_into (old_irr, new_irr);
          BITMAP_FREE (new_irr);
        }
      else if (for_clone)
        d->irrevocable_blocks_clone = new_irr;
      else
        d->irrevocable_blocks_normal = new_irr;

      if (dump_file && new_irr)
        {
          const char *dname;
          bitmap_iterator bmi;
          unsigned i;

          dname = lang_hooks.decl_printable_name (current_function_decl, 2);
          EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
            fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
        }
    }
  else
    BITMAP_FREE (new_irr);

  pop_cfun ();

  return ret;
}
/* Return true if, for the transactional clone of NODE, any call
   may enter irrevocable mode.  */

static bool
ipa_tm_mayenterirr_function (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  tree decl;
  unsigned flags;

  d = get_cg_data (&node, true);
  decl = node->decl;
  flags = flags_from_decl_or_type (decl);

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags & ECF_TM_BUILTIN)
    return false;

  /* Filter out all functions that are marked.  */
  if (flags & ECF_TM_PURE)
    return false;
  if (is_tm_safe (decl))
    return false;
  if (is_tm_irrevocable (decl))
    return true;
  if (is_tm_callable (decl))
    return true;
  if (find_tm_replacement_function (decl))
    return true;

  /* If we aren't seeing the final version of the function we don't
     know what it will contain at runtime.  */
  if (node->get_availability () < AVAIL_AVAILABLE)
    return true;

  /* If the function must go irrevocable, then of course true.  */
  if (d->is_irrevocable)
    return true;

  /* If there are any blocks marked irrevocable, then the function
     as a whole may enter irrevocable.  */
  if (d->irrevocable_blocks_clone)
    return true;

  /* We may have previously marked this function as tm_may_enter_irr;
     see pass_diagnose_tm_blocks.  */
  if (node->local.tm_may_enter_irr)
    return true;

  /* Recurse on the main body for aliases.  In general, this will
     result in one of the bits above being set so that we will not
     have to recurse next time.  */
  if (node->alias)
    return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));

  /* What remains is unmarked local functions without items that force
     the function to go irrevocable.  */
  return false;
}
/* Diagnose calls from transaction_safe functions to unmarked
   functions that are determined to not be safe.  */

static void
ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    if (!is_tm_callable (e->callee->decl)
        && e->callee->local.tm_may_enter_irr)
      error_at (gimple_location (e->call_stmt),
                "unsafe function call %qD within "
                "%<transaction_safe%> function", e->callee->decl);
}
/* Diagnose calls from atomic transactions to unmarked functions
   that are determined to not be safe.  */

static void
ipa_tm_diagnose_transaction (struct cgraph_node *node,
                             struct tm_region *all_tm_regions)
{
  struct tm_region *r;

  for (r = all_tm_regions; r; r = r->next)
    if (gimple_transaction_subcode (r->get_transaction_stmt ())
        & GTMA_IS_RELAXED)
      {
        /* Atomic transactions can be nested inside relaxed.  */
        if (r->inner)
          ipa_tm_diagnose_transaction (node, r->inner);
      }
    else
      {
        vec<basic_block> bbs;
        gimple_stmt_iterator gsi;
        basic_block bb;
        size_t i;

        bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
                                    r->irr_blocks, NULL, false);

        for (i = 0; bbs.iterate (i, &bb); ++i)
          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              gimple stmt = gsi_stmt (gsi);
              tree fndecl;

              if (gimple_code (stmt) == GIMPLE_ASM)
                {
                  error_at (gimple_location (stmt),
                            "asm not allowed in atomic transaction");
                  continue;
                }

              if (!is_gimple_call (stmt))
                continue;
              fndecl = gimple_call_fndecl (stmt);

              /* Indirect function calls have been diagnosed already.  */
              if (!fndecl)
                continue;

              /* Stop at the end of the transaction.  */
              if (is_tm_ending_fndecl (fndecl))
                {
                  if (bitmap_bit_p (r->exit_blocks, bb->index))
                    break;
                  continue;
                }

              /* Marked functions have been diagnosed already.  */
              if (is_tm_pure_call (stmt))
                continue;
              if (is_tm_callable (fndecl))
                continue;

              if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
                error_at (gimple_location (stmt),
                          "unsafe function call %qD within "
                          "atomic transaction", fndecl);
            }

        bbs.release ();
      }
}
/* Return a transactional mangled name based on the assembler name in
   OLD_ASM_ID.  The returned value is an interned identifier; any
   temporary buffers are freed before returning.  */

static tree
tm_mangle (tree old_asm_id)
{
  const char *old_asm_name;
  char *tm_name;
  void *alloc = NULL;
  struct demangle_component *dc;
  tree new_asm_id;

  /* Determine if the symbol is already a valid C++ mangled name.  Do this
     even for C, which might be interfacing with C++ code via appropriately
     ugly identifiers.  */
  /* ??? We could probably do just as well checking for "_Z" and be done.  */
  old_asm_name = IDENTIFIER_POINTER (old_asm_id);
  dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);

  if (dc == NULL)
    {
      char length[8];

    do_unencoded:
      sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
      tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
    }
  else
    {
      old_asm_name += 2;        /* Skip _Z */

      switch (dc->type)
        {
        case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
        case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
          /* Don't play silly games, you!  */
          goto do_unencoded;

        case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
          /* I'd really like to know if we can ever be passed one of
             these from the C++ front end.  The Logical Thing would
             seem that hidden-alias should be outer-most, so that we
             get hidden-alias of a transaction-clone and not vice-versa.  */
          old_asm_name += 2;    /* Skip _Q */
          break;

        default:
          break;
        }

      tm_name = concat ("_ZGTt", old_asm_name, NULL);
    }
  free (alloc);

  new_asm_id = get_identifier (tm_name);
  free (tm_name);

  return new_asm_id;
}
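
/* For example (derived from the code above): a plain C identifier "foo"
   becomes "_ZGTt3foo" (length-prefixed as if it were a <source-name>),
   while an already-mangled C++ name such as "_Z3foov" becomes
   "_ZGTt3foov".  "GTt" is the special-name prefix used for transaction
   clones; cf. DEMANGLE_COMPONENT_TRANSACTION_CLONE handled above.  */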
static void
ipa_tm_mark_force_output_node (struct cgraph_node *node)
{
  node->mark_force_output ();
  node->analyzed = true;
}

static void
ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
{
  node->forced_by_abi = true;
  node->analyzed = true;
}
/* Callback data for ipa_tm_create_version_alias.  */
struct create_version_alias_info
{
  struct cgraph_node *old_node;
  tree new_decl;
};
/* A subroutine of ipa_tm_create_version, called via
   cgraph_for_node_and_aliases.  Create new tm clones for each of
   the existing aliases.  */
static bool
ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
{
  struct create_version_alias_info *info
    = (struct create_version_alias_info *)data;
  tree old_decl, new_decl, tm_name;
  struct cgraph_node *new_node;

  if (!node->cpp_implicit_alias)
    return false;

  old_decl = node->decl;
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
                         TREE_CODE (old_decl), tm_name,
                         TREE_TYPE (old_decl));

  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);

  /* Based loosely on C++'s make_alias_for().  */
  TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
  DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
  DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
  TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
  DECL_EXTERNAL (new_decl) = 0;
  DECL_ARTIFICIAL (new_decl) = 1;
  TREE_ADDRESSABLE (new_decl) = 1;
  TREE_USED (new_decl) = 1;
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    varpool_node::get (new_decl)->set_comdat_group
      (tm_mangle (decl_comdat_group_id (old_decl)));

  new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
  new_node->tm_clone = true;
  new_node->externally_visible = info->old_node->externally_visible;
  new_node->no_reorder = info->old_node->no_reorder;
  /* ?? Do not traverse aliases here.  */
  get_cg_data (&node, false)->clone = new_node;

  record_tm_clone_pair (old_decl, new_decl);

  if (info->old_node->force_output
      || info->old_node->ref_list.first_referring ())
    ipa_tm_mark_force_output_node (new_node);
  if (info->old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);
  return false;
}
/* Create a copy of the function (possibly declaration only) of OLD_NODE,
   appropriate for the transactional clone.  */

static void
ipa_tm_create_version (struct cgraph_node *old_node)
{
  tree new_decl, old_decl, tm_name;
  struct cgraph_node *new_node;

  old_decl = old_node->decl;
  new_decl = copy_node (old_decl);

  /* DECL_ASSEMBLER_NAME needs to be set before we call
     cgraph_copy_node_for_versioning below, because cgraph_node will
     fill the assembler_name_hash.  */
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    varpool_node::get (new_decl)->set_comdat_group
      (tm_mangle (DECL_COMDAT_GROUP (old_decl)));

  gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
  new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
  new_node->local.local = false;
  new_node->externally_visible = old_node->externally_visible;
  new_node->lowered = true;
  new_node->tm_clone = 1;
  get_cg_data (&old_node, true)->clone = new_node;

  if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
    {
      /* Remap extern inline to static inline.  */
      /* ??? Is it worth trying to use make_decl_one_only?  */
      if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
        {
          DECL_EXTERNAL (new_decl) = 0;
          TREE_PUBLIC (new_decl) = 0;
          DECL_WEAK (new_decl) = 0;
        }

      tree_function_versioning (old_decl, new_decl,
                                NULL, false, NULL,
                                false, NULL, NULL);
    }

  record_tm_clone_pair (old_decl, new_decl);

  symtab->call_cgraph_insertion_hooks (new_node);
  if (old_node->force_output
      || old_node->ref_list.first_referring ())
    ipa_tm_mark_force_output_node (new_node);
  if (old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);

  /* Do the same thing, but for any aliases of the original node.  */
  {
    struct create_version_alias_info data;
    data.old_node = old_node;
    data.new_decl = new_decl;
    old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
                                                  &data, true);
  }
}
/* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB.  */

static void
ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
                        basic_block bb)
{
  gimple_stmt_iterator gsi;
  gcall *g;

  transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
                         1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));

  split_block_after_labels (bb);
  gsi = gsi_after_labels (bb);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);

  node->create_edge (cgraph_node::get_create
                       (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
                     g, 0,
                     compute_call_stmt_bb_frequency (node->decl,
                                                     gimple_bb (g)));
}
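
/* The net effect is that the irrevocable block now begins with a call of
   the form (illustrative GIMPLE, assuming the usual libitm binding of
   BUILT_IN_TM_IRREVOCABLE):

     _ITM_changeTransactionMode (0);   // MODE_SERIALIRREVOCABLE

   so the runtime switches to serial-irrevocable mode before any
   uninstrumented code in the block runs.  */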
/* Construct a call to TM_GETTMCLONE and insert it before GSI.  */

static bool
ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
                               struct tm_region *region,
                               gimple_stmt_iterator *gsi, gcall *stmt)
{
  tree gettm_fn, ret, old_fn, callfn;
  gcall *g;
  gassign *g2;
  bool safe;

  old_fn = gimple_call_fn (stmt);

  if (TREE_CODE (old_fn) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (old_fn, 0);
      tree clone = get_tm_clone_pair (fndecl);

      /* By transforming the call into a TM_GETTMCLONE, we are
         technically taking the address of the original function and
         its clone.  Explain this so inlining will know this function
         is needed.  */
      cgraph_node::get (fndecl)->mark_address_taken ();
      if (clone)
        cgraph_node::get (clone)->mark_address_taken ();
    }

  safe = is_tm_safe (TREE_TYPE (old_fn));
  gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
                                    : BUILT_IN_TM_GETTMCLONE_IRR);
  ret = create_tmp_var (ptr_type_node);

  if (!safe)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  /* Discard OBJ_TYPE_REF, since we weren't able to fold it.  */
  if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
    old_fn = OBJ_TYPE_REF_EXPR (old_fn);

  g = gimple_build_call (gettm_fn, 1, old_fn);
  ret = make_ssa_name (ret, g);
  gimple_call_set_lhs (g, ret);

  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  node->create_edge (cgraph_node::get_create (gettm_fn), g, 0,
                     compute_call_stmt_bb_frequency (node->decl,
                                                     gimple_bb (g)));

  /* Cast return value from tm_gettmclone* into appropriate function
     pointer.  */
  callfn = create_tmp_var (TREE_TYPE (old_fn));
  g2 = gimple_build_assign (callfn,
                            fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
  callfn = make_ssa_name (callfn, g2);
  gimple_assign_set_lhs (g2, callfn);
  gsi_insert_before (gsi, g2, GSI_SAME_STMT);

  /* ??? This is a hack to preserve the NOTHROW bit on the call,
     which we would have derived from the decl.  Failure to save
     this bit means we might have to split the basic block.  */
  if (gimple_call_nothrow_p (stmt))
    gimple_call_set_nothrow (stmt, true);

  gimple_call_set_fn (stmt, callfn);

  /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
     for a call statement.  Fix it.  */
  {
    tree lhs = gimple_call_lhs (stmt);
    tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
    if (lhs
        && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
      {
        tree temp;

        temp = create_tmp_reg (rettype);
        gimple_call_set_lhs (stmt, temp);

        g2 = gimple_build_assign (lhs,
                                  fold_build1 (VIEW_CONVERT_EXPR,
                                               TREE_TYPE (lhs), temp));
        gsi_insert_after (gsi, g2, GSI_SAME_STMT);
      }
  }

  update_stmt (stmt);
  cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
  if (e && e->indirect_info)
    e->indirect_info->polymorphic = false;

  return true;
}
/* Helper function for ipa_tm_transform_calls*.  Given a call
   statement in GSI which resides inside transaction REGION, redirect
   the call to either its wrapper function, or its clone.  */

static void
ipa_tm_transform_calls_redirect (struct cgraph_node *node,
                                 struct tm_region *region,
                                 gimple_stmt_iterator *gsi,
                                 bool *need_ssa_rename_p)
{
  gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
  struct cgraph_node *new_node;
  struct cgraph_edge *e = node->get_edge (stmt);
  tree fndecl = gimple_call_fndecl (stmt);

  /* For indirect calls, pass the address through the runtime.  */
  if (fndecl == NULL)
    {
      *need_ssa_rename_p |=
        ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
      return;
    }

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
    return;

  /* Fixup recursive calls inside clones.  */
  /* ??? Why did cgraph_copy_node_for_versioning update the call edges
     for recursion but not update the call statements themselves?  */
  if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
    {
      gimple_call_set_fndecl (stmt, current_function_decl);
      return;
    }

  /* If there is a replacement, use it.  */
  fndecl = find_tm_replacement_function (fndecl);
  if (fndecl)
    {
      new_node = cgraph_node::get_create (fndecl);

      /* ??? Mark all transaction_wrap functions tm_may_enter_irr.

         We can't do this earlier in record_tm_replacement because
         cgraph_remove_unreachable_nodes is called before we inject
         references to the node.  Further, we can't do this in some
         nice central place in ipa_tm_execute because we don't have
         the exact list of wrapper functions that would be used.
         Marking more wrappers than necessary results in the creation
         of unnecessary cgraph_nodes, which can cause some of the
         other IPA passes to crash.

         We do need to mark these nodes so that we get the proper
         result in expand_call_tm.  */
      /* ??? This seems broken.  How is it that we're marking the
         CALLEE as may_enter_irr?  Surely we should be marking the
         CALLER.  Also note that find_tm_replacement_function also
         contains mappings into the TM runtime, e.g. memcpy.  These
         we know won't go irrevocable.  */
      new_node->local.tm_may_enter_irr = 1;
    }
  else
    {
      struct tm_ipa_cg_data *d;
      struct cgraph_node *tnode = e->callee;

      d = get_cg_data (&tnode, true);
      new_node = d->clone;

      /* As we've already skipped pure calls and appropriate builtins,
         and we've already marked irrevocable blocks, if we can't come
         up with a static replacement, then ask the runtime.  */
      if (new_node == NULL)
        {
          *need_ssa_rename_p |=
            ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
          return;
        }

      fndecl = new_node->decl;
    }

  e->redirect_callee (new_node);
  gimple_call_set_fndecl (stmt, fndecl);
}
/* Helper function for ipa_tm_transform_calls.  For a given BB,
   install calls to tm_irrevocable when IRR_BLOCKS are reached,
   redirect other calls to the generated transactional clone.  */

static bool
ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
                          basic_block bb, bitmap irr_blocks)
{
  gimple_stmt_iterator gsi;
  bool need_ssa_rename = false;

  if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
    {
      ipa_tm_insert_irr_call (node, region, bb);
      return true;
    }

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (!is_gimple_call (stmt))
        continue;
      if (is_tm_pure_call (stmt))
        continue;

      /* Redirect edges to the appropriate replacement or clone.  */
      ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
    }

  return need_ssa_rename;
}
/* Walk the CFG for REGION, beginning at BB.  Install calls to
   tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
   the generated transactional clone.  */

static bool
ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
                        basic_block bb, bitmap irr_blocks)
{
  bool need_ssa_rename = false;
  edge e;
  edge_iterator ei;
  auto_vec<basic_block> queue;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  queue.safe_push (bb);
  do
    {
      bb = queue.pop ();

      need_ssa_rename |=
        ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);

      if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
        continue;

      if (region && bitmap_bit_p (region->exit_blocks, bb->index))
        continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!bitmap_bit_p (visited_blocks, e->dest->index))
          {
            bitmap_set_bit (visited_blocks, e->dest->index);
            queue.safe_push (e->dest);
          }
    }
  while (!queue.is_empty ());

  BITMAP_FREE (visited_blocks);

  return need_ssa_rename;
}
/* Transform the calls within the TM regions within NODE.  */

static void
ipa_tm_transform_transaction (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  struct tm_region *region;
  bool need_ssa_rename = false;

  d = get_cg_data (&node, true);

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  for (region = d->all_tm_regions; region; region = region->next)
    {
      /* If we're sure to go irrevocable, don't transform anything.  */
      if (d->irrevocable_blocks_normal
          && bitmap_bit_p (d->irrevocable_blocks_normal,
                           region->entry_block->index))
        {
          transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
                                           | GTMA_MAY_ENTER_IRREVOCABLE
                                           | GTMA_HAS_NO_INSTRUMENTATION);
          continue;
        }

      need_ssa_rename |=
        ipa_tm_transform_calls (node, region, region->entry_block,
                                d->irrevocable_blocks_normal);
    }

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}
/* Transform the calls within the transactional clone of NODE.  */

static void
ipa_tm_transform_clone (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  bool need_ssa_rename;

  d = get_cg_data (&node, true);

  /* If this function makes no calls and has no irrevocable blocks,
     then there's nothing to do.  */
  /* ??? Remove non-aborting top-level transactions.  */
  if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
    return;

  push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  need_ssa_rename =
    ipa_tm_transform_calls (d->clone, NULL,
                            single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
                            d->irrevocable_blocks_clone);

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}
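
/* A rough summary of the steps performed by ipa_tm_execute below:

   1) Queue all tm_callable functions and scan every transaction region,
      building the uninstrumented code paths (ipa_tm_scan_calls_transaction).
   2) Scan, and re-scan via IRR_WORKLIST, for irrevocable blocks and
      propagate the property (ipa_tm_scan_irr_function and helpers).
   3) Compute and propagate the tm_may_enter_irr bit across the callgraph.
   4) Diagnose unsafe calls in transaction_safe functions and atomic
      transactions.
   5) Create the transactional clones (ipa_tm_create_version) and redirect
      calls within transactions and clones to them
      (ipa_tm_transform_transaction / ipa_tm_transform_clone).  */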
/* Main entry point for the transactional memory IPA pass.  */

static unsigned int
ipa_tm_execute (void)
{
  cgraph_node_queue tm_callees = cgraph_node_queue ();
  /* List of functions that will go irrevocable.  */
  cgraph_node_queue irr_worklist = cgraph_node_queue ();

  struct cgraph_node *node;
  struct tm_ipa_cg_data *d;
  enum availability a;
  unsigned int i;

#ifdef ENABLE_CHECKING
  cgraph_node::verify_cgraph_nodes ();
#endif

  bitmap_obstack_initialize (&tm_obstack);
  initialize_original_copy_tables ();

  /* For all local functions marked tm_callable, queue them.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (is_tm_callable (node->decl)
        && node->get_availability () >= AVAIL_INTERPOSABLE)
      {
        d = get_cg_data (&node, true);
        maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
      }

  /* For all local reachable functions...  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
        && node->get_availability () >= AVAIL_INTERPOSABLE)
      {
        /* ... marked tm_pure, record that fact for the runtime by
           indicating that the pure function is its own tm_callable.
           No need to do this if the function's address can't be taken.  */
        if (is_tm_pure (node->decl))
          {
            if (!node->local.local)
              record_tm_clone_pair (node->decl, node->decl);
            continue;
          }

        push_cfun (DECL_STRUCT_FUNCTION (node->decl));
        calculate_dominance_info (CDI_DOMINATORS);

        tm_region_init (NULL);
        if (all_tm_regions)
          {
            d = get_cg_data (&node, true);

            /* Scan for calls that are in each transaction, and
               generate the uninstrumented code path.  */
            ipa_tm_scan_calls_transaction (d, &tm_callees);

            /* Put it in the worklist so we can scan the function
               later (ipa_tm_scan_irr_function) and mark the
               irrevocable blocks.  */
            maybe_push_queue (node, &irr_worklist, &d->in_worklist);
            d->want_irr_scan_normal = true;
          }

        pop_cfun ();
      }

  /* For every local function on the callee list, scan as if we will be
     creating a transactional clone, queueing all new functions we find
     along the way.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      a = node->get_availability ();
      d = get_cg_data (&node, true);

      /* Put it in the worklist so we can scan the function later
         (ipa_tm_scan_irr_function) and mark the irrevocable
         blocks.  */
      maybe_push_queue (node, &irr_worklist, &d->in_worklist);

      /* Some callees cannot be arbitrarily cloned.  These will always be
         irrevocable.  Mark these now, so that we need not scan them.  */
      if (is_tm_irrevocable (node->decl))
        ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a <= AVAIL_NOT_AVAILABLE
               && !is_tm_safe_or_pure (node->decl))
        ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a >= AVAIL_INTERPOSABLE)
        {
          if (!tree_versionable_function_p (node->decl))
            ipa_tm_note_irrevocable (node, &irr_worklist);
          else if (!d->is_irrevocable)
            {
              /* If this is an alias, make sure its base is queued as well.
                 we need not scan the callees now, as the base will do.  */
              if (node->alias)
                {
                  node = cgraph_node::get (node->thunk.alias);
                  d = get_cg_data (&node, true);
                  maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
                  continue;
                }

              /* Add all nodes called by this function into
                 tm_callees as well.  */
              ipa_tm_scan_calls_clone (node, &tm_callees);
            }
        }
    }
  /* Iterate scans until no more work to be done.  Prefer not to use
     vec::pop because the worklist tends to follow a breadth-first
     search of the callgraph, which should allow convergence with a
     minimum number of scans.  But we also don't want the worklist
     array to grow without bound, so we shift the array up periodically.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      if (i > 256 && i == irr_worklist.length () / 8)
        {
          irr_worklist.block_remove (0, i);
          i = 0;
        }

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;

      if (d->want_irr_scan_normal)
        {
          d->want_irr_scan_normal = false;
          ipa_tm_scan_irr_function (node, false);
        }
      if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
        ipa_tm_note_irrevocable (node, &irr_worklist);
    }

  /* For every function on the callee list, collect the tm_may_enter_irr
     bit on the node.  */
  irr_worklist.truncate (0);
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (ipa_tm_mayenterirr_function (node))
        {
          d = get_cg_data (&node, true);
          gcc_assert (d->in_worklist == false);
          maybe_push_queue (node, &irr_worklist, &d->in_worklist);
        }
    }
  /* Propagate the tm_may_enter_irr bit to callers until stable.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      struct cgraph_node *caller;
      struct cgraph_edge *e;
      struct ipa_ref *ref;

      if (i > 256 && i == irr_worklist.length () / 8)
        {
          irr_worklist.block_remove (0, i);
          i = 0;
        }

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;
      node->local.tm_may_enter_irr = true;

      /* Propagate back to normal callers.  */
      for (e = node->callers; e; e = e->next_caller)
        {
          caller = e->caller;
          if (!is_tm_safe_or_pure (caller->decl)
              && !caller->local.tm_may_enter_irr)
            {
              d = get_cg_data (&caller, true);
              maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
            }
        }

      /* Propagate back to referring aliases as well.  */
      FOR_EACH_ALIAS (node, ref)
        {
          caller = dyn_cast<cgraph_node *> (ref->referring);
          if (!caller->local.tm_may_enter_irr)
            {
              /* ?? Do not traverse aliases here.  */
              d = get_cg_data (&caller, false);
              maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
            }
        }
    }
  /* Now validate all tm_safe functions, and all atomic regions in
     other functions.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
        && node->get_availability () >= AVAIL_INTERPOSABLE)
      {
        d = get_cg_data (&node, true);
        if (is_tm_safe (node->decl))
          ipa_tm_diagnose_tm_safe (node);
        else if (d->all_tm_regions)
          ipa_tm_diagnose_transaction (node, d->all_tm_regions);
      }

  /* Create clones.  Do those that are not irrevocable and have a
     positive call count.  Do those publicly visible functions that
     the user directed us to clone.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      bool doit = false;

      node = tm_callees[i];
      if (node->cpp_implicit_alias)
        continue;

      a = node->get_availability ();
      d = get_cg_data (&node, true);

      if (a <= AVAIL_NOT_AVAILABLE)
        doit = is_tm_callable (node->decl);
      else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
        doit = true;
      else if (!d->is_irrevocable
               && d->tm_callers_normal + d->tm_callers_clone > 0)
        doit = true;

      if (doit)
        ipa_tm_create_version (node);
    }

  /* Redirect calls to the new clones, and insert irrevocable marks.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (node->analyzed)
        {
          d = get_cg_data (&node, true);
          if (d->clone)
            ipa_tm_transform_clone (node);
        }
    }
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
        && node->get_availability () >= AVAIL_INTERPOSABLE)
      {
        d = get_cg_data (&node, true);
        if (d->all_tm_regions)
          ipa_tm_transform_transaction (node);
      }

  /* Free and clear all data structures.  */
  tm_callees.release ();
  irr_worklist.release ();
  bitmap_obstack_release (&tm_obstack);
  free_original_copy_tables ();

  FOR_EACH_FUNCTION (node)
    node->aux = NULL;

#ifdef ENABLE_CHECKING
  cgraph_node::verify_cgraph_nodes ();
#endif

  return 0;
}
namespace {

const pass_data pass_data_ipa_tm =
{
  SIMPLE_IPA_PASS, /* type */
  "tmipa", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
class pass_ipa_tm : public simple_ipa_opt_pass
{
public:
  pass_ipa_tm (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return ipa_tm_execute (); }

}; // class pass_ipa_tm

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_tm (gcc::context *ctxt)
{
  return new pass_ipa_tm (ctxt);
}

#include "gt-trans-mem.h"