/* Passes for transactional memory support.
   Copyright (C) 2008-2015 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "fold-const.h"
#include "hard-reg-set.h"
#include "dominance.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "gimple-ssa.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
#include "tree-inline.h"
#include "diagnostic-core.h"
#include "trans-mem.h"
#include "langhooks.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-address.h"
#define A_RUNINSTRUMENTEDCODE	0x0001
#define A_RUNUNINSTRUMENTEDCODE	0x0002
#define A_SAVELIVEVARIABLES	0x0004
#define A_RESTORELIVEVARIABLES	0x0008
#define A_ABORTTRANSACTION	0x0010

#define AR_USERABORT		0x0001
#define AR_USERRETRY		0x0002
#define AR_TMCONFLICT		0x0004
#define AR_EXCEPTIONBLOCKABORT	0x0008
#define AR_OUTERABORT		0x0010

#define MODE_SERIALIRREVOCABLE	0x0000
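/* A sketch of how these bits are meant to be consumed (illustrative only,
   not code emitted by this file): the word returned by the runtime's
   begin-transaction entry point is tested against the A_* action bits,
   roughly

	status = _ITM_beginTransaction (...);
	if (status & A_ABORTTRANSACTION)
	  goto over;
	if (status & A_RESTORELIVEVARIABLES)
	  ...restore the previously saved locals...

   while the AR_* bits are passed as the argument of _ITM_abortTransaction
   to describe why the abort happened.  The variable names above are
   hypothetical.  */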
/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction {
	  local++;
	  if (++global == 10)
	    __tm_abort;
	}

  During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
  trivially replaced with a GIMPLE_TRANSACTION node.

  During pass_lower_tm, we examine the body of transactions looking
  for aborts.  Transactions that do not contain an abort may be
  merged into an outer transaction.  We also add a TRY-FINALLY node
  to arrange for the transaction to be committed on any exit.

  [??? Think about how this arrangement affects throw-with-commit
  and throw-with-abort operations.  In this case we want the TRY to
  handle gotos, but not to catch any exceptions because the transaction
  will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    local = local + 1;
	    t0 = global;
	    t1 = t0 + 1;
	    global = t1;
	    if (t1 == 10)
	      __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

  During pass_lower_eh, we create EH regions for the transactions,
  intermixed with the regular EH stuff.  This gives us a nice persistent
  mapping (all the way through rtl) from transactional memory operation
  back to the transaction, which allows us to get the abnormal edges
  correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	local = local + 1;
	t0 = global;
	t1 = t0 + 1;
	global = t1;
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:

  This is the end of all_lowering_passes, and so is what is present
  during the IPA passes, and through all of the optimization passes.

  During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
  functions and mark functions for cloning.

  At the end of gimple optimization, before exiting SSA form,
  pass_tm_edges replaces statements that perform transactional
  memory operations with the appropriate TM builtins, and swaps
  out function calls with their transactional clones.  At this
  point we introduce the abnormal transaction restart edges and
  complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	eh_label:
	if (x & abort_transaction)
	  goto over;
	t0 = __builtin___tm_load (global);
	t1 = t0 + 1;
	__builtin___tm_store (&global, t1);
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:
*/
static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);
/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  if (x == NULL_TREE)
    return NULL_TREE;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));

    default:
      if (TYPE_P (x))
	return NULL_TREE;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}
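/* For example (illustrative declarations, not from this file), given

	void f (void) __attribute__((transaction_safe));
	typedef void (*safe_fp) (void) __attribute__((transaction_safe));

   get_attrs_for returns the attribute list containing "transaction_safe"
   whether it is handed the FUNCTION_DECL for f, the function type itself,
   or a pointer type such as safe_fp, peeking through the pointer as
   described above.  */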
/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}
/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}
/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}
/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}
/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}
/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}
/* Return true for built-in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}
/* Return true if STMT is a built-in function call that "ends" a
   transaction.  */

bool
is_tm_ending (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl != NULL_TREE
	  && is_tm_ending_fndecl (fndecl));
}
/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}
/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc. optimized variants.  */

static bool
is_tm_simple_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}
/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}
/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc. optimized variants.  */

static bool
is_tm_simple_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}
/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}
/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}
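/* For instance (a sketch of the resulting GENERIC, not a verbatim dump),
   a __transaction_cancel [[outer]] / outer __tm_abort statement is turned
   by the front end into roughly

	_ITM_abortTransaction (AR_USERABORT | AR_OUTERABORT);

   while a plain cancel passes just AR_USERABORT.  */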
/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */
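/* A typical use (hypothetical declarations, not from this file): the
   attribute on the wrapper names the function it replaces, roughly

	extern void foo (void);
	extern void txn_foo (void) __attribute__((tm_wrap (foo)));

   so that calls to foo made inside transactions get redirected to txn_foo
   via record_tm_replacement / find_tm_replacement_function below.  */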
struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map>
{
  static inline hashval_t hash (tree_map *m) { return m->hash; }

  static inline bool
  equal (tree_map *a, tree_map *b)
  {
    return a->base.from == b->base.from;
  }

  static int
  keep_cache_entry (tree_map *&m)
  {
    return ggc_marked_p (m->base.from);
  }
};

static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;
void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);

  h = ggc_alloc<tree_map> ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
  *slot = h;
}
/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = tm_wrap_map->find_with_hash (&in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, only these few are defined.  */

  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here:  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	break;
      }

  return NULL_TREE;
}
/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */
void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They'd better know what they're
     doing...  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}
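/* So, for example (a sketch of the effect, assuming the usual libc
   declaration of malloc), once tm_malloc_replacement has run on malloc,
   a transactional body such as

	__transaction_atomic { p = malloc (32); }

   will have its malloc call redirected to the BUILT_IN_TM_MALLOC builtin
   when the transaction is instrumented later on.  */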
/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4
struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple stmt;
};
/* Return true if T is a volatile variable of some kind.  */

static bool
volatile_var_p (tree t)
{
  return (SSA_VAR_P (t)
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}
/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (volatile_var_p (*tp)
      && d->block_flags & DIAG_TM_SAFE
      && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      error_at (gimple_location (d->stmt),
		"invalid volatile use of %qD inside transaction",
		*tp);
    }

  return NULL_TREE;
}
static inline bool
is_tm_safe_or_pure (const_tree x)
{
  return is_tm_safe (x) || is_tm_pure (x);
}
643 diagnose_tm_1 (gimple_stmt_iterator
*gsi
, bool *handled_ops_p
,
644 struct walk_stmt_info
*wi
)
646 gimple stmt
= gsi_stmt (*gsi
);
647 struct diagnose_tm
*d
= (struct diagnose_tm
*) wi
->info
;
649 /* Save stmt for use in leaf analysis. */
652 switch (gimple_code (stmt
))
656 tree fn
= gimple_call_fn (stmt
);
658 if ((d
->summary_flags
& DIAG_TM_OUTER
) == 0
659 && is_tm_may_cancel_outer (fn
))
660 error_at (gimple_location (stmt
),
661 "%<transaction_may_cancel_outer%> function call not within"
662 " outer transaction or %<transaction_may_cancel_outer%>");
664 if (d
->summary_flags
& DIAG_TM_SAFE
)
666 bool is_safe
, direct_call_p
;
669 if (TREE_CODE (fn
) == ADDR_EXPR
670 && TREE_CODE (TREE_OPERAND (fn
, 0)) == FUNCTION_DECL
)
672 direct_call_p
= true;
673 replacement
= TREE_OPERAND (fn
, 0);
674 replacement
= find_tm_replacement_function (replacement
);
680 direct_call_p
= false;
681 replacement
= NULL_TREE
;
684 if (is_tm_safe_or_pure (fn
))
686 else if (is_tm_callable (fn
) || is_tm_irrevocable (fn
))
688 /* A function explicitly marked transaction_callable as
689 opposed to transaction_safe is being defined to be
690 unsafe as part of its ABI, regardless of its contents. */
693 else if (direct_call_p
)
695 if (IS_TYPE_OR_DECL_P (fn
)
696 && flags_from_decl_or_type (fn
) & ECF_TM_BUILTIN
)
698 else if (replacement
)
700 /* ??? At present we've been considering replacements
701 merely transaction_callable, and therefore might
702 enter irrevocable. The tm_wrap attribute has not
703 yet made it into the new language spec. */
708 /* ??? Diagnostics for unmarked direct calls moved into
709 the IPA pass. Section 3.2 of the spec details how
710 functions not marked should be considered "implicitly
711 safe" based on having examined the function body. */
717 /* An unmarked indirect call. Consider it unsafe even
718 though optimization may yet figure out how to inline. */
724 if (TREE_CODE (fn
) == ADDR_EXPR
)
725 fn
= TREE_OPERAND (fn
, 0);
726 if (d
->block_flags
& DIAG_TM_SAFE
)
729 error_at (gimple_location (stmt
),
730 "unsafe function call %qD within "
731 "atomic transaction", fn
);
734 if (!DECL_P (fn
) || DECL_NAME (fn
))
735 error_at (gimple_location (stmt
),
736 "unsafe function call %qE within "
737 "atomic transaction", fn
);
739 error_at (gimple_location (stmt
),
740 "unsafe indirect function call within "
741 "atomic transaction");
747 error_at (gimple_location (stmt
),
748 "unsafe function call %qD within "
749 "%<transaction_safe%> function", fn
);
752 if (!DECL_P (fn
) || DECL_NAME (fn
))
753 error_at (gimple_location (stmt
),
754 "unsafe function call %qE within "
755 "%<transaction_safe%> function", fn
);
757 error_at (gimple_location (stmt
),
758 "unsafe indirect function call within "
759 "%<transaction_safe%> function");
768 /* ??? We ought to come up with a way to add attributes to
769 asm statements, and then add "transaction_safe" to it.
770 Either that or get the language spec to resurrect __tm_waiver. */
771 if (d
->block_flags
& DIAG_TM_SAFE
)
772 error_at (gimple_location (stmt
),
773 "asm not allowed in atomic transaction");
774 else if (d
->func_flags
& DIAG_TM_SAFE
)
775 error_at (gimple_location (stmt
),
776 "asm not allowed in %<transaction_safe%> function");
779 case GIMPLE_TRANSACTION
:
781 gtransaction
*trans_stmt
= as_a
<gtransaction
*> (stmt
);
782 unsigned char inner_flags
= DIAG_TM_SAFE
;
784 if (gimple_transaction_subcode (trans_stmt
) & GTMA_IS_RELAXED
)
786 if (d
->block_flags
& DIAG_TM_SAFE
)
787 error_at (gimple_location (stmt
),
788 "relaxed transaction in atomic transaction");
789 else if (d
->func_flags
& DIAG_TM_SAFE
)
790 error_at (gimple_location (stmt
),
791 "relaxed transaction in %<transaction_safe%> function");
792 inner_flags
= DIAG_TM_RELAXED
;
794 else if (gimple_transaction_subcode (trans_stmt
) & GTMA_IS_OUTER
)
797 error_at (gimple_location (stmt
),
798 "outer transaction in transaction");
799 else if (d
->func_flags
& DIAG_TM_OUTER
)
800 error_at (gimple_location (stmt
),
801 "outer transaction in "
802 "%<transaction_may_cancel_outer%> function");
803 else if (d
->func_flags
& DIAG_TM_SAFE
)
804 error_at (gimple_location (stmt
),
805 "outer transaction in %<transaction_safe%> function");
806 inner_flags
|= DIAG_TM_OUTER
;
809 *handled_ops_p
= true;
810 if (gimple_transaction_body (trans_stmt
))
812 struct walk_stmt_info wi_inner
;
813 struct diagnose_tm d_inner
;
815 memset (&d_inner
, 0, sizeof (d_inner
));
816 d_inner
.func_flags
= d
->func_flags
;
817 d_inner
.block_flags
= d
->block_flags
| inner_flags
;
818 d_inner
.summary_flags
= d_inner
.func_flags
| d_inner
.block_flags
;
820 memset (&wi_inner
, 0, sizeof (wi_inner
));
821 wi_inner
.info
= &d_inner
;
823 walk_gimple_seq (gimple_transaction_body (trans_stmt
),
824 diagnose_tm_1
, diagnose_tm_1_op
, &wi_inner
);
837 diagnose_tm_blocks (void)
839 struct walk_stmt_info wi
;
840 struct diagnose_tm d
;
842 memset (&d
, 0, sizeof (d
));
843 if (is_tm_may_cancel_outer (current_function_decl
))
844 d
.func_flags
= DIAG_TM_OUTER
| DIAG_TM_SAFE
;
845 else if (is_tm_safe (current_function_decl
))
846 d
.func_flags
= DIAG_TM_SAFE
;
847 d
.summary_flags
= d
.func_flags
;
849 memset (&wi
, 0, sizeof (wi
));
852 walk_gimple_seq (gimple_body (current_function_decl
),
853 diagnose_tm_1
, diagnose_tm_1_op
, &wi
);
860 const pass_data pass_data_diagnose_tm_blocks
=
862 GIMPLE_PASS
, /* type */
863 "*diagnose_tm_blocks", /* name */
864 OPTGROUP_NONE
, /* optinfo_flags */
865 TV_TRANS_MEM
, /* tv_id */
866 PROP_gimple_any
, /* properties_required */
867 0, /* properties_provided */
868 0, /* properties_destroyed */
869 0, /* todo_flags_start */
870 0, /* todo_flags_finish */
873 class pass_diagnose_tm_blocks
: public gimple_opt_pass
876 pass_diagnose_tm_blocks (gcc::context
*ctxt
)
877 : gimple_opt_pass (pass_data_diagnose_tm_blocks
, ctxt
)
880 /* opt_pass methods: */
881 virtual bool gate (function
*) { return flag_tm
; }
882 virtual unsigned int execute (function
*) { return diagnose_tm_blocks (); }
884 }; // class pass_diagnose_tm_blocks
889 make_pass_diagnose_tm_blocks (gcc::context
*ctxt
)
891 return new pass_diagnose_tm_blocks (ctxt
);
/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

	struct large { int x[1000]; };
	struct large lala = { 0 };
	__transaction {
	  lala.x[i] = 123;
	  ...
	}

   We can either save/restore:

	lala = { 0 };
	trxn = _ITM_startTransaction ();
	if (trxn & a_saveLiveVariables)
	  tmp_lala1 = lala.x[i];
	else if (a & a_restoreLiveVariables)
	  lala.x[i] = tmp_lala1;

   or use the logging functions:

	lala = { 0 };
	trxn = _ITM_startTransaction ();
	_ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as far
   up the dominator tree as possible to shadow all of the writes to a given
   location (thus reducing the total number of logging calls), but not
   so high as to be called on a path that does not perform a write.  */
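/* A small sketch of that trade-off (hypothetical user code):

	__transaction {
	  if (cond)
	    lala.x[i] = 1;	<-- log here, at the store itself,
	  ...			    not above the 'if': the !cond path
	}			    never writes lala.x[i] and would pay
				    for a useless log call.

   Conversely, if one store dominates another store to the same address,
   a single log call at the dominating store covers both.  */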
/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
typedef struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  vec<gimple> stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
} tm_log_entry;
954 /* Log entry hashtable helpers. */
956 struct log_entry_hasher
: pointer_hash
<tm_log_entry
>
958 static inline hashval_t
hash (const tm_log_entry
*);
959 static inline bool equal (const tm_log_entry
*, const tm_log_entry
*);
960 static inline void remove (tm_log_entry
*);
963 /* Htab support. Return hash value for a `tm_log_entry'. */
965 log_entry_hasher::hash (const tm_log_entry
*log
)
967 return iterative_hash_expr (log
->addr
, 0);
970 /* Htab support. Return true if two log entries are the same. */
972 log_entry_hasher::equal (const tm_log_entry
*log1
, const tm_log_entry
*log2
)
976 rth: I suggest that we get rid of the component refs etc.
977 I.e. resolve the reference to base + offset.
979 We may need to actually finish a merge with mainline for this,
980 since we'd like to be presented with Richi's MEM_REF_EXPRs more
981 often than not. But in the meantime your tm_log_entry could save
982 the results of get_inner_reference.
984 See: g++.dg/tm/pr46653.C
987 /* Special case plain equality because operand_equal_p() below will
988 return FALSE if the addresses are equal but they have
989 side-effects (e.g. a volatile address). */
990 if (log1
->addr
== log2
->addr
)
993 return operand_equal_p (log1
->addr
, log2
->addr
, 0);
996 /* Htab support. Free one tm_log_entry. */
998 log_entry_hasher::remove (tm_log_entry
*lp
)
1000 lp
->stmts
.release ();
1005 /* The actual log. */
1006 static hash_table
<log_entry_hasher
> *tm_log
;
1008 /* Addresses to log with a save/restore sequence. These should be in
1010 static vec
<tree
> tm_log_save_addresses
;
1012 enum thread_memory_type
1016 mem_transaction_local
,
1020 typedef struct tm_new_mem_map
1022 /* SSA_NAME being dereferenced. */
1024 enum thread_memory_type local_new_memory
;
1027 /* Hashtable helpers. */
1029 struct tm_mem_map_hasher
: free_ptr_hash
<tm_new_mem_map_t
>
1031 static inline hashval_t
hash (const tm_new_mem_map_t
*);
1032 static inline bool equal (const tm_new_mem_map_t
*, const tm_new_mem_map_t
*);
1036 tm_mem_map_hasher::hash (const tm_new_mem_map_t
*v
)
1038 return (intptr_t)v
->val
>> 4;
1042 tm_mem_map_hasher::equal (const tm_new_mem_map_t
*v
, const tm_new_mem_map_t
*c
)
1044 return v
->val
== c
->val
;
/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc).  */
1049 static hash_table
<tm_mem_map_hasher
> *tm_new_mem_hash
;
1051 /* Initialize logging data structures. */
1055 tm_log
= new hash_table
<log_entry_hasher
> (10);
1056 tm_new_mem_hash
= new hash_table
<tm_mem_map_hasher
> (5);
1057 tm_log_save_addresses
.create (5);
1060 /* Free logging data structures. */
1062 tm_log_delete (void)
1066 delete tm_new_mem_hash
;
1067 tm_new_mem_hash
= NULL
;
1068 tm_log_save_addresses
.release ();
1071 /* Return true if MEM is a transaction invariant memory for the TM
1072 region starting at REGION_ENTRY_BLOCK. */
1074 transaction_invariant_address_p (const_tree mem
, basic_block region_entry_block
)
1076 if ((TREE_CODE (mem
) == INDIRECT_REF
|| TREE_CODE (mem
) == MEM_REF
)
1077 && TREE_CODE (TREE_OPERAND (mem
, 0)) == SSA_NAME
)
1081 def_bb
= gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem
, 0)));
1082 return def_bb
!= region_entry_block
1083 && dominated_by_p (CDI_DOMINATORS
, region_entry_block
, def_bb
);
1086 mem
= strip_invariant_refs (mem
);
1087 return mem
&& (CONSTANT_CLASS_P (mem
) || decl_address_invariant_p (mem
));
/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry_block for the transaction.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.

   If known, ENTRY_BLOCK is the entry block for the region, otherwise
   NULL.  */
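/* For instance (hypothetical code), with

	__transaction {
	  x[i] = 1;		in BB1
	  if (cond)
	    x[i] = 2;		in BB2, dominated by BB1
	}

   only the store in BB1 needs to remain in the log entry for &x[i]; had
   the two stores been on sibling paths instead, both would be recorded.  */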
1106 tm_log_add (basic_block entry_block
, tree addr
, gimple stmt
)
1108 tm_log_entry
**slot
;
1109 struct tm_log_entry l
, *lp
;
1112 slot
= tm_log
->find_slot (&l
, INSERT
);
1115 tree type
= TREE_TYPE (addr
);
1117 lp
= XNEW (struct tm_log_entry
);
1121 /* Small invariant addresses can be handled as save/restores. */
1123 && transaction_invariant_address_p (lp
->addr
, entry_block
)
1124 && TYPE_SIZE_UNIT (type
) != NULL
1125 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
))
1126 && ((HOST_WIDE_INT
) tree_to_uhwi (TYPE_SIZE_UNIT (type
))
1127 < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE
))
1128 /* We must be able to copy this type normally. I.e., no
1129 special constructors and the like. */
1130 && !TREE_ADDRESSABLE (type
))
1132 lp
->save_var
= create_tmp_reg (TREE_TYPE (lp
->addr
), "tm_save");
1133 lp
->stmts
.create (0);
1134 lp
->entry_block
= entry_block
;
      /* Save addresses separately in dominator order so we don't
	 get confused by overlapping addresses in the save/restore
	 sequence.  */
      tm_log_save_addresses.safe_push (lp->addr);
1142 /* Use the logging functions. */
1143 lp
->stmts
.create (5);
1144 lp
->stmts
.quick_push (stmt
);
1145 lp
->save_var
= NULL
;
1155 /* If we're generating a save/restore sequence, we don't care
1156 about statements. */
1160 for (i
= 0; lp
->stmts
.iterate (i
, &oldstmt
); ++i
)
1162 if (stmt
== oldstmt
)
1164 /* We already have a store to the same address, higher up the
1165 dominator tree. Nothing to do. */
1166 if (dominated_by_p (CDI_DOMINATORS
,
1167 gimple_bb (stmt
), gimple_bb (oldstmt
)))
1169 /* We should be processing blocks in dominator tree order. */
1170 gcc_assert (!dominated_by_p (CDI_DOMINATORS
,
1171 gimple_bb (oldstmt
), gimple_bb (stmt
)));
1173 /* Store is on a different code path. */
1174 lp
->stmts
.safe_push (stmt
);
1178 /* Gimplify the address of a TARGET_MEM_REF. Return the SSA_NAME
1179 result, insert the new statements before GSI. */
1182 gimplify_addr (gimple_stmt_iterator
*gsi
, tree x
)
1184 if (TREE_CODE (x
) == TARGET_MEM_REF
)
1185 x
= tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x
)), x
);
1187 x
= build_fold_addr_expr (x
);
1188 return force_gimple_operand_gsi (gsi
, x
, true, NULL
, true, GSI_SAME_STMT
);
1191 /* Instrument one address with the logging functions.
1192 ADDR is the address to save.
1193 STMT is the statement before which to place it. */
1195 tm_log_emit_stmt (tree addr
, gimple stmt
)
1197 tree type
= TREE_TYPE (addr
);
1198 tree size
= TYPE_SIZE_UNIT (type
);
1199 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt
);
1201 enum built_in_function code
= BUILT_IN_TM_LOG
;
1203 if (type
== float_type_node
)
1204 code
= BUILT_IN_TM_LOG_FLOAT
;
1205 else if (type
== double_type_node
)
1206 code
= BUILT_IN_TM_LOG_DOUBLE
;
1207 else if (type
== long_double_type_node
)
1208 code
= BUILT_IN_TM_LOG_LDOUBLE
;
1209 else if (tree_fits_uhwi_p (size
))
1211 unsigned int n
= tree_to_uhwi (size
);
1215 code
= BUILT_IN_TM_LOG_1
;
1218 code
= BUILT_IN_TM_LOG_2
;
1221 code
= BUILT_IN_TM_LOG_4
;
1224 code
= BUILT_IN_TM_LOG_8
;
1227 code
= BUILT_IN_TM_LOG
;
1228 if (TREE_CODE (type
) == VECTOR_TYPE
)
1230 if (n
== 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64
))
1231 code
= BUILT_IN_TM_LOG_M64
;
1232 else if (n
== 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128
))
1233 code
= BUILT_IN_TM_LOG_M128
;
1234 else if (n
== 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256
))
1235 code
= BUILT_IN_TM_LOG_M256
;
1241 addr
= gimplify_addr (&gsi
, addr
);
1242 if (code
== BUILT_IN_TM_LOG
)
1243 log
= gimple_build_call (builtin_decl_explicit (code
), 2, addr
, size
);
1245 log
= gimple_build_call (builtin_decl_explicit (code
), 1, addr
);
1246 gsi_insert_before (&gsi
, log
, GSI_SAME_STMT
);
/* Go through the log and instrument addresses that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   later.  */
1255 hash_table
<log_entry_hasher
>::iterator hi
;
1256 struct tm_log_entry
*lp
;
1258 FOR_EACH_HASH_TABLE_ELEMENT (*tm_log
, lp
, tm_log_entry_t
, hi
)
1265 fprintf (dump_file
, "TM thread private mem logging: ");
1266 print_generic_expr (dump_file
, lp
->addr
, 0);
1267 fprintf (dump_file
, "\n");
1273 fprintf (dump_file
, "DUMPING to variable\n");
1279 fprintf (dump_file
, "DUMPING with logging functions\n");
1280 for (i
= 0; lp
->stmts
.iterate (i
, &stmt
); ++i
)
1281 tm_log_emit_stmt (lp
->addr
, stmt
);
1286 /* Emit the save sequence for the corresponding addresses in the log.
1287 ENTRY_BLOCK is the entry block for the transaction.
1288 BB is the basic block to insert the code in. */
1290 tm_log_emit_saves (basic_block entry_block
, basic_block bb
)
1293 gimple_stmt_iterator gsi
= gsi_last_bb (bb
);
1295 struct tm_log_entry l
, *lp
;
1297 for (i
= 0; i
< tm_log_save_addresses
.length (); ++i
)
1299 l
.addr
= tm_log_save_addresses
[i
];
1300 lp
= *(tm_log
->find_slot (&l
, NO_INSERT
));
1301 gcc_assert (lp
->save_var
!= NULL
);
1303 /* We only care about variables in the current transaction. */
1304 if (lp
->entry_block
!= entry_block
)
1307 stmt
= gimple_build_assign (lp
->save_var
, unshare_expr (lp
->addr
));
1309 /* Make sure we can create an SSA_NAME for this type. For
1310 instance, aggregates aren't allowed, in which case the system
1311 will create a VOP for us and everything will just work. */
1312 if (is_gimple_reg_type (TREE_TYPE (lp
->save_var
)))
1314 lp
->save_var
= make_ssa_name (lp
->save_var
, stmt
);
1315 gimple_assign_set_lhs (stmt
, lp
->save_var
);
1318 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
1322 /* Emit the restore sequence for the corresponding addresses in the log.
1323 ENTRY_BLOCK is the entry block for the transaction.
1324 BB is the basic block to insert the code in. */
1326 tm_log_emit_restores (basic_block entry_block
, basic_block bb
)
1329 struct tm_log_entry l
, *lp
;
1330 gimple_stmt_iterator gsi
;
1333 for (i
= tm_log_save_addresses
.length () - 1; i
>= 0; i
--)
1335 l
.addr
= tm_log_save_addresses
[i
];
1336 lp
= *(tm_log
->find_slot (&l
, NO_INSERT
));
1337 gcc_assert (lp
->save_var
!= NULL
);
1339 /* We only care about variables in the current transaction. */
1340 if (lp
->entry_block
!= entry_block
)
      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
      gsi = gsi_start_bb (bb);
1347 stmt
= gimple_build_assign (unshare_expr (lp
->addr
), lp
->save_var
);
1348 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
1353 static tree
lower_sequence_tm (gimple_stmt_iterator
*, bool *,
1354 struct walk_stmt_info
*);
1355 static tree
lower_sequence_no_tm (gimple_stmt_iterator
*, bool *,
1356 struct walk_stmt_info
*);
/* Evaluate an address X being dereferenced and determine if it
   originally points to a non-aliased new chunk of memory (malloc,
   alloca, etc).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
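/* Some illustrative cases (hypothetical code):

	p = malloc (n);			p's defining statement is outside
	__transaction { ... *p ... }	the region, so p is mem_thread_local;

	__transaction { p = malloc (n);	p's defining statement is inside
			... *p ... }	the region, so p is mem_transaction_local;

   and an SSA name that may alias global memory (say, a pointer loaded
   from a global) is mem_non_local.  */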
1368 static enum thread_memory_type
1369 thread_private_new_memory (basic_block entry_block
, tree x
)
1372 enum tree_code code
;
1373 tm_new_mem_map_t
**slot
;
1374 tm_new_mem_map_t elt
, *elt_p
;
1376 enum thread_memory_type retval
= mem_transaction_local
;
1379 || TREE_CODE (x
) != SSA_NAME
1380 /* Possible uninitialized use, or a function argument. In
1381 either case, we don't care. */
1382 || SSA_NAME_IS_DEFAULT_DEF (x
))
1383 return mem_non_local
;
1385 /* Look in cache first. */
1387 slot
= tm_new_mem_hash
->find_slot (&elt
, INSERT
);
1390 return elt_p
->local_new_memory
;
1392 /* Optimistically assume the memory is transaction local during
1393 processing. This catches recursion into this variable. */
1394 *slot
= elt_p
= XNEW (tm_new_mem_map_t
);
1396 elt_p
->local_new_memory
= mem_transaction_local
;
1398 /* Search DEF chain to find the original definition of this address. */
1401 if (ptr_deref_may_alias_global_p (x
))
1403 /* Address escapes. This is not thread-private. */
1404 retval
= mem_non_local
;
1405 goto new_memory_ret
;
1408 stmt
= SSA_NAME_DEF_STMT (x
);
	  /* If the malloc call is outside the transaction, this is
	     thread-local.  */
1412 if (retval
!= mem_thread_local
1413 && !dominated_by_p (CDI_DOMINATORS
, gimple_bb (stmt
), entry_block
))
1414 retval
= mem_thread_local
;
1416 if (is_gimple_assign (stmt
))
1418 code
= gimple_assign_rhs_code (stmt
);
1419 /* x = foo ==> foo */
1420 if (code
== SSA_NAME
)
1421 x
= gimple_assign_rhs1 (stmt
);
1422 /* x = foo + n ==> foo */
1423 else if (code
== POINTER_PLUS_EXPR
)
1424 x
= gimple_assign_rhs1 (stmt
);
1425 /* x = (cast*) foo ==> foo */
1426 else if (code
== VIEW_CONVERT_EXPR
|| CONVERT_EXPR_CODE_P (code
))
1427 x
= gimple_assign_rhs1 (stmt
);
	  /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
1429 else if (code
== COND_EXPR
)
1431 tree op1
= gimple_assign_rhs2 (stmt
);
1432 tree op2
= gimple_assign_rhs3 (stmt
);
1433 enum thread_memory_type mem
;
1434 retval
= thread_private_new_memory (entry_block
, op1
);
1435 if (retval
== mem_non_local
)
1436 goto new_memory_ret
;
1437 mem
= thread_private_new_memory (entry_block
, op2
);
1438 retval
= MIN (retval
, mem
);
1439 goto new_memory_ret
;
1443 retval
= mem_non_local
;
1444 goto new_memory_ret
;
1449 if (gimple_code (stmt
) == GIMPLE_PHI
)
1452 enum thread_memory_type mem
;
1453 tree phi_result
= gimple_phi_result (stmt
);
1455 /* If any of the ancestors are non-local, we are sure to
1456 be non-local. Otherwise we can avoid doing anything
1457 and inherit what has already been generated. */
1459 for (i
= 0; i
< gimple_phi_num_args (stmt
); ++i
)
1461 tree op
= PHI_ARG_DEF (stmt
, i
);
1463 /* Exclude self-assignment. */
1464 if (phi_result
== op
)
1467 mem
= thread_private_new_memory (entry_block
, op
);
1468 if (mem
== mem_non_local
)
1471 goto new_memory_ret
;
1473 retval
= MIN (retval
, mem
);
1475 goto new_memory_ret
;
1480 while (TREE_CODE (x
) == SSA_NAME
);
1482 if (stmt
&& is_gimple_call (stmt
) && gimple_call_flags (stmt
) & ECF_MALLOC
)
1483 /* Thread-local or transaction-local. */
1486 retval
= mem_non_local
;
1489 elt_p
->local_new_memory
= retval
;
/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region where STMT resides.
   NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
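/* Roughly, for a hypothetical transaction body:

	__transaction_atomic {
	  global_var = 1;	needs a write barrier (GTMA_HAVE_STORE);
	  local_var = 2;	non-escaping local: saved/logged, no barrier;
	  p = malloc (8);
	  *p = 3;		transaction-local memory: nothing needed.
	}
*/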
1503 requires_barrier (basic_block entry_block
, tree x
, gimple stmt
)
1506 while (handled_component_p (x
))
1507 x
= TREE_OPERAND (x
, 0);
1509 switch (TREE_CODE (x
))
1514 enum thread_memory_type ret
;
1516 ret
= thread_private_new_memory (entry_block
, TREE_OPERAND (x
, 0));
1517 if (ret
== mem_non_local
)
1519 if (stmt
&& ret
== mem_thread_local
)
1520 /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */
1521 tm_log_add (entry_block
, orig
, stmt
);
1523 /* Transaction-locals require nothing at all. For malloc, a
1524 transaction restart frees the memory and we reallocate.
1525 For alloca, the stack pointer gets reset by the retry and
1530 case TARGET_MEM_REF
:
1531 if (TREE_CODE (TMR_BASE (x
)) != ADDR_EXPR
)
1533 x
= TREE_OPERAND (TMR_BASE (x
), 0);
1534 if (TREE_CODE (x
) == PARM_DECL
)
1536 gcc_assert (TREE_CODE (x
) == VAR_DECL
);
1542 if (DECL_BY_REFERENCE (x
))
1544 /* ??? This value is a pointer, but aggregate_value_p has been
1545 jigged to return true which confuses needs_to_live_in_memory.
1546 This ought to be cleaned up generically.
1548 FIXME: Verify this still happens after the next mainline
1549 merge. Testcase ie g++.dg/tm/pr47554.C.
1554 if (is_global_var (x
))
1555 return !TREE_READONLY (x
);
1556 if (/* FIXME: This condition should actually go below in the
1557 tm_log_add() call, however is_call_clobbered() depends on
1558 aliasing info which is not available during
1559 gimplification. Since requires_barrier() gets called
1560 during lower_sequence_tm/gimplification, leave the call
1561 to needs_to_live_in_memory until we eliminate
1562 lower_sequence_tm altogether. */
1563 needs_to_live_in_memory (x
))
      /* For local memory that doesn't escape (aka thread private
	 memory), we can either save the value at the beginning of
	 the transaction and restore on restart, or call a tm
	 function to dynamically save and restore on restart
	 (ITM_L*).  */
1573 tm_log_add (entry_block
, orig
, stmt
);
1582 /* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
1583 a transaction region. */
1586 examine_assign_tm (unsigned *state
, gimple_stmt_iterator
*gsi
)
1588 gimple stmt
= gsi_stmt (*gsi
);
1590 if (requires_barrier (/*entry_block=*/NULL
, gimple_assign_rhs1 (stmt
), NULL
))
1591 *state
|= GTMA_HAVE_LOAD
;
1592 if (requires_barrier (/*entry_block=*/NULL
, gimple_assign_lhs (stmt
), NULL
))
1593 *state
|= GTMA_HAVE_STORE
;
1596 /* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */
1599 examine_call_tm (unsigned *state
, gimple_stmt_iterator
*gsi
)
1601 gimple stmt
= gsi_stmt (*gsi
);
1604 if (is_tm_pure_call (stmt
))
1607 /* Check if this call is a transaction abort. */
1608 fn
= gimple_call_fndecl (stmt
);
1609 if (is_tm_abort (fn
))
1610 *state
|= GTMA_HAVE_ABORT
;
1612 /* Note that something may happen. */
1613 *state
|= GTMA_HAVE_LOAD
| GTMA_HAVE_STORE
;
1616 /* Lower a GIMPLE_TRANSACTION statement. */
1619 lower_transaction (gimple_stmt_iterator
*gsi
, struct walk_stmt_info
*wi
)
1622 gtransaction
*stmt
= as_a
<gtransaction
*> (gsi_stmt (*gsi
));
1623 unsigned int *outer_state
= (unsigned int *) wi
->info
;
1624 unsigned int this_state
= 0;
1625 struct walk_stmt_info this_wi
;
1627 /* First, lower the body. The scanning that we do inside gives
1628 us some idea of what we're dealing with. */
1629 memset (&this_wi
, 0, sizeof (this_wi
));
1630 this_wi
.info
= (void *) &this_state
;
1631 walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt
),
1632 lower_sequence_tm
, NULL
, &this_wi
);
1634 /* If there was absolutely nothing transaction related inside the
1635 transaction, we may elide it. Likewise if this is a nested
1636 transaction and does not contain an abort. */
1638 || (!(this_state
& GTMA_HAVE_ABORT
) && outer_state
!= NULL
))
1641 *outer_state
|= this_state
;
1643 gsi_insert_seq_before (gsi
, gimple_transaction_body (stmt
),
1645 gimple_transaction_set_body (stmt
, NULL
);
1647 gsi_remove (gsi
, true);
1648 wi
->removed_stmt
= true;
1652 /* Wrap the body of the transaction in a try-finally node so that
1653 the commit call is always properly called. */
1654 g
= gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT
), 0);
1655 if (flag_exceptions
)
1658 gimple_seq n_seq
, e_seq
;
1660 n_seq
= gimple_seq_alloc_with_stmt (g
);
1663 g
= gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER
),
1664 1, integer_zero_node
);
1665 ptr
= create_tmp_var (ptr_type_node
);
1666 gimple_call_set_lhs (g
, ptr
);
1667 gimple_seq_add_stmt (&e_seq
, g
);
1669 g
= gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH
),
1671 gimple_seq_add_stmt (&e_seq
, g
);
1673 g
= gimple_build_eh_else (n_seq
, e_seq
);
1676 g
= gimple_build_try (gimple_transaction_body (stmt
),
1677 gimple_seq_alloc_with_stmt (g
), GIMPLE_TRY_FINALLY
);
1678 gsi_insert_after (gsi
, g
, GSI_CONTINUE_LINKING
);
1680 gimple_transaction_set_body (stmt
, NULL
);
1682 /* If the transaction calls abort or if this is an outer transaction,
1683 add an "over" label afterwards. */
1684 if ((this_state
& (GTMA_HAVE_ABORT
))
1685 || (gimple_transaction_subcode (stmt
) & GTMA_IS_OUTER
))
1687 tree label
= create_artificial_label (UNKNOWN_LOCATION
);
1688 gimple_transaction_set_label (stmt
, label
);
1689 gsi_insert_after (gsi
, gimple_build_label (label
), GSI_CONTINUE_LINKING
);
1692 /* Record the set of operations found for use later. */
1693 this_state
|= gimple_transaction_subcode (stmt
) & GTMA_DECLARATION_MASK
;
1694 gimple_transaction_set_subcode (stmt
, this_state
);
1697 /* Iterate through the statements in the sequence, lowering them all
1698 as appropriate for being in a transaction. */
1701 lower_sequence_tm (gimple_stmt_iterator
*gsi
, bool *handled_ops_p
,
1702 struct walk_stmt_info
*wi
)
1704 unsigned int *state
= (unsigned int *) wi
->info
;
1705 gimple stmt
= gsi_stmt (*gsi
);
1707 *handled_ops_p
= true;
1708 switch (gimple_code (stmt
))
1711 /* Only memory reads/writes need to be instrumented. */
1712 if (gimple_assign_single_p (stmt
))
1713 examine_assign_tm (state
, gsi
);
1717 examine_call_tm (state
, gsi
);
1721 *state
|= GTMA_MAY_ENTER_IRREVOCABLE
;
1724 case GIMPLE_TRANSACTION
:
1725 lower_transaction (gsi
, wi
);
1729 *handled_ops_p
= !gimple_has_substatements (stmt
);
1736 /* Iterate through the statements in the sequence, lowering them all
1737 as appropriate for being outside of a transaction. */
1740 lower_sequence_no_tm (gimple_stmt_iterator
*gsi
, bool *handled_ops_p
,
1741 struct walk_stmt_info
* wi
)
1743 gimple stmt
= gsi_stmt (*gsi
);
1745 if (gimple_code (stmt
) == GIMPLE_TRANSACTION
)
1747 *handled_ops_p
= true;
1748 lower_transaction (gsi
, wi
);
1751 *handled_ops_p
= !gimple_has_substatements (stmt
);
1756 /* Main entry point for flattening GIMPLE_TRANSACTION constructs. After
1757 this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
1758 been moved out, and all the data required for constructing a proper
1759 CFG has been recorded. */
1762 execute_lower_tm (void)
1764 struct walk_stmt_info wi
;
1767 /* Transactional clones aren't created until a later pass. */
1768 gcc_assert (!decl_is_tm_clone (current_function_decl
));
1770 body
= gimple_body (current_function_decl
);
1771 memset (&wi
, 0, sizeof (wi
));
1772 walk_gimple_seq_mod (&body
, lower_sequence_no_tm
, NULL
, &wi
);
1773 gimple_set_body (current_function_decl
, body
);
1780 const pass_data pass_data_lower_tm
=
1782 GIMPLE_PASS
, /* type */
1783 "tmlower", /* name */
1784 OPTGROUP_NONE
, /* optinfo_flags */
1785 TV_TRANS_MEM
, /* tv_id */
1786 PROP_gimple_lcf
, /* properties_required */
1787 0, /* properties_provided */
1788 0, /* properties_destroyed */
1789 0, /* todo_flags_start */
1790 0, /* todo_flags_finish */
1793 class pass_lower_tm
: public gimple_opt_pass
1796 pass_lower_tm (gcc::context
*ctxt
)
1797 : gimple_opt_pass (pass_data_lower_tm
, ctxt
)
1800 /* opt_pass methods: */
1801 virtual bool gate (function
*) { return flag_tm
; }
1802 virtual unsigned int execute (function
*) { return execute_lower_tm (); }
1804 }; // class pass_lower_tm
1809 make_pass_lower_tm (gcc::context
*ctxt
)
1811 return new pass_lower_tm (ctxt
);
1814 /* Collect region information for each transaction. */
1820 /* The field "transaction_stmt" is initially a gtransaction *,
1821 but eventually gets lowered to a gcall *(to BUILT_IN_TM_START).
1823 Helper method to get it as a gtransaction *, with code-checking
1824 in a checked-build. */
1827 get_transaction_stmt () const
1829 return as_a
<gtransaction
*> (transaction_stmt
);
1834 /* Link to the next unnested transaction. */
1835 struct tm_region
*next
;
1837 /* Link to the next inner transaction. */
1838 struct tm_region
*inner
;
1840 /* Link to the next outer transaction. */
1841 struct tm_region
*outer
;
  /* The GIMPLE_TRANSACTION statement beginning this transaction.
     After TM_MARK, this gets replaced by a call to
     BUILT_IN_TM_START.
     Hence this will be either a gtransaction * or a gcall *.  */
1847 gimple transaction_stmt
;
1849 /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
1850 BUILT_IN_TM_START, this field is true if the transaction is an
1851 outer transaction. */
1852 bool original_transaction_was_outer
;
1854 /* Return value from BUILT_IN_TM_START. */
1857 /* The entry block to this region. This will always be the first
1858 block of the body of the transaction. */
1859 basic_block entry_block
;
1861 /* The first block after an expanded call to _ITM_beginTransaction. */
1862 basic_block restart_block
;
1864 /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
1865 These blocks are still a part of the region (i.e., the border is
1866 inclusive). Note that this set is only complete for paths in the CFG
1867 starting at ENTRY_BLOCK, and that there is no exit block recorded for
1868 the edge to the "over" label. */
  /* The set of all blocks that have a TM_IRREVOCABLE call.  */
1875 typedef struct tm_region
*tm_region_p
;
1877 /* True if there are pending edge statements to be committed for the
1878 current function being scanned in the tmmark pass. */
1879 bool pending_edge_inserts_p
;
1881 static struct tm_region
*all_tm_regions
;
1882 static bitmap_obstack tm_obstack
;
1885 /* A subroutine of tm_region_init. Record the existence of the
1886 GIMPLE_TRANSACTION statement in a tree of tm_region elements. */
1888 static struct tm_region
*
1889 tm_region_init_0 (struct tm_region
*outer
, basic_block bb
,
1892 struct tm_region
*region
;
1894 region
= (struct tm_region
*)
1895 obstack_alloc (&tm_obstack
.obstack
, sizeof (struct tm_region
));
1899 region
->next
= outer
->inner
;
1900 outer
->inner
= region
;
1904 region
->next
= all_tm_regions
;
1905 all_tm_regions
= region
;
1907 region
->inner
= NULL
;
1908 region
->outer
= outer
;
1910 region
->transaction_stmt
= stmt
;
1911 region
->original_transaction_was_outer
= false;
1912 region
->tm_state
= NULL
;
1914 /* There are either one or two edges out of the block containing
1915 the GIMPLE_TRANSACTION, one to the actual region and one to the
1916 "over" label if the region contains an abort. The former will
1917 always be the one marked FALLTHRU. */
1918 region
->entry_block
= FALLTHRU_EDGE (bb
)->dest
;
1920 region
->exit_blocks
= BITMAP_ALLOC (&tm_obstack
);
1921 region
->irr_blocks
= BITMAP_ALLOC (&tm_obstack
);
1926 /* A subroutine of tm_region_init. Record all the exit and
1927 irrevocable blocks in BB into the region's exit_blocks and
1928 irr_blocks bitmaps. Returns the new region being scanned. */
1930 static struct tm_region
*
1931 tm_region_init_1 (struct tm_region
*region
, basic_block bb
)
1933 gimple_stmt_iterator gsi
;
1937 || (!region
->irr_blocks
&& !region
->exit_blocks
))
1940 /* Check to see if this is the end of a region by seeing if it
1941 contains a call to __builtin_tm_commit{,_eh}. Note that the
1942 outermost region for DECL_IS_TM_CLONE need not collect this. */
1943 for (gsi
= gsi_last_bb (bb
); !gsi_end_p (gsi
); gsi_prev (&gsi
))
1946 if (gimple_code (g
) == GIMPLE_CALL
)
1948 tree fn
= gimple_call_fndecl (g
);
1949 if (fn
&& DECL_BUILT_IN_CLASS (fn
) == BUILT_IN_NORMAL
)
1951 if ((DECL_FUNCTION_CODE (fn
) == BUILT_IN_TM_COMMIT
1952 || DECL_FUNCTION_CODE (fn
) == BUILT_IN_TM_COMMIT_EH
)
1953 && region
->exit_blocks
)
1955 bitmap_set_bit (region
->exit_blocks
, bb
->index
);
1956 region
= region
->outer
;
1959 if (DECL_FUNCTION_CODE (fn
) == BUILT_IN_TM_IRREVOCABLE
)
1960 bitmap_set_bit (region
->irr_blocks
, bb
->index
);
1967 /* Collect all of the transaction regions within the current function
1968 and record them in ALL_TM_REGIONS. The REGION parameter may specify
1969 an "outermost" region for use by tm clones. */
1972 tm_region_init (struct tm_region
*region
)
1978 auto_vec
<basic_block
> queue
;
1979 bitmap visited_blocks
= BITMAP_ALLOC (NULL
);
1980 struct tm_region
*old_region
;
1981 auto_vec
<tm_region_p
> bb_regions
;
1983 all_tm_regions
= region
;
1984 bb
= single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
));
  /* We could store this information in bb->aux, but we may get called
     through get_all_tm_blocks() from another pass that may be already
     using bb->aux.  */
1989 bb_regions
.safe_grow_cleared (last_basic_block_for_fn (cfun
));
1991 queue
.safe_push (bb
);
1992 bb_regions
[bb
->index
] = region
;
1996 region
= bb_regions
[bb
->index
];
1997 bb_regions
[bb
->index
] = NULL
;
1999 /* Record exit and irrevocable blocks. */
2000 region
= tm_region_init_1 (region
, bb
);
2002 /* Check for the last statement in the block beginning a new region. */
2004 old_region
= region
;
2006 if (gtransaction
*trans_stmt
= dyn_cast
<gtransaction
*> (g
))
2007 region
= tm_region_init_0 (region
, bb
, trans_stmt
);
2009 /* Process subsequent blocks. */
2010 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2011 if (!bitmap_bit_p (visited_blocks
, e
->dest
->index
))
2013 bitmap_set_bit (visited_blocks
, e
->dest
->index
);
2014 queue
.safe_push (e
->dest
);
2016 /* If the current block started a new region, make sure that only
2017 the entry block of the new region is associated with this region.
2018 Other successors are still part of the old region. */
2019 if (old_region
!= region
&& e
->dest
!= region
->entry_block
)
2020 bb_regions
[e
->dest
->index
] = old_region
;
2022 bb_regions
[e
->dest
->index
] = region
;
2025 while (!queue
.is_empty ());
2026 BITMAP_FREE (visited_blocks
);
2029 /* The "gate" function for all transactional memory expansion and optimization
2030 passes. We collect region information for each top-level transaction, and
2031 if we don't find any, we skip all of the TM passes. Each region will have
2032 all of the exit blocks recorded, and the originating statement. */
2040 calculate_dominance_info (CDI_DOMINATORS
);
2041 bitmap_obstack_initialize (&tm_obstack
);
2043 /* If the function is a TM_CLONE, then the entire function is the region. */
2044 if (decl_is_tm_clone (current_function_decl
))
2046 struct tm_region
*region
= (struct tm_region
*)
2047 obstack_alloc (&tm_obstack
.obstack
, sizeof (struct tm_region
));
2048 memset (region
, 0, sizeof (*region
));
2049 region
->entry_block
= single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
));
2050 /* For a clone, the entire function is the region. But even if
2051 we don't need to record any exit blocks, we may need to
2052 record irrevocable blocks. */
2053 region
->irr_blocks
= BITMAP_ALLOC (&tm_obstack
);
2055 tm_region_init (region
);
2059 tm_region_init (NULL
);
2061 /* If we didn't find any regions, cleanup and skip the whole tree
2062 of tm-related optimizations. */
2063 if (all_tm_regions
== NULL
)
2065 bitmap_obstack_release (&tm_obstack
);
2075 const pass_data pass_data_tm_init
=
2077 GIMPLE_PASS
, /* type */
2078 "*tminit", /* name */
2079 OPTGROUP_NONE
, /* optinfo_flags */
2080 TV_TRANS_MEM
, /* tv_id */
2081 ( PROP_ssa
| PROP_cfg
), /* properties_required */
2082 0, /* properties_provided */
2083 0, /* properties_destroyed */
2084 0, /* todo_flags_start */
2085 0, /* todo_flags_finish */
2088 class pass_tm_init
: public gimple_opt_pass
2091 pass_tm_init (gcc::context
*ctxt
)
2092 : gimple_opt_pass (pass_data_tm_init
, ctxt
)
2095 /* opt_pass methods: */
2096 virtual bool gate (function
*) { return gate_tm_init (); }
2098 }; // class pass_tm_init
2103 make_pass_tm_init (gcc::context
*ctxt
)
2105 return new pass_tm_init (ctxt
);
/* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
   represented by REGION.  */
2112 transaction_subcode_ior (struct tm_region
*region
, unsigned flags
)
2114 if (region
&& region
->transaction_stmt
)
2116 gtransaction
*transaction_stmt
= region
->get_transaction_stmt ();
2117 flags
|= gimple_transaction_subcode (transaction_stmt
);
2118 gimple_transaction_set_subcode (transaction_stmt
, flags
);
/* Construct a memory load in a transactional context.  Return the
   gimple statement performing the load, or NULL if there is no
   TM_LOAD builtin of the appropriate size to do the load.

   LOC is the location to use for the new statement(s).  */
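/* As an example (a sketch of the shape of the emitted GIMPLE, assuming a
   4-byte int and no target vector variant), the load

	x = g;

   inside a transaction becomes roughly

	x = _ITM_RU4 (&g);

   with a VIEW_CONVERT_EXPR through a temporary when the builtin's return
   type does not match the type of the original LHS.  */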
2129 build_tm_load (location_t loc
, tree lhs
, tree rhs
, gimple_stmt_iterator
*gsi
)
2131 enum built_in_function code
= END_BUILTINS
;
2132 tree t
, type
= TREE_TYPE (rhs
), decl
;
2135 if (type
== float_type_node
)
2136 code
= BUILT_IN_TM_LOAD_FLOAT
;
2137 else if (type
== double_type_node
)
2138 code
= BUILT_IN_TM_LOAD_DOUBLE
;
2139 else if (type
== long_double_type_node
)
2140 code
= BUILT_IN_TM_LOAD_LDOUBLE
;
2141 else if (TYPE_SIZE_UNIT (type
) != NULL
2142 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
)))
2144 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type
)))
2147 code
= BUILT_IN_TM_LOAD_1
;
2150 code
= BUILT_IN_TM_LOAD_2
;
2153 code
= BUILT_IN_TM_LOAD_4
;
2156 code
= BUILT_IN_TM_LOAD_8
;
2161 if (code
== END_BUILTINS
)
2163 decl
= targetm
.vectorize
.builtin_tm_load (type
);
2168 decl
= builtin_decl_explicit (code
);
2170 t
= gimplify_addr (gsi
, rhs
);
2171 gcall
= gimple_build_call (decl
, 1, t
);
2172 gimple_set_location (gcall
, loc
);
2174 t
= TREE_TYPE (TREE_TYPE (decl
));
2175 if (useless_type_conversion_p (type
, t
))
2177 gimple_call_set_lhs (gcall
, lhs
);
2178 gsi_insert_before (gsi
, gcall
, GSI_SAME_STMT
);
2185 temp
= create_tmp_reg (t
);
2186 gimple_call_set_lhs (gcall
, temp
);
2187 gsi_insert_before (gsi
, gcall
, GSI_SAME_STMT
);
2189 t
= fold_build1 (VIEW_CONVERT_EXPR
, type
, temp
);
2190 g
= gimple_build_assign (lhs
, t
);
2191 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
2198 /* Similarly for storing TYPE in a transactional context. */
2201 build_tm_store (location_t loc
, tree lhs
, tree rhs
, gimple_stmt_iterator
*gsi
)
2203 enum built_in_function code
= END_BUILTINS
;
2204 tree t
, fn
, type
= TREE_TYPE (rhs
), simple_type
;
2207 if (type
== float_type_node
)
2208 code
= BUILT_IN_TM_STORE_FLOAT
;
2209 else if (type
== double_type_node
)
2210 code
= BUILT_IN_TM_STORE_DOUBLE
;
2211 else if (type
== long_double_type_node
)
2212 code
= BUILT_IN_TM_STORE_LDOUBLE
;
2213 else if (TYPE_SIZE_UNIT (type
) != NULL
2214 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
)))
2216 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type
)))
2219 code
= BUILT_IN_TM_STORE_1
;
2222 code
= BUILT_IN_TM_STORE_2
;
2225 code
= BUILT_IN_TM_STORE_4
;
2228 code
= BUILT_IN_TM_STORE_8
;
2233 if (code
== END_BUILTINS
)
2235 fn
= targetm
.vectorize
.builtin_tm_store (type
);
2240 fn
= builtin_decl_explicit (code
);
2242 simple_type
= TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn
))));
2244 if (TREE_CODE (rhs
) == CONSTRUCTOR
)
2246 /* Handle the easy initialization to zero. */
2247 if (!CONSTRUCTOR_ELTS (rhs
))
2248 rhs
= build_int_cst (simple_type
, 0);
      /* ...otherwise punt to the caller and probably use
	 BUILT_IN_TM_MEMMOVE, because we can't wrap a
	 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
	 valid gimple.  */
2258 else if (!useless_type_conversion_p (simple_type
, type
))
2263 temp
= create_tmp_reg (simple_type
);
2264 t
= fold_build1 (VIEW_CONVERT_EXPR
, simple_type
, rhs
);
2265 g
= gimple_build_assign (temp
, t
);
2266 gimple_set_location (g
, loc
);
2267 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
2272 t
= gimplify_addr (gsi
, lhs
);
2273 gcall
= gimple_build_call (fn
, 2, t
, rhs
);
2274 gimple_set_location (gcall
, loc
);
2275 gsi_insert_before (gsi
, gcall
, GSI_SAME_STMT
);
/* Expand an assignment statement into transactional builtins.  */

static void
expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  bool store_p = requires_barrier (region->entry_block, lhs, NULL);
  bool load_p = requires_barrier (region->entry_block, rhs, NULL);
  gimple gcall = NULL;

  if (!load_p && !store_p)
    {
      /* Add thread private addresses to log if applicable.  */
      requires_barrier (region->entry_block, lhs, stmt);
      gsi_next (gsi);
      return;
    }

  // Remove original load/store statement.
  gsi_remove (gsi, true);

  if (load_p && !store_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      gcall = build_tm_load (loc, lhs, rhs, gsi);
    }
  else if (store_p && !load_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
      gcall = build_tm_store (loc, lhs, rhs, gsi);
    }
  if (!gcall)
    {
      tree lhs_addr, rhs_addr, tmp;

      if (load_p)
	transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      if (store_p)
	transaction_subcode_ior (region, GTMA_HAVE_STORE);

      /* ??? Figure out if there's any possible overlap between the LHS
	 and the RHS and if not, use MEMCPY.  */

      if (load_p && is_gimple_reg (lhs))
	{
	  tmp = create_tmp_var (TREE_TYPE (lhs));
	  lhs_addr = build_fold_addr_expr (tmp);
	}
      else
	{
	  tmp = NULL_TREE;
	  lhs_addr = gimplify_addr (gsi, lhs);
	}
      rhs_addr = gimplify_addr (gsi, rhs);
      gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
				 3, lhs_addr, rhs_addr,
				 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
      gimple_set_location (gcall, loc);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

      if (tmp)
	{
	  gcall = gimple_build_assign (lhs, tmp);
	  gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
	}
    }

  /* Now that we have the load/store in its instrumented form, add
     thread private addresses to the log if applicable.  */
  if (!store_p)
    requires_barrier (region->entry_block, lhs, gcall);

  // The calls to build_tm_{store,load} above inserted the instrumented
  // call into the stream.
  // gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
}
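/* Illustrative sketch (not compiler code): at the source level, the cases
   handled by expand_assign_tm correspond roughly to the calls below.  The
   helper names tm_load_u32, tm_store_u32 and tm_memmove are hypothetical
   stand-ins for the BUILT_IN_TM_* builtins; they are not libitm entry
   points.

     #include <stddef.h>
     #include <stdint.h>

     extern uint32_t tm_load_u32 (const uint32_t *addr);
     extern void tm_store_u32 (uint32_t *addr, uint32_t val);
     extern void tm_memmove (void *dst, const void *src, size_t len);

     struct big { char buf[64]; };

     void
     example (uint32_t *dst, const uint32_t *src,
              struct big *d, const struct big *s)
     {
       // load_p && !store_p: a read of transactional memory becomes a TM load.
       uint32_t tmp = tm_load_u32 (src);

       // store_p && !load_p: a write becomes a TM store.
       tm_store_u32 (dst, tmp);

       // When neither simple form applies (e.g. an aggregate copy), the
       // assignment is lowered to BUILT_IN_TM_MEMMOVE with the LHS size.
       tm_memmove (d, s, sizeof (struct big));
     }
*/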
2362 /* Expand a call statement as appropriate for a transaction. That is,
2363 either verify that the call does not affect the transaction, or
2364 redirect the call to a clone that handles transactions, or change
2365 the transaction state to IRREVOCABLE. Return true if the call is
2366 one of the builtins that end a transaction. */
2369 expand_call_tm (struct tm_region
*region
,
2370 gimple_stmt_iterator
*gsi
)
2372 gcall
*stmt
= as_a
<gcall
*> (gsi_stmt (*gsi
));
2373 tree lhs
= gimple_call_lhs (stmt
);
2375 struct cgraph_node
*node
;
2376 bool retval
= false;
2378 fn_decl
= gimple_call_fndecl (stmt
);
2380 if (fn_decl
== builtin_decl_explicit (BUILT_IN_TM_MEMCPY
)
2381 || fn_decl
== builtin_decl_explicit (BUILT_IN_TM_MEMMOVE
))
2382 transaction_subcode_ior (region
, GTMA_HAVE_STORE
| GTMA_HAVE_LOAD
);
2383 if (fn_decl
== builtin_decl_explicit (BUILT_IN_TM_MEMSET
))
2384 transaction_subcode_ior (region
, GTMA_HAVE_STORE
);
2386 if (is_tm_pure_call (stmt
))
2390 retval
= is_tm_ending_fndecl (fn_decl
);
2393 /* Assume all non-const/pure calls write to memory, except
2394 transaction ending builtins. */
2395 transaction_subcode_ior (region
, GTMA_HAVE_STORE
);
2398 /* For indirect calls, we already generated a call into the runtime. */
2401 tree fn
= gimple_call_fn (stmt
);
2403 /* We are guaranteed never to go irrevocable on a safe or pure
2404 call, and the pure call was handled above. */
2405 if (is_tm_safe (fn
))
2408 transaction_subcode_ior (region
, GTMA_MAY_ENTER_IRREVOCABLE
);
2413 node
= cgraph_node::get (fn_decl
);
2414 /* All calls should have cgraph here. */
2417 /* We can have a nodeless call here if some pass after IPA-tm
2418 added uninstrumented calls. For example, loop distribution
2419 can transform certain loop constructs into __builtin_mem*
2420 calls. In this case, see if we have a suitable TM
2421 replacement and fill in the gaps. */
2422 gcc_assert (DECL_BUILT_IN_CLASS (fn_decl
) == BUILT_IN_NORMAL
);
2423 enum built_in_function code
= DECL_FUNCTION_CODE (fn_decl
);
2424 gcc_assert (code
== BUILT_IN_MEMCPY
2425 || code
== BUILT_IN_MEMMOVE
2426 || code
== BUILT_IN_MEMSET
);
2428 tree repl
= find_tm_replacement_function (fn_decl
);
2431 gimple_call_set_fndecl (stmt
, repl
);
2433 node
= cgraph_node::create (repl
);
2434 node
->local
.tm_may_enter_irr
= false;
2435 return expand_call_tm (region
, gsi
);
2439 if (node
->local
.tm_may_enter_irr
)
2440 transaction_subcode_ior (region
, GTMA_MAY_ENTER_IRREVOCABLE
);
2442 if (is_tm_abort (fn_decl
))
2444 transaction_subcode_ior (region
, GTMA_HAVE_ABORT
);
2448 /* Instrument the store if needed.
2450 If the assignment happens inside the function call (return slot
2451 optimization), there is no instrumentation to be done, since
2452 the callee should have done the right thing. */
2453 if (lhs
&& requires_barrier (region
->entry_block
, lhs
, stmt
)
2454 && !gimple_call_return_slot_opt_p (stmt
))
2456 tree tmp
= create_tmp_reg (TREE_TYPE (lhs
));
2457 location_t loc
= gimple_location (stmt
);
2458 edge fallthru_edge
= NULL
;
2459 gassign
*assign_stmt
;
2461 /* Remember if the call was going to throw. */
2462 if (stmt_can_throw_internal (stmt
))
2466 basic_block bb
= gimple_bb (stmt
);
2468 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2469 if (e
->flags
& EDGE_FALLTHRU
)
2476 gimple_call_set_lhs (stmt
, tmp
);
2478 assign_stmt
= gimple_build_assign (lhs
, tmp
);
2479 gimple_set_location (assign_stmt
, loc
);
2481 /* We cannot throw in the middle of a BB. If the call was going
2482 to throw, place the instrumentation on the fallthru edge, so
2483 the call remains the last statement in the block. */
2486 gimple_seq fallthru_seq
= gimple_seq_alloc_with_stmt (assign_stmt
);
2487 gimple_stmt_iterator fallthru_gsi
= gsi_start (fallthru_seq
);
2488 expand_assign_tm (region
, &fallthru_gsi
);
2489 gsi_insert_seq_on_edge (fallthru_edge
, fallthru_seq
);
2490 pending_edge_inserts_p
= true;
2494 gsi_insert_after (gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
2495 expand_assign_tm (region
, gsi
);
2498 transaction_subcode_ior (region
, GTMA_HAVE_STORE
);
2505 /* Expand all statements in BB as appropriate for being inside
2509 expand_block_tm (struct tm_region
*region
, basic_block bb
)
2511 gimple_stmt_iterator gsi
;
2513 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); )
2515 gimple stmt
= gsi_stmt (gsi
);
2516 switch (gimple_code (stmt
))
2519 /* Only memory reads/writes need to be instrumented. */
2520 if (gimple_assign_single_p (stmt
)
2521 && !gimple_clobber_p (stmt
))
2523 expand_assign_tm (region
, &gsi
);
2529 if (expand_call_tm (region
, &gsi
))
2539 if (!gsi_end_p (gsi
))
/* Return the list of basic-blocks in REGION.

   STOP_AT_IRREVOCABLE_P is true if caller is uninterested in blocks
   following a TM_IRREVOCABLE call.

   INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the
   uninstrumented code path blocks in the list of basic blocks
   returned, false otherwise.  */

static vec<basic_block>
get_tm_region_blocks (basic_block entry_block,
		      bitmap exit_blocks,
		      bitmap irr_blocks,
		      bitmap all_region_blocks,
		      bool stop_at_irrevocable_p,
		      bool include_uninstrumented_p = true)
{
  vec<basic_block> bbs = vNULL;
  unsigned i;
  edge e;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  i = 0;
  bbs.safe_push (entry_block);
  bitmap_set_bit (visited_blocks, entry_block->index);

  do
    {
      basic_block bb = bbs[i++];

      if (exit_blocks
	  && bitmap_bit_p (exit_blocks, bb->index))
	continue;

      if (stop_at_irrevocable_p
	  && irr_blocks
	  && bitmap_bit_p (irr_blocks, bb->index))
	continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if ((include_uninstrumented_p
	     || !(e->flags & EDGE_TM_UNINSTRUMENTED))
	    && !bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    bbs.safe_push (e->dest);
	  }
    }
  while (i < bbs.length ());

  if (all_region_blocks)
    bitmap_ior_into (all_region_blocks, visited_blocks);

  BITMAP_FREE (visited_blocks);
  return bbs;
}
2602 // Callback data for collect_bb2reg.
2605 vec
<tm_region_p
> *bb2reg
;
2606 bool include_uninstrumented_p
;
2609 // Callback for expand_regions, collect innermost region data for each bb.
2611 collect_bb2reg (struct tm_region
*region
, void *data
)
2613 struct bb2reg_stuff
*stuff
= (struct bb2reg_stuff
*)data
;
2614 vec
<tm_region_p
> *bb2reg
= stuff
->bb2reg
;
2615 vec
<basic_block
> queue
;
2619 queue
= get_tm_region_blocks (region
->entry_block
,
2620 region
->exit_blocks
,
2623 /*stop_at_irr_p=*/true,
2624 stuff
->include_uninstrumented_p
);
2626 // We expect expand_region to perform a post-order traversal of the region
2627 // tree. Therefore the last region seen for any bb is the innermost.
2628 FOR_EACH_VEC_ELT (queue
, i
, bb
)
2629 (*bb2reg
)[bb
->index
] = region
;
2635 // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
2636 // which a basic block belongs. Note that we only consider the instrumented
2637 // code paths for the region; the uninstrumented code paths are ignored if
2638 // INCLUDE_UNINSTRUMENTED_P is false.
2640 // ??? This data is very similar to the bb_regions array that is collected
2641 // during tm_region_init. Or, rather, this data is similar to what could
2642 // be used within tm_region_init. The actual computation in tm_region_init
2643 // begins and ends with bb_regions entirely full of NULL pointers, due to
2644 // the way in which pointers are swapped in and out of the array.
2646 // ??? Our callers expect that blocks are not shared between transactions.
2647 // When the optimizers get too smart, and blocks are shared, then during
2648 // the tm_mark phase we'll add log entries to only one of the two transactions,
2649 // and in the tm_edge phase we'll add edges to the CFG that create invalid
2650 // cycles. The symptom being SSA defs that do not dominate their uses.
2651 // Note that the optimizers were locally correct with their transformation,
2652 // as we have no info within the program that suggests that the blocks cannot
2655 // ??? There is currently a hack inside tree-ssa-pre.c to work around the
2656 // only known instance of this block sharing.
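/* Illustrative sketch (not compiler code): the bb-to-region map is filled by
   letting later-visited (inner) regions overwrite earlier (outer) ones, so
   the final entry for each block is its innermost region.  A stand-alone
   model with ints in place of tm_region pointers:

     #include <stdio.h>

     #define N_BLOCKS 4

     static int bb2reg[N_BLOCKS];

     static void
     record_region (int region, const int *blocks, int n)
     {
       // The last region recorded for a block wins.
       for (int i = 0; i < n; i++)
         bb2reg[blocks[i]] = region;
     }

     int
     main (void)
     {
       int outer_blocks[] = { 0, 1, 2, 3 };
       int inner_blocks[] = { 2, 3 };
       record_region (1, outer_blocks, 4);   // outer region seen first
       record_region (2, inner_blocks, 2);   // inner region seen last
       for (int i = 0; i < N_BLOCKS; i++)
         printf ("bb %d -> region %d\n", i, bb2reg[i]);
       return 0;
     }
*/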
2658 static vec
<tm_region_p
>
2659 get_bb_regions_instrumented (bool traverse_clones
,
2660 bool include_uninstrumented_p
)
2662 unsigned n
= last_basic_block_for_fn (cfun
);
2663 struct bb2reg_stuff stuff
;
2664 vec
<tm_region_p
> ret
;
2667 ret
.safe_grow_cleared (n
);
2668 stuff
.bb2reg
= &ret
;
2669 stuff
.include_uninstrumented_p
= include_uninstrumented_p
;
2670 expand_regions (all_tm_regions
, collect_bb2reg
, &stuff
, traverse_clones
);
2675 /* Set the IN_TRANSACTION for all gimple statements that appear in a
2679 compute_transaction_bits (void)
2681 struct tm_region
*region
;
2682 vec
<basic_block
> queue
;
2686 /* ?? Perhaps we need to abstract gate_tm_init further, because we
2687 certainly don't need it to calculate CDI_DOMINATOR info. */
2690 FOR_EACH_BB_FN (bb
, cfun
)
2691 bb
->flags
&= ~BB_IN_TRANSACTION
;
2693 for (region
= all_tm_regions
; region
; region
= region
->next
)
2695 queue
= get_tm_region_blocks (region
->entry_block
,
2696 region
->exit_blocks
,
2699 /*stop_at_irr_p=*/true);
2700 for (i
= 0; queue
.iterate (i
, &bb
); ++i
)
2701 bb
->flags
|= BB_IN_TRANSACTION
;
2706 bitmap_obstack_release (&tm_obstack
);
2709 /* Replace the GIMPLE_TRANSACTION in this region with the corresponding
2710 call to BUILT_IN_TM_START. */
2713 expand_transaction (struct tm_region
*region
, void *data ATTRIBUTE_UNUSED
)
2715 tree tm_start
= builtin_decl_explicit (BUILT_IN_TM_START
);
2716 basic_block transaction_bb
= gimple_bb (region
->transaction_stmt
);
2717 tree tm_state
= region
->tm_state
;
2718 tree tm_state_type
= TREE_TYPE (tm_state
);
2719 edge abort_edge
= NULL
;
2720 edge inst_edge
= NULL
;
2721 edge uninst_edge
= NULL
;
2722 edge fallthru_edge
= NULL
;
2724 // Identify the various successors of the transaction start.
2728 FOR_EACH_EDGE (e
, i
, transaction_bb
->succs
)
2730 if (e
->flags
& EDGE_TM_ABORT
)
2732 else if (e
->flags
& EDGE_TM_UNINSTRUMENTED
)
2736 if (e
->flags
& EDGE_FALLTHRU
)
2741 /* ??? There are plenty of bits here we're not computing. */
2743 int subcode
= gimple_transaction_subcode (region
->get_transaction_stmt ());
2745 if (subcode
& GTMA_DOES_GO_IRREVOCABLE
)
2746 flags
|= PR_DOESGOIRREVOCABLE
;
2747 if ((subcode
& GTMA_MAY_ENTER_IRREVOCABLE
) == 0)
2748 flags
|= PR_HASNOIRREVOCABLE
;
2749 /* If the transaction does not have an abort in lexical scope and is not
2750 marked as an outer transaction, then it will never abort. */
2751 if ((subcode
& GTMA_HAVE_ABORT
) == 0 && (subcode
& GTMA_IS_OUTER
) == 0)
2752 flags
|= PR_HASNOABORT
;
2753 if ((subcode
& GTMA_HAVE_STORE
) == 0)
2754 flags
|= PR_READONLY
;
2755 if (inst_edge
&& !(subcode
& GTMA_HAS_NO_INSTRUMENTATION
))
2756 flags
|= PR_INSTRUMENTEDCODE
;
2758 flags
|= PR_UNINSTRUMENTEDCODE
;
2759 if (subcode
& GTMA_IS_OUTER
)
2760 region
->original_transaction_was_outer
= true;
2761 tree t
= build_int_cst (tm_state_type
, flags
);
2762 gcall
*call
= gimple_build_call (tm_start
, 1, t
);
2763 gimple_call_set_lhs (call
, tm_state
);
2764 gimple_set_location (call
, gimple_location (region
->transaction_stmt
));
2766 // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
2767 gimple_stmt_iterator gsi
= gsi_last_bb (transaction_bb
);
2768 gcc_assert (gsi_stmt (gsi
) == region
->transaction_stmt
);
2769 gsi_insert_before (&gsi
, call
, GSI_SAME_STMT
);
2770 gsi_remove (&gsi
, true);
2771 region
->transaction_stmt
= call
;
2774 // Generate log saves.
2775 if (!tm_log_save_addresses
.is_empty ())
2776 tm_log_emit_saves (region
->entry_block
, transaction_bb
);
2778 // In the beginning, we've no tests to perform on transaction restart.
2779 // Note that after this point, transaction_bb becomes the "most recent
2780 // block containing tests for the transaction".
2781 region
->restart_block
= region
->entry_block
;
2783 // Generate log restores.
2784 if (!tm_log_save_addresses
.is_empty ())
2786 basic_block test_bb
= create_empty_bb (transaction_bb
);
2787 basic_block code_bb
= create_empty_bb (test_bb
);
2788 basic_block join_bb
= create_empty_bb (code_bb
);
2789 add_bb_to_loop (test_bb
, transaction_bb
->loop_father
);
2790 add_bb_to_loop (code_bb
, transaction_bb
->loop_father
);
2791 add_bb_to_loop (join_bb
, transaction_bb
->loop_father
);
2792 if (region
->restart_block
== region
->entry_block
)
2793 region
->restart_block
= test_bb
;
2795 tree t1
= create_tmp_reg (tm_state_type
);
2796 tree t2
= build_int_cst (tm_state_type
, A_RESTORELIVEVARIABLES
);
2797 gimple stmt
= gimple_build_assign (t1
, BIT_AND_EXPR
, tm_state
, t2
);
2798 gimple_stmt_iterator gsi
= gsi_last_bb (test_bb
);
2799 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
2801 t2
= build_int_cst (tm_state_type
, 0);
2802 stmt
= gimple_build_cond (NE_EXPR
, t1
, t2
, NULL
, NULL
);
2803 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
2805 tm_log_emit_restores (region
->entry_block
, code_bb
);
2807 edge ei
= make_edge (transaction_bb
, test_bb
, EDGE_FALLTHRU
);
2808 edge et
= make_edge (test_bb
, code_bb
, EDGE_TRUE_VALUE
);
2809 edge ef
= make_edge (test_bb
, join_bb
, EDGE_FALSE_VALUE
);
2810 redirect_edge_pred (fallthru_edge
, join_bb
);
2812 join_bb
->frequency
= test_bb
->frequency
= transaction_bb
->frequency
;
2813 join_bb
->count
= test_bb
->count
= transaction_bb
->count
;
2815 ei
->probability
= PROB_ALWAYS
;
2816 et
->probability
= PROB_LIKELY
;
2817 ef
->probability
= PROB_UNLIKELY
;
2818 et
->count
= apply_probability (test_bb
->count
, et
->probability
);
2819 ef
->count
= apply_probability (test_bb
->count
, ef
->probability
);
2821 code_bb
->count
= et
->count
;
2822 code_bb
->frequency
= EDGE_FREQUENCY (et
);
2824 transaction_bb
= join_bb
;
2827 // If we have an ABORT edge, create a test to perform the abort.
2830 basic_block test_bb
= create_empty_bb (transaction_bb
);
2831 add_bb_to_loop (test_bb
, transaction_bb
->loop_father
);
2832 if (region
->restart_block
== region
->entry_block
)
2833 region
->restart_block
= test_bb
;
2835 tree t1
= create_tmp_reg (tm_state_type
);
2836 tree t2
= build_int_cst (tm_state_type
, A_ABORTTRANSACTION
);
2837 gimple stmt
= gimple_build_assign (t1
, BIT_AND_EXPR
, tm_state
, t2
);
2838 gimple_stmt_iterator gsi
= gsi_last_bb (test_bb
);
2839 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
2841 t2
= build_int_cst (tm_state_type
, 0);
2842 stmt
= gimple_build_cond (NE_EXPR
, t1
, t2
, NULL
, NULL
);
2843 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
2845 edge ei
= make_edge (transaction_bb
, test_bb
, EDGE_FALLTHRU
);
2846 test_bb
->frequency
= transaction_bb
->frequency
;
2847 test_bb
->count
= transaction_bb
->count
;
2848 ei
->probability
= PROB_ALWAYS
;
      // Not abort edge.  If both are live, choose one at random, as we'll
      // be fixing that up below.
2852 redirect_edge_pred (fallthru_edge
, test_bb
);
2853 fallthru_edge
->flags
= EDGE_FALSE_VALUE
;
2854 fallthru_edge
->probability
= PROB_VERY_LIKELY
;
2855 fallthru_edge
->count
2856 = apply_probability (test_bb
->count
, fallthru_edge
->probability
);
2859 redirect_edge_pred (abort_edge
, test_bb
);
2860 abort_edge
->flags
= EDGE_TRUE_VALUE
;
2861 abort_edge
->probability
= PROB_VERY_UNLIKELY
;
2863 = apply_probability (test_bb
->count
, abort_edge
->probability
);
2865 transaction_bb
= test_bb
;
2868 // If we have both instrumented and uninstrumented code paths, select one.
2869 if (inst_edge
&& uninst_edge
)
2871 basic_block test_bb
= create_empty_bb (transaction_bb
);
2872 add_bb_to_loop (test_bb
, transaction_bb
->loop_father
);
2873 if (region
->restart_block
== region
->entry_block
)
2874 region
->restart_block
= test_bb
;
2876 tree t1
= create_tmp_reg (tm_state_type
);
2877 tree t2
= build_int_cst (tm_state_type
, A_RUNUNINSTRUMENTEDCODE
);
2879 gimple stmt
= gimple_build_assign (t1
, BIT_AND_EXPR
, tm_state
, t2
);
2880 gimple_stmt_iterator gsi
= gsi_last_bb (test_bb
);
2881 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
2883 t2
= build_int_cst (tm_state_type
, 0);
2884 stmt
= gimple_build_cond (NE_EXPR
, t1
, t2
, NULL
, NULL
);
2885 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
2887 // Create the edge into test_bb first, as we want to copy values
2888 // out of the fallthru edge.
2889 edge e
= make_edge (transaction_bb
, test_bb
, fallthru_edge
->flags
);
2890 e
->probability
= fallthru_edge
->probability
;
2891 test_bb
->count
= e
->count
= fallthru_edge
->count
;
2892 test_bb
->frequency
= EDGE_FREQUENCY (e
);
      // Now update the edges to the inst/uninst implementations.
2895 // For now assume that the paths are equally likely. When using HTM,
2896 // we'll try the uninst path first and fallback to inst path if htm
2897 // buffers are exceeded. Without HTM we start with the inst path and
2898 // use the uninst path when falling back to serial mode.
2899 redirect_edge_pred (inst_edge
, test_bb
);
2900 inst_edge
->flags
= EDGE_FALSE_VALUE
;
2901 inst_edge
->probability
= REG_BR_PROB_BASE
/ 2;
2903 = apply_probability (test_bb
->count
, inst_edge
->probability
);
2905 redirect_edge_pred (uninst_edge
, test_bb
);
2906 uninst_edge
->flags
= EDGE_TRUE_VALUE
;
2907 uninst_edge
->probability
= REG_BR_PROB_BASE
/ 2;
2909 = apply_probability (test_bb
->count
, uninst_edge
->probability
);
2912 // If we have no previous special cases, and we have PHIs at the beginning
2913 // of the atomic region, this means we have a loop at the beginning of the
2914 // atomic region that shares the first block. This can cause problems with
2915 // the transaction restart abnormal edges to be added in the tm_edges pass.
2916 // Solve this by adding a new empty block to receive the abnormal edges.
2917 if (region
->restart_block
== region
->entry_block
2918 && phi_nodes (region
->entry_block
))
2920 basic_block empty_bb
= create_empty_bb (transaction_bb
);
2921 region
->restart_block
= empty_bb
;
2922 add_bb_to_loop (empty_bb
, transaction_bb
->loop_father
);
2924 redirect_edge_pred (fallthru_edge
, empty_bb
);
2925 make_edge (transaction_bb
, empty_bb
, EDGE_FALLTHRU
);
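/* Illustrative sketch (not compiler code): the control flow stitched together
   by expand_transaction behaves roughly like the pseudo-C below.  tm_start
   stands in for BUILT_IN_TM_START and the A_* masks are the ones defined
   near the top of this file; the other function names are purely
   illustrative.

     extern unsigned int tm_start (unsigned int properties);
     extern void restore_live_variables (void);
     extern void abort_path (void);
     extern void instrumented_body (void);
     extern void uninstrumented_body (void);

     void
     transaction_shape (unsigned int properties)
     {
       // Restart edges added by the tmedge pass come back to the tests below.
       unsigned int tm_state = tm_start (properties);

       if (tm_state & A_RESTORELIVEVARIABLES)   // emitted only with log saves
         restore_live_variables ();

       if (tm_state & A_ABORTTRANSACTION)       // emitted only with an abort edge
         {
           abort_path ();
           return;
         }

       if (tm_state & A_RUNUNINSTRUMENTEDCODE)  // only when both paths exist
         uninstrumented_body ();
       else
         instrumented_body ();
     }
*/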
/* Generate the temporary to be used for the return value of
   BUILT_IN_TM_START.  */

static void *
generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
{
  tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
  region->tm_state =
    create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");

  // Reset the subcode, post optimizations.  We'll fill this in
  // again as we process blocks.
  if (region->exit_blocks)
    {
      gtransaction *transaction_stmt = region->get_transaction_stmt ();
      unsigned int subcode = gimple_transaction_subcode (transaction_stmt);

      if (subcode & GTMA_DOES_GO_IRREVOCABLE)
	subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
		    | GTMA_MAY_ENTER_IRREVOCABLE
		    | GTMA_HAS_NO_INSTRUMENTATION);
      else
	subcode &= GTMA_DECLARATION_MASK;
      gimple_transaction_set_subcode (transaction_stmt, subcode);
    }

  return NULL;
}
// Propagate flags from inner transactions outwards.
static void
propagate_tm_flags_out (struct tm_region *region)
{
  if (region == NULL)
    return;
  propagate_tm_flags_out (region->inner);

  if (region->outer && region->outer->transaction_stmt)
    {
      unsigned s
	= gimple_transaction_subcode (region->get_transaction_stmt ());
      s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
	    | GTMA_MAY_ENTER_IRREVOCABLE);
      s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ());
      gimple_transaction_set_subcode (region->outer->get_transaction_stmt (),
				      s);
    }

  propagate_tm_flags_out (region->next);
}
2982 /* Entry point to the MARK phase of TM expansion. Here we replace
2983 transactional memory statements with calls to builtins, and function
2984 calls with their transactional clones (if available). But we don't
2985 yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */
2988 execute_tm_mark (void)
2990 pending_edge_inserts_p
= false;
2992 expand_regions (all_tm_regions
, generate_tm_state
, NULL
,
2993 /*traverse_clones=*/true);
2997 vec
<tm_region_p
> bb_regions
2998 = get_bb_regions_instrumented (/*traverse_clones=*/true,
2999 /*include_uninstrumented_p=*/false);
3000 struct tm_region
*r
;
3003 // Expand memory operations into calls into the runtime.
3004 // This collects log entries as well.
3005 FOR_EACH_VEC_ELT (bb_regions
, i
, r
)
3009 if (r
->transaction_stmt
)
3012 = gimple_transaction_subcode (r
->get_transaction_stmt ());
3014 /* If we're sure to go irrevocable, there won't be
3015 anything to expand, since the run-time will go
3016 irrevocable right away. */
3017 if (sub
& GTMA_DOES_GO_IRREVOCABLE
3018 && sub
& GTMA_MAY_ENTER_IRREVOCABLE
)
3021 expand_block_tm (r
, BASIC_BLOCK_FOR_FN (cfun
, i
));
3025 bb_regions
.release ();
3027 // Propagate flags from inner transactions outwards.
3028 propagate_tm_flags_out (all_tm_regions
);
3030 // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
3031 expand_regions (all_tm_regions
, expand_transaction
, NULL
,
3032 /*traverse_clones=*/false);
3037 if (pending_edge_inserts_p
)
3038 gsi_commit_edge_inserts ();
3039 free_dominance_info (CDI_DOMINATORS
);
3045 const pass_data pass_data_tm_mark
=
3047 GIMPLE_PASS
, /* type */
3048 "tmmark", /* name */
3049 OPTGROUP_NONE
, /* optinfo_flags */
3050 TV_TRANS_MEM
, /* tv_id */
3051 ( PROP_ssa
| PROP_cfg
), /* properties_required */
3052 0, /* properties_provided */
3053 0, /* properties_destroyed */
3054 0, /* todo_flags_start */
3055 TODO_update_ssa
, /* todo_flags_finish */
3058 class pass_tm_mark
: public gimple_opt_pass
3061 pass_tm_mark (gcc::context
*ctxt
)
3062 : gimple_opt_pass (pass_data_tm_mark
, ctxt
)
3065 /* opt_pass methods: */
3066 virtual unsigned int execute (function
*) { return execute_tm_mark (); }
3068 }; // class pass_tm_mark
3073 make_pass_tm_mark (gcc::context
*ctxt
)
3075 return new pass_tm_mark (ctxt
);
3079 /* Create an abnormal edge from STMT at iter, splitting the block
3080 as necessary. Adjust *PNEXT as needed for the split block. */
3083 split_bb_make_tm_edge (gimple stmt
, basic_block dest_bb
,
3084 gimple_stmt_iterator iter
, gimple_stmt_iterator
*pnext
)
3086 basic_block bb
= gimple_bb (stmt
);
3087 if (!gsi_one_before_end_p (iter
))
3089 edge e
= split_block (bb
, stmt
);
3090 *pnext
= gsi_start_bb (e
->dest
);
3092 make_edge (bb
, dest_bb
, EDGE_ABNORMAL
);
3094 // Record the need for the edge for the benefit of the rtl passes.
3095 if (cfun
->gimple_df
->tm_restart
== NULL
)
3096 cfun
->gimple_df
->tm_restart
3097 = hash_table
<tm_restart_hasher
>::create_ggc (31);
3099 struct tm_restart_node dummy
;
3101 dummy
.label_or_list
= gimple_block_label (dest_bb
);
3103 tm_restart_node
**slot
= cfun
->gimple_df
->tm_restart
->find_slot (&dummy
,
3105 struct tm_restart_node
*n
= *slot
;
3108 n
= ggc_alloc
<tm_restart_node
> ();
3113 tree old
= n
->label_or_list
;
3114 if (TREE_CODE (old
) == LABEL_DECL
)
3115 old
= tree_cons (NULL
, old
, NULL
);
3116 n
->label_or_list
= tree_cons (NULL
, dummy
.label_or_list
, old
);
3120 /* Split block BB as necessary for every builtin function we added, and
3121 wire up the abnormal back edges implied by the transaction restart. */
3124 expand_block_edges (struct tm_region
*const region
, basic_block bb
)
3126 gimple_stmt_iterator gsi
, next_gsi
;
3128 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi
= next_gsi
)
3130 gimple stmt
= gsi_stmt (gsi
);
3134 gsi_next (&next_gsi
);
3136 // ??? Shouldn't we split for any non-pure, non-irrevocable function?
3137 call_stmt
= dyn_cast
<gcall
*> (stmt
);
3139 || (gimple_call_flags (call_stmt
) & ECF_TM_BUILTIN
) == 0)
3142 if (DECL_FUNCTION_CODE (gimple_call_fndecl (call_stmt
))
3143 == BUILT_IN_TM_ABORT
)
3145 // If we have a ``_transaction_cancel [[outer]]'', there is only
3146 // one abnormal edge: to the transaction marked OUTER.
3147 // All compiler-generated instances of BUILT_IN_TM_ABORT have a
3148 // constant argument, which we can examine here. Users invoking
3149 // TM_ABORT directly get what they deserve.
3150 tree arg
= gimple_call_arg (call_stmt
, 0);
3151 if (TREE_CODE (arg
) == INTEGER_CST
3152 && (TREE_INT_CST_LOW (arg
) & AR_OUTERABORT
) != 0
3153 && !decl_is_tm_clone (current_function_decl
))
3155 // Find the GTMA_IS_OUTER transaction.
3156 for (struct tm_region
*o
= region
; o
; o
= o
->outer
)
3157 if (o
->original_transaction_was_outer
)
3159 split_bb_make_tm_edge (call_stmt
, o
->restart_block
,
3164 // Otherwise, the front-end should have semantically checked
3165 // outer aborts, but in either case the target region is not
3166 // within this function.
3170 // Non-outer, TM aborts have an abnormal edge to the inner-most
3171 // transaction, the one being aborted;
3172 split_bb_make_tm_edge (call_stmt
, region
->restart_block
, gsi
,
3176 // All TM builtins have an abnormal edge to the outer-most transaction.
3177 // We never restart inner transactions. For tm clones, we know a-priori
3178 // that the outer-most transaction is outside the function.
3179 if (decl_is_tm_clone (current_function_decl
))
3182 if (cfun
->gimple_df
->tm_restart
== NULL
)
3183 cfun
->gimple_df
->tm_restart
3184 = hash_table
<tm_restart_hasher
>::create_ggc (31);
3186 // All TM builtins have an abnormal edge to the outer-most transaction.
3187 // We never restart inner transactions.
3188 for (struct tm_region
*o
= region
; o
; o
= o
->outer
)
3191 split_bb_make_tm_edge (call_stmt
, o
->restart_block
, gsi
, &next_gsi
);
3195 // Delete any tail-call annotation that may have been added.
3196 // The tail-call pass may have mis-identified the commit as being
3197 // a candidate because we had not yet added this restart edge.
3198 gimple_call_set_tail (call_stmt
, false);
3202 /* Entry point to the final expansion of transactional nodes. */
3206 const pass_data pass_data_tm_edges
=
3208 GIMPLE_PASS
, /* type */
3209 "tmedge", /* name */
3210 OPTGROUP_NONE
, /* optinfo_flags */
3211 TV_TRANS_MEM
, /* tv_id */
3212 ( PROP_ssa
| PROP_cfg
), /* properties_required */
3213 0, /* properties_provided */
3214 0, /* properties_destroyed */
3215 0, /* todo_flags_start */
3216 TODO_update_ssa
, /* todo_flags_finish */
3219 class pass_tm_edges
: public gimple_opt_pass
3222 pass_tm_edges (gcc::context
*ctxt
)
3223 : gimple_opt_pass (pass_data_tm_edges
, ctxt
)
3226 /* opt_pass methods: */
3227 virtual unsigned int execute (function
*);
3229 }; // class pass_tm_edges
3232 pass_tm_edges::execute (function
*fun
)
3234 vec
<tm_region_p
> bb_regions
3235 = get_bb_regions_instrumented (/*traverse_clones=*/false,
3236 /*include_uninstrumented_p=*/true);
3237 struct tm_region
*r
;
3240 FOR_EACH_VEC_ELT (bb_regions
, i
, r
)
3242 expand_block_edges (r
, BASIC_BLOCK_FOR_FN (fun
, i
));
3244 bb_regions
.release ();
3246 /* We've got to release the dominance info now, to indicate that it
3247 must be rebuilt completely. Otherwise we'll crash trying to update
3248 the SSA web in the TODO section following this pass. */
3249 free_dominance_info (CDI_DOMINATORS
);
3250 bitmap_obstack_release (&tm_obstack
);
3251 all_tm_regions
= NULL
;
3259 make_pass_tm_edges (gcc::context
*ctxt
)
3261 return new pass_tm_edges (ctxt
);
3264 /* Helper function for expand_regions. Expand REGION and recurse to
3265 the inner region. Call CALLBACK on each region. CALLBACK returns
3266 NULL to continue the traversal, otherwise a non-null value which
3267 this function will return as well. TRAVERSE_CLONES is true if we
3268 should traverse transactional clones. */
3271 expand_regions_1 (struct tm_region
*region
,
3272 void *(*callback
)(struct tm_region
*, void *),
3274 bool traverse_clones
)
3276 void *retval
= NULL
;
3277 if (region
->exit_blocks
3278 || (traverse_clones
&& decl_is_tm_clone (current_function_decl
)))
3280 retval
= callback (region
, data
);
3286 retval
= expand_regions (region
->inner
, callback
, data
, traverse_clones
);
3293 /* Traverse the regions enclosed and including REGION. Execute
3294 CALLBACK for each region, passing DATA. CALLBACK returns NULL to
3295 continue the traversal, otherwise a non-null value which this
3296 function will return as well. TRAVERSE_CLONES is true if we should
3297 traverse transactional clones. */
3300 expand_regions (struct tm_region
*region
,
3301 void *(*callback
)(struct tm_region
*, void *),
3303 bool traverse_clones
)
3305 void *retval
= NULL
;
3308 retval
= expand_regions_1 (region
, callback
, data
, traverse_clones
);
3311 region
= region
->next
;
/* A unique TM memory operation.  */
typedef struct tm_memop
{
  /* Unique ID that all memory operations to the same location have.  */
  unsigned int value_id;
  /* Address of load/store.  */
  tree addr;
} *tm_memop_t;

/* TM memory operation hashtable helpers.  */

struct tm_memop_hasher : free_ptr_hash <tm_memop>
{
  static inline hashval_t hash (const tm_memop *);
  static inline bool equal (const tm_memop *, const tm_memop *);
};

/* Htab support.  Return a hash value for a `tm_memop'.  */
inline hashval_t
tm_memop_hasher::hash (const tm_memop *mem)
{
  tree addr = mem->addr;
  /* We drill down to the SSA_NAME/DECL for the hash, but equality is
     actually done with operand_equal_p (see tm_memop_eq).  */
  if (TREE_CODE (addr) == ADDR_EXPR)
    addr = TREE_OPERAND (addr, 0);
  return iterative_hash_expr (addr, 0);
}

/* Htab support.  Return true if two tm_memop's are the same.  */
inline bool
tm_memop_hasher::equal (const tm_memop *mem1, const tm_memop *mem2)
{
  return operand_equal_p (mem1->addr, mem2->addr, 0);
}
/* Sets for solving data flow equations in the memory optimization pass.  */
struct tm_memopt_bitmaps
{
  /* Stores available to this BB upon entry.  Basically, stores that
     dominate this BB.  */
  bitmap store_avail_in;
  /* Stores available at the end of this BB.  */
  bitmap store_avail_out;
  bitmap store_antic_in;
  bitmap store_antic_out;
  /* Reads available to this BB upon entry.  Basically, reads that
     dominate this BB.  */
  bitmap read_avail_in;
  /* Reads available at the end of this BB.  */
  bitmap read_avail_out;
  /* Reads performed in this BB.  */
  bitmap read_local;
  /* Writes performed in this BB.  */
  bitmap store_local;

  /* Temporary storage for pass.  */
  /* Is the current BB in the worklist?  */
  bool avail_in_worklist_p;
  /* Have we visited this BB?  */
  bool visited_p;
};

static bitmap_obstack tm_memopt_obstack;

/* Unique counter for TM loads and stores.  Loads and stores of the
   same address get the same ID.  */
static unsigned int tm_memopt_value_id;
static hash_table<tm_memop_hasher> *tm_memopt_value_numbers;
3387 #define STORE_AVAIL_IN(BB) \
3388 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
3389 #define STORE_AVAIL_OUT(BB) \
3390 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
3391 #define STORE_ANTIC_IN(BB) \
3392 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
3393 #define STORE_ANTIC_OUT(BB) \
3394 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
3395 #define READ_AVAIL_IN(BB) \
3396 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
3397 #define READ_AVAIL_OUT(BB) \
3398 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
3399 #define READ_LOCAL(BB) \
3400 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
3401 #define STORE_LOCAL(BB) \
3402 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
3403 #define AVAIL_IN_WORKLIST_P(BB) \
3404 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
3405 #define BB_VISITED_P(BB) \
3406 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
/* Given a TM load/store in STMT, return the value number for the address
   it accesses.  */

static unsigned int
tm_memopt_value_number (gimple stmt, enum insert_option op)
{
  struct tm_memop tmpmem, *mem;
  tm_memop **slot;

  gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
  tmpmem.addr = gimple_call_arg (stmt, 0);
  slot = tm_memopt_value_numbers->find_slot (&tmpmem, op);
  if (*slot)
    mem = *slot;
  else if (op == INSERT)
    {
      mem = XNEW (struct tm_memop);
      *slot = mem;
      mem->value_id = tm_memopt_value_id++;
      mem->addr = tmpmem.addr;
    }
  else
    gcc_unreachable ();
  return mem->value_id;
}
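/* Illustrative sketch (not compiler code): the value numbering above simply
   hands out one small integer per distinct address so that the dataflow
   sets below can be plain bitmaps.  The same idea with a linear table in
   place of the hash table:

     #include <stdio.h>
     #include <string.h>

     #define MAX_OPS 16

     static const char *addrs[MAX_OPS];
     static unsigned int n_values;

     static unsigned int
     value_number (const char *addr)
     {
       for (unsigned int i = 0; i < n_values; i++)
         if (strcmp (addrs[i], addr) == 0)
           return i;                 // same address, same id
       addrs[n_values] = addr;
       return n_values++;            // new address, fresh id
     }

     int
     main (void)
     {
       printf ("%u %u %u\n", value_number ("&a.x"),
               value_number ("&b"), value_number ("&a.x"));   // 0 1 0
       return 0;
     }
*/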
3434 /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
3437 tm_memopt_accumulate_memops (basic_block bb
)
3439 gimple_stmt_iterator gsi
;
3441 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3443 gimple stmt
= gsi_stmt (gsi
);
3447 if (is_tm_store (stmt
))
3448 bits
= STORE_LOCAL (bb
);
3449 else if (is_tm_load (stmt
))
3450 bits
= READ_LOCAL (bb
);
3454 loc
= tm_memopt_value_number (stmt
, INSERT
);
3455 bitmap_set_bit (bits
, loc
);
3458 fprintf (dump_file
, "TM memopt (%s): value num=%d, BB=%d, addr=",
3459 is_tm_load (stmt
) ? "LOAD" : "STORE", loc
,
3460 gimple_bb (stmt
)->index
);
3461 print_generic_expr (dump_file
, gimple_call_arg (stmt
, 0), 0);
3462 fprintf (dump_file
, "\n");
3467 /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */
3470 dump_tm_memopt_set (const char *set_name
, bitmap bits
)
3474 const char *comma
= "";
3476 fprintf (dump_file
, "TM memopt: %s: [", set_name
);
3477 EXECUTE_IF_SET_IN_BITMAP (bits
, 0, i
, bi
)
3479 hash_table
<tm_memop_hasher
>::iterator hi
;
3480 struct tm_memop
*mem
= NULL
;
3482 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
3483 FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers
, mem
, tm_memop_t
, hi
)
3484 if (mem
->value_id
== i
)
3486 gcc_assert (mem
->value_id
== i
);
3487 fprintf (dump_file
, "%s", comma
);
3489 print_generic_expr (dump_file
, mem
->addr
, 0);
3491 fprintf (dump_file
, "]\n");
3494 /* Prettily dump all of the memopt sets in BLOCKS. */
3497 dump_tm_memopt_sets (vec
<basic_block
> blocks
)
3502 for (i
= 0; blocks
.iterate (i
, &bb
); ++i
)
3504 fprintf (dump_file
, "------------BB %d---------\n", bb
->index
);
3505 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb
));
3506 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb
));
3507 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb
));
3508 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb
));
3509 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb
));
3510 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb
));
3514 /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
3517 tm_memopt_compute_avin (basic_block bb
)
3522 /* Seed with the AVOUT of any predecessor. */
3523 for (ix
= 0; ix
< EDGE_COUNT (bb
->preds
); ix
++)
3525 e
= EDGE_PRED (bb
, ix
);
      /* Make sure we have already visited this BB, and is thus
	 initialized.
3529 If e->src->aux is NULL, this predecessor is actually on an
3530 enclosing transaction. We only care about the current
3531 transaction, so ignore it. */
3532 if (e
->src
->aux
&& BB_VISITED_P (e
->src
))
3534 bitmap_copy (STORE_AVAIL_IN (bb
), STORE_AVAIL_OUT (e
->src
));
3535 bitmap_copy (READ_AVAIL_IN (bb
), READ_AVAIL_OUT (e
->src
));
3540 for (; ix
< EDGE_COUNT (bb
->preds
); ix
++)
3542 e
= EDGE_PRED (bb
, ix
);
3543 if (e
->src
->aux
&& BB_VISITED_P (e
->src
))
3545 bitmap_and_into (STORE_AVAIL_IN (bb
), STORE_AVAIL_OUT (e
->src
));
3546 bitmap_and_into (READ_AVAIL_IN (bb
), READ_AVAIL_OUT (e
->src
));
3550 BB_VISITED_P (bb
) = true;
3553 /* Compute the STORE_ANTIC_IN for the basic block BB. */
3556 tm_memopt_compute_antin (basic_block bb
)
3561 /* Seed with the ANTIC_OUT of any successor. */
3562 for (ix
= 0; ix
< EDGE_COUNT (bb
->succs
); ix
++)
3564 e
= EDGE_SUCC (bb
, ix
);
      /* Make sure we have already visited this BB, and is thus
	 initialized.  */
3567 if (BB_VISITED_P (e
->dest
))
3569 bitmap_copy (STORE_ANTIC_IN (bb
), STORE_ANTIC_OUT (e
->dest
));
3574 for (; ix
< EDGE_COUNT (bb
->succs
); ix
++)
3576 e
= EDGE_SUCC (bb
, ix
);
3577 if (BB_VISITED_P (e
->dest
))
3578 bitmap_and_into (STORE_ANTIC_IN (bb
), STORE_ANTIC_OUT (e
->dest
));
3581 BB_VISITED_P (bb
) = true;
3584 /* Compute the AVAIL sets for every basic block in BLOCKS.
3586 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3588 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3589 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
3591 This is basically what we do in lcm's compute_available(), but here
3592 we calculate two sets of sets (one for STOREs and one for READs),
3593 and we work on a region instead of the entire CFG.
3595 REGION is the TM region.
3596 BLOCKS are the basic blocks in the region. */
3599 tm_memopt_compute_available (struct tm_region
*region
,
3600 vec
<basic_block
> blocks
)
3603 basic_block
*worklist
, *qin
, *qout
, *qend
, bb
;
3604 unsigned int qlen
, i
;
3608 /* Allocate a worklist array/queue. Entries are only added to the
3609 list if they were not already on the list. So the size is
3610 bounded by the number of basic blocks in the region. */
3611 qlen
= blocks
.length () - 1;
3612 qin
= qout
= worklist
=
3613 XNEWVEC (basic_block
, qlen
);
3615 /* Put every block in the region on the worklist. */
3616 for (i
= 0; blocks
.iterate (i
, &bb
); ++i
)
3618 /* Seed AVAIL_OUT with the LOCAL set. */
3619 bitmap_ior_into (STORE_AVAIL_OUT (bb
), STORE_LOCAL (bb
));
3620 bitmap_ior_into (READ_AVAIL_OUT (bb
), READ_LOCAL (bb
));
3622 AVAIL_IN_WORKLIST_P (bb
) = true;
3623 /* No need to insert the entry block, since it has an AVIN of
3624 null, and an AVOUT that has already been seeded in. */
3625 if (bb
!= region
->entry_block
)
3629 /* The entry block has been initialized with the local sets. */
3630 BB_VISITED_P (region
->entry_block
) = true;
3633 qend
= &worklist
[qlen
];
3635 /* Iterate until the worklist is empty. */
3638 /* Take the first entry off the worklist. */
3645 /* This block can be added to the worklist again if necessary. */
3646 AVAIL_IN_WORKLIST_P (bb
) = false;
3647 tm_memopt_compute_avin (bb
);
3649 /* Note: We do not add the LOCAL sets here because we already
3650 seeded the AVAIL_OUT sets with them. */
3651 changed
= bitmap_ior_into (STORE_AVAIL_OUT (bb
), STORE_AVAIL_IN (bb
));
3652 changed
|= bitmap_ior_into (READ_AVAIL_OUT (bb
), READ_AVAIL_IN (bb
));
3654 && (region
->exit_blocks
== NULL
3655 || !bitmap_bit_p (region
->exit_blocks
, bb
->index
)))
3656 /* If the out state of this block changed, then we need to add
3657 its successors to the worklist if they are not already in. */
3658 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
3659 if (!AVAIL_IN_WORKLIST_P (e
->dest
)
3660 && e
->dest
!= EXIT_BLOCK_PTR_FOR_FN (cfun
))
3663 AVAIL_IN_WORKLIST_P (e
->dest
) = true;
3674 dump_tm_memopt_sets (blocks
);
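/* Illustrative sketch (not compiler code): the AVAIL computation above and
   the ANTIC computation below are standard iterative bit-vector problems.
   A stand-alone model of the forward (AVAIL) direction on a straight-line
   region, where one pass reaches the fixed point that the worklist loop
   computes in general:

     #include <stdio.h>

     #define N 3

     int
     main (void)
     {
       // Value ids are bit positions; local[i] is what block i itself stores.
       unsigned long local[N] = { 0x1, 0x2, 0x0 };
       unsigned long avail_in[N], avail_out[N];

       for (int i = 0; i < N; i++)
         {
           // AVAIL_IN = intersection over predecessors (here, just one).
           avail_in[i] = (i == 0) ? 0 : avail_out[i - 1];
           // AVAIL_OUT = AVAIL_IN union LOCAL.
           avail_out[i] = avail_in[i] | local[i];
         }

       for (int i = 0; i < N; i++)
         printf ("bb%d: in=%#lx out=%#lx\n", i, avail_in[i], avail_out[i]);
       return 0;
     }
*/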
3677 /* Compute ANTIC sets for every basic block in BLOCKS.
3679 We compute STORE_ANTIC_OUT as follows:
3681 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
3682 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
3684 REGION is the TM region.
3685 BLOCKS are the basic blocks in the region. */
3688 tm_memopt_compute_antic (struct tm_region
*region
,
3689 vec
<basic_block
> blocks
)
3692 basic_block
*worklist
, *qin
, *qout
, *qend
, bb
;
3697 /* Allocate a worklist array/queue. Entries are only added to the
3698 list if they were not already on the list. So the size is
3699 bounded by the number of basic blocks in the region. */
3700 qin
= qout
= worklist
= XNEWVEC (basic_block
, blocks
.length ());
3702 for (qlen
= 0, i
= blocks
.length () - 1; i
>= 0; --i
)
3706 /* Seed ANTIC_OUT with the LOCAL set. */
3707 bitmap_ior_into (STORE_ANTIC_OUT (bb
), STORE_LOCAL (bb
));
3709 /* Put every block in the region on the worklist. */
3710 AVAIL_IN_WORKLIST_P (bb
) = true;
3711 /* No need to insert exit blocks, since their ANTIC_IN is NULL,
3712 and their ANTIC_OUT has already been seeded in. */
3713 if (region
->exit_blocks
3714 && !bitmap_bit_p (region
->exit_blocks
, bb
->index
))
3721 /* The exit blocks have been initialized with the local sets. */
3722 if (region
->exit_blocks
)
3726 EXECUTE_IF_SET_IN_BITMAP (region
->exit_blocks
, 0, i
, bi
)
3727 BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun
, i
)) = true;
3731 qend
= &worklist
[qlen
];
3733 /* Iterate until the worklist is empty. */
3736 /* Take the first entry off the worklist. */
3743 /* This block can be added to the worklist again if necessary. */
3744 AVAIL_IN_WORKLIST_P (bb
) = false;
3745 tm_memopt_compute_antin (bb
);
3747 /* Note: We do not add the LOCAL sets here because we already
3748 seeded the ANTIC_OUT sets with them. */
3749 if (bitmap_ior_into (STORE_ANTIC_OUT (bb
), STORE_ANTIC_IN (bb
))
3750 && bb
!= region
->entry_block
)
3751 /* If the out state of this block changed, then we need to add
3752 its predecessors to the worklist if they are not already in. */
3753 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
3754 if (!AVAIL_IN_WORKLIST_P (e
->src
))
3757 AVAIL_IN_WORKLIST_P (e
->src
) = true;
3768 dump_tm_memopt_sets (blocks
);
3771 /* Offsets of load variants from TM_LOAD. For example,
3772 BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
3773 See gtm-builtins.def. */
3774 #define TRANSFORM_RAR 1
3775 #define TRANSFORM_RAW 2
3776 #define TRANSFORM_RFW 3
3777 /* Offsets of store variants from TM_STORE. */
3778 #define TRANSFORM_WAR 1
3779 #define TRANSFORM_WAW 2
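/* Illustrative sketch (not compiler code): the TRANSFORM_* offsets work
   because each hinted variant sits at a fixed distance from its plain
   builtin in the function-code enumeration, so selecting a hint is just
   integer addition.  A stand-alone model with a hypothetical enum:

     #include <stdio.h>

     enum fake_builtin
     {
       FAKE_TM_LOAD_4,      // base
       FAKE_TM_LOAD_RAR_4,  // base + 1 (TRANSFORM_RAR)
       FAKE_TM_LOAD_RAW_4,  // base + 2 (TRANSFORM_RAW)
       FAKE_TM_LOAD_RFW_4   // base + 3 (TRANSFORM_RFW)
     };

     static enum fake_builtin
     transform (enum fake_builtin base, unsigned int offset)
     {
       return (enum fake_builtin) (base + offset);
     }

     int
     main (void)
     {
       printf ("%d\n", transform (FAKE_TM_LOAD_4, 2) == FAKE_TM_LOAD_RAW_4);
       return 0;
     }
*/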
3781 /* Inform about a load/store optimization. */
3784 dump_tm_memopt_transform (gimple stmt
)
3788 fprintf (dump_file
, "TM memopt: transforming: ");
3789 print_gimple_stmt (dump_file
, stmt
, 0, 0);
3790 fprintf (dump_file
, "\n");
3794 /* Perform a read/write optimization. Replaces the TM builtin in STMT
3795 by a builtin that is OFFSET entries down in the builtins table in
3796 gtm-builtins.def. */
3799 tm_memopt_transform_stmt (unsigned int offset
,
3801 gimple_stmt_iterator
*gsi
)
3803 tree fn
= gimple_call_fn (stmt
);
3804 gcc_assert (TREE_CODE (fn
) == ADDR_EXPR
);
3805 TREE_OPERAND (fn
, 0)
3806 = builtin_decl_explicit ((enum built_in_function
)
3807 (DECL_FUNCTION_CODE (TREE_OPERAND (fn
, 0))
3809 gimple_call_set_fn (stmt
, fn
);
3810 gsi_replace (gsi
, stmt
, true);
3811 dump_tm_memopt_transform (stmt
);
3814 /* Perform the actual TM memory optimization transformations in the
3815 basic blocks in BLOCKS. */
3818 tm_memopt_transform_blocks (vec
<basic_block
> blocks
)
3822 gimple_stmt_iterator gsi
;
3824 for (i
= 0; blocks
.iterate (i
, &bb
); ++i
)
3826 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3828 gimple stmt
= gsi_stmt (gsi
);
3829 bitmap read_avail
= READ_AVAIL_IN (bb
);
3830 bitmap store_avail
= STORE_AVAIL_IN (bb
);
3831 bitmap store_antic
= STORE_ANTIC_OUT (bb
);
3834 if (is_tm_simple_load (stmt
))
3836 gcall
*call_stmt
= as_a
<gcall
*> (stmt
);
3837 loc
= tm_memopt_value_number (stmt
, NO_INSERT
);
3838 if (store_avail
&& bitmap_bit_p (store_avail
, loc
))
3839 tm_memopt_transform_stmt (TRANSFORM_RAW
, call_stmt
, &gsi
);
3840 else if (store_antic
&& bitmap_bit_p (store_antic
, loc
))
3842 tm_memopt_transform_stmt (TRANSFORM_RFW
, call_stmt
, &gsi
);
3843 bitmap_set_bit (store_avail
, loc
);
3845 else if (read_avail
&& bitmap_bit_p (read_avail
, loc
))
3846 tm_memopt_transform_stmt (TRANSFORM_RAR
, call_stmt
, &gsi
);
3848 bitmap_set_bit (read_avail
, loc
);
3850 else if (is_tm_simple_store (stmt
))
3852 gcall
*call_stmt
= as_a
<gcall
*> (stmt
);
3853 loc
= tm_memopt_value_number (stmt
, NO_INSERT
);
3854 if (store_avail
&& bitmap_bit_p (store_avail
, loc
))
3855 tm_memopt_transform_stmt (TRANSFORM_WAW
, call_stmt
, &gsi
);
3858 if (read_avail
&& bitmap_bit_p (read_avail
, loc
))
3859 tm_memopt_transform_stmt (TRANSFORM_WAR
, call_stmt
, &gsi
);
3860 bitmap_set_bit (store_avail
, loc
);
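/* Illustrative sketch (not compiler code): the hint chosen above depends only
   on which sets contain the address' value id.  Summarized as stand-alone
   decision functions (the names are illustrative; the real code tests
   bitmaps):

     #include <stdio.h>

     enum hint { PLAIN, RAR, RAW, RFW, WAR, WAW };

     static enum hint
     pick_load_hint (int store_avail, int store_antic, int read_avail)
     {
       if (store_avail) return RAW;   // read after our own write
       if (store_antic) return RFW;   // read of a location we will write
       if (read_avail)  return RAR;   // read after read
       return PLAIN;
     }

     static enum hint
     pick_store_hint (int store_avail, int read_avail)
     {
       if (store_avail) return WAW;   // write after write
       if (read_avail)  return WAR;   // write after read
       return PLAIN;
     }

     int
     main (void)
     {
       printf ("%d %d\n", pick_load_hint (1, 0, 0), pick_store_hint (0, 1));
       return 0;
     }
*/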
/* Return a new set of bitmaps for a BB.  */

static struct tm_memopt_bitmaps *
tm_memopt_init_sets (void)
{
  struct tm_memopt_bitmaps *b
    = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
  b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
  return b;
}
/* Free sets computed for each BB.  */

static void
tm_memopt_free_sets (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    bb->aux = NULL;
}

/* Clear the visited bit for every basic block in BLOCKS.  */

static void
tm_memopt_clear_visited (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    BB_VISITED_P (bb) = false;
}
3910 /* Replace TM load/stores with hints for the runtime. We handle
3911 things like read-after-write, write-after-read, read-after-read,
3912 read-for-write, etc. */
3915 execute_tm_memopt (void)
3917 struct tm_region
*region
;
3918 vec
<basic_block
> bbs
;
3920 tm_memopt_value_id
= 0;
3921 tm_memopt_value_numbers
= new hash_table
<tm_memop_hasher
> (10);
3923 for (region
= all_tm_regions
; region
; region
= region
->next
)
3925 /* All the TM stores/loads in the current region. */
3929 bitmap_obstack_initialize (&tm_memopt_obstack
);
3931 /* Save all BBs for the current region. */
3932 bbs
= get_tm_region_blocks (region
->entry_block
,
3933 region
->exit_blocks
,
3938 /* Collect all the memory operations. */
3939 for (i
= 0; bbs
.iterate (i
, &bb
); ++i
)
3941 bb
->aux
= tm_memopt_init_sets ();
3942 tm_memopt_accumulate_memops (bb
);
3945 /* Solve data flow equations and transform each block accordingly. */
3946 tm_memopt_clear_visited (bbs
);
3947 tm_memopt_compute_available (region
, bbs
);
3948 tm_memopt_clear_visited (bbs
);
3949 tm_memopt_compute_antic (region
, bbs
);
3950 tm_memopt_transform_blocks (bbs
);
3952 tm_memopt_free_sets (bbs
);
3954 bitmap_obstack_release (&tm_memopt_obstack
);
3955 tm_memopt_value_numbers
->empty ();
3958 delete tm_memopt_value_numbers
;
3959 tm_memopt_value_numbers
= NULL
;
3965 const pass_data pass_data_tm_memopt
=
3967 GIMPLE_PASS
, /* type */
3968 "tmmemopt", /* name */
3969 OPTGROUP_NONE
, /* optinfo_flags */
3970 TV_TRANS_MEM
, /* tv_id */
3971 ( PROP_ssa
| PROP_cfg
), /* properties_required */
3972 0, /* properties_provided */
3973 0, /* properties_destroyed */
3974 0, /* todo_flags_start */
3975 0, /* todo_flags_finish */
3978 class pass_tm_memopt
: public gimple_opt_pass
3981 pass_tm_memopt (gcc::context
*ctxt
)
3982 : gimple_opt_pass (pass_data_tm_memopt
, ctxt
)
3985 /* opt_pass methods: */
3986 virtual bool gate (function
*) { return flag_tm
&& optimize
> 0; }
3987 virtual unsigned int execute (function
*) { return execute_tm_memopt (); }
3989 }; // class pass_tm_memopt
3994 make_pass_tm_memopt (gcc::context
*ctxt
)
3996 return new pass_tm_memopt (ctxt
);
/* Interprocedural analysis for the creation of transactional clones.
4001 The aim of this pass is to find which functions are referenced in
4002 a non-irrevocable transaction context, and for those over which
4003 we have control (or user directive), create a version of the
4004 function which uses only the transactional interface to reference
4005 protected memories. This analysis proceeds in several steps:
4007 (1) Collect the set of all possible transactional clones:
4009 (a) For all local public functions marked tm_callable, push
4010 it onto the tm_callee queue.
4012 (b) For all local functions, scan for calls in transaction blocks.
4013 Push the caller and callee onto the tm_caller and tm_callee
4014 queues. Count the number of callers for each callee.
4016 (c) For each local function on the callee list, assume we will
4017 create a transactional clone. Push *all* calls onto the
4018 callee queues; count the number of clone callers separately
4019 to the number of original callers.
4021 (2) Propagate irrevocable status up the dominator tree:
4023 (a) Any external function on the callee list that is not marked
	   tm_callable is irrevocable.  Push all callers of such onto
	   the worklist.
4027 (b) For each function on the worklist, mark each block that
4028 contains an irrevocable call. Use the AND operator to
4029 propagate that mark up the dominator tree.
4031 (c) If we reach the entry block for a possible transactional
4032 clone, then the transactional clone is irrevocable, and
4033 we should not create the clone after all. Push all
4034 callers onto the worklist.
4036 (d) Place tm_irrevocable calls at the beginning of the relevant
4037 blocks. Special case here is the entry block for the entire
4038 transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
4039 the library to begin the region in serial mode. Decrement
4040 the call count for all callees in the irrevocable region.
4042 (3) Create the transactional clones:
       Any tm_callee that still has a non-zero call count is cloned.  */
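/* Illustrative sketch (not compiler code): step (2) above is a worklist fixed
   point over the call graph.  The model below deliberately ignores the
   block-level dominator propagation of step (2b) and treats a function as
   irrevocable as soon as any callee is; the graph and names are made up.

     #include <stdio.h>

     #define N_FUNCS 4

     // callers[f] lists the callers of f, terminated by -1.
     static const int callers[N_FUNCS][N_FUNCS]
       = { { 1, -1 }, { 2, -1 }, { -1 }, { -1 } };
     static int irrevocable[N_FUNCS];

     int
     main (void)
     {
       int worklist[N_FUNCS], n = 0;

       irrevocable[0] = 1;   // e.g. an external function not marked tm_callable
       worklist[n++] = 0;

       while (n > 0)
         {
           int f = worklist[--n];
           for (int i = 0; callers[f][i] >= 0; i++)
             if (!irrevocable[callers[f][i]])
               {
                 irrevocable[callers[f][i]] = 1;
                 worklist[n++] = callers[f][i];
               }
         }

       for (int f = 0; f < N_FUNCS; f++)
         printf ("fn%d: %s\n", f, irrevocable[f] ? "irrevocable" : "clone ok");
       return 0;
     }
*/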
/* This structure is stored in the AUX field of each cgraph_node.  */
struct tm_ipa_cg_data
{
  /* The clone of the function that got created.  */
  struct cgraph_node *clone;

  /* The tm regions in the normal function.  */
  struct tm_region *all_tm_regions;

  /* The blocks of the normal/clone functions that contain irrevocable
     calls, or blocks that are post-dominated by irrevocable calls.  */
  bitmap irrevocable_blocks_normal;
  bitmap irrevocable_blocks_clone;

  /* The blocks of the normal function that are involved in transactions.  */
  bitmap transaction_blocks_normal;

  /* The number of callers to the transactional clone of this function
     from normal and transactional clones respectively.  */
  unsigned tm_callers_normal;
  unsigned tm_callers_clone;

  /* True if all calls to this function's transactional clone
     are irrevocable.  Also automatically true if the function
     has no transactional clone.  */
  bool is_irrevocable;

  /* Flags indicating the presence of this function in various queues.  */
  bool in_callee_queue;
  bool in_worklist;

  /* Flags indicating the kind of scan desired while in the worklist.  */
  bool want_irr_scan_normal;
};

typedef vec<cgraph_node *> cgraph_node_queue;
/* Return the ipa data associated with NODE, allocating zeroed memory
   if necessary.  TRAVERSE_ALIASES is true if we must traverse aliases
   and set *NODE accordingly.  */

static struct tm_ipa_cg_data *
get_cg_data (struct cgraph_node **node, bool traverse_aliases)
{
  struct tm_ipa_cg_data *d;

  if (traverse_aliases && (*node)->alias)
    *node = (*node)->get_alias_target ();

  d = (struct tm_ipa_cg_data *) (*node)->aux;

  if (d == NULL)
    {
      d = (struct tm_ipa_cg_data *)
	obstack_alloc (&tm_obstack.obstack, sizeof (*d));
      (*node)->aux = (void *) d;
      memset (d, 0, sizeof (*d));
    }

  return d;
}

/* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
   it is already present.  */

static void
maybe_push_queue (struct cgraph_node *node,
		  cgraph_node_queue *queue_p, bool *in_queue_p)
{
  if (!*in_queue_p)
    {
      *in_queue_p = true;
      queue_p->safe_push (node);
    }
}
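/* Illustrative sketch (not compiler code): maybe_push_queue keeps the queues
   duplicate-free with a per-node flag instead of searching the vector.  The
   same idiom in miniature, with an illustrative node type:

     #include <stdio.h>

     struct node { const char *name; int in_queue; };

     static struct node *queue[8];
     static int queue_len;

     static void
     maybe_push (struct node *n)
     {
       if (!n->in_queue)
         {
           n->in_queue = 1;            // the flag replaces a linear search
           queue[queue_len++] = n;
         }
     }

     int
     main (void)
     {
       struct node a = { "a", 0 };
       maybe_push (&a);
       maybe_push (&a);                // second push is a no-op
       printf ("%d\n", queue_len);     // prints 1
       return 0;
     }
*/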
4123 /* Duplicate the basic blocks in QUEUE for use in the uninstrumented
4124 code path. QUEUE are the basic blocks inside the transaction
4125 represented in REGION.
4127 Later in split_code_paths() we will add the conditional to choose
4128 between the two alternatives. */
4131 ipa_uninstrument_transaction (struct tm_region
*region
,
4132 vec
<basic_block
> queue
)
4134 gimple transaction
= region
->transaction_stmt
;
4135 basic_block transaction_bb
= gimple_bb (transaction
);
4136 int n
= queue
.length ();
4137 basic_block
*new_bbs
= XNEWVEC (basic_block
, n
);
4139 copy_bbs (queue
.address (), n
, new_bbs
, NULL
, 0, NULL
, NULL
, transaction_bb
,
4141 edge e
= make_edge (transaction_bb
, new_bbs
[0], EDGE_TM_UNINSTRUMENTED
);
4142 add_phi_args_after_copy (new_bbs
, n
, e
);
4144 // Now we will have a GIMPLE_ATOMIC with 3 possible edges out of it.
4145 // a) EDGE_FALLTHRU into the transaction
4146 // b) EDGE_TM_ABORT out of the transaction
4147 // c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks.
4152 /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
4153 Queue all callees within block BB. */
4156 ipa_tm_scan_calls_block (cgraph_node_queue
*callees_p
,
4157 basic_block bb
, bool for_clone
)
4159 gimple_stmt_iterator gsi
;
4161 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
4163 gimple stmt
= gsi_stmt (gsi
);
4164 if (is_gimple_call (stmt
) && !is_tm_pure_call (stmt
))
4166 tree fndecl
= gimple_call_fndecl (stmt
);
4169 struct tm_ipa_cg_data
*d
;
4171 struct cgraph_node
*node
;
4173 if (is_tm_ending_fndecl (fndecl
))
4175 if (find_tm_replacement_function (fndecl
))
4178 node
= cgraph_node::get (fndecl
);
4179 gcc_assert (node
!= NULL
);
4180 d
= get_cg_data (&node
, true);
4182 pcallers
= (for_clone
? &d
->tm_callers_clone
4183 : &d
->tm_callers_normal
);
4186 maybe_push_queue (node
, callees_p
, &d
->in_callee_queue
);
4192 /* Scan all calls in NODE that are within a transaction region,
4193 and push the resulting nodes into the callee queue. */
4196 ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data
*d
,
4197 cgraph_node_queue
*callees_p
)
4199 struct tm_region
*r
;
4201 d
->transaction_blocks_normal
= BITMAP_ALLOC (&tm_obstack
);
4202 d
->all_tm_regions
= all_tm_regions
;
4204 for (r
= all_tm_regions
; r
; r
= r
->next
)
4206 vec
<basic_block
> bbs
;
4210 bbs
= get_tm_region_blocks (r
->entry_block
, r
->exit_blocks
, NULL
,
4211 d
->transaction_blocks_normal
, false);
4213 // Generate the uninstrumented code path for this transaction.
4214 ipa_uninstrument_transaction (r
, bbs
);
4216 FOR_EACH_VEC_ELT (bbs
, i
, bb
)
4217 ipa_tm_scan_calls_block (callees_p
, bb
, false);
4222 // ??? copy_bbs should maintain cgraph edges for the blocks as it is
4223 // copying them, rather than forcing us to do this externally.
4224 cgraph_edge::rebuild_edges ();
4226 // ??? In ipa_uninstrument_transaction we don't try to update dominators
4227 // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects.
4228 // Instead, just release dominators here so update_ssa recomputes them.
4229 free_dominance_info (CDI_DOMINATORS
);
4231 // When building the uninstrumented code path, copy_bbs will have invoked
4232 // create_new_def_for starting an "ssa update context". There is only one
4233 // instance of this context, so resolve ssa updates before moving on to
4234 // the next function.
4235 update_ssa (TODO_update_ssa
);
/* Scan all calls in NODE as if this is the transactional clone,
   and push the destinations into the callee queue.  */

static void
ipa_tm_scan_calls_clone (struct cgraph_node *node,
			 cgraph_node_queue *callees_p)
{
  struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
  basic_block bb;

  FOR_EACH_BB_FN (bb, fn)
    ipa_tm_scan_calls_block (callees_p, bb, true);
}
/* The function NODE has been detected to be irrevocable.  Push all
   of its callers onto WORKLIST for the purpose of re-scanning them.  */

static void
ipa_tm_note_irrevocable (struct cgraph_node *node,
                         cgraph_node_queue *worklist_p)
{
  struct tm_ipa_cg_data *d = get_cg_data (&node, true);
  struct cgraph_edge *e;

  d->is_irrevocable = true;

  for (e = node->callers; e; e = e->next_caller)
    {
      basic_block bb;
      struct cgraph_node *caller;

      /* Don't examine recursive calls.  */
      if (e->caller == node)
        continue;
      /* Even if we think we can go irrevocable, believe the user
         above all.  */
      if (is_tm_safe_or_pure (e->caller->decl))
        continue;

      caller = e->caller;
      d = get_cg_data (&caller, true);

      /* Check if the call is within a transactional region of the caller.
         If so, schedule that function for a normal re-scan as well.  */
      bb = gimple_bb (e->call_stmt);
      gcc_assert (bb != NULL);
      if (d->transaction_blocks_normal
          && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
        d->want_irr_scan_normal = true;

      maybe_push_queue (caller, worklist_p, &d->in_worklist);
    }
}

/* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
   within the block is irrevocable.  */

static bool
ipa_tm_scan_irr_block (basic_block bb)
{
  gimple_stmt_iterator gsi;
  tree fn;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
        {
        case GIMPLE_ASSIGN:
          if (gimple_assign_single_p (stmt))
            {
              tree lhs = gimple_assign_lhs (stmt);
              tree rhs = gimple_assign_rhs1 (stmt);
              if (volatile_var_p (lhs) || volatile_var_p (rhs))
                return true;
            }
          break;

        case GIMPLE_CALL:
          {
            tree lhs = gimple_call_lhs (stmt);
            if (lhs && volatile_var_p (lhs))
              return true;

            if (is_tm_pure_call (stmt))
              break;

            fn = gimple_call_fn (stmt);

            /* Functions with the attribute are by definition irrevocable.  */
            if (is_tm_irrevocable (fn))
              return true;

            /* For direct function calls, go ahead and check for replacement
               functions, or transitive irrevocable functions.  For indirect
               functions, we'll ask the runtime.  */
            if (TREE_CODE (fn) == ADDR_EXPR)
              {
                struct tm_ipa_cg_data *d;
                struct cgraph_node *node;

                fn = TREE_OPERAND (fn, 0);
                if (is_tm_ending_fndecl (fn))
                  break;
                if (find_tm_replacement_function (fn))
                  break;

                node = cgraph_node::get (fn);
                d = get_cg_data (&node, true);

                /* Return true if irrevocable, but above all, believe
                   the user.  */
                if (d->is_irrevocable
                    && !is_tm_safe_or_pure (fn))
                  return true;
              }
            break;
          }

        case GIMPLE_ASM:
          /* ??? The Approved Method of indicating that an inline
             assembly statement is not relevant to the transaction
             is to wrap it in a __tm_waiver block.  This is not
             yet implemented, so we can't check for it.  */
          if (is_tm_safe (current_function_decl))
            {
              tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
              SET_EXPR_LOCATION (t, gimple_location (stmt));
              error ("%Kasm not allowed in %<transaction_safe%> function", t);
            }
          return true;

        default:
          break;
        }
    }

  return false;
}

/* For each of the blocks seeded within PQUEUE, walk the CFG looking
   for new irrevocable blocks, marking them in NEW_IRR.  Don't bother
   scanning past OLD_IRR or EXIT_BLOCKS.  */

static bool
ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
                        bitmap old_irr, bitmap exit_blocks)
{
  bool any_new_irr = false;
  edge e;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  do
    {
      basic_block bb = pqueue->pop ();

      /* Don't re-scan blocks we know already are irrevocable.  */
      if (old_irr && bitmap_bit_p (old_irr, bb->index))
        continue;

      if (ipa_tm_scan_irr_block (bb))
        {
          bitmap_set_bit (new_irr, bb->index);
          any_new_irr = true;
        }
      else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
            if (!bitmap_bit_p (visited_blocks, e->dest->index))
              {
                bitmap_set_bit (visited_blocks, e->dest->index);
                pqueue->safe_push (e->dest);
              }
        }
    }
  while (!pqueue->is_empty ());

  BITMAP_FREE (visited_blocks);

  return any_new_irr;
}

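/* The return value feeds ipa_tm_scan_irr_function: only when a new
   irrevocable block has been discovered is the property re-propagated
   over the region via ipa_tm_propagate_irr.  */
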
/* Propagate the irrevocable property both up and down the dominator tree.
   ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
   are the edges of the TM regions; OLD_IRR is the result of a previous
   scan of the dominator tree which has been fully propagated; NEW_IRR is
   the set of new blocks which are gaining the irrevocable property during
   the current scan.  */

static void
ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
                      bitmap old_irr, bitmap exit_blocks)
{
  vec<basic_block> bbs;
  bitmap all_region_blocks;

  /* If this block is in the old set, no need to rescan.  */
  if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
    return;

  all_region_blocks = BITMAP_ALLOC (&tm_obstack);
  bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
                              all_region_blocks, false);
  do
    {
      basic_block bb = bbs.pop ();
      bool this_irr = bitmap_bit_p (new_irr, bb->index);
      bool all_son_irr = false;
      edge_iterator ei;
      edge e;

      /* Propagate up.  If my children are, I am too, but we must have
         at least one child that is.  */
      if (!this_irr)
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if (!bitmap_bit_p (new_irr, e->dest->index))
                {
                  all_son_irr = false;
                  break;
                }
              else
                all_son_irr = true;
            }
          if (all_son_irr)
            {
              /* Add block to new_irr if it hasn't already been processed.  */
              if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
                {
                  bitmap_set_bit (new_irr, bb->index);
                  this_irr = true;
                }
            }
        }

      /* Propagate down to everyone we immediately dominate.  */
      if (this_irr)
        {
          basic_block son;
          for (son = first_dom_son (CDI_DOMINATORS, bb);
               son;
               son = next_dom_son (CDI_DOMINATORS, son))
            {
              /* Make sure block is actually in a TM region, and it
                 isn't already in old_irr.  */
              if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
                  && bitmap_bit_p (all_region_blocks, son->index))
                bitmap_set_bit (new_irr, son->index);
            }
        }
    }
  while (!bbs.is_empty ());

  BITMAP_FREE (all_region_blocks);
  bbs.release ();
}

/* Decrement the callers-within-transaction counts for every transactional
   clone candidate called from block BB, which has just become irrevocable.  */

static void
ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl)
            {
              struct tm_ipa_cg_data *d;
              unsigned *pcallers;
              struct cgraph_node *tnode;

              if (is_tm_ending_fndecl (fndecl))
                continue;
              if (find_tm_replacement_function (fndecl))
                continue;

              tnode = cgraph_node::get (fndecl);
              d = get_cg_data (&tnode, true);

              pcallers = (for_clone ? &d->tm_callers_clone
                          : &d->tm_callers_normal);

              gcc_assert (*pcallers > 0);
              *pcallers -= 1;
            }
        }
    }
}

/* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
   as well as other irrevocable actions such as inline assembly.  Mark all
   such blocks as irrevocable and decrement the number of calls to
   transactional clones.  Return true if, for the transactional clone, the
   entire function is irrevocable.  */

static bool
ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
{
  struct tm_ipa_cg_data *d;
  bitmap new_irr, old_irr;
  bool ret = false;

  /* Builtin operators (operator new, and such).  */
  if (DECL_STRUCT_FUNCTION (node->decl) == NULL
      || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
    return false;

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  d = get_cg_data (&node, true);
  auto_vec<basic_block, 10> queue;
  new_irr = BITMAP_ALLOC (&tm_obstack);

  /* Scan each tm region, propagating irrevocable status through the tree.  */
  if (for_clone)
    {
      old_irr = d->irrevocable_blocks_clone;
      queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
        {
          ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
                                new_irr, old_irr, NULL);
          ret = bitmap_bit_p (new_irr,
                              single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
        }
    }
  else
    {
      struct tm_region *region;

      old_irr = d->irrevocable_blocks_normal;
      for (region = d->all_tm_regions; region; region = region->next)
        {
          queue.quick_push (region->entry_block);
          if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
                                      region->exit_blocks))
            ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
                                  region->exit_blocks);
        }
    }

  /* If we found any new irrevocable blocks, reduce the call count for
     transactional clones within the irrevocable blocks.  Save the new
     set of irrevocable blocks for next time.  */
  if (!bitmap_empty_p (new_irr))
    {
      bitmap_iterator bmi;
      unsigned i;

      EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
        ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
                                       for_clone);

      if (old_irr)
        {
          bitmap_ior_into (old_irr, new_irr);
          BITMAP_FREE (new_irr);
          new_irr = old_irr;
        }
      else if (for_clone)
        d->irrevocable_blocks_clone = new_irr;
      else
        d->irrevocable_blocks_normal = new_irr;

      if (dump_file && new_irr)
        {
          const char *dname;
          bitmap_iterator bmi;
          unsigned i;

          dname = lang_hooks.decl_printable_name (current_function_decl, 2);
          EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
            fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
        }
    }
  else
    BITMAP_FREE (new_irr);

  pop_cfun ();

  return ret;
}

/* Return true if, for the transactional clone of NODE, any call
   may enter irrevocable mode.  */

static bool
ipa_tm_mayenterirr_function (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  tree decl;
  unsigned flags;

  d = get_cg_data (&node, true);
  decl = node->decl;
  flags = flags_from_decl_or_type (decl);

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags & ECF_TM_BUILTIN)
    return false;

  /* Filter out all functions that are marked.  */
  if (flags & ECF_TM_PURE)
    return false;
  if (is_tm_safe (decl))
    return false;
  if (is_tm_irrevocable (decl))
    return true;
  if (is_tm_callable (decl))
    return true;
  if (find_tm_replacement_function (decl))
    return true;

  /* If we aren't seeing the final version of the function we don't
     know what it will contain at runtime.  */
  if (node->get_availability () < AVAIL_AVAILABLE)
    return true;

  /* If the function must go irrevocable, then of course true.  */
  if (d->is_irrevocable)
    return true;

  /* If there are any blocks marked irrevocable, then the function
     as a whole may enter irrevocable.  */
  if (d->irrevocable_blocks_clone)
    return true;

  /* We may have previously marked this function as tm_may_enter_irr;
     see pass_diagnose_tm_blocks.  */
  if (node->local.tm_may_enter_irr)
    return true;

  /* Recurse on the main body for aliases.  In general, this will
     result in one of the bits above being set so that we will not
     have to recurse next time.  */
  if (node->alias)
    return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));

  /* What remains is unmarked local functions without items that force
     the function to go irrevocable.  */
  return false;
}

/* Diagnose calls from transaction_safe functions to unmarked
   functions that are determined to not be safe.  */

static void
ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    if (!is_tm_callable (e->callee->decl)
        && e->callee->local.tm_may_enter_irr)
      error_at (gimple_location (e->call_stmt),
                "unsafe function call %qD within "
                "%<transaction_safe%> function", e->callee->decl);
}

/* Diagnose calls from atomic transactions to unmarked functions
   that are determined to not be safe.  */

static void
ipa_tm_diagnose_transaction (struct cgraph_node *node,
                             struct tm_region *all_tm_regions)
{
  struct tm_region *r;

  for (r = all_tm_regions; r; r = r->next)
    if (gimple_transaction_subcode (r->get_transaction_stmt ())
        & GTMA_IS_RELAXED)
      {
        /* Atomic transactions can be nested inside relaxed.  */
        if (r->inner)
          ipa_tm_diagnose_transaction (node, r->inner);
      }
    else
      {
        vec<basic_block> bbs;
        gimple_stmt_iterator gsi;
        basic_block bb;
        size_t i;

        bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
                                    r->irr_blocks, NULL, false);

        for (i = 0; bbs.iterate (i, &bb); ++i)
          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              gimple stmt = gsi_stmt (gsi);
              tree fndecl;

              if (gimple_code (stmt) == GIMPLE_ASM)
                {
                  error_at (gimple_location (stmt),
                            "asm not allowed in atomic transaction");
                  continue;
                }

              if (!is_gimple_call (stmt))
                continue;
              fndecl = gimple_call_fndecl (stmt);

              /* Indirect function calls have been diagnosed already.  */
              if (!fndecl)
                continue;

              /* Stop at the end of the transaction.  */
              if (is_tm_ending_fndecl (fndecl))
                {
                  if (bitmap_bit_p (r->exit_blocks, bb->index))
                    break;
                  continue;
                }

              /* Marked functions have been diagnosed already.  */
              if (is_tm_pure_call (stmt))
                continue;
              if (is_tm_callable (fndecl))
                continue;

              if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
                error_at (gimple_location (stmt),
                          "unsafe function call %qD within "
                          "atomic transaction", fndecl);
            }

        bbs.release ();
      }
}

/* Return a transactional mangled name for the assembler name OLD_ASM_ID.
   The returned value is an IDENTIFIER_NODE for the mangled clone name.  */

static tree
tm_mangle (tree old_asm_id)
{
  const char *old_asm_name;
  char *tm_name;
  void *alloc = NULL;
  struct demangle_component *dc;
  tree new_asm_id;

  /* Determine if the symbol is already a valid C++ mangled name.  Do this
     even for C, which might be interfacing with C++ code via appropriately
     ugly identifiers.  */
  /* ??? We could probably do just as well checking for "_Z" and be done.  */
  old_asm_name = IDENTIFIER_POINTER (old_asm_id);
  dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);

  if (dc == NULL)
    {
      char length[8];

    do_unencoded:
      sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
      tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
    }
  else
    {
      old_asm_name += 2;	/* Skip _Z */

      switch (dc->type)
        {
        case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
        case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
          /* Don't play silly games, you!  */
          goto do_unencoded;

        case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
          /* I'd really like to know if we can ever be passed one of
             these from the C++ front end.  The Logical Thing would
             seem that hidden-alias should be outer-most, so that we
             get hidden-alias of a transaction-clone and not vice-versa.  */
          break;

        default:
          break;
        }

      tm_name = concat ("_ZGTt", old_asm_name, NULL);
    }
  free (alloc);

  new_asm_id = get_identifier (tm_name);
  free (tm_name);

  return new_asm_id;
}

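/* For illustration: an unmangled C symbol "foo" becomes "_ZGTt3foo"
   (length-prefixed), while an already-mangled C++ name such as
   "_Z3foov" becomes "_ZGTt3foov", i.e. the "GTt" transaction-clone
   marker is inserted right after the "_Z" prefix.  */
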
/* Helpers used when creating transactional clones: make sure the new clone
   NODE is kept by the symbol table (forced output / forced by the ABI) and
   is considered analyzed.  */

static inline void
ipa_tm_mark_force_output_node (struct cgraph_node *node)
{
  node->mark_force_output ();
  node->analyzed = true;
}

static inline void
ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
{
  node->forced_by_abi = true;
  node->analyzed = true;
}

/* Callback data for ipa_tm_create_version_alias.  */
struct create_version_alias_info
{
  struct cgraph_node *old_node;
  tree new_decl;
};

/* A subroutine of ipa_tm_create_version, called via
   cgraph_for_node_and_aliases.  Create new tm clones for each of
   the existing aliases.  */
static bool
ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
{
  struct create_version_alias_info *info
    = (struct create_version_alias_info *) data;
  tree old_decl, new_decl, tm_name;
  struct cgraph_node *new_node;

  if (!node->cpp_implicit_alias)
    return false;

  old_decl = node->decl;
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
                         TREE_CODE (old_decl), tm_name,
                         TREE_TYPE (old_decl));

  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);

  /* Based loosely on C++'s make_alias_for().  */
  TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
  DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
  DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
  TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
  DECL_EXTERNAL (new_decl) = 0;
  DECL_ARTIFICIAL (new_decl) = 1;
  TREE_ADDRESSABLE (new_decl) = 1;
  TREE_USED (new_decl) = 1;
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    varpool_node::get (new_decl)->set_comdat_group
      (tm_mangle (decl_comdat_group_id (old_decl)));

  new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
  new_node->tm_clone = true;
  new_node->externally_visible = info->old_node->externally_visible;
  new_node->no_reorder = info->old_node->no_reorder;
  /* ?? Do not traverse aliases here.  */
  get_cg_data (&node, false)->clone = new_node;

  record_tm_clone_pair (old_decl, new_decl);

  if (info->old_node->force_output
      || info->old_node->ref_list.first_referring ())
    ipa_tm_mark_force_output_node (new_node);
  if (info->old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);
  return false;
}

/* Create a copy of the function (possibly declaration only) of OLD_NODE,
   appropriate for the transactional clone.  */

static void
ipa_tm_create_version (struct cgraph_node *old_node)
{
  tree new_decl, old_decl, tm_name;
  struct cgraph_node *new_node;

  old_decl = old_node->decl;
  new_decl = copy_node (old_decl);

  /* DECL_ASSEMBLER_NAME needs to be set before we call
     cgraph_copy_node_for_versioning below, because cgraph_node will
     fill the assembler_name_hash.  */
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    varpool_node::get (new_decl)->set_comdat_group
      (tm_mangle (DECL_COMDAT_GROUP (old_decl)));

  gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
  new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
  new_node->local.local = false;
  new_node->externally_visible = old_node->externally_visible;
  new_node->lowered = true;
  new_node->tm_clone = 1;
  if (!old_node->implicit_section)
    new_node->set_section (old_node->get_section ());
  get_cg_data (&old_node, true)->clone = new_node;

  if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
    {
      /* Remap extern inline to static inline.  */
      /* ??? Is it worth trying to use make_decl_one_only?  */
      if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
        {
          DECL_EXTERNAL (new_decl) = 0;
          TREE_PUBLIC (new_decl) = 0;
          DECL_WEAK (new_decl) = 0;
        }

      tree_function_versioning (old_decl, new_decl,
                                NULL, false, NULL,
                                false, NULL, NULL);
    }

  record_tm_clone_pair (old_decl, new_decl);

  symtab->call_cgraph_insertion_hooks (new_node);
  if (old_node->force_output
      || old_node->ref_list.first_referring ())
    ipa_tm_mark_force_output_node (new_node);
  if (old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);

  /* Do the same thing, but for any aliases of the original node.  */
  {
    struct create_version_alias_info data;
    data.old_node = old_node;
    data.new_decl = new_decl;
    old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
                                                  &data, true);
  }
}

/* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB.  */

static void
ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
                        basic_block bb)
{
  gimple_stmt_iterator gsi;
  gcall *g;

  transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
                         1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));

  split_block_after_labels (bb);
  gsi = gsi_after_labels (bb);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);

  node->create_edge (cgraph_node::get_create
                       (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
                     g, 0,
                     compute_call_stmt_bb_frequency (node->decl,
                                                     gimple_bb (g)));
}

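/* The MODE_SERIALIRREVOCABLE argument asks the TM runtime to switch the
   transaction to serial-irrevocable mode before the irrevocable block is
   reached, after which the uninstrumented code can run safely.  */
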
/* Construct a call to TM_GETTMCLONE and insert it before GSI.  */

static bool
ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
                               struct tm_region *region,
                               gimple_stmt_iterator *gsi, gcall *stmt)
{
  tree gettm_fn, ret, old_fn, callfn;
  gcall *g;
  gassign *g2;
  bool safe;

  old_fn = gimple_call_fn (stmt);

  if (TREE_CODE (old_fn) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (old_fn, 0);
      tree clone = get_tm_clone_pair (fndecl);

      /* By transforming the call into a TM_GETTMCLONE, we are
         technically taking the address of the original function and
         its clone.  Explain this so inlining will know this function
         is needed.  */
      cgraph_node::get (fndecl)->mark_address_taken ();
      if (clone)
        cgraph_node::get (clone)->mark_address_taken ();
    }

  safe = is_tm_safe (TREE_TYPE (old_fn));
  gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
                                    : BUILT_IN_TM_GETTMCLONE_IRR);
  ret = create_tmp_var (ptr_type_node);

  if (!safe)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  /* Discard OBJ_TYPE_REF, since we weren't able to fold it.  */
  if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
    old_fn = OBJ_TYPE_REF_EXPR (old_fn);

  g = gimple_build_call (gettm_fn, 1, old_fn);
  ret = make_ssa_name (ret, g);
  gimple_call_set_lhs (g, ret);

  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  node->create_edge (cgraph_node::get_create (gettm_fn), g, 0,
                     compute_call_stmt_bb_frequency (node->decl,
                                                     gimple_bb (g)));

  /* Cast return value from tm_gettmclone* into appropriate function
     pointer.  */
  callfn = create_tmp_var (TREE_TYPE (old_fn));
  g2 = gimple_build_assign (callfn,
                            fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
  callfn = make_ssa_name (callfn, g2);
  gimple_assign_set_lhs (g2, callfn);
  gsi_insert_before (gsi, g2, GSI_SAME_STMT);

  /* ??? This is a hack to preserve the NOTHROW bit on the call,
     which we would have derived from the decl.  Failure to save
     this bit means we might have to split the basic block.  */
  if (gimple_call_nothrow_p (stmt))
    gimple_call_set_nothrow (stmt, true);

  gimple_call_set_fn (stmt, callfn);

  /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
     for a call statement.  Fix it.  */
  {
    tree lhs = gimple_call_lhs (stmt);
    tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
    if (lhs
        && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
      {
        tree temp;

        temp = create_tmp_reg (rettype);
        gimple_call_set_lhs (stmt, temp);

        g2 = gimple_build_assign (lhs,
                                  fold_build1 (VIEW_CONVERT_EXPR,
                                               TREE_TYPE (lhs), temp));
        gsi_insert_after (gsi, g2, GSI_SAME_STMT);
      }
  }

  update_stmt (stmt);
  cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
  if (e && e->indirect_info)
    e->indirect_info->polymorphic = false;

  return true;
}

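/* The net effect is that an indirect or otherwise unresolvable call inside
   a transaction is rewritten to call the function pointer returned at run
   time by the BUILT_IN_TM_GETTMCLONE_SAFE / _IRR helper, rather than being
   resolved to a clone statically.  */
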
/* Helper function for ipa_tm_transform_calls*.  Given a call
   statement in GSI which resides inside transaction REGION, redirect
   the call to either its wrapper function, or its clone.  */

static void
ipa_tm_transform_calls_redirect (struct cgraph_node *node,
                                 struct tm_region *region,
                                 gimple_stmt_iterator *gsi,
                                 bool *need_ssa_rename_p)
{
  gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
  struct cgraph_node *new_node;
  struct cgraph_edge *e = node->get_edge (stmt);
  tree fndecl = gimple_call_fndecl (stmt);

  /* For indirect calls, pass the address through the runtime.  */
  if (fndecl == NULL)
    {
      *need_ssa_rename_p |=
        ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
      return;
    }

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
    return;

  /* Fixup recursive calls inside clones.  */
  /* ??? Why did cgraph_copy_node_for_versioning update the call edges
     for recursion but not update the call statements themselves?  */
  if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
    {
      gimple_call_set_fndecl (stmt, current_function_decl);
      return;
    }

  /* If there is a replacement, use it.  */
  fndecl = find_tm_replacement_function (fndecl);
  if (fndecl)
    {
      new_node = cgraph_node::get_create (fndecl);

      /* ??? Mark all transaction_wrap functions tm_may_enter_irr.

         We can't do this earlier in record_tm_replacement because
         cgraph_remove_unreachable_nodes is called before we inject
         references to the node.  Further, we can't do this in some
         nice central place in ipa_tm_execute because we don't have
         the exact list of wrapper functions that would be used.
         Marking more wrappers than necessary results in the creation
         of unnecessary cgraph_nodes, which can cause some of the
         other IPA passes to crash.

         We do need to mark these nodes so that we get the proper
         result in expand_call_tm.  */
      /* ??? This seems broken.  How is it that we're marking the
         CALLEE as may_enter_irr?  Surely we should be marking the
         CALLER.  Also note that find_tm_replacement_function also
         contains mappings into the TM runtime, e.g. memcpy.  These
         we know won't go irrevocable.  */
      new_node->local.tm_may_enter_irr = 1;
    }
  else
    {
      struct tm_ipa_cg_data *d;
      struct cgraph_node *tnode = e->callee;

      d = get_cg_data (&tnode, true);
      new_node = d->clone;

      /* As we've already skipped pure calls and appropriate builtins,
         and we've already marked irrevocable blocks, if we can't come
         up with a static replacement, then ask the runtime.  */
      if (new_node == NULL)
        {
          *need_ssa_rename_p |=
            ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
          return;
        }

      fndecl = new_node->decl;
    }

  e->redirect_callee (new_node);
  gimple_call_set_fndecl (stmt, fndecl);
}

/* Helper function for ipa_tm_transform_calls.  For a given BB,
   install calls to tm_irrevocable when IRR_BLOCKS are reached,
   redirect other calls to the generated transactional clone.  */

static bool
ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
                          basic_block bb, bitmap irr_blocks)
{
  gimple_stmt_iterator gsi;
  bool need_ssa_rename = false;

  if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
    {
      ipa_tm_insert_irr_call (node, region, bb);
      return true;
    }

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (!is_gimple_call (stmt))
        continue;
      if (is_tm_pure_call (stmt))
        continue;

      /* Redirect edges to the appropriate replacement or clone.  */
      ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
    }

  return need_ssa_rename;
}

/* Walk the CFG for REGION, beginning at BB.  Install calls to
   tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
   the generated transactional clone.  */

static bool
ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
                        basic_block bb, bitmap irr_blocks)
{
  bool need_ssa_rename = false;
  edge e;
  edge_iterator ei;
  auto_vec<basic_block> queue;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  queue.safe_push (bb);
  do
    {
      bb = queue.pop ();

      need_ssa_rename |=
        ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);

      if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
        continue;

      if (region && bitmap_bit_p (region->exit_blocks, bb->index))
        continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!bitmap_bit_p (visited_blocks, e->dest->index))
          {
            bitmap_set_bit (visited_blocks, e->dest->index);
            queue.safe_push (e->dest);
          }
    }
  while (!queue.is_empty ());

  BITMAP_FREE (visited_blocks);

  return need_ssa_rename;
}

/* Transform the calls within the TM regions within NODE.  */

static void
ipa_tm_transform_transaction (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  struct tm_region *region;
  bool need_ssa_rename = false;

  d = get_cg_data (&node, true);

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  for (region = d->all_tm_regions; region; region = region->next)
    {
      /* If we're sure to go irrevocable, don't transform anything.  */
      if (d->irrevocable_blocks_normal
          && bitmap_bit_p (d->irrevocable_blocks_normal,
                           region->entry_block->index))
        {
          transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
                                   | GTMA_MAY_ENTER_IRREVOCABLE
                                   | GTMA_HAS_NO_INSTRUMENTATION);
          continue;
        }

      need_ssa_rename |=
        ipa_tm_transform_calls (node, region, region->entry_block,
                                d->irrevocable_blocks_normal);
    }

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}

/* Transform the calls within the transactional clone of NODE.  */

static void
ipa_tm_transform_clone (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  bool need_ssa_rename;

  d = get_cg_data (&node, true);

  /* If this function makes no calls and has no irrevocable blocks,
     then there's nothing to do.  */
  /* ??? Remove non-aborting top-level transactions.  */
  if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
    return;

  push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  need_ssa_rename =
    ipa_tm_transform_calls (d->clone, NULL,
                            single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
                            d->irrevocable_blocks_clone);

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}

/* Main entry point for the transactional memory IPA pass.  */

static unsigned int
ipa_tm_execute (void)
{
  cgraph_node_queue tm_callees = cgraph_node_queue ();
  /* List of functions that will go irrevocable.  */
  cgraph_node_queue irr_worklist = cgraph_node_queue ();

  struct cgraph_node *node;
  struct tm_ipa_cg_data *d;
  enum availability a;
  unsigned int i;

#ifdef ENABLE_CHECKING
  cgraph_node::verify_cgraph_nodes ();
#endif

  bitmap_obstack_initialize (&tm_obstack);
  initialize_original_copy_tables ();

  /* For all local functions marked tm_callable, queue them.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (is_tm_callable (node->decl)
        && node->get_availability () >= AVAIL_INTERPOSABLE)
      {
        d = get_cg_data (&node, true);
        maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
      }

  /* For all local reachable functions...  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
        && node->get_availability () >= AVAIL_INTERPOSABLE)
      {
        /* ... marked tm_pure, record that fact for the runtime by
           indicating that the pure function is its own tm_callable.
           No need to do this if the function's address can't be taken.  */
        if (is_tm_pure (node->decl))
          {
            if (!node->local.local)
              record_tm_clone_pair (node->decl, node->decl);
            continue;
          }

        push_cfun (DECL_STRUCT_FUNCTION (node->decl));
        calculate_dominance_info (CDI_DOMINATORS);

        tm_region_init (NULL);
        if (all_tm_regions)
          {
            d = get_cg_data (&node, true);

            /* Scan for calls that are in each transaction, and
               generate the uninstrumented code path.  */
            ipa_tm_scan_calls_transaction (d, &tm_callees);

            /* Put it in the worklist so we can scan the function
               later (ipa_tm_scan_irr_function) and mark the
               irrevocable blocks.  */
            maybe_push_queue (node, &irr_worklist, &d->in_worklist);
            d->want_irr_scan_normal = true;
          }

        pop_cfun ();
      }

  /* For every local function on the callee list, scan as if we will be
     creating a transactional clone, queueing all new functions we find
     along the way.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      a = node->get_availability ();
      d = get_cg_data (&node, true);

      /* Put it in the worklist so we can scan the function later
         (ipa_tm_scan_irr_function) and mark the irrevocable
         blocks.  */
      maybe_push_queue (node, &irr_worklist, &d->in_worklist);

      /* Some callees cannot be arbitrarily cloned.  These will always be
         irrevocable.  Mark these now, so that we need not scan them.  */
      if (is_tm_irrevocable (node->decl))
        ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a <= AVAIL_NOT_AVAILABLE
               && !is_tm_safe_or_pure (node->decl))
        ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a >= AVAIL_INTERPOSABLE)
        {
          if (!tree_versionable_function_p (node->decl))
            ipa_tm_note_irrevocable (node, &irr_worklist);
          else if (!d->is_irrevocable)
            {
              /* If this is an alias, make sure its base is queued as well.
                 We need not scan the callees now, as the base will do.  */
              if (node->alias)
                {
                  node = cgraph_node::get (node->thunk.alias);
                  d = get_cg_data (&node, true);
                  maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
                  continue;
                }

              /* Add all nodes called by this function into
                 tm_callees as well.  */
              ipa_tm_scan_calls_clone (node, &tm_callees);
            }
        }
    }

  /* Iterate scans until no more work to be done.  Prefer not to use
     vec::pop because the worklist tends to follow a breadth-first
     search of the callgraph, which should allow convergence with a
     minimum number of scans.  But we also don't want the worklist
     array to grow without bound, so we shift the array up periodically.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      if (i > 256 && i == irr_worklist.length () / 8)
        {
          irr_worklist.block_remove (0, i);
          i = 0;
        }

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;

      if (d->want_irr_scan_normal)
        {
          d->want_irr_scan_normal = false;
          ipa_tm_scan_irr_function (node, false);
        }
      if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
        ipa_tm_note_irrevocable (node, &irr_worklist);
    }
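
  /* Example of the compaction above: once more than 256 entries have been
     consumed and the cursor reaches 1/8 of the worklist length, the already
     processed prefix is dropped with block_remove and the scan restarts at
     index 0 of the shortened array.  */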

  /* For every function on the callee list, collect the tm_may_enter_irr
     bit on the node.  */
  irr_worklist.truncate (0);
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (ipa_tm_mayenterirr_function (node))
        {
          d = get_cg_data (&node, true);
          gcc_assert (d->in_worklist == false);
          maybe_push_queue (node, &irr_worklist, &d->in_worklist);
        }
    }

  /* Propagate the tm_may_enter_irr bit to callers until stable.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      struct cgraph_node *caller;
      struct cgraph_edge *e;
      struct ipa_ref *ref;

      if (i > 256 && i == irr_worklist.length () / 8)
        {
          irr_worklist.block_remove (0, i);
          i = 0;
        }

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;
      node->local.tm_may_enter_irr = true;

      /* Propagate back to normal callers.  */
      for (e = node->callers; e; e = e->next_caller)
        {
          caller = e->caller;
          if (!is_tm_safe_or_pure (caller->decl)
              && !caller->local.tm_may_enter_irr)
            {
              d = get_cg_data (&caller, true);
              maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
            }
        }

      /* Propagate back to referring aliases as well.  */
      FOR_EACH_ALIAS (node, ref)
        {
          caller = dyn_cast <cgraph_node *> (ref->referring);
          if (!caller->local.tm_may_enter_irr)
            {
              /* ?? Do not traverse aliases here.  */
              d = get_cg_data (&caller, false);
              maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
            }
        }
    }

  /* Now validate all tm_safe functions, and all atomic regions in
     other functions.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
        && node->get_availability () >= AVAIL_INTERPOSABLE)
      {
        d = get_cg_data (&node, true);
        if (is_tm_safe (node->decl))
          ipa_tm_diagnose_tm_safe (node);
        else if (d->all_tm_regions)
          ipa_tm_diagnose_transaction (node, d->all_tm_regions);
      }

  /* Create clones.  Do those that are not irrevocable and have a
     positive call count.  Do those publicly visible functions that
     the user directed us to clone.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      bool doit = false;

      node = tm_callees[i];
      if (node->cpp_implicit_alias)
        continue;

      a = node->get_availability ();
      d = get_cg_data (&node, true);

      if (a <= AVAIL_NOT_AVAILABLE)
        doit = is_tm_callable (node->decl);
      else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
        doit = true;
      else if (!d->is_irrevocable
               && d->tm_callers_normal + d->tm_callers_clone > 0)
        doit = true;

      if (doit)
        ipa_tm_create_version (node);
    }

  /* Redirect calls to the new clones, and insert irrevocable marks.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (node->analyzed)
        {
          d = get_cg_data (&node, true);
          if (d->clone)
            ipa_tm_transform_clone (node);
        }
    }
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
        && node->get_availability () >= AVAIL_INTERPOSABLE)
      {
        d = get_cg_data (&node, true);
        if (d->all_tm_regions)
          ipa_tm_transform_transaction (node);
      }

  /* Free and clear all data structures.  */
  tm_callees.release ();
  irr_worklist.release ();
  bitmap_obstack_release (&tm_obstack);
  free_original_copy_tables ();

  FOR_EACH_FUNCTION (node)
    node->aux = NULL;

#ifdef ENABLE_CHECKING
  cgraph_node::verify_cgraph_nodes ();
#endif

  return 0;
}

namespace {

const pass_data pass_data_ipa_tm =
{
  SIMPLE_IPA_PASS, /* type */
  "tmipa", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_tm : public simple_ipa_opt_pass
{
public:
  pass_ipa_tm (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return ipa_tm_execute (); }

}; // class pass_ipa_tm

} // anon namespace

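/* The gate above keys the whole pass off flag_tm, i.e. the pass only runs
   when transactional memory support was requested with -fgnu-tm.  */
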
simple_ipa_opt_pass *
make_pass_ipa_tm (gcc::context *ctxt)
{
  return new pass_ipa_tm (ctxt);
}

#include "gt-trans-mem.h"