/* Passes for transactional memory support.
   Copyright (C) 2008-2015 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
22 #include "coretypes.h"
28 #include "fold-const.h"
31 #include "hard-reg-set.h"
34 #include "dominance.h"
36 #include "basic-block.h"
37 #include "tree-ssa-alias.h"
38 #include "internal-fn.h"
40 #include "gimple-expr.h"
47 #include "gimple-iterator.h"
48 #include "gimplify-me.h"
49 #include "gimple-walk.h"
50 #include "gimple-ssa.h"
51 #include "plugin-api.h"
55 #include "stringpool.h"
56 #include "tree-ssanames.h"
57 #include "tree-into-ssa.h"
58 #include "tree-pass.h"
59 #include "tree-inline.h"
60 #include "diagnostic-core.h"
63 #include "trans-mem.h"
66 #include "langhooks.h"
67 #include "gimple-pretty-print.h"
69 #include "tree-ssa-address.h"
#define A_RUNINSTRUMENTEDCODE    0x0001
#define A_RUNUNINSTRUMENTEDCODE  0x0002
#define A_SAVELIVEVARIABLES      0x0004
#define A_RESTORELIVEVARIABLES   0x0008
#define A_ABORTTRANSACTION       0x0010

#define AR_USERABORT             0x0001
#define AR_USERRETRY             0x0002
#define AR_TMCONFLICT            0x0004
#define AR_EXCEPTIONBLOCKABORT   0x0008
#define AR_OUTERABORT            0x0010

#define MODE_SERIALIRREVOCABLE   0x0000
/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction_atomic {
	  local++;
	  if (++global == 10)
	    __transaction_cancel;
	}

  During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
  trivially replaced with a GIMPLE_TRANSACTION node.

  During pass_lower_tm, we examine the body of transactions looking
  for aborts.  Transactions that do not contain an abort may be
  merged into an outer transaction.  We also add a TRY-FINALLY node
  to arrange for the transaction to be committed on any exit.

  [??? Think about how this arrangement affects throw-with-commit
  and throw-with-abort operations.  In this case we want the TRY to
  handle gotos, but not to catch any exceptions because the transaction
  will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    ...
	      __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

  During pass_lower_eh, we create EH regions for the transactions,
  intermixed with the regular EH stuff.  This gives us a nice persistent
  mapping (all the way through rtl) from transactional memory operation
  back to the transaction, which allows us to get the abnormal edges
  correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	...
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:

  This is the end of all_lowering_passes, and so is what is present
  during the IPA passes, and through all of the optimization passes.

  During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
  functions and mark functions for cloning.

  At the end of gimple optimization, before exiting SSA form,
  pass_tm_edges replaces statements that perform transactional
  memory operations with the appropriate TM builtins, and swaps
  out function calls with their transactional clones.  At this
  point we introduce the abnormal transaction restart edges and
  complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	if (x & abort_transaction)
	  goto over;
	...
	t0 = __builtin___tm_load (global);
	...
	__builtin___tm_store (&global, t1);
	...
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:
*/
static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);
/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));

    default:
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case POINTER_TYPE:
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}
/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    default:
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}
/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is, by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}

/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  tree attrs = get_attrs_for (x);

  if (lookup_attribute ("transaction_safe", attrs))
    return true;
  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
    return true;

  return false;
}
/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }

  return is_tm_pure (fn);
}

/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (lookup_attribute ("transaction_callable", attrs))
    return true;
  if (lookup_attribute ("transaction_safe", attrs))
    return true;
  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
    return true;

  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);

  return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
}
/* Return true for built in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}

/* Return true if STMT is a built in function call that "ends" a
   transaction.  */

bool
is_tm_ending (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl != NULL_TREE
	  && is_tm_ending_fndecl (fndecl));
}

/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}
/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}
/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}
/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}
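
/* For illustration only (an editor's sketch, not part of the original
   sources): a user-level

	__transaction_atomic { ... __transaction_cancel; }

   causes the front end to call build_tm_abort_call (loc, false), which
   builds roughly

	__builtin__ITM_abortTransaction (AR_USERABORT);

   while __transaction_cancel [[outer]] passes
   AR_USERABORT | AR_OUTERABORT instead.  The runtime entry-point
   spelling is an assumption based on the libitm ABI.  */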
/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

struct tm_wrapper_hasher : ggc_cache_hasher<tree_map *>
{
  static inline hashval_t hash (tree_map *m) { return m->hash; }

  static inline bool
  equal (tree_map *a, tree_map *b)
  {
    return a->base.from == b->base.from;
  }

  static void
  handle_cache_entry (tree_map *&m)
  {
    extern void gt_ggc_mx (tree_map *&);
    if (m == HTAB_EMPTY_ENTRY || m == HTAB_DELETED_ENTRY)
      return;
    else if (ggc_marked_p (m->base.from))
      gt_ggc_mx (m);
    else
      m = static_cast<tree_map *> (HTAB_DELETED_ENTRY);
  }
};

static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;
void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);

  h = ggc_alloc<tree_map> ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
  *slot = h;
}
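
/* Illustrative sketch (an assumption, not taken from this file): with
   the tm_wrap attribute a user can route calls to foo through tmfoo
   inside transactions, e.g.

	void tmfoo (void) __attribute__((transaction_safe));
	void foo (void) __attribute__((tm_wrap (tmfoo)));

   The front end then calls record_tm_replacement (foo, tmfoo), and
   find_tm_replacement_function (foo) below returns tmfoo.  */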
/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = tm_wrap_map->find_with_hash (&in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we already have these defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here:  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	break;
      }

  return NULL_TREE;
}
/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */

void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing...  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}
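
/* For illustration only: when the front end sees a declaration such as

	void *malloc (size_t);

   tm_malloc_replacement records a mapping from malloc to the TM-aware
   allocator builtin (BUILT_IN_TM_MALLOC), and likewise for calloc and
   free, so that calls inside a transaction can be redirected to the
   runtime's allocator.  */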
/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple stmt;
};

/* Return true if T is a volatile variable of some kind.  */

static bool
volatile_var_p (tree t)
{
  return (SSA_VAR_P (t)
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}
/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (volatile_var_p (*tp)
      && d->block_flags & DIAG_TM_SAFE
      && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      error_at (gimple_location (d->stmt),
		"invalid volatile use of %qD inside transaction",
		*tp);
    }

  return NULL_TREE;
}

static inline bool
is_tm_safe_or_pure (const_tree x)
{
  return is_tm_safe (x) || is_tm_pure (x);
}
static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  /* Save stmt for use in leaf analysis.  */
  d->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fn = gimple_call_fn (stmt);

	if ((d->summary_flags & DIAG_TM_OUTER) == 0
	    && is_tm_may_cancel_outer (fn))
	  error_at (gimple_location (stmt),
		    "%<transaction_may_cancel_outer%> function call not within"
		    " outer transaction or %<transaction_may_cancel_outer%>");

	if (d->summary_flags & DIAG_TM_SAFE)
	  {
	    bool is_safe, direct_call_p;
	    tree replacement;

	    if (TREE_CODE (fn) == ADDR_EXPR
		&& TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
	      {
		direct_call_p = true;
		replacement = TREE_OPERAND (fn, 0);
		replacement = find_tm_replacement_function (replacement);
	      }
	    else
	      {
		direct_call_p = false;
		replacement = NULL_TREE;
	      }

	    if (is_tm_safe_or_pure (fn))
	      is_safe = true;
	    else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
	      {
		/* A function explicitly marked transaction_callable as
		   opposed to transaction_safe is being defined to be
		   unsafe as part of its ABI, regardless of its contents.  */
		is_safe = false;
	      }
	    else if (direct_call_p)
	      {
		if (IS_TYPE_OR_DECL_P (fn)
		    && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
		  is_safe = true;
		else if (replacement)
		  {
		    /* ??? At present we've been considering replacements
		       merely transaction_callable, and therefore might
		       enter irrevocable.  The tm_wrap attribute has not
		       yet made it into the new language spec.  */
		    is_safe = false;
		  }
		else
		  {
		    /* ??? Diagnostics for unmarked direct calls moved into
		       the IPA pass.  Section 3.2 of the spec details how
		       functions not marked should be considered "implicitly
		       safe" based on having examined the function body.  */
		    is_safe = true;
		  }
	      }
	    else
	      {
		/* An unmarked indirect call.  Consider it unsafe even
		   though optimization may yet figure out how to inline.  */
		is_safe = false;
	      }

	    if (!is_safe)
	      {
		if (TREE_CODE (fn) == ADDR_EXPR)
		  fn = TREE_OPERAND (fn, 0);
		if (d->block_flags & DIAG_TM_SAFE)
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"atomic transaction", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "atomic transaction", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "atomic transaction");
		      }
		  }
		else
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"%<transaction_safe%> function", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "%<transaction_safe%> function", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "%<transaction_safe%> function");
		      }
		  }
	      }
	  }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
	 asm statements, and then add "transaction_safe" to it.
	 Either that or get the language spec to resurrect __tm_waiver.  */
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in atomic transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
	gtransaction *trans_stmt = as_a <gtransaction *> (stmt);
	unsigned char inner_flags = DIAG_TM_SAFE;

	if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_RELAXED)
	  {
	    if (d->block_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in atomic transaction");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in %<transaction_safe%> function");
	    inner_flags = DIAG_TM_RELAXED;
	  }
	else if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_OUTER)
	  {
	    if (d->block_flags)
	      error_at (gimple_location (stmt),
			"outer transaction in transaction");
	    else if (d->func_flags & DIAG_TM_OUTER)
	      error_at (gimple_location (stmt),
			"outer transaction in "
			"%<transaction_may_cancel_outer%> function");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"outer transaction in %<transaction_safe%> function");
	    inner_flags |= DIAG_TM_OUTER;
	  }

	*handled_ops_p = true;
	if (gimple_transaction_body (trans_stmt))
	  {
	    struct walk_stmt_info wi_inner;
	    struct diagnose_tm d_inner;

	    memset (&d_inner, 0, sizeof (d_inner));
	    d_inner.func_flags = d->func_flags;
	    d_inner.block_flags = d->block_flags | inner_flags;
	    d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;

	    memset (&wi_inner, 0, sizeof (wi_inner));
	    wi_inner.info = &d_inner;

	    walk_gimple_seq (gimple_transaction_body (trans_stmt),
			     diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
	  }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}
static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
		   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}
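
/* For illustration only: given

	void unsafe_fn (void);			// not transaction_safe

	void f (void)
	{
	  __transaction_atomic { unsafe_fn (); }
	}

   the walk above rejects the call with the "unsafe function call %qD
   within atomic transaction" error, while the same call inside
   __transaction_relaxed is accepted (it may go irrevocable).  */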
const pass_data pass_data_diagnose_tm_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_tm_blocks", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_tm_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_tm_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }

}; // class pass_diagnose_tm_blocks

gimple_opt_pass *
make_pass_diagnose_tm_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_tm_blocks (ctxt);
}
/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

       struct large { int x[1000]; };
       struct large lala = { 0 };
       __transaction {
	 lala.x[i] = 123;
	 ...
       }

   We can either save/restore:

	trxn = _ITM_startTransaction ();
	if (trxn & a_saveLiveVariables)
	  tmp_lala1 = lala.x[i];
	else if (a & a_restoreLiveVariables)
	  lala.x[i] = tmp_lala1;

   or use the logging functions:

	trxn = _ITM_startTransaction ();
	_ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree to shadow all of the writes to a given
   location (thus reducing the total number of logging calls), but not
   so high as to be called on a path that does not perform a
   store.  */

/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
typedef struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  vec<gimple> stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
} *tm_log_entry_t;
/* Log entry hashtable helpers.  */

struct log_entry_hasher
{
  typedef tm_log_entry *value_type;
  typedef tm_log_entry *compare_type;
  static inline hashval_t hash (const tm_log_entry *);
  static inline bool equal (const tm_log_entry *, const tm_log_entry *);
  static inline void remove (tm_log_entry *);
};

/* Htab support.  Return hash value for a `tm_log_entry'.  */
inline hashval_t
log_entry_hasher::hash (const tm_log_entry *log)
{
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
inline bool
log_entry_hasher::equal (const tm_log_entry *log1, const tm_log_entry *log2)
{
  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
inline void
log_entry_hasher::remove (tm_log_entry *lp)
{
  lp->stmts.release ();
  free (lp);
}

/* The actual log.  */
static hash_table<log_entry_hasher> *tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static vec<tree> tm_log_save_addresses;
enum thread_memory_type
  {
    mem_non_local = 0,
    mem_thread_local,
    mem_transaction_local,
    mem_max
  };

typedef struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
} tm_new_mem_map_t;

/* Hashtable helpers.  */

struct tm_mem_map_hasher : typed_free_remove <tm_new_mem_map_t>
{
  typedef tm_new_mem_map_t *value_type;
  typedef tm_new_mem_map_t *compare_type;
  static inline hashval_t hash (const tm_new_mem_map_t *);
  static inline bool equal (const tm_new_mem_map_t *, const tm_new_mem_map_t *);
};

inline hashval_t
tm_mem_map_hasher::hash (const tm_new_mem_map_t *v)
{
  return (intptr_t)v->val >> 4;
}

inline bool
tm_mem_map_hasher::equal (const tm_new_mem_map_t *v, const tm_new_mem_map_t *c)
{
  return v->val == c->val;
}

/* Map for an SSA_NAME originally pointing to a non aliased new piece
   of memory (malloc, alloc, etc).  */
static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;
/* Initialize logging data structures.  */

static void
tm_log_init (void)
{
  tm_log = new hash_table<log_entry_hasher> (10);
  tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
  tm_log_save_addresses.create (5);
}

/* Free logging data structures.  */

static void
tm_log_delete (void)
{
  delete tm_log;
  tm_log = NULL;
  delete tm_new_mem_hash;
  tm_new_mem_hash = NULL;
  tm_log_save_addresses.release ();
}
/* Return true if MEM is a transaction invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */

static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
      basic_block def_bb;

      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return def_bb != region_entry_block
	     && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
    }

  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}
/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry_block for the transaction.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.

   If known, ENTRY_BLOCK is the entry block for the region, otherwise
   NULL.  */

static void
tm_log_add (basic_block entry_block, tree addr, gimple stmt)
{
  tm_log_entry **slot;
  struct tm_log_entry l, *lp;

  l.addr = addr;
  slot = tm_log->find_slot (&l, INSERT);
  if (!*slot)
    {
      tree type = TREE_TYPE (addr);

      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      *slot = lp;

      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
	  && transaction_invariant_address_p (lp->addr, entry_block)
	  && TYPE_SIZE_UNIT (type) != NULL
	  && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
	  && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
	      < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
	  /* We must be able to copy this type normally.  I.e., no
	     special constructors and the like.  */
	  && !TREE_ADDRESSABLE (type))
	{
	  lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
	  lp->stmts.create (0);
	  lp->entry_block = entry_block;
	  /* Save addresses separately in dominator order so we don't
	     get confused by overlapping addresses in the save/restore
	     sequence.  */
	  tm_log_save_addresses.safe_push (lp->addr);
	}
      else
	{
	  /* Use the logging functions.  */
	  lp->stmts.create (5);
	  lp->stmts.quick_push (stmt);
	  lp->save_var = NULL;
	}
    }
  else
    {
      size_t i;
      gimple oldstmt;

      lp = *slot;

      /* If we're generating a save/restore sequence, we don't care
	 about statements.  */
      if (lp->save_var)
	return;

      for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
	{
	  if (stmt == oldstmt)
	    return;
	  /* We already have a store to the same address, higher up the
	     dominator tree.  Nothing to do.  */
	  if (dominated_by_p (CDI_DOMINATORS,
			      gimple_bb (stmt), gimple_bb (oldstmt)))
	    return;
	  /* We should be processing blocks in dominator tree order.  */
	  gcc_assert (!dominated_by_p (CDI_DOMINATORS,
				       gimple_bb (oldstmt), gimple_bb (stmt)));
	}
      /* Store is on a different code path.  */
      lp->stmts.safe_push (stmt);
    }
}
/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */

static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}
/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */

static void
tm_log_emit_stmt (tree addr, gimple stmt)
{
  tree type = TREE_TYPE (addr);
  tree size = TYPE_SIZE_UNIT (type);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple log;
  enum built_in_function code = BUILT_IN_TM_LOG;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (tree_fits_uhwi_p (size))
    {
      unsigned int n = tree_to_uhwi (size);
      switch (n)
	{
	case 1:
	  code = BUILT_IN_TM_LOG_1;
	  break;
	case 2:
	  code = BUILT_IN_TM_LOG_2;
	  break;
	case 4:
	  code = BUILT_IN_TM_LOG_4;
	  break;
	case 8:
	  code = BUILT_IN_TM_LOG_8;
	  break;
	default:
	  code = BUILT_IN_TM_LOG;
	  if (TREE_CODE (type) == VECTOR_TYPE)
	    {
	      if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
		code = BUILT_IN_TM_LOG_M64;
	      else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
		code = BUILT_IN_TM_LOG_M128;
	      else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
		code = BUILT_IN_TM_LOG_M256;
	    }
	  break;
	}
    }

  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
  else
    log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}
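
/* For illustration only (an editor's sketch): logging a 4-byte scalar X
   before a store emits roughly

	__builtin__ITM_LU4 (&x);

   via BUILT_IN_TM_LOG_4, while an object of unusual size falls back to
   the generic two-argument form __builtin__ITM_LB (&x, sizeof (x)).
   The runtime entry-point names are assumptions based on the libitm
   ABI, not taken from this file.  */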
/* Go through the log and instrument addresses that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   later.  */

static void
tm_log_emit (void)
{
  hash_table<log_entry_hasher>::iterator hi;
  struct tm_log_entry *lp;

  FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
    {
      size_t i;
      gimple stmt;

      if (dump_file)
	{
	  fprintf (dump_file, "TM thread private mem logging: ");
	  print_generic_expr (dump_file, lp->addr, 0);
	  fprintf (dump_file, "\n");
	}

      if (lp->save_var)
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING to variable\n");
	  continue;
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING with logging functions\n");
	  for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
	    tm_log_emit_stmt (lp->addr, stmt);
	}
    }
}
/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */

static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple stmt;
  struct tm_log_entry l, *lp;

  for (i = 0; i < tm_log_save_addresses.length (); ++i)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));

      /* Make sure we can create an SSA_NAME for this type.  For
	 instance, aggregates aren't allowed, in which case the system
	 will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
	{
	  lp->save_var = make_ssa_name (lp->save_var, stmt);
	  gimple_assign_set_lhs (stmt, lp->save_var);
	}

      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
}
/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */

static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
  int i;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple stmt;

  for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
      gsi = gsi_start_bb (bb);

      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
}
static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
			       struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
				  struct walk_stmt_info *);
/* Evaluate an address X being dereferenced and determine if it
   originally points to a non aliased new chunk of memory (malloc,
   calloc or new).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
  gimple stmt = NULL;
  enum tree_code code;
  tm_new_mem_map_t **slot;
  tm_new_mem_map_t elt, *elt_p;
  enum thread_memory_type retval = mem_transaction_local;

  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
	 either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;

  /* Look in cache first.  */
  elt.val = x;
  slot = tm_new_mem_hash->find_slot (&elt, INSERT);
  elt_p = *slot;
  if (elt_p)
    return elt_p->local_new_memory;

  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map_t);
  elt_p->val = x;
  elt_p->local_new_memory = mem_transaction_local;

  /* Search DEF chain to find the original definition of this address.  */
  do
    {
      if (ptr_deref_may_alias_global_p (x))
	{
	  /* Address escapes.  This is not thread-private.  */
	  retval = mem_non_local;
	  goto new_memory_ret;
	}

      stmt = SSA_NAME_DEF_STMT (x);

      /* If the malloc call is outside the transaction, this is
	 thread-local.  */
      if (retval != mem_thread_local
	  && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
	retval = mem_thread_local;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  /* x = foo ==> foo */
	  if (code == SSA_NAME)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = foo + n ==> foo */
	  else if (code == POINTER_PLUS_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = (cast*) foo ==> foo */
	  else if (code == VIEW_CONVERT_EXPR || CONVERT_EXPR_CODE_P (code))
	    x = gimple_assign_rhs1 (stmt);
	  /* x = c ? op1 : op2 ==> op1 or op2 just like a PHI */
	  else if (code == COND_EXPR)
	    {
	      tree op1 = gimple_assign_rhs2 (stmt);
	      tree op2 = gimple_assign_rhs3 (stmt);
	      enum thread_memory_type mem;
	      retval = thread_private_new_memory (entry_block, op1);
	      if (retval == mem_non_local)
		goto new_memory_ret;
	      mem = thread_private_new_memory (entry_block, op2);
	      retval = MIN (retval, mem);
	      goto new_memory_ret;
	    }
	  else
	    {
	      retval = mem_non_local;
	      goto new_memory_ret;
	    }
	}
      else
	{
	  if (gimple_code (stmt) == GIMPLE_PHI)
	    {
	      unsigned int i;
	      enum thread_memory_type mem;
	      tree phi_result = gimple_phi_result (stmt);

	      /* If any of the ancestors are non-local, we are sure to
		 be non-local.  Otherwise we can avoid doing anything
		 and inherit what has already been generated.  */
	      for (i = 0; i < gimple_phi_num_args (stmt); ++i)
		{
		  tree op = PHI_ARG_DEF (stmt, i);

		  /* Exclude self-assignment.  */
		  if (phi_result == op)
		    continue;

		  mem = thread_private_new_memory (entry_block, op);
		  if (mem == mem_non_local)
		    {
		      retval = mem;
		      goto new_memory_ret;
		    }
		  retval = MIN (retval, mem);
		}
	      goto new_memory_ret;
	    }
	  break;
	}
    }
  while (TREE_CODE (x) == SSA_NAME);

  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    ;
  else
    retval = mem_non_local;

 new_memory_ret:
  elt_p->local_new_memory = retval;
  return retval;
}
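
/* For illustration only (hypothetical GIMPLE): given

	p_1 = malloc (40);		// inside the transaction
	*p_1 = x;

   the walk above classifies p_1 as mem_transaction_local, so the store
   through it needs no barrier; had the malloc dominated the transaction
   entry, p_1 would be mem_thread_local, and a pointer that may alias
   global memory is mem_non_local.  */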
/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region where STMT resides.
   NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
static bool
requires_barrier (basic_block entry_block, tree x, gimple stmt)
{
  tree orig = x;

  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);

  switch (TREE_CODE (x))
    {
    case INDIRECT_REF:
    case MEM_REF:
      {
	enum thread_memory_type ret;

	ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
	if (ret == mem_non_local)
	  return true;
	if (stmt && ret == mem_thread_local)
	  /* ?? Should we pass `orig', or the INDIRECT_REF X.  ?? */
	  tm_log_add (entry_block, orig, stmt);

	/* Transaction-locals require nothing at all.  For malloc, a
	   transaction restart frees the memory and we reallocate.
	   For alloca, the stack pointer gets reset by the retry and
	   we reuse the same memory.  */
	return false;
      }

    case TARGET_MEM_REF:
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
	return true;
      x = TREE_OPERAND (TMR_BASE (x), 0);
      if (TREE_CODE (x) == PARM_DECL)
	return false;
      gcc_assert (TREE_CODE (x) == VAR_DECL);
      /* FALLTHRU */

    case PARM_DECL:
    case RESULT_DECL:
    case VAR_DECL:
      if (DECL_BY_REFERENCE (x))
	{
	  /* ??? This value is a pointer, but aggregate_value_p has been
	     jigged to return true which confuses needs_to_live_in_memory.
	     This ought to be cleaned up generically.

	     FIXME: Verify this still happens after the next mainline
	     merge.  Testcase ie g++.dg/tm/pr47554.C.  */
	  return false;
	}

      if (is_global_var (x))
	return !TREE_READONLY (x);
      if (/* FIXME: This condition should actually go below in the
	     tm_log_add() call, however is_call_clobbered() depends on
	     aliasing info which is not available during
	     gimplification.  Since requires_barrier() gets called
	     during lower_sequence_tm/gimplification, leave the call
	     to needs_to_live_in_memory until we eliminate
	     lower_sequence_tm altogether.  */
	  needs_to_live_in_memory (x))
	return true;
      else
	{
	  /* For local memory that doesn't escape (aka thread private
	     memory), we can either save the value at the beginning of
	     the transaction and restore on restart, or call a tm
	     function to dynamically save and restore on restart
	     (ITM_L*).  */
	  if (stmt)
	    tm_log_add (entry_block, orig, stmt);
	  return false;
	}

    default:
      return false;
    }
}
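
/* For illustration only: inside a transaction,

	global_var = 1;		// global: needs a TM store barrier
	local_tmp = 2;		// non-escaping local: logged or saved/restored
	*p = 3;			// depends on thread_private_new_memory (p)

   which is exactly the distinction requires_barrier draws above.  */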
/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */

static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
    *state |= GTMA_HAVE_LOAD;
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
    *state |= GTMA_HAVE_STORE;
}

/* Mark a GIMPLE_CALL as appropriate for being inside a transaction.  */

static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree fn;

  if (is_tm_pure_call (stmt))
    return;

  /* Check if this call is a transaction abort.  */
  fn = gimple_call_fndecl (stmt);
  if (is_tm_abort (fn))
    *state |= GTMA_HAVE_ABORT;

  /* Note that something may happen.  */
  *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}
/* Lower a GIMPLE_TRANSACTION statement.  */

static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
  gimple g;
  gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi));
  unsigned int *outer_state = (unsigned int *) wi->info;
  unsigned int this_state = 0;
  struct walk_stmt_info this_wi;

  /* First, lower the body.  The scanning that we do inside gives
     us some idea of what we're dealing with.  */
  memset (&this_wi, 0, sizeof (this_wi));
  this_wi.info = (void *) &this_state;
  walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
		       lower_sequence_tm, NULL, &this_wi);

  /* If there was absolutely nothing transaction related inside the
     transaction, we may elide it.  Likewise if this is a nested
     transaction and does not contain an abort.  */
  if (this_state == 0
      || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
    {
      if (outer_state)
	*outer_state |= this_state;

      gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
			     GSI_SAME_STMT);
      gimple_transaction_set_body (stmt, NULL);

      gsi_remove (gsi, true);
      wi->removed_stmt = true;
      return;
    }

  /* Wrap the body of the transaction in a try-finally node so that
     the commit call is always properly called.  */
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
  if (flag_exceptions)
    {
      tree ptr;
      gimple_seq n_seq, e_seq;

      n_seq = gimple_seq_alloc_with_stmt (g);
      e_seq = NULL;

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
			     1, integer_zero_node);
      ptr = create_tmp_var (ptr_type_node);
      gimple_call_set_lhs (g, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
			     1, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_eh_else (n_seq, e_seq);
    }

  g = gimple_build_try (gimple_transaction_body (stmt),
			gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
  gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);

  gimple_transaction_set_body (stmt, NULL);

  /* If the transaction calls abort or if this is an outer transaction,
     add an "over" label afterwards.  */
  if ((this_state & (GTMA_HAVE_ABORT))
      || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
    {
      tree label = create_artificial_label (UNKNOWN_LOCATION);
      gimple_transaction_set_label (stmt, label);
      gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
    }

  /* Record the set of operations found for use later.  */
  this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
  gimple_transaction_set_subcode (stmt, this_state);
}
/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being in a transaction.  */

static tree
lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		   struct walk_stmt_info *wi)
{
  unsigned int *state = (unsigned int *) wi->info;
  gimple stmt = gsi_stmt (*gsi);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* Only memory reads/writes need to be instrumented.  */
      if (gimple_assign_single_p (stmt))
	examine_assign_tm (state, gsi);
      break;

    case GIMPLE_CALL:
      examine_call_tm (state, gsi);
      break;

    case GIMPLE_ASM:
      *state |= GTMA_MAY_ENTER_IRREVOCABLE;
      break;

    case GIMPLE_TRANSACTION:
      lower_transaction (gsi, wi);
      break;

    default:
      *handled_ops_p = !gimple_has_substatements (stmt);
      break;
    }

  return NULL_TREE;
}

/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being outside of a transaction.  */

static tree
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		      struct walk_stmt_info * wi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_TRANSACTION)
    {
      *handled_ops_p = true;
      lower_transaction (gsi, wi);
    }
  else
    *handled_ops_p = !gimple_has_substatements (stmt);

  return NULL_TREE;
}
/* Main entry point for flattening GIMPLE_TRANSACTION constructs.  After
   this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
   been moved out, and all the data required for constructing a proper
   CFG has been recorded.  */

static unsigned int
execute_lower_tm (void)
{
  struct walk_stmt_info wi;
  gimple_seq body;

  /* Transactional clones aren't created until a later pass.  */
  gcc_assert (!decl_is_tm_clone (current_function_decl));

  body = gimple_body (current_function_decl);
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
  gimple_set_body (current_function_decl, body);

  return 0;
}
const pass_data pass_data_lower_tm =
{
  GIMPLE_PASS, /* type */
  "tmlower", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_tm : public gimple_opt_pass
{
public:
  pass_lower_tm (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return execute_lower_tm (); }

}; // class pass_lower_tm

gimple_opt_pass *
make_pass_lower_tm (gcc::context *ctxt)
{
  return new pass_lower_tm (ctxt);
}
/* Collect region information for each transaction.  */

struct tm_region
{
public:

  /* The field "transaction_stmt" is initially a gtransaction *,
     but eventually gets lowered to a gcall * (to BUILT_IN_TM_START).

     Helper method to get it as a gtransaction *, with code-checking
     in a checked-build.  */

  gtransaction *
  get_transaction_stmt () const
  {
    return as_a <gtransaction *> (transaction_stmt);
  }

public:

  /* Link to the next unnested transaction.  */
  struct tm_region *next;

  /* Link to the next inner transaction.  */
  struct tm_region *inner;

  /* Link to the next outer transaction.  */
  struct tm_region *outer;

  /* The GIMPLE_TRANSACTION statement beginning this transaction.
     After TM_MARK, this gets replaced by a call to
     BUILT_IN_TM_START.
     Hence this will be either a gtransaction * or a gcall *.  */
  gimple transaction_stmt;

  /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
     BUILT_IN_TM_START, this field is true if the transaction is an
     outer transaction.  */
  bool original_transaction_was_outer;

  /* Return value from BUILT_IN_TM_START.  */
  tree tm_state;

  /* The entry block to this region.  This will always be the first
     block of the body of the transaction.  */
  basic_block entry_block;

  /* The first block after an expanded call to _ITM_beginTransaction.  */
  basic_block restart_block;

  /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
     These blocks are still a part of the region (i.e., the border is
     inclusive).  Note that this set is only complete for paths in the CFG
     starting at ENTRY_BLOCK, and that there is no exit block recorded for
     the edge to the "over" label.  */
  bitmap exit_blocks;

  /* The set of all blocks that have a TM_IRREVOCABLE call.  */
  bitmap irr_blocks;
};

typedef struct tm_region *tm_region_p;

/* True if there are pending edge statements to be committed for the
   current function being scanned in the tmmark pass.  */
bool pending_edge_inserts_p;

static struct tm_region *all_tm_regions;
static bitmap_obstack tm_obstack;
/* A subroutine of tm_region_init.  Record the existence of the
   GIMPLE_TRANSACTION statement in a tree of tm_region elements.  */

static struct tm_region *
tm_region_init_0 (struct tm_region *outer, basic_block bb,
		  gtransaction *stmt)
{
  struct tm_region *region;

  region = (struct tm_region *)
    obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));

  if (outer)
    {
      region->next = outer->inner;
      outer->inner = region;
    }
  else
    {
      region->next = all_tm_regions;
      all_tm_regions = region;
    }
  region->inner = NULL;
  region->outer = outer;

  region->transaction_stmt = stmt;
  region->original_transaction_was_outer = false;
  region->tm_state = NULL;

  /* There are either one or two edges out of the block containing
     the GIMPLE_TRANSACTION, one to the actual region and one to the
     "over" label if the region contains an abort.  The former will
     always be the one marked FALLTHRU.  */
  region->entry_block = FALLTHRU_EDGE (bb)->dest;

  region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
  region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

  return region;
}
/* A subroutine of tm_region_init.  Record all the exit and
   irrevocable blocks in BB into the region's exit_blocks and
   irr_blocks bitmaps.  Returns the new region being scanned.  */

static struct tm_region *
tm_region_init_1 (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple g;

  if (!region
      || (!region->irr_blocks && !region->exit_blocks))
    return region;

  /* Check to see if this is the end of a region by seeing if it
     contains a call to __builtin_tm_commit{,_eh}.  Note that the
     outermost region for DECL_IS_TM_CLONE need not collect this.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_CALL)
	{
	  tree fn = gimple_call_fndecl (g);
	  if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
	    {
	      if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
		   || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
		  && region->exit_blocks)
		{
		  bitmap_set_bit (region->exit_blocks, bb->index);
		  region = region->outer;
		  break;
		}
	      if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
		bitmap_set_bit (region->irr_blocks, bb->index);
	    }
	}
    }

  return region;
}
/* Collect all of the transaction regions within the current function
   and record them in ALL_TM_REGIONS.  The REGION parameter may specify
   an "outermost" region for use by tm clones.  */

static void
tm_region_init (struct tm_region *region)
{
  gimple g;
  edge_iterator ei;
  edge e;
  basic_block bb;
  auto_vec<basic_block> queue;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
  struct tm_region *old_region;
  auto_vec<tm_region_p> bb_regions;

  all_tm_regions = region;
  bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* We could store this information in bb->aux, but we may get called
     through get_all_tm_blocks() from another pass that may be already
     using bb->aux.  */
  bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun));

  queue.safe_push (bb);
  bb_regions[bb->index] = region;
  do
    {
      bb = queue.pop ();
      region = bb_regions[bb->index];
      bb_regions[bb->index] = NULL;

      /* Record exit and irrevocable blocks.  */
      region = tm_region_init_1 (region, bb);

      /* Check for the last statement in the block beginning a new region.  */
      g = last_stmt (bb);
      old_region = region;
      if (g)
	if (gtransaction *trans_stmt = dyn_cast <gtransaction *> (g))
	  region = tm_region_init_0 (region, bb, trans_stmt);

      /* Process subsequent blocks.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    queue.safe_push (e->dest);

	    /* If the current block started a new region, make sure that only
	       the entry block of the new region is associated with this region.
	       Other successors are still part of the old region.  */
	    if (old_region != region && e->dest != region->entry_block)
	      bb_regions[e->dest->index] = old_region;
	    else
	      bb_regions[e->dest->index] = region;
	  }
    }
  while (!queue.is_empty ());
  BITMAP_FREE (visited_blocks);
}
/* The "gate" function for all transactional memory expansion and optimization
   passes.  We collect region information for each top-level transaction, and
   if we don't find any, we skip all of the TM passes.  Each region will have
   all of the exit blocks recorded, and the originating statement.  */

static bool
gate_tm_init (void)
{
  if (!flag_tm)
    return false;

  calculate_dominance_info (CDI_DOMINATORS);
  bitmap_obstack_initialize (&tm_obstack);

  /* If the function is a TM_CLONE, then the entire function is the region.  */
  if (decl_is_tm_clone (current_function_decl))
    {
      struct tm_region *region = (struct tm_region *)
	obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
      memset (region, 0, sizeof (*region));
      region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
      /* For a clone, the entire function is the region.  But even if
	 we don't need to record any exit blocks, we may need to
	 record irrevocable blocks.  */
      region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

      tm_region_init (region);
    }
  else
    {
      tm_region_init (NULL);

      /* If we didn't find any regions, cleanup and skip the whole tree
	 of tm-related optimizations.  */
      if (all_tm_regions == NULL)
	{
	  bitmap_obstack_release (&tm_obstack);
	  return false;
	}
    }

  return true;
}
const pass_data pass_data_tm_init =
{
  GIMPLE_PASS, /* type */
  "*tminit", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_tm_init : public gimple_opt_pass
{
public:
  pass_tm_init (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_init, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return gate_tm_init (); }

}; // class pass_tm_init

gimple_opt_pass *
make_pass_tm_init (gcc::context *ctxt)
{
  return new pass_tm_init (ctxt);
}
/* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
   represented by REGION.  */

static void
transaction_subcode_ior (struct tm_region *region, unsigned flags)
{
  if (region && region->transaction_stmt)
    {
      gtransaction *transaction_stmt = region->get_transaction_stmt ();
      flags |= gimple_transaction_subcode (transaction_stmt);
      gimple_transaction_set_subcode (transaction_stmt, flags);
    }
}
/* Construct a memory load in a transactional context.  Return the
   gimple statement performing the load, or NULL if there is no
   TM_LOAD builtin of the appropriate size to do the load.

   LOC is the location to use for the new statement(s).  */

static gcall *
build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
  enum built_in_function code = END_BUILTINS;
  tree t, type = TREE_TYPE (rhs), decl;
  gcall *gcall;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOAD_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOAD_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOAD_LDOUBLE;
  else if (TYPE_SIZE_UNIT (type) != NULL
	   && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
    {
      switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
	{
	case 1:
	  code = BUILT_IN_TM_LOAD_1;
	  break;
	case 2:
	  code = BUILT_IN_TM_LOAD_2;
	  break;
	case 4:
	  code = BUILT_IN_TM_LOAD_4;
	  break;
	case 8:
	  code = BUILT_IN_TM_LOAD_8;
	  break;
	}
    }

  if (code == END_BUILTINS)
    {
      decl = targetm.vectorize.builtin_tm_load (type);
      if (!decl)
	return NULL;
    }
  else
    decl = builtin_decl_explicit (code);

  t = gimplify_addr (gsi, rhs);
  gcall = gimple_build_call (decl, 1, t);
  gimple_set_location (gcall, loc);

  t = TREE_TYPE (TREE_TYPE (decl));
  if (useless_type_conversion_p (type, t))
    {
      gimple_call_set_lhs (gcall, lhs);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
    }
  else
    {
      gimple g;
      tree temp;

      temp = create_tmp_reg (t);
      gimple_call_set_lhs (gcall, temp);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

      t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
      g = gimple_build_assign (lhs, t);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  return gcall;
}
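
/* For illustration only (an editor's sketch): for a 4-byte integer
   load "x = y" inside a transaction, build_tm_load emits roughly

	x = __builtin__ITM_RU4 (&y);

   and for a vector type it asks the target hook for a suitable
   _ITM_R* variant.  The _ITM_RU4 spelling is an assumption based on
   the libitm ABI, not taken from this file.  */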
/* Similarly for storing TYPE in a transactional context.  */

static gcall *
build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
  enum built_in_function code = END_BUILTINS;
  tree t, fn, type = TREE_TYPE (rhs), simple_type;
  gcall *gcall;

  if (type == float_type_node)
    code = BUILT_IN_TM_STORE_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_STORE_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_STORE_LDOUBLE;
  else if (TYPE_SIZE_UNIT (type) != NULL
	   && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
    {
      switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
	{
	case 1:
	  code = BUILT_IN_TM_STORE_1;
	  break;
	case 2:
	  code = BUILT_IN_TM_STORE_2;
	  break;
	case 4:
	  code = BUILT_IN_TM_STORE_4;
	  break;
	case 8:
	  code = BUILT_IN_TM_STORE_8;
	  break;
	}
    }

  if (code == END_BUILTINS)
    {
      fn = targetm.vectorize.builtin_tm_store (type);
      if (!fn)
	return NULL;
    }
  else
    fn = builtin_decl_explicit (code);

  simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));

  if (TREE_CODE (rhs) == CONSTRUCTOR)
    {
      /* Handle the easy initialization to zero.  */
      if (!CONSTRUCTOR_ELTS (rhs))
	rhs = build_int_cst (simple_type, 0);
      else
	{
	  /* ...otherwise punt to the caller and probably use
	     BUILT_IN_TM_MEMMOVE, because we can't wrap a
	     VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
	     valid gimple.  */
	  return NULL;
	}
    }
  else if (!useless_type_conversion_p (simple_type, type))
    {
      gimple g;
      tree temp;

      temp = create_tmp_reg (simple_type);
      t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
      g = gimple_build_assign (temp, t);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);

      rhs = temp;
    }

  t = gimplify_addr (gsi, lhs);
  gcall = gimple_build_call (fn, 2, t, rhs);
  gimple_set_location (gcall, loc);
  gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

  return gcall;
}
/* Expand an assignment statement into transactional builtins.  */

static void
expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  bool store_p = requires_barrier (region->entry_block, lhs, NULL);
  bool load_p = requires_barrier (region->entry_block, rhs, NULL);
  gimple gcall = NULL;

  if (!load_p && !store_p)
    {
      /* Add thread private addresses to log if applicable.  */
      requires_barrier (region->entry_block, lhs, stmt);
      gsi_next (gsi);
      return;
    }

  // Remove original load/store statement.
  gsi_remove (gsi, true);

  if (load_p && !store_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      gcall = build_tm_load (loc, lhs, rhs, gsi);
    }
  else if (store_p && !load_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
      gcall = build_tm_store (loc, lhs, rhs, gsi);
    }
  if (!gcall)
    {
      tree lhs_addr, rhs_addr, tmp;

      if (load_p)
	transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      if (store_p)
	transaction_subcode_ior (region, GTMA_HAVE_STORE);

      /* ??? Figure out if there's any possible overlap between the LHS
	 and the RHS and if not, use MEMCPY.  */

      if (load_p && is_gimple_reg (lhs))
	{
	  tmp = create_tmp_var (TREE_TYPE (lhs));
	  lhs_addr = build_fold_addr_expr (tmp);
	}
      else
	{
	  tmp = NULL_TREE;
	  lhs_addr = gimplify_addr (gsi, lhs);
	}
      rhs_addr = gimplify_addr (gsi, rhs);
      gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
				 3, lhs_addr, rhs_addr,
				 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
      gimple_set_location (gcall, loc);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

      if (tmp)
	{
	  gcall = gimple_build_assign (lhs, tmp);
	  gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
	}
    }

  /* Now that we have the load/store in its instrumented form, add
     thread private addresses to the log if applicable.  */
  if (!store_p)
    requires_barrier (region->entry_block, lhs, gcall);

  // The calls to build_tm_{store,load} above inserted the instrumented
  // call into the stream.
  // gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
}
/* Expand a call statement as appropriate for a transaction.  That is,
   either verify that the call does not affect the transaction, or
   redirect the call to a clone that handles transactions, or change
   the transaction state to IRREVOCABLE.  Return true if the call is
   one of the builtins that end a transaction.  */

static bool
expand_call_tm (struct tm_region *region,
		gimple_stmt_iterator *gsi)
{
  gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
  tree lhs = gimple_call_lhs (stmt);
  tree fn_decl;
  struct cgraph_node *node;
  bool retval = false;

  fn_decl = gimple_call_fndecl (stmt);

  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
      || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
    transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
    transaction_subcode_ior (region, GTMA_HAVE_STORE);

  if (is_tm_pure_call (stmt))
    return false;

  if (fn_decl)
    retval = is_tm_ending_fndecl (fn_decl);
  if (!retval)
    {
      /* Assume all non-const/pure calls write to memory, except
	 transaction ending builtins.  */
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
    }

  /* For indirect calls, we already generated a call into the runtime.  */
  if (!fn_decl)
    {
      tree fn = gimple_call_fn (stmt);

      /* We are guaranteed never to go irrevocable on a safe or pure
	 call, and the pure call was handled above.  */
      if (is_tm_safe (fn))
	return false;
      else
	transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

      return false;
    }

  node = cgraph_node::get (fn_decl);
  /* All calls should have cgraph here.  */
  if (!node)
    {
      /* We can have a nodeless call here if some pass after IPA-tm
	 added uninstrumented calls.  For example, loop distribution
	 can transform certain loop constructs into __builtin_mem*
	 calls.  In this case, see if we have a suitable TM
	 replacement and fill in the gaps.  */
      gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
      enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
      gcc_assert (code == BUILT_IN_MEMCPY
		  || code == BUILT_IN_MEMMOVE
		  || code == BUILT_IN_MEMSET);

      tree repl = find_tm_replacement_function (fn_decl);
      if (repl)
	{
	  gimple_call_set_fndecl (stmt, repl);
	  update_stmt (stmt);
	  node = cgraph_node::create (repl);
	  node->local.tm_may_enter_irr = false;
	  return expand_call_tm (region, gsi);
	}
      gcc_unreachable ();
    }
  if (node->local.tm_may_enter_irr)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  if (is_tm_abort (fn_decl))
    {
      transaction_subcode_ior (region, GTMA_HAVE_ABORT);
      return true;
    }

  /* Instrument the store if needed.

     If the assignment happens inside the function call (return slot
     optimization), there is no instrumentation to be done, since
     the callee should have done the right thing.  */
  if (lhs && requires_barrier (region->entry_block, lhs, stmt)
      && !gimple_call_return_slot_opt_p (stmt))
    {
      tree tmp = create_tmp_reg (TREE_TYPE (lhs));
      location_t loc = gimple_location (stmt);
      edge fallthru_edge = NULL;
      gassign *assign_stmt;

      /* Remember if the call was going to throw.  */
      if (stmt_can_throw_internal (stmt))
	{
	  edge_iterator ei;
	  edge e;
	  basic_block bb = gimple_bb (stmt);

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (e->flags & EDGE_FALLTHRU)
	      {
		fallthru_edge = e;
		break;
	      }
	}

      gimple_call_set_lhs (stmt, tmp);
      update_stmt (stmt);
      assign_stmt = gimple_build_assign (lhs, tmp);
      gimple_set_location (assign_stmt, loc);

      /* We cannot throw in the middle of a BB.  If the call was going
	 to throw, place the instrumentation on the fallthru edge, so
	 the call remains the last statement in the block.  */
      if (fallthru_edge)
	{
	  gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt);
	  gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
	  expand_assign_tm (region, &fallthru_gsi);
	  gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
	  pending_edge_inserts_p = true;
	}
      else
	{
	  gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING);
	  expand_assign_tm (region, gsi);
	}

      transaction_subcode_ior (region, GTMA_HAVE_STORE);
    }

  return retval;
}
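/* Note: expand_call_tm returns true only for the transaction-ending
   builtins; expand_block_tm below uses that result to stop scanning the
   remainder of the block once the transaction has been ended.  */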
/* Expand all statements in BB as appropriate for being inside
   a transaction.  */

static void
expand_block_tm (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
	{
	case GIMPLE_ASSIGN:
	  /* Only memory reads/writes need to be instrumented.  */
	  if (gimple_assign_single_p (stmt)
	      && !gimple_clobber_p (stmt))
	    {
	      expand_assign_tm (region, &gsi);
	      continue;
	    }
	  break;

	case GIMPLE_CALL:
	  if (expand_call_tm (region, &gsi))
	    return;
	  break;

	default:
	  break;
	}
      if (!gsi_end_p (gsi))
	gsi_next (&gsi);
    }
}
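/* Note: the region walk that follows is a simple breadth-first traversal
   over successor edges, bounded by EXIT_BLOCKS and, when requested, by
   blocks already known to be irrevocable.  */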
/* Return the list of basic-blocks in REGION.

   STOP_AT_IRREVOCABLE_P is true if caller is uninterested in blocks
   following a TM_IRREVOCABLE call.

   INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the
   uninstrumented code path blocks in the list of basic blocks
   returned, false otherwise.  */

static vec<basic_block>
get_tm_region_blocks (basic_block entry_block,
		      bitmap exit_blocks,
		      bitmap irr_blocks,
		      bitmap all_region_blocks,
		      bool stop_at_irrevocable_p,
		      bool include_uninstrumented_p = true)
{
  vec<basic_block> bbs = vNULL;
  unsigned i;
  edge e;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  i = 0;
  bbs.safe_push (entry_block);
  bitmap_set_bit (visited_blocks, entry_block->index);

  do
    {
      basic_block bb = bbs[i++];

      if (exit_blocks
	  && bitmap_bit_p (exit_blocks, bb->index))
	continue;

      if (stop_at_irrevocable_p
	  && irr_blocks
	  && bitmap_bit_p (irr_blocks, bb->index))
	continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if ((include_uninstrumented_p
	     || !(e->flags & EDGE_TM_UNINSTRUMENTED))
	    && !bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    bbs.safe_push (e->dest);
	  }
    }
  while (i < bbs.length ());

  if (all_region_blocks)
    bitmap_ior_into (all_region_blocks, visited_blocks);

  BITMAP_FREE (visited_blocks);
  return bbs;
}
// Callback data for collect_bb2reg.
struct bb2reg_stuff
{
  vec<tm_region_p> *bb2reg;
  bool include_uninstrumented_p;
};

// Callback for expand_regions, collect innermost region data for each bb.
static void *
collect_bb2reg (struct tm_region *region, void *data)
{
  struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
  vec<tm_region_p> *bb2reg = stuff->bb2reg;
  vec<basic_block> queue;
  unsigned int i;
  basic_block bb;

  queue = get_tm_region_blocks (region->entry_block,
				region->exit_blocks,
				region->irr_blocks,
				NULL,
				/*stop_at_irr_p=*/true,
				stuff->include_uninstrumented_p);

  // We expect expand_region to perform a post-order traversal of the region
  // tree.  Therefore the last region seen for any bb is the innermost.
  FOR_EACH_VEC_ELT (queue, i, bb)
    (*bb2reg)[bb->index] = region;

  queue.release ();
  return NULL;
}

// Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
// which a basic block belongs.  Note that we only consider the instrumented
// code paths for the region; the uninstrumented code paths are ignored if
// INCLUDE_UNINSTRUMENTED_P is false.
//
// ??? This data is very similar to the bb_regions array that is collected
// during tm_region_init.  Or, rather, this data is similar to what could
// be used within tm_region_init.  The actual computation in tm_region_init
// begins and ends with bb_regions entirely full of NULL pointers, due to
// the way in which pointers are swapped in and out of the array.
//
// ??? Our callers expect that blocks are not shared between transactions.
// When the optimizers get too smart, and blocks are shared, then during
// the tm_mark phase we'll add log entries to only one of the two transactions,
// and in the tm_edge phase we'll add edges to the CFG that create invalid
// cycles.  The symptom being SSA defs that do not dominate their uses.
// Note that the optimizers were locally correct with their transformation,
// as we have no info within the program that suggests that the blocks cannot
// be shared.
//
// ??? There is currently a hack inside tree-ssa-pre.c to work around the
// only known instance of this block sharing.

static vec<tm_region_p>
get_bb_regions_instrumented (bool traverse_clones,
			     bool include_uninstrumented_p)
{
  unsigned n = last_basic_block_for_fn (cfun);
  struct bb2reg_stuff stuff;
  vec<tm_region_p> ret;

  ret.create (n);
  ret.safe_grow_cleared (n);
  stuff.bb2reg = &ret;
  stuff.include_uninstrumented_p = include_uninstrumented_p;
  expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);

  return ret;
}
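// ??? The BB_IN_TRANSACTION flag computed below is the only output of
// compute_transaction_bits; later passes that need to know whether a
// block executes transactionally are expected to test this flag.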
/* Set the IN_TRANSACTION for all gimple statements that appear in a
   transaction.  */

void
compute_transaction_bits (void)
{
  struct tm_region *region;
  vec<basic_block> queue;
  unsigned int i;
  basic_block bb;

  /* ?? Perhaps we need to abstract gate_tm_init further, because we
     certainly don't need it to calculate CDI_DOMINATOR info.  */
  gate_tm_init ();

  FOR_EACH_BB_FN (bb, cfun)
    bb->flags &= ~BB_IN_TRANSACTION;

  for (region = all_tm_regions; region; region = region->next)
    {
      queue = get_tm_region_blocks (region->entry_block,
				    region->exit_blocks,
				    region->irr_blocks,
				    NULL,
				    /*stop_at_irr_p=*/true);
      for (i = 0; queue.iterate (i, &bb); ++i)
	bb->flags |= BB_IN_TRANSACTION;
      queue.release ();
    }

  if (all_tm_regions)
    bitmap_obstack_release (&tm_obstack);
}
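/* The FLAGS word assembled below from the PR_* properties becomes the
   single constant argument to BUILT_IN_TM_START; the TM runtime inspects
   it when deciding how to begin the transaction (instrumented vs.
   uninstrumented code path, read-only, no-abort, etc.).  */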
/* Replace the GIMPLE_TRANSACTION in this region with the corresponding
   call to BUILT_IN_TM_START.  */

static void *
expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
{
  tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
  basic_block transaction_bb = gimple_bb (region->transaction_stmt);
  tree tm_state = region->tm_state;
  tree tm_state_type = TREE_TYPE (tm_state);
  edge abort_edge = NULL;
  edge inst_edge = NULL;
  edge uninst_edge = NULL;
  edge fallthru_edge = NULL;

  // Identify the various successors of the transaction start.
  {
    edge_iterator i;
    edge e;
    FOR_EACH_EDGE (e, i, transaction_bb->succs)
      {
	if (e->flags & EDGE_TM_ABORT)
	  abort_edge = e;
	else if (e->flags & EDGE_TM_UNINSTRUMENTED)
	  uninst_edge = e;
	else
	  inst_edge = e;
	if (e->flags & EDGE_FALLTHRU)
	  fallthru_edge = e;
      }
  }

  /* ??? There are plenty of bits here we're not computing.  */
  {
    int subcode = gimple_transaction_subcode (region->get_transaction_stmt ());
    int flags = 0;
    if (subcode & GTMA_DOES_GO_IRREVOCABLE)
      flags |= PR_DOESGOIRREVOCABLE;
    if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
      flags |= PR_HASNOIRREVOCABLE;
    /* If the transaction does not have an abort in lexical scope and is not
       marked as an outer transaction, then it will never abort.  */
    if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
      flags |= PR_HASNOABORT;
    if ((subcode & GTMA_HAVE_STORE) == 0)
      flags |= PR_READONLY;
    if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
      flags |= PR_INSTRUMENTEDCODE;
    if (uninst_edge)
      flags |= PR_UNINSTRUMENTEDCODE;
    if (subcode & GTMA_IS_OUTER)
      region->original_transaction_was_outer = true;
    tree t = build_int_cst (tm_state_type, flags);
    gcall *call = gimple_build_call (tm_start, 1, t);
    gimple_call_set_lhs (call, tm_state);
    gimple_set_location (call, gimple_location (region->transaction_stmt));

    // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
    gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
    gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
    gsi_insert_before (&gsi, call, GSI_SAME_STMT);
    gsi_remove (&gsi, true);
    region->transaction_stmt = call;
  }

  // Generate log saves.
  if (!tm_log_save_addresses.is_empty ())
    tm_log_emit_saves (region->entry_block, transaction_bb);

  // In the beginning, we've no tests to perform on transaction restart.
  // Note that after this point, transaction_bb becomes the "most recent
  // block containing tests for the transaction".
  region->restart_block = region->entry_block;

  // Generate log restores.
  if (!tm_log_save_addresses.is_empty ())
    {
      basic_block test_bb = create_empty_bb (transaction_bb);
      basic_block code_bb = create_empty_bb (test_bb);
      basic_block join_bb = create_empty_bb (code_bb);
      add_bb_to_loop (test_bb, transaction_bb->loop_father);
      add_bb_to_loop (code_bb, transaction_bb->loop_father);
      add_bb_to_loop (join_bb, transaction_bb->loop_father);
      if (region->restart_block == region->entry_block)
	region->restart_block = test_bb;

      tree t1 = create_tmp_reg (tm_state_type);
      tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
      gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
      gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (tm_state_type, 0);
      stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      tm_log_emit_restores (region->entry_block, code_bb);

      edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
      edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
      edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
      redirect_edge_pred (fallthru_edge, join_bb);

      join_bb->frequency = test_bb->frequency = transaction_bb->frequency;
      join_bb->count = test_bb->count = transaction_bb->count;

      ei->probability = PROB_ALWAYS;
      et->probability = PROB_LIKELY;
      ef->probability = PROB_UNLIKELY;
      et->count = apply_probability (test_bb->count, et->probability);
      ef->count = apply_probability (test_bb->count, ef->probability);

      code_bb->count = et->count;
      code_bb->frequency = EDGE_FREQUENCY (et);

      transaction_bb = join_bb;
    }

  // If we have an ABORT edge, create a test to perform the abort.
  if (abort_edge)
    {
      basic_block test_bb = create_empty_bb (transaction_bb);
      add_bb_to_loop (test_bb, transaction_bb->loop_father);
      if (region->restart_block == region->entry_block)
	region->restart_block = test_bb;

      tree t1 = create_tmp_reg (tm_state_type);
      tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
      gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
      gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (tm_state_type, 0);
      stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
      test_bb->frequency = transaction_bb->frequency;
      test_bb->count = transaction_bb->count;
      ei->probability = PROB_ALWAYS;

      // Not abort edge.  If both are live, choose one at random as we'll
      // be fixing that up below.
      redirect_edge_pred (fallthru_edge, test_bb);
      fallthru_edge->flags = EDGE_FALSE_VALUE;
      fallthru_edge->probability = PROB_VERY_LIKELY;
      fallthru_edge->count
	= apply_probability (test_bb->count, fallthru_edge->probability);

      redirect_edge_pred (abort_edge, test_bb);
      abort_edge->flags = EDGE_TRUE_VALUE;
      abort_edge->probability = PROB_VERY_UNLIKELY;
      abort_edge->count
	= apply_probability (test_bb->count, abort_edge->probability);

      transaction_bb = test_bb;
    }

  // If we have both instrumented and uninstrumented code paths, select one.
  if (inst_edge && uninst_edge)
    {
      basic_block test_bb = create_empty_bb (transaction_bb);
      add_bb_to_loop (test_bb, transaction_bb->loop_father);
      if (region->restart_block == region->entry_block)
	region->restart_block = test_bb;

      tree t1 = create_tmp_reg (tm_state_type);
      tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);

      gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
      gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (tm_state_type, 0);
      stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      // Create the edge into test_bb first, as we want to copy values
      // out of the fallthru edge.
      edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
      e->probability = fallthru_edge->probability;
      test_bb->count = e->count = fallthru_edge->count;
      test_bb->frequency = EDGE_FREQUENCY (e);

      // Now update the edges to the inst/uninst implementations.
      // For now assume that the paths are equally likely.  When using HTM,
      // we'll try the uninst path first and fallback to inst path if htm
      // buffers are exceeded.  Without HTM we start with the inst path and
      // use the uninst path when falling back to serial mode.
      redirect_edge_pred (inst_edge, test_bb);
      inst_edge->flags = EDGE_FALSE_VALUE;
      inst_edge->probability = REG_BR_PROB_BASE / 2;
      inst_edge->count
	= apply_probability (test_bb->count, inst_edge->probability);

      redirect_edge_pred (uninst_edge, test_bb);
      uninst_edge->flags = EDGE_TRUE_VALUE;
      uninst_edge->probability = REG_BR_PROB_BASE / 2;
      uninst_edge->count
	= apply_probability (test_bb->count, uninst_edge->probability);
    }

  // If we have no previous special cases, and we have PHIs at the beginning
  // of the atomic region, this means we have a loop at the beginning of the
  // atomic region that shares the first block.  This can cause problems with
  // the transaction restart abnormal edges to be added in the tm_edges pass.
  // Solve this by adding a new empty block to receive the abnormal edges.
  if (region->restart_block == region->entry_block
      && phi_nodes (region->entry_block))
    {
      basic_block empty_bb = create_empty_bb (transaction_bb);
      region->restart_block = empty_bb;
      add_bb_to_loop (empty_bb, transaction_bb->loop_father);

      redirect_edge_pred (fallthru_edge, empty_bb);
      make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
    }

  return NULL;
}
/* Generate the temporary to be used for the return value of
   BUILT_IN_TM_START.  */

static void *
generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
{
  tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
  region->tm_state =
    create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");

  // Reset the subcode, post optimizations.  We'll fill this in
  // again as we process blocks.
  if (region->exit_blocks)
    {
      gtransaction *transaction_stmt = region->get_transaction_stmt ();
      unsigned int subcode = gimple_transaction_subcode (transaction_stmt);

      if (subcode & GTMA_DOES_GO_IRREVOCABLE)
	subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
		    | GTMA_MAY_ENTER_IRREVOCABLE
		    | GTMA_HAS_NO_INSTRUMENTATION);
      else
	subcode &= GTMA_DECLARATION_MASK;
      gimple_transaction_set_subcode (transaction_stmt, subcode);
    }

  return NULL;
}
// Propagate flags from inner transactions outwards.
static void
propagate_tm_flags_out (struct tm_region *region)
{
  if (region == NULL)
    return;
  propagate_tm_flags_out (region->inner);

  if (region->outer && region->outer->transaction_stmt)
    {
      unsigned s
	= gimple_transaction_subcode (region->get_transaction_stmt ());
      s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
	    | GTMA_MAY_ENTER_IRREVOCABLE);
      s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ());
      gimple_transaction_set_subcode (region->outer->get_transaction_stmt (),
				      s);
    }

  propagate_tm_flags_out (region->next);
}
/* Entry point to the MARK phase of TM expansion.  Here we replace
   transactional memory statements with calls to builtins, and function
   calls with their transactional clones (if available).  But we don't
   yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges.  */

static unsigned int
execute_tm_mark (void)
{
  pending_edge_inserts_p = false;

  expand_regions (all_tm_regions, generate_tm_state, NULL,
		  /*traverse_clones=*/true);

  tm_log_init ();

  vec<tm_region_p> bb_regions
    = get_bb_regions_instrumented (/*traverse_clones=*/true,
				   /*include_uninstrumented_p=*/false);
  struct tm_region *r;
  unsigned i;

  // Expand memory operations into calls into the runtime.
  // This collects log entries as well.
  FOR_EACH_VEC_ELT (bb_regions, i, r)
    {
      if (r != NULL)
	{
	  if (r->transaction_stmt)
	    {
	      unsigned sub
		= gimple_transaction_subcode (r->get_transaction_stmt ());

	      /* If we're sure to go irrevocable, there won't be
		 anything to expand, since the run-time will go
		 irrevocable right away.  */
	      if (sub & GTMA_DOES_GO_IRREVOCABLE
		  && sub & GTMA_MAY_ENTER_IRREVOCABLE)
		continue;
	    }
	  expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i));
	}
    }

  bb_regions.release ();

  // Propagate flags from inner transactions outwards.
  propagate_tm_flags_out (all_tm_regions);

  // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
  expand_regions (all_tm_regions, expand_transaction, NULL,
		  /*traverse_clones=*/false);

  tm_log_emit ();
  tm_log_delete ();

  if (pending_edge_inserts_p)
    gsi_commit_edge_inserts ();
  free_dominance_info (CDI_DOMINATORS);
  return 0;
}
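/* Note: "tmmark" leaves the CFG without the abnormal restart edges; the
   "tmedge" pass further down in this file adds them and records them for
   the RTL passes via the tm_restart table.  */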
namespace {

const pass_data pass_data_tm_mark =
{
  GIMPLE_PASS, /* type */
  "tmmark", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_tm_mark : public gimple_opt_pass
{
public:
  pass_tm_mark (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_mark, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return execute_tm_mark (); }

}; // class pass_tm_mark

} // anon namespace

gimple_opt_pass *
make_pass_tm_mark (gcc::context *ctxt)
{
  return new pass_tm_mark (ctxt);
}
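/* Note: besides creating the abnormal CFG edge, split_bb_make_tm_edge
   below records the destination label in cfun->gimple_df->tm_restart so
   that the RTL passes can recreate the restart edge after GIMPLE form is
   gone.  */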
/* Create an abnormal edge from STMT at iter, splitting the block
   as necessary.  Adjust *PNEXT as needed for the split block.  */

static inline void
split_bb_make_tm_edge (gimple stmt, basic_block dest_bb,
		       gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
{
  basic_block bb = gimple_bb (stmt);
  if (!gsi_one_before_end_p (iter))
    {
      edge e = split_block (bb, stmt);
      *pnext = gsi_start_bb (e->dest);
    }
  make_edge (bb, dest_bb, EDGE_ABNORMAL);

  // Record the need for the edge for the benefit of the rtl passes.
  if (cfun->gimple_df->tm_restart == NULL)
    cfun->gimple_df->tm_restart
      = hash_table<tm_restart_hasher>::create_ggc (31);

  struct tm_restart_node dummy;
  dummy.stmt = stmt;
  dummy.label_or_list = gimple_block_label (dest_bb);

  tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy,
								   INSERT);
  struct tm_restart_node *n = *slot;
  if (n == NULL)
    {
      n = ggc_alloc<tm_restart_node> ();
      *n = dummy;
    }
  else
    {
      tree old = n->label_or_list;
      if (TREE_CODE (old) == LABEL_DECL)
	old = tree_cons (NULL, old, NULL);
      n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
    }
  *slot = n;
}
/* Split block BB as necessary for every builtin function we added, and
   wire up the abnormal back edges implied by the transaction restart.  */

static void
expand_block_edges (struct tm_region *const region, basic_block bb)
{
  gimple_stmt_iterator gsi, next_gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
    {
      gimple stmt = gsi_stmt (gsi);
      gcall *call_stmt;

      next_gsi = gsi;
      gsi_next (&next_gsi);

      // ??? Shouldn't we split for any non-pure, non-irrevocable function?
      call_stmt = dyn_cast <gcall *> (stmt);
      if (!call_stmt
	  || (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0)
	continue;

      if (DECL_FUNCTION_CODE (gimple_call_fndecl (call_stmt))
	  == BUILT_IN_TM_ABORT)
	{
	  // If we have a ``_transaction_cancel [[outer]]'', there is only
	  // one abnormal edge: to the transaction marked OUTER.
	  // All compiler-generated instances of BUILT_IN_TM_ABORT have a
	  // constant argument, which we can examine here.  Users invoking
	  // TM_ABORT directly get what they deserve.
	  tree arg = gimple_call_arg (call_stmt, 0);
	  if (TREE_CODE (arg) == INTEGER_CST
	      && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
	      && !decl_is_tm_clone (current_function_decl))
	    {
	      // Find the GTMA_IS_OUTER transaction.
	      for (struct tm_region *o = region; o; o = o->outer)
		if (o->original_transaction_was_outer)
		  {
		    split_bb_make_tm_edge (call_stmt, o->restart_block,
					   gsi, &next_gsi);
		    break;
		  }

	      // Otherwise, the front-end should have semantically checked
	      // outer aborts, but in either case the target region is not
	      // within this function.
	      continue;
	    }

	  // Non-outer, TM aborts have an abnormal edge to the inner-most
	  // transaction, the one being aborted;
	  split_bb_make_tm_edge (call_stmt, region->restart_block, gsi,
				 &next_gsi);
	}

      // All TM builtins have an abnormal edge to the outer-most transaction.
      // We never restart inner transactions.  For tm clones, we know a-priori
      // that the outer-most transaction is outside the function.
      if (decl_is_tm_clone (current_function_decl))
	continue;

      if (cfun->gimple_df->tm_restart == NULL)
	cfun->gimple_df->tm_restart
	  = hash_table<tm_restart_hasher>::create_ggc (31);

      // All TM builtins have an abnormal edge to the outer-most transaction.
      // We never restart inner transactions.
      for (struct tm_region *o = region; o; o = o->outer)
	if (!o->outer)
	  {
	    split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi);
	    break;
	  }

      // Delete any tail-call annotation that may have been added.
      // The tail-call pass may have mis-identified the commit as being
      // a candidate because we had not yet added this restart edge.
      gimple_call_set_tail (call_stmt, false);
    }
}
/* Entry point to the final expansion of transactional nodes.  */

namespace {

const pass_data pass_data_tm_edges =
{
  GIMPLE_PASS, /* type */
  "tmedge", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_tm_edges : public gimple_opt_pass
{
public:
  pass_tm_edges (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_edges, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_tm_edges

unsigned int
pass_tm_edges::execute (function *fun)
{
  vec<tm_region_p> bb_regions
    = get_bb_regions_instrumented (/*traverse_clones=*/false,
				   /*include_uninstrumented_p=*/true);
  struct tm_region *r;
  unsigned i;

  FOR_EACH_VEC_ELT (bb_regions, i, r)
    if (r != NULL)
      expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i));

  bb_regions.release ();

  /* We've got to release the dominance info now, to indicate that it
     must be rebuilt completely.  Otherwise we'll crash trying to update
     the SSA web in the TODO section following this pass.  */
  free_dominance_info (CDI_DOMINATORS);
  bitmap_obstack_release (&tm_obstack);
  all_tm_regions = NULL;

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_tm_edges (gcc::context *ctxt)
{
  return new pass_tm_edges (ctxt);
}
/* Helper function for expand_regions.  Expand REGION and recurse to
   the inner region.  Call CALLBACK on each region.  CALLBACK returns
   NULL to continue the traversal, otherwise a non-null value which
   this function will return as well.  TRAVERSE_CLONES is true if we
   should traverse transactional clones.  */

static void *
expand_regions_1 (struct tm_region *region,
		  void *(*callback)(struct tm_region *, void *),
		  void *data,
		  bool traverse_clones)
{
  void *retval = NULL;
  if (region->exit_blocks
      || (traverse_clones && decl_is_tm_clone (current_function_decl)))
    {
      retval = callback (region, data);
      if (retval)
	return retval;
    }
  if (region->inner)
    {
      retval = expand_regions (region->inner, callback, data, traverse_clones);
      if (retval)
	return retval;
    }
  return retval;
}

/* Traverse the regions enclosed and including REGION.  Execute
   CALLBACK for each region, passing DATA.  CALLBACK returns NULL to
   continue the traversal, otherwise a non-null value which this
   function will return as well.  TRAVERSE_CLONES is true if we should
   traverse transactional clones.  */

static void *
expand_regions (struct tm_region *region,
		void *(*callback)(struct tm_region *, void *),
		void *data,
		bool traverse_clones)
{
  void *retval = NULL;
  while (region)
    {
      retval = expand_regions_1 (region, callback, data, traverse_clones);
      if (retval)
	return retval;

      region = region->next;
    }
  return retval;
}
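/* The code from here through execute_tm_memopt implements a small
   dataflow optimization over the TM barriers just created: loads and
   stores to the same address share a value number, per-block
   availability/anticipatability sets are solved over each region, and
   redundant barriers are then downgraded to the cheaper read-after-read,
   read-after-write, write-after-write, etc. variants.  */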
/* A unique TM memory operation.  */
typedef struct tm_memop
{
  /* Unique ID that all memory operations to the same location have.  */
  unsigned int value_id;
  /* Address of load/store.  */
  tree addr;
} *tm_memop_t;

/* TM memory operation hashtable helpers.  */

struct tm_memop_hasher : typed_free_remove <tm_memop>
{
  typedef tm_memop *value_type;
  typedef tm_memop *compare_type;
  static inline hashval_t hash (const tm_memop *);
  static inline bool equal (const tm_memop *, const tm_memop *);
};

/* Htab support.  Return a hash value for a `tm_memop'.  */
inline hashval_t
tm_memop_hasher::hash (const tm_memop *mem)
{
  tree addr = mem->addr;
  /* We drill down to the SSA_NAME/DECL for the hash, but equality is
     actually done with operand_equal_p (see tm_memop_eq).  */
  if (TREE_CODE (addr) == ADDR_EXPR)
    addr = TREE_OPERAND (addr, 0);
  return iterative_hash_expr (addr, 0);
}

/* Htab support.  Return true if two tm_memop's are the same.  */
inline bool
tm_memop_hasher::equal (const tm_memop *mem1, const tm_memop *mem2)
{
  return operand_equal_p (mem1->addr, mem2->addr, 0);
}
/* Sets for solving data flow equations in the memory optimization pass.  */
struct tm_memopt_bitmaps
{
  /* Stores available to this BB upon entry.  Basically, stores that
     dominate this BB.  */
  bitmap store_avail_in;
  /* Stores available at the end of this BB.  */
  bitmap store_avail_out;
  bitmap store_antic_in;
  bitmap store_antic_out;
  /* Reads available to this BB upon entry.  Basically, reads that
     dominate this BB.  */
  bitmap read_avail_in;
  /* Reads available at the end of this BB.  */
  bitmap read_avail_out;
  /* Reads performed in this BB.  */
  bitmap read_local;
  /* Writes performed in this BB.  */
  bitmap store_local;

  /* Temporary storage for pass.  */
  /* Is the current BB in the worklist?  */
  bool avail_in_worklist_p;
  /* Have we visited this BB?  */
  bool visited_p;
};

static bitmap_obstack tm_memopt_obstack;

/* Unique counter for TM loads and stores.  Loads and stores of the
   same address get the same ID.  */
static unsigned int tm_memopt_value_id;
static hash_table<tm_memop_hasher> *tm_memopt_value_numbers;

#define STORE_AVAIL_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
#define STORE_AVAIL_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
#define STORE_ANTIC_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
#define STORE_ANTIC_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
#define READ_AVAIL_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
#define READ_AVAIL_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
#define READ_LOCAL(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
#define STORE_LOCAL(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
#define AVAIL_IN_WORKLIST_P(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
#define BB_VISITED_P(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
/* Given a TM load/store in STMT, return the value number for the address
   it accesses.  */

static unsigned int
tm_memopt_value_number (gimple stmt, enum insert_option op)
{
  struct tm_memop tmpmem, *mem;
  tm_memop **slot;

  gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
  tmpmem.addr = gimple_call_arg (stmt, 0);
  slot = tm_memopt_value_numbers->find_slot (&tmpmem, op);
  if (*slot)
    mem = *slot;
  else if (op == INSERT)
    {
      mem = XNEW (struct tm_memop);
      *slot = mem;
      mem->value_id = tm_memopt_value_id++;
      mem->addr = tmpmem.addr;
    }
  else
    gcc_unreachable ();
  return mem->value_id;
}
/* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL.  */

static void
tm_memopt_accumulate_memops (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      bitmap bits;
      unsigned int loc;

      if (is_tm_store (stmt))
	bits = STORE_LOCAL (bb);
      else if (is_tm_load (stmt))
	bits = READ_LOCAL (bb);
      else
	continue;

      loc = tm_memopt_value_number (stmt, INSERT);
      bitmap_set_bit (bits, loc);
      if (dump_file)
	{
	  fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
		   is_tm_load (stmt) ? "LOAD" : "STORE", loc,
		   gimple_bb (stmt)->index);
	  print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
	  fprintf (dump_file, "\n");
	}
    }
}
/* Prettily dump one of the memopt sets.  BITS is the bitmap to dump.  */

static void
dump_tm_memopt_set (const char *set_name, bitmap bits)
{
  unsigned i;
  bitmap_iterator bi;
  const char *comma = "";

  fprintf (dump_file, "TM memopt: %s: [", set_name);
  EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
    {
      hash_table<tm_memop_hasher>::iterator hi;
      struct tm_memop *mem = NULL;

      /* Yeah, yeah, yeah.  Whatever.  This is just for debugging.  */
      FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi)
	if (mem->value_id == i)
	  break;
      gcc_assert (mem->value_id == i);
      fprintf (dump_file, "%s", comma);
      comma = ", ";
      print_generic_expr (dump_file, mem->addr, 0);
    }
  fprintf (dump_file, "]\n");
}

/* Prettily dump all of the memopt sets in BLOCKS.  */

static void
dump_tm_memopt_sets (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    {
      fprintf (dump_file, "------------BB %d---------\n", bb->index);
      dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
      dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
      dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
      dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
      dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
      dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
    }
}
/* Compute {STORE,READ}_AVAIL_IN for the basic block BB.  */

static void
tm_memopt_compute_avin (basic_block bb)
{
  edge e;
  unsigned ix;

  /* Seed with the AVOUT of any predecessor.  */
  for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
    {
      e = EDGE_PRED (bb, ix);
      /* Make sure we have already visited this BB, and that it is thus
	 initialized.

	 If e->src->aux is NULL, this predecessor is actually on an
	 enclosing transaction.  We only care about the current
	 transaction, so ignore it.  */
      if (e->src->aux && BB_VISITED_P (e->src))
	{
	  bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
	  bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
	  break;
	}
    }

  for (; ix < EDGE_COUNT (bb->preds); ix++)
    {
      e = EDGE_PRED (bb, ix);
      if (e->src->aux && BB_VISITED_P (e->src))
	{
	  bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
	  bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
	}
    }

  BB_VISITED_P (bb) = true;
}
/* Compute the STORE_ANTIC_IN for the basic block BB.  */

static void
tm_memopt_compute_antin (basic_block bb)
{
  edge e;
  unsigned ix;

  /* Seed with the ANTIC_OUT of any successor.  */
  for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
    {
      e = EDGE_SUCC (bb, ix);
      /* Make sure we have already visited this BB, and that it is thus
	 initialized.  */
      if (BB_VISITED_P (e->dest))
	{
	  bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
	  break;
	}
    }

  for (; ix < EDGE_COUNT (bb->succs); ix++)
    {
      e = EDGE_SUCC (bb, ix);
      if (BB_VISITED_P (e->dest))
	bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
    }

  BB_VISITED_P (bb) = true;
}
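/* Note: both solvers below keep the pending blocks in a circular
   worklist (qin/qout/qend over a malloc'd array) and iterate the
   transfer functions above to a fixed point within one TM region.  */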
/* Compute the AVAIL sets for every basic block in BLOCKS.

   We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:

     AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
     AVAIL_IN[bb]  = intersect (AVAIL_OUT[predecessors])

   This is basically what we do in lcm's compute_available(), but here
   we calculate two sets of sets (one for STOREs and one for READs),
   and we work on a region instead of the entire CFG.

   REGION is the TM region.
   BLOCKS are the basic blocks in the region.  */

static void
tm_memopt_compute_available (struct tm_region *region,
			     vec<basic_block> blocks)
{
  edge e;
  basic_block *worklist, *qin, *qout, *qend, bb;
  unsigned int qlen, i;
  edge_iterator ei;
  bool changed;

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks in the region.  */
  qlen = blocks.length () - 1;
  qin = qout = worklist =
    XNEWVEC (basic_block, qlen);

  /* Put every block in the region on the worklist.  */
  for (i = 0; blocks.iterate (i, &bb); ++i)
    {
      /* Seed AVAIL_OUT with the LOCAL set.  */
      bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
      bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));

      AVAIL_IN_WORKLIST_P (bb) = true;
      /* No need to insert the entry block, since it has an AVIN of
	 null, and an AVOUT that has already been seeded in.  */
      if (bb != region->entry_block)
	*qin++ = bb;
    }

  /* The entry block has been initialized with the local sets.  */
  BB_VISITED_P (region->entry_block) = true;

  qin = worklist;
  qend = &worklist[qlen];

  /* Iterate until the worklist is empty.  */
  while (qlen)
    {
      /* Take the first entry off the worklist.  */
      bb = *qout++;
      qlen--;

      if (qout >= qend)
	qout = worklist;

      /* This block can be added to the worklist again if necessary.  */
      AVAIL_IN_WORKLIST_P (bb) = false;
      tm_memopt_compute_avin (bb);

      /* Note: We do not add the LOCAL sets here because we already
	 seeded the AVAIL_OUT sets with them.  */
      changed  = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
      changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));

      if (changed
	  && (region->exit_blocks == NULL
	      || !bitmap_bit_p (region->exit_blocks, bb->index)))
	/* If the out state of this block changed, then we need to add
	   its successors to the worklist if they are not already in.  */
	FOR_EACH_EDGE (e, ei, bb->succs)
	  if (!AVAIL_IN_WORKLIST_P (e->dest)
	      && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      *qin++ = e->dest;
	      AVAIL_IN_WORKLIST_P (e->dest) = true;
	      qlen++;

	      if (qin >= qend)
		qin = worklist;
	    }
    }

  free (worklist);

  if (dump_file)
    dump_tm_memopt_sets (blocks);
}
/* Compute ANTIC sets for every basic block in BLOCKS.

   We compute STORE_ANTIC_OUT as follows:

     STORE_ANTIC_OUT[bb] = union (STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
     STORE_ANTIC_IN[bb]  = intersect (STORE_ANTIC_OUT[successors])

   REGION is the TM region.
   BLOCKS are the basic blocks in the region.  */

static void
tm_memopt_compute_antic (struct tm_region *region,
			 vec<basic_block> blocks)
{
  edge e;
  basic_block *worklist, *qin, *qout, *qend, bb;
  unsigned int qlen;
  int i;
  edge_iterator ei;

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks in the region.  */
  qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());

  for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
    {
      bb = blocks[i];

      /* Seed ANTIC_OUT with the LOCAL set.  */
      bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));

      /* Put every block in the region on the worklist.  */
      AVAIL_IN_WORKLIST_P (bb) = true;
      /* No need to insert exit blocks, since their ANTIC_IN is NULL,
	 and their ANTIC_OUT has already been seeded in.  */
      if (region->exit_blocks
	  && !bitmap_bit_p (region->exit_blocks, bb->index))
	{
	  qlen++;
	  *qin++ = bb;
	}
    }

  /* The exit blocks have been initialized with the local sets.  */
  if (region->exit_blocks)
    {
      unsigned int i;
      bitmap_iterator bi;
      EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
	BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true;
    }

  qin = worklist;
  qend = &worklist[qlen];

  /* Iterate until the worklist is empty.  */
  while (qlen)
    {
      /* Take the first entry off the worklist.  */
      bb = *qout++;
      qlen--;

      if (qout >= qend)
	qout = worklist;

      /* This block can be added to the worklist again if necessary.  */
      AVAIL_IN_WORKLIST_P (bb) = false;
      tm_memopt_compute_antin (bb);

      /* Note: We do not add the LOCAL sets here because we already
	 seeded the ANTIC_OUT sets with them.  */
      if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
	  && bb != region->entry_block)
	/* If the out state of this block changed, then we need to add
	   its predecessors to the worklist if they are not already in.  */
	FOR_EACH_EDGE (e, ei, bb->preds)
	  if (!AVAIL_IN_WORKLIST_P (e->src))
	    {
	      *qin++ = e->src;
	      AVAIL_IN_WORKLIST_P (e->src) = true;
	      qlen++;

	      if (qin >= qend)
		qin = worklist;
	    }
    }

  free (worklist);

  if (dump_file)
    dump_tm_memopt_sets (blocks);
}
/* Offsets of load variants from TM_LOAD.  For example,
   BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
   See gtm-builtins.def.  */
#define TRANSFORM_RAR 1
#define TRANSFORM_RAW 2
#define TRANSFORM_RFW 3
/* Offsets of store variants from TM_STORE.  */
#define TRANSFORM_WAR 1
#define TRANSFORM_WAW 2

/* Inform about a load/store optimization.  */

static void
dump_tm_memopt_transform (gimple stmt)
{
  if (dump_file)
    {
      fprintf (dump_file, "TM memopt: transforming: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
      fprintf (dump_file, "\n");
    }
}

/* Perform a read/write optimization.  Replaces the TM builtin in STMT
   by a builtin that is OFFSET entries down in the builtins table in
   gtm-builtins.def.  */

static void
tm_memopt_transform_stmt (unsigned int offset,
			  gcall *stmt,
			  gimple_stmt_iterator *gsi)
{
  tree fn = gimple_call_fn (stmt);
  gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
  TREE_OPERAND (fn, 0)
    = builtin_decl_explicit ((enum built_in_function)
			     (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
			      + offset));
  gimple_call_set_fn (stmt, fn);
  gsi_replace (gsi, stmt, true);
  dump_tm_memopt_transform (stmt);
}
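/* Summary of the rewrites applied below:
     load:  store available in    -> read-after-write (RAW)
            store anticipated out -> read-for-write (RFW)
            read available in     -> read-after-read (RAR)
     store: store available in    -> write-after-write (WAW)
            read available in     -> write-after-read (WAR)  */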
/* Perform the actual TM memory optimization transformations in the
   basic blocks in BLOCKS.  */

static void
tm_memopt_transform_blocks (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;
  gimple_stmt_iterator gsi;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  bitmap read_avail = READ_AVAIL_IN (bb);
	  bitmap store_avail = STORE_AVAIL_IN (bb);
	  bitmap store_antic = STORE_ANTIC_OUT (bb);
	  unsigned int loc;

	  if (is_tm_simple_load (stmt))
	    {
	      gcall *call_stmt = as_a <gcall *> (stmt);
	      loc = tm_memopt_value_number (stmt, NO_INSERT);
	      if (store_avail && bitmap_bit_p (store_avail, loc))
		tm_memopt_transform_stmt (TRANSFORM_RAW, call_stmt, &gsi);
	      else if (store_antic && bitmap_bit_p (store_antic, loc))
		{
		  tm_memopt_transform_stmt (TRANSFORM_RFW, call_stmt, &gsi);
		  bitmap_set_bit (store_avail, loc);
		}
	      else if (read_avail && bitmap_bit_p (read_avail, loc))
		tm_memopt_transform_stmt (TRANSFORM_RAR, call_stmt, &gsi);
	      else
		bitmap_set_bit (read_avail, loc);
	    }
	  else if (is_tm_simple_store (stmt))
	    {
	      gcall *call_stmt = as_a <gcall *> (stmt);
	      loc = tm_memopt_value_number (stmt, NO_INSERT);
	      if (store_avail && bitmap_bit_p (store_avail, loc))
		tm_memopt_transform_stmt (TRANSFORM_WAW, call_stmt, &gsi);
	      else
		{
		  if (read_avail && bitmap_bit_p (read_avail, loc))
		    tm_memopt_transform_stmt (TRANSFORM_WAR, call_stmt, &gsi);
		  bitmap_set_bit (store_avail, loc);
		}
	    }
	}
    }
}
/* Return a new set of bitmaps for a BB.  */

static struct tm_memopt_bitmaps *
tm_memopt_init_sets (void)
{
  struct tm_memopt_bitmaps *b
    = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
  b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
  return b;
}

/* Free sets computed for each BB.  */

static void
tm_memopt_free_sets (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    bb->aux = NULL;
}

/* Clear the visited bit for every basic block in BLOCKS.  */

static void
tm_memopt_clear_visited (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    BB_VISITED_P (bb) = false;
}
/* Replace TM load/stores with hints for the runtime.  We handle
   things like read-after-write, write-after-read, read-after-read,
   read-for-write, etc.  */

static unsigned int
execute_tm_memopt (void)
{
  struct tm_region *region;
  vec<basic_block> bbs;

  tm_memopt_value_id = 0;
  tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10);

  for (region = all_tm_regions; region; region = region->next)
    {
      /* All the TM stores/loads in the current region.  */
      size_t i;
      basic_block bb;

      bitmap_obstack_initialize (&tm_memopt_obstack);

      /* Save all BBs for the current region.  */
      bbs = get_tm_region_blocks (region->entry_block,
				  region->exit_blocks,
				  region->irr_blocks,
				  NULL,
				  false);

      /* Collect all the memory operations.  */
      for (i = 0; bbs.iterate (i, &bb); ++i)
	{
	  bb->aux = tm_memopt_init_sets ();
	  tm_memopt_accumulate_memops (bb);
	}

      /* Solve data flow equations and transform each block accordingly.  */
      tm_memopt_clear_visited (bbs);
      tm_memopt_compute_available (region, bbs);
      tm_memopt_clear_visited (bbs);
      tm_memopt_compute_antic (region, bbs);
      tm_memopt_transform_blocks (bbs);

      tm_memopt_free_sets (bbs);
      bbs.release ();
      bitmap_obstack_release (&tm_memopt_obstack);
      tm_memopt_value_numbers->empty ();
    }

  delete tm_memopt_value_numbers;
  tm_memopt_value_numbers = NULL;
  return 0;
}

namespace {

const pass_data pass_data_tm_memopt =
{
  GIMPLE_PASS, /* type */
  "tmmemopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_tm_memopt : public gimple_opt_pass
{
public:
  pass_tm_memopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_memopt, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm && optimize > 0; }
  virtual unsigned int execute (function *) { return execute_tm_memopt (); }

}; // class pass_tm_memopt

} // anon namespace

gimple_opt_pass *
make_pass_tm_memopt (gcc::context *ctxt)
{
  return new pass_tm_memopt (ctxt);
}
/* Interprocedural analysis for the creation of transactional clones.
   The aim of this pass is to find which functions are referenced in
   a non-irrevocable transaction context, and for those over which
   we have control (or user directive), create a version of the
   function which uses only the transactional interface to reference
   protected memories.  This analysis proceeds in several steps:

     (1) Collect the set of all possible transactional clones:

	(a) For all local public functions marked tm_callable, push
	    them onto the tm_callee queue.

	(b) For all local functions, scan for calls in transaction blocks.
	    Push the caller and callee onto the tm_caller and tm_callee
	    queues.  Count the number of callers for each callee.

	(c) For each local function on the callee list, assume we will
	    create a transactional clone.  Push *all* calls onto the
	    callee queues; count the number of clone callers separately
	    to the number of original callers.

     (2) Propagate irrevocable status up the dominator tree:

	(a) Any external function on the callee list that is not marked
	    tm_callable is irrevocable.  Push all callers of such onto
	    the worklist.

	(b) For each function on the worklist, mark each block that
	    contains an irrevocable call.  Use the AND operator to
	    propagate that mark up the dominator tree.

	(c) If we reach the entry block for a possible transactional
	    clone, then the transactional clone is irrevocable, and
	    we should not create the clone after all.  Push all
	    callers onto the worklist.

	(d) Place tm_irrevocable calls at the beginning of the relevant
	    blocks.  Special case here is the entry block for the entire
	    transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
	    the library to begin the region in serial mode.  Decrement
	    the call count for all callees in the irrevocable region.

     (3) Create the transactional clones:

	Any tm_callee that still has a non-zero call count is cloned.  */
/* This structure is stored in the AUX field of each cgraph_node.  */
struct tm_ipa_cg_data
{
  /* The clone of the function that got created.  */
  struct cgraph_node *clone;

  /* The tm regions in the normal function.  */
  struct tm_region *all_tm_regions;

  /* The blocks of the normal/clone functions that contain irrevocable
     calls, or blocks that are post-dominated by irrevocable calls.  */
  bitmap irrevocable_blocks_normal;
  bitmap irrevocable_blocks_clone;

  /* The blocks of the normal function that are involved in transactions.  */
  bitmap transaction_blocks_normal;

  /* The number of callers to the transactional clone of this function
     from normal and transactional clones respectively.  */
  unsigned tm_callers_normal;
  unsigned tm_callers_clone;

  /* True if all calls to this function's transactional clone
     are irrevocable.  Also automatically true if the function
     has no transactional clone.  */
  bool is_irrevocable;

  /* Flags indicating the presence of this function in various queues.  */
  bool in_callee_queue;
  bool in_worklist;

  /* Flags indicating the kind of scan desired while in the worklist.  */
  bool want_irr_scan_normal;
};

typedef vec<cgraph_node *> cgraph_node_queue;
/* Return the ipa data associated with NODE, allocating zeroed memory
   if necessary.  TRAVERSE_ALIASES is true if we must traverse aliases
   and set *NODE accordingly.  */

static struct tm_ipa_cg_data *
get_cg_data (struct cgraph_node **node, bool traverse_aliases)
{
  struct tm_ipa_cg_data *d;

  if (traverse_aliases && (*node)->alias)
    *node = (*node)->get_alias_target ();

  d = (struct tm_ipa_cg_data *) (*node)->aux;

  if (d == NULL)
    {
      d = (struct tm_ipa_cg_data *)
	obstack_alloc (&tm_obstack.obstack, sizeof (*d));
      (*node)->aux = (void *) d;
      memset (d, 0, sizeof (*d));
    }

  return d;
}

/* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
   it is already present.  */

static void
maybe_push_queue (struct cgraph_node *node,
		  cgraph_node_queue *queue_p, bool *in_queue_p)
{
  if (!*in_queue_p)
    {
      *in_queue_p = true;
      queue_p->safe_push (node);
    }
}
/* Duplicate the basic blocks in QUEUE for use in the uninstrumented
   code path.  QUEUE are the basic blocks inside the transaction
   represented in REGION.

   Later in split_code_paths() we will add the conditional to choose
   between the two alternatives.  */

static void
ipa_uninstrument_transaction (struct tm_region *region,
			      vec<basic_block> queue)
{
  gimple transaction = region->transaction_stmt;
  basic_block transaction_bb = gimple_bb (transaction);
  int n = queue.length ();
  basic_block *new_bbs = XNEWVEC (basic_block, n);

  copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb,
	    true);
  edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED);
  add_phi_args_after_copy (new_bbs, n, e);

  // Now we will have a GIMPLE_ATOMIC with 3 possible edges out of it.
  //   a) EDGE_FALLTHRU into the transaction
  //   b) EDGE_TM_ABORT out of the transaction
  //   c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks.

  free (new_bbs);
}
/* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
   Queue all callees within block BB.  */

static void
ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
			 basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl)
	    {
	      struct tm_ipa_cg_data *d;
	      unsigned *pcallers;
	      struct cgraph_node *node;

	      if (is_tm_ending_fndecl (fndecl))
		continue;
	      if (find_tm_replacement_function (fndecl))
		continue;

	      node = cgraph_node::get (fndecl);
	      gcc_assert (node != NULL);
	      d = get_cg_data (&node, true);

	      pcallers = (for_clone ? &d->tm_callers_clone
			  : &d->tm_callers_normal);
	      *pcallers += 1;

	      maybe_push_queue (node, callees_p, &d->in_callee_queue);
	    }
	}
    }
}
/* Scan all calls in NODE that are within a transaction region,
   and push the resulting nodes into the callee queue.  */

static void
ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
			       cgraph_node_queue *callees_p)
{
  struct tm_region *r;

  d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
  d->all_tm_regions = all_tm_regions;

  for (r = all_tm_regions; r; r = r->next)
    {
      vec<basic_block> bbs;
      basic_block bb;
      unsigned i;

      bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
				  d->transaction_blocks_normal, false);

      // Generate the uninstrumented code path for this transaction.
      ipa_uninstrument_transaction (r, bbs);

      FOR_EACH_VEC_ELT (bbs, i, bb)
	ipa_tm_scan_calls_block (callees_p, bb, false);

      bbs.release ();
    }

  // ??? copy_bbs should maintain cgraph edges for the blocks as it is
  // copying them, rather than forcing us to do this externally.
  cgraph_edge::rebuild_edges ();

  // ??? In ipa_uninstrument_transaction we don't try to update dominators
  // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects.
  // Instead, just release dominators here so update_ssa recomputes them.
  free_dominance_info (CDI_DOMINATORS);

  // When building the uninstrumented code path, copy_bbs will have invoked
  // create_new_def_for starting an "ssa update context".  There is only one
  // instance of this context, so resolve ssa updates before moving on to
  // the next function.
  update_ssa (TODO_update_ssa);
}
/* Scan all calls in NODE as if this is the transactional clone,
   and push the destinations into the callee queue.  */

static void
ipa_tm_scan_calls_clone (struct cgraph_node *node,
			 cgraph_node_queue *callees_p)
{
  struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
  basic_block bb;

  FOR_EACH_BB_FN (bb, fn)
    ipa_tm_scan_calls_block (callees_p, bb, true);
}
/* The function NODE has been detected to be irrevocable.  Push all
   of its callers onto WORKLIST for the purpose of re-scanning them.  */

static void
ipa_tm_note_irrevocable (struct cgraph_node *node,
			 cgraph_node_queue *worklist_p)
{
  struct tm_ipa_cg_data *d = get_cg_data (&node, true);
  struct cgraph_edge *e;

  d->is_irrevocable = true;

  for (e = node->callers; e; e = e->next_caller)
    {
      basic_block bb;
      struct cgraph_node *caller;

      /* Don't examine recursive calls.  */
      if (e->caller == node)
	continue;
      /* Even if we think we can go irrevocable, believe the user
	 above all.  */
      if (is_tm_safe_or_pure (e->caller->decl))
	continue;

      caller = e->caller;
      d = get_cg_data (&caller, true);

      /* Check if the callee is in a transactional region.  If so,
	 schedule the function for normal re-scan as well.  */
      bb = gimple_bb (e->call_stmt);
      gcc_assert (bb != NULL);
      if (d->transaction_blocks_normal
	  && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
	d->want_irr_scan_normal = true;

      maybe_push_queue (caller, worklist_p, &d->in_worklist);
    }
}
/* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
   within the block is irrevocable.  */

static bool
ipa_tm_scan_irr_block (basic_block bb)
{
  gimple_stmt_iterator gsi;
  tree fn;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
        {
        case GIMPLE_ASSIGN:
          if (gimple_assign_single_p (stmt))
            {
              tree lhs = gimple_assign_lhs (stmt);
              tree rhs = gimple_assign_rhs1 (stmt);
              if (volatile_var_p (lhs) || volatile_var_p (rhs))
                return true;
            }
          break;

        case GIMPLE_CALL:
          {
            tree lhs = gimple_call_lhs (stmt);
            if (lhs && volatile_var_p (lhs))
              return true;

            if (is_tm_pure_call (stmt))
              break;

            fn = gimple_call_fn (stmt);

            /* Functions with the attribute are by definition irrevocable.  */
            if (is_tm_irrevocable (fn))
              return true;

            /* For direct function calls, go ahead and check for replacement
               functions, or transitive irrevocable functions.  For indirect
               functions, we'll ask the runtime.  */
            if (TREE_CODE (fn) == ADDR_EXPR)
              {
                struct tm_ipa_cg_data *d;
                struct cgraph_node *node;

                fn = TREE_OPERAND (fn, 0);
                if (is_tm_ending_fndecl (fn))
                  break;
                if (find_tm_replacement_function (fn))
                  break;

                node = cgraph_node::get (fn);
                d = get_cg_data (&node, true);

                /* Return true if irrevocable, but above all, believe
                   the user.  */
                if (d->is_irrevocable
                    && !is_tm_safe_or_pure (fn))
                  return true;
              }
            break;
          }

        case GIMPLE_ASM:
          /* ??? The Approved Method of indicating that an inline
             assembly statement is not relevant to the transaction
             is to wrap it in a __tm_waiver block.  This is not
             yet implemented, so we can't check for it.  */
          if (is_tm_safe (current_function_decl))
            {
              tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
              SET_EXPR_LOCATION (t, gimple_location (stmt));
              error ("%Kasm not allowed in %<transaction_safe%> function", t);
            }
          return true;

        default:
          break;
        }
    }

  return false;
}
/* For each of the blocks seeded within PQUEUE, walk the CFG looking
   for new irrevocable blocks, marking them in NEW_IRR.  Don't bother
   scanning past OLD_IRR or EXIT_BLOCKS.  */

static bool
ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
                        bitmap old_irr, bitmap exit_blocks)
{
  bool any_new_irr = false;
  edge e;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  do
    {
      basic_block bb = pqueue->pop ();

      /* Don't re-scan blocks we know already are irrevocable.  */
      if (old_irr && bitmap_bit_p (old_irr, bb->index))
        continue;

      if (ipa_tm_scan_irr_block (bb))
        {
          bitmap_set_bit (new_irr, bb->index);
          any_new_irr = true;
        }
      else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
            if (!bitmap_bit_p (visited_blocks, e->dest->index))
              {
                bitmap_set_bit (visited_blocks, e->dest->index);
                pqueue->safe_push (e->dest);
              }
        }
    }
  while (!pqueue->is_empty ());

  BITMAP_FREE (visited_blocks);

  return any_new_irr;
}
/* Propagate the irrevocable property both up and down the dominator tree.
   BB is the current block being scanned; EXIT_BLOCKS are the edges of the
   TM regions; OLD_IRR are the results of a previous scan of the dominator
   tree which has been fully propagated; NEW_IRR is the set of new blocks
   which are gaining the irrevocable property during the current scan.  */

static void
ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
                      bitmap old_irr, bitmap exit_blocks)
{
  vec<basic_block> bbs;
  bitmap all_region_blocks;

  /* If this block is in the old set, no need to rescan.  */
  if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
    return;

  all_region_blocks = BITMAP_ALLOC (&tm_obstack);
  bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
                              all_region_blocks, false);
  do
    {
      basic_block bb = bbs.pop ();
      bool this_irr = bitmap_bit_p (new_irr, bb->index);
      bool all_son_irr = false;
      edge_iterator ei;
      edge e;

      /* Propagate up.  If my children are, I am too, but we must have
         at least one child that is.  */
      if (!this_irr)
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if (!bitmap_bit_p (new_irr, e->dest->index))
                {
                  all_son_irr = false;
                  break;
                }
              else
                all_son_irr = true;
            }
          if (all_son_irr)
            {
              /* Add block to new_irr if it hasn't already been processed.  */
              if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
                {
                  bitmap_set_bit (new_irr, bb->index);
                  this_irr = true;
                }
            }
        }

      /* Propagate down to everyone we immediately dominate.  */
      if (this_irr)
        {
          basic_block son;
          for (son = first_dom_son (CDI_DOMINATORS, bb);
               son;
               son = next_dom_son (CDI_DOMINATORS, son))
            {
              /* Make sure block is actually in a TM region, and it
                 isn't already in old_irr.  */
              if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
                  && bitmap_bit_p (all_region_blocks, son->index))
                bitmap_set_bit (new_irr, son->index);
            }
        }
    }
  while (!bbs.is_empty ());

  BITMAP_FREE (all_region_blocks);
}
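
/* For illustration of the propagation rules above: in a diamond-shaped
   region A -> {B, C} -> D in which the scan marked both B and C
   irrevocable, the upward rule also marks A (every successor of A is
   irrevocable), and the downward rule then marks everything A immediately
   dominates inside the region, here D, even though D itself contained no
   irrevocable statement.  This sketch ignores iteration order and is only
   meant to show the intent of the two rules.  */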
static void
ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl)
            {
              struct tm_ipa_cg_data *d;
              unsigned *pcallers;
              struct cgraph_node *tnode;

              if (is_tm_ending_fndecl (fndecl))
                continue;
              if (find_tm_replacement_function (fndecl))
                continue;

              tnode = cgraph_node::get (fndecl);
              d = get_cg_data (&tnode, true);

              pcallers = (for_clone ? &d->tm_callers_clone
                          : &d->tm_callers_normal);

              gcc_assert (*pcallers > 0);
              *pcallers -= 1;
            }
        }
    }
}
/* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
   as well as other irrevocable actions such as inline assembly.  Mark all
   such blocks as irrevocable and decrement the number of calls to
   transactional clones.  Return true if, for the transactional clone, the
   entire function is irrevocable.  */

static bool
ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
{
  struct tm_ipa_cg_data *d;
  bitmap new_irr, old_irr;
  bool ret = false;

  /* Builtin operators (operator new, and such).  */
  if (DECL_STRUCT_FUNCTION (node->decl) == NULL
      || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
    return false;

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  d = get_cg_data (&node, true);
  auto_vec<basic_block, 10> queue;
  new_irr = BITMAP_ALLOC (&tm_obstack);

  /* Scan each tm region, propagating irrevocable status through the tree.  */
  if (for_clone)
    {
      old_irr = d->irrevocable_blocks_clone;
      queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
        {
          ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
                                new_irr, old_irr, NULL);
          ret = bitmap_bit_p (new_irr,
                              single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
        }
    }
  else
    {
      struct tm_region *region;

      old_irr = d->irrevocable_blocks_normal;
      for (region = d->all_tm_regions; region; region = region->next)
        {
          queue.quick_push (region->entry_block);
          if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
                                      region->exit_blocks))
            ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
                                  region->exit_blocks);
        }
    }

  /* If we found any new irrevocable blocks, reduce the call count for
     transactional clones within the irrevocable blocks.  Save the new
     set of irrevocable blocks for next time.  */
  if (!bitmap_empty_p (new_irr))
    {
      bitmap_iterator bmi;
      unsigned i;

      EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
        ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
                                       for_clone);

      if (old_irr)
        {
          bitmap_ior_into (old_irr, new_irr);
          BITMAP_FREE (new_irr);
        }
      else if (for_clone)
        d->irrevocable_blocks_clone = new_irr;
      else
        d->irrevocable_blocks_normal = new_irr;

      if (dump_file && new_irr)
        {
          const char *dname;
          bitmap_iterator bmi;
          unsigned i;

          dname = lang_hooks.decl_printable_name (current_function_decl, 2);
          EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
            fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
        }
    }
  else
    BITMAP_FREE (new_irr);

  pop_cfun ();

  return ret;
}
/* Return true if, for the transactional clone of NODE, any call
   may enter irrevocable mode.  */

static bool
ipa_tm_mayenterirr_function (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  tree decl;
  unsigned flags;

  d = get_cg_data (&node, true);
  decl = node->decl;
  flags = flags_from_decl_or_type (decl);

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags & ECF_TM_BUILTIN)
    return false;

  /* Filter out all functions that are marked.  */
  if (flags & ECF_TM_PURE)
    return false;
  if (is_tm_safe (decl))
    return false;
  if (is_tm_irrevocable (decl))
    return true;
  if (is_tm_callable (decl))
    return true;
  if (find_tm_replacement_function (decl))
    return true;

  /* If we aren't seeing the final version of the function we don't
     know what it will contain at runtime.  */
  if (node->get_availability () < AVAIL_AVAILABLE)
    return true;

  /* If the function must go irrevocable, then of course true.  */
  if (d->is_irrevocable)
    return true;

  /* If there are any blocks marked irrevocable, then the function
     as a whole may enter irrevocable.  */
  if (d->irrevocable_blocks_clone)
    return true;

  /* We may have previously marked this function as tm_may_enter_irr;
     see pass_diagnose_tm_blocks.  */
  if (node->local.tm_may_enter_irr)
    return true;

  /* Recurse on the main body for aliases.  In general, this will
     result in one of the bits above being set so that we will not
     have to recurse next time.  */
  if (node->alias)
    return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));

  /* What remains is unmarked local functions without items that force
     the function to go irrevocable.  */
  return false;
}
/* Diagnose calls from transaction_safe functions to unmarked
   functions that are determined to not be safe.  */

static void
ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    if (!is_tm_callable (e->callee->decl)
        && e->callee->local.tm_may_enter_irr)
      error_at (gimple_location (e->call_stmt),
                "unsafe function call %qD within "
                "%<transaction_safe%> function", e->callee->decl);
}
/* Diagnose call from atomic transactions to unmarked functions
   that are determined to not be safe.  */

static void
ipa_tm_diagnose_transaction (struct cgraph_node *node,
                             struct tm_region *all_tm_regions)
{
  struct tm_region *r;

  for (r = all_tm_regions; r; r = r->next)
    if (gimple_transaction_subcode (r->get_transaction_stmt ())
        & GTMA_IS_RELAXED)
      {
        /* Atomic transactions can be nested inside relaxed.  */
        if (r->inner)
          ipa_tm_diagnose_transaction (node, r->inner);
      }
    else
      {
        vec<basic_block> bbs;
        gimple_stmt_iterator gsi;
        basic_block bb;
        size_t i;

        bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
                                    r->irr_blocks, NULL, false);

        for (i = 0; bbs.iterate (i, &bb); ++i)
          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              gimple stmt = gsi_stmt (gsi);
              tree fndecl;

              if (gimple_code (stmt) == GIMPLE_ASM)
                {
                  error_at (gimple_location (stmt),
                            "asm not allowed in atomic transaction");
                  continue;
                }

              if (!is_gimple_call (stmt))
                continue;
              fndecl = gimple_call_fndecl (stmt);

              /* Indirect function calls have been diagnosed already.  */
              if (!fndecl)
                continue;

              /* Stop at the end of the transaction.  */
              if (is_tm_ending_fndecl (fndecl))
                {
                  if (bitmap_bit_p (r->exit_blocks, bb->index))
                    break;
                  continue;
                }

              /* Marked functions have been diagnosed already.  */
              if (is_tm_pure_call (stmt))
                continue;
              if (is_tm_callable (fndecl))
                continue;

              if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
                error_at (gimple_location (stmt),
                          "unsafe function call %qD within "
                          "atomic transaction", fndecl);
            }
      }
}
/* Return a transactional mangled name for the DECL_ASSEMBLER_NAME in
   OLD_DECL.  The returned value is a freshly malloced pointer that
   should be freed by the caller.  */

static tree
tm_mangle (tree old_asm_id)
{
  const char *old_asm_name;
  char *tm_name;
  void *alloc = NULL;
  struct demangle_component *dc;
  tree new_asm_id;

  /* Determine if the symbol is already a valid C++ mangled name.  Do this
     even for C, which might be interfacing with C++ code via appropriately
     ugly identifiers.  */
  /* ??? We could probably do just as well checking for "_Z" and be done.  */
  old_asm_name = IDENTIFIER_POINTER (old_asm_id);
  dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);

  if (dc == NULL)
    {
      char length[8];

    do_unencoded:
      sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
      tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
    }
  else
    {
      old_asm_name += 2;	/* Skip _Z */

      switch (dc->type)
        {
        case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
        case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
          /* Don't play silly games, you!  */
          goto do_unencoded;

        case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
          /* I'd really like to know if we can ever be passed one of
             these from the C++ front end.  The Logical Thing would
             seem that hidden-alias should be outer-most, so that we
             get hidden-alias of a transaction-clone and not vice-versa.  */
          gcc_unreachable ();

        default:
          break;
        }

      tm_name = concat ("_ZGTt", old_asm_name, NULL);
    }
  free (alloc);

  new_asm_id = get_identifier (tm_name);
  free (tm_name);

  return new_asm_id;
}
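
/* For illustration of the mangling above: a plain C identifier "foo"
   (length 3) takes the unencoded branch and becomes "_ZGTt3foo", while an
   already-mangled C++ name such as "_Z3foov" takes the encoded branch,
   dropping "_Z" and becoming "_ZGTt3foov".  The identifiers here are made
   up for the example.  */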
static inline void
ipa_tm_mark_force_output_node (struct cgraph_node *node)
{
  node->mark_force_output ();
  node->analyzed = true;
}

static inline void
ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
{
  node->forced_by_abi = true;
  node->analyzed = true;
}
/* Callback data for ipa_tm_create_version_alias.  */
struct create_version_alias_info
{
  struct cgraph_node *old_node;
  tree new_decl;
};

/* A subroutine of ipa_tm_create_version, called via
   cgraph_for_node_and_aliases.  Create new tm clones for each of
   the existing aliases.  */
static bool
ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
{
  struct create_version_alias_info *info
    = (struct create_version_alias_info *)data;
  tree old_decl, new_decl, tm_name;
  struct cgraph_node *new_node;

  if (!node->cpp_implicit_alias)
    return false;

  old_decl = node->decl;
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
                         TREE_CODE (old_decl), tm_name,
                         TREE_TYPE (old_decl));

  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);

  /* Based loosely on C++'s make_alias_for().  */
  TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
  DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
  DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
  TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
  DECL_EXTERNAL (new_decl) = 0;
  DECL_ARTIFICIAL (new_decl) = 1;
  TREE_ADDRESSABLE (new_decl) = 1;
  TREE_USED (new_decl) = 1;
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    varpool_node::get (new_decl)->set_comdat_group
      (tm_mangle (decl_comdat_group_id (old_decl)));

  new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
  new_node->tm_clone = true;
  new_node->externally_visible = info->old_node->externally_visible;
  new_node->no_reorder = info->old_node->no_reorder;
  /* ?? Do not traverse aliases here.  */
  get_cg_data (&node, false)->clone = new_node;

  record_tm_clone_pair (old_decl, new_decl);

  if (info->old_node->force_output
      || info->old_node->ref_list.first_referring ())
    ipa_tm_mark_force_output_node (new_node);
  if (info->old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);
  return false;
}
/* Create a copy of the function (possibly declaration only) of OLD_NODE,
   appropriate for the transactional clone.  */

static void
ipa_tm_create_version (struct cgraph_node *old_node)
{
  tree new_decl, old_decl, tm_name;
  struct cgraph_node *new_node;

  old_decl = old_node->decl;
  new_decl = copy_node (old_decl);

  /* DECL_ASSEMBLER_NAME needs to be set before we call
     cgraph_copy_node_for_versioning below, because cgraph_node will
     fill the assembler_name_hash.  */
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    varpool_node::get (new_decl)->set_comdat_group
      (tm_mangle (DECL_COMDAT_GROUP (old_decl)));

  gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
  new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
  new_node->local.local = false;
  new_node->externally_visible = old_node->externally_visible;
  new_node->lowered = true;
  new_node->tm_clone = 1;
  if (!old_node->implicit_section)
    new_node->set_section (old_node->get_section ());
  get_cg_data (&old_node, true)->clone = new_node;

  if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
    {
      /* Remap extern inline to static inline.  */
      /* ??? Is it worth trying to use make_decl_one_only?  */
      if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
        {
          DECL_EXTERNAL (new_decl) = 0;
          TREE_PUBLIC (new_decl) = 0;
          DECL_WEAK (new_decl) = 0;
        }

      tree_function_versioning (old_decl, new_decl,
                                NULL, false, NULL,
                                false, NULL, NULL);
    }

  record_tm_clone_pair (old_decl, new_decl);

  symtab->call_cgraph_insertion_hooks (new_node);
  if (old_node->force_output
      || old_node->ref_list.first_referring ())
    ipa_tm_mark_force_output_node (new_node);
  if (old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);

  /* Do the same thing, but for any aliases of the original node.  */
  {
    struct create_version_alias_info data;
    data.old_node = old_node;
    data.new_decl = new_decl;
    old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
                                                  &data, true);
  }
}
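
/* Note: the clone built above shares the original body via
   tree_function_versioning and is registered with record_tm_clone_pair,
   which is the mapping that get_tm_clone_pair consults later (for example
   in ipa_tm_insert_gettmclone_call below) to find the transactional twin
   of a decl.  */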
/* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB.  */

static void
ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
                        basic_block bb)
{
  gimple_stmt_iterator gsi;
  gcall *g;

  transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
                         1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));

  split_block_after_labels (bb);
  gsi = gsi_after_labels (bb);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);

  node->create_edge (cgraph_node::get_create
                       (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
                     g, 0,
                     compute_call_stmt_bb_frequency (node->decl,
                                                     gimple_bb (g)));
}
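
/* For illustration: the statement built above is a call to the builtin
   behind BUILT_IN_TM_IRREVOCABLE with argument MODE_SERIALIRREVOCABLE (0),
   i.e. roughly

       __builtin__ITM_changeTransactionMode (0);

   inserted after the labels of BB.  The libitm entry-point name is the one
   presumed to be bound to this builtin in gtm-builtins.def.  */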
/* Construct a call to TM_GETTMCLONE and insert it before GSI.  */

static bool
ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
                               struct tm_region *region,
                               gimple_stmt_iterator *gsi, gcall *stmt)
{
  tree gettm_fn, ret, old_fn, callfn;
  gcall *g;
  gassign *g2;
  bool safe;

  old_fn = gimple_call_fn (stmt);

  if (TREE_CODE (old_fn) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (old_fn, 0);
      tree clone = get_tm_clone_pair (fndecl);

      /* By transforming the call into a TM_GETTMCLONE, we are
         technically taking the address of the original function and
         its clone.  Explain this so inlining will know this function
         is used.  */
      cgraph_node::get (fndecl)->mark_address_taken ();
      if (clone)
        cgraph_node::get (clone)->mark_address_taken ();
    }

  safe = is_tm_safe (TREE_TYPE (old_fn));
  gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
                                    : BUILT_IN_TM_GETTMCLONE_IRR);
  ret = create_tmp_var (ptr_type_node);

  if (!safe)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  /* Discard OBJ_TYPE_REF, since we weren't able to fold it.  */
  if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
    old_fn = OBJ_TYPE_REF_EXPR (old_fn);

  g = gimple_build_call (gettm_fn, 1, old_fn);
  ret = make_ssa_name (ret, g);
  gimple_call_set_lhs (g, ret);

  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  node->create_edge (cgraph_node::get_create (gettm_fn), g, 0,
                     compute_call_stmt_bb_frequency (node->decl,
                                                     gimple_bb (g)));

  /* Cast return value from tm_gettmclone* into appropriate function
     pointer.  */
  callfn = create_tmp_var (TREE_TYPE (old_fn));
  g2 = gimple_build_assign (callfn,
                            fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
  callfn = make_ssa_name (callfn, g2);
  gimple_assign_set_lhs (g2, callfn);
  gsi_insert_before (gsi, g2, GSI_SAME_STMT);

  /* ??? This is a hack to preserve the NOTHROW bit on the call,
     which we would have derived from the decl.  Failure to save
     this bit means we might have to split the basic block.  */
  if (gimple_call_nothrow_p (stmt))
    gimple_call_set_nothrow (stmt, true);

  gimple_call_set_fn (stmt, callfn);

  /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
     for a call statement.  Fix it.  */
  {
    tree lhs = gimple_call_lhs (stmt);
    tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
    if (lhs
        && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
      {
        tree temp;

        temp = create_tmp_reg (rettype);
        gimple_call_set_lhs (stmt, temp);

        g2 = gimple_build_assign (lhs,
                                  fold_build1 (VIEW_CONVERT_EXPR,
                                               TREE_TYPE (lhs), temp));
        gsi_insert_after (gsi, g2, GSI_SAME_STMT);
      }
  }

  cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
  if (e && e->indirect_info)
    e->indirect_info->polymorphic = false;

  return true;
}
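
/* For illustration: given an indirect call "lhs = fnptr (args);" inside a
   transaction, the code above rewrites it roughly as

       tmp = __builtin__ITM_getTMCloneIrr (fnptr);
       callfn = (call_type) tmp;
       lhs = callfn (args);

   using the _SAFE builtin (__builtin__ITM_getTMCloneSafe) when the
   pointer's type is transaction_safe.  The temporary names and the exact
   runtime entry-point names are only assumed for the example.  */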
/* Helper function for ipa_tm_transform_calls*.  Given a call
   statement in GSI which resides inside transaction REGION, redirect
   the call to either its wrapper function, or its clone.  */

static void
ipa_tm_transform_calls_redirect (struct cgraph_node *node,
                                 struct tm_region *region,
                                 gimple_stmt_iterator *gsi,
                                 bool *need_ssa_rename_p)
{
  gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
  struct cgraph_node *new_node;
  struct cgraph_edge *e = node->get_edge (stmt);
  tree fndecl = gimple_call_fndecl (stmt);

  /* For indirect calls, pass the address through the runtime.  */
  if (fndecl == NULL)
    {
      *need_ssa_rename_p |=
        ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
      return;
    }

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
    return;

  /* Fixup recursive calls inside clones.  */
  /* ??? Why did cgraph_copy_node_for_versioning update the call edges
     for recursion but not update the call statements themselves?  */
  if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
    {
      gimple_call_set_fndecl (stmt, current_function_decl);
      return;
    }

  /* If there is a replacement, use it.  */
  fndecl = find_tm_replacement_function (fndecl);
  if (fndecl)
    {
      new_node = cgraph_node::get_create (fndecl);

      /* ??? Mark all transaction_wrap functions tm_may_enter_irr.

         We can't do this earlier in record_tm_replacement because
         cgraph_remove_unreachable_nodes is called before we inject
         references to the node.  Further, we can't do this in some
         nice central place in ipa_tm_execute because we don't have
         the exact list of wrapper functions that would be used.
         Marking more wrappers than necessary results in the creation
         of unnecessary cgraph_nodes, which can cause some of the
         other IPA passes to crash.

         We do need to mark these nodes so that we get the proper
         result in expand_call_tm.  */
      /* ??? This seems broken.  How is it that we're marking the
         CALLEE as may_enter_irr?  Surely we should be marking the
         CALLER.  Also note that find_tm_replacement_function also
         contains mappings into the TM runtime, e.g. memcpy.  These
         we know won't go irrevocable.  */
      new_node->local.tm_may_enter_irr = 1;
    }
  else
    {
      struct tm_ipa_cg_data *d;
      struct cgraph_node *tnode = e->callee;

      d = get_cg_data (&tnode, true);
      new_node = d->clone;

      /* As we've already skipped pure calls and appropriate builtins,
         and we've already marked irrevocable blocks, if we can't come
         up with a static replacement, then ask the runtime.  */
      if (new_node == NULL)
        {
          *need_ssa_rename_p |=
            ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
          return;
        }

      fndecl = new_node->decl;
    }

  e->redirect_callee (new_node);
  gimple_call_set_fndecl (stmt, fndecl);
}
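
/* For illustration: a direct call "foo (x)" inside a transaction whose
   callee has a clone is simply retargeted, both on the cgraph edge and in
   the GIMPLE statement, to the clone whose assembler name tm_mangle
   produced (e.g. "_ZGTt3foo" for plain C).  A wrapper registered through
   find_tm_replacement_function takes precedence over the clone.  */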
/* Helper function for ipa_tm_transform_calls.  For a given BB,
   install calls to tm_irrevocable when IRR_BLOCKS are reached,
   redirect other calls to the generated transactional clone.  */

static bool
ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
                          basic_block bb, bitmap irr_blocks)
{
  gimple_stmt_iterator gsi;
  bool need_ssa_rename = false;

  if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
    {
      ipa_tm_insert_irr_call (node, region, bb);
      return true;
    }

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (!is_gimple_call (stmt))
        continue;
      if (is_tm_pure_call (stmt))
        continue;

      /* Redirect edges to the appropriate replacement or clone.  */
      ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
    }

  return need_ssa_rename;
}
/* Walk the CFG for REGION, beginning at BB.  Install calls to
   tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
   the generated transactional clone.  */

static bool
ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
                        basic_block bb, bitmap irr_blocks)
{
  bool need_ssa_rename = false;
  edge e;
  edge_iterator ei;
  auto_vec<basic_block> queue;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  queue.safe_push (bb);
  do
    {
      bb = queue.pop ();

      need_ssa_rename |=
        ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);

      if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
        continue;

      if (region && bitmap_bit_p (region->exit_blocks, bb->index))
        continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!bitmap_bit_p (visited_blocks, e->dest->index))
          {
            bitmap_set_bit (visited_blocks, e->dest->index);
            queue.safe_push (e->dest);
          }
    }
  while (!queue.is_empty ());

  BITMAP_FREE (visited_blocks);

  return need_ssa_rename;
}
/* Transform the calls within the TM regions within NODE.  */

static void
ipa_tm_transform_transaction (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  struct tm_region *region;
  bool need_ssa_rename = false;

  d = get_cg_data (&node, true);

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  for (region = d->all_tm_regions; region; region = region->next)
    {
      /* If we're sure to go irrevocable, don't transform anything.  */
      if (d->irrevocable_blocks_normal
          && bitmap_bit_p (d->irrevocable_blocks_normal,
                           region->entry_block->index))
        {
          transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
                                           | GTMA_MAY_ENTER_IRREVOCABLE
                                           | GTMA_HAS_NO_INSTRUMENTATION);
          continue;
        }

      need_ssa_rename |=
        ipa_tm_transform_calls (node, region, region->entry_block,
                                d->irrevocable_blocks_normal);
    }

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}
/* Transform the calls within the transactional clone of NODE.  */

static void
ipa_tm_transform_clone (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  bool need_ssa_rename;

  d = get_cg_data (&node, true);

  /* If this function makes no calls and has no irrevocable blocks,
     then there's nothing to do.  */
  /* ??? Remove non-aborting top-level transactions.  */
  if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
    return;

  push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  need_ssa_rename =
    ipa_tm_transform_calls (d->clone, NULL,
                            single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
                            d->irrevocable_blocks_clone);

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}
/* Main entry point for the transactional memory IPA pass.  */

static unsigned int
ipa_tm_execute (void)
{
  cgraph_node_queue tm_callees = cgraph_node_queue ();
  /* List of functions that will go irrevocable.  */
  cgraph_node_queue irr_worklist = cgraph_node_queue ();

  struct cgraph_node *node;
  struct tm_ipa_cg_data *d;
  enum availability a;
  unsigned int i;

#ifdef ENABLE_CHECKING
  cgraph_node::verify_cgraph_nodes ();
#endif

  bitmap_obstack_initialize (&tm_obstack);
  initialize_original_copy_tables ();

  /* For all local functions marked tm_callable, queue them.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (is_tm_callable (node->decl)
        && node->get_availability () >= AVAIL_INTERPOSABLE)
      {
        d = get_cg_data (&node, true);
        maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
      }

  /* For all local reachable functions...  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
        && node->get_availability () >= AVAIL_INTERPOSABLE)
      {
        /* ... marked tm_pure, record that fact for the runtime by
           indicating that the pure function is its own tm_callable.
           No need to do this if the function's address can't be taken.  */
        if (is_tm_pure (node->decl))
          {
            if (!node->local.local)
              record_tm_clone_pair (node->decl, node->decl);
            continue;
          }

        push_cfun (DECL_STRUCT_FUNCTION (node->decl));
        calculate_dominance_info (CDI_DOMINATORS);

        tm_region_init (NULL);
        if (all_tm_regions)
          {
            d = get_cg_data (&node, true);

            /* Scan for calls that are in each transaction, and
               generate the uninstrumented code path.  */
            ipa_tm_scan_calls_transaction (d, &tm_callees);

            /* Put it in the worklist so we can scan the function
               later (ipa_tm_scan_irr_function) and mark the
               irrevocable blocks.  */
            maybe_push_queue (node, &irr_worklist, &d->in_worklist);
            d->want_irr_scan_normal = true;
          }

        pop_cfun ();
      }

  /* For every local function on the callee list, scan as if we will be
     creating a transactional clone, queueing all new functions we find
     along the way.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      a = node->get_availability ();
      d = get_cg_data (&node, true);

      /* Put it in the worklist so we can scan the function later
         (ipa_tm_scan_irr_function) and mark the irrevocable
         blocks.  */
      maybe_push_queue (node, &irr_worklist, &d->in_worklist);

      /* Some callees cannot be arbitrarily cloned.  These will always be
         irrevocable.  Mark these now, so that we need not scan them.  */
      if (is_tm_irrevocable (node->decl))
        ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a <= AVAIL_NOT_AVAILABLE
               && !is_tm_safe_or_pure (node->decl))
        ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a >= AVAIL_INTERPOSABLE)
        {
          if (!tree_versionable_function_p (node->decl))
            ipa_tm_note_irrevocable (node, &irr_worklist);
          else if (!d->is_irrevocable)
            {
              /* If this is an alias, make sure its base is queued as well.
                 we need not scan the callees now, as the base will do.  */
              if (node->alias)
                {
                  node = cgraph_node::get (node->thunk.alias);
                  d = get_cg_data (&node, true);
                  maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
                  continue;
                }

              /* Add all nodes called by this function into
                 tm_callees as well.  */
              ipa_tm_scan_calls_clone (node, &tm_callees);
            }
        }
    }

  /* Iterate scans until no more work to be done.  Prefer not to use
     vec::pop because the worklist tends to follow a breadth-first
     search of the callgraph, which should allow convergence with a
     minimum number of scans.  But we also don't want the worklist
     array to grow without bound, so we shift the array up periodically.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      if (i > 256 && i == irr_worklist.length () / 8)
        {
          irr_worklist.block_remove (0, i);
          i = 0;
        }

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;

      if (d->want_irr_scan_normal)
        {
          d->want_irr_scan_normal = false;
          ipa_tm_scan_irr_function (node, false);
        }
      if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
        ipa_tm_note_irrevocable (node, &irr_worklist);
    }

  /* For every function on the callee list, collect the tm_may_enter_irr
     bit on the node.  */
  irr_worklist.truncate (0);
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (ipa_tm_mayenterirr_function (node))
        {
          d = get_cg_data (&node, true);
          gcc_assert (d->in_worklist == false);
          maybe_push_queue (node, &irr_worklist, &d->in_worklist);
        }
    }

  /* Propagate the tm_may_enter_irr bit to callers until stable.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      struct cgraph_node *caller;
      struct cgraph_edge *e;
      struct ipa_ref *ref;

      if (i > 256 && i == irr_worklist.length () / 8)
        {
          irr_worklist.block_remove (0, i);
          i = 0;
        }

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;
      node->local.tm_may_enter_irr = true;

      /* Propagate back to normal callers.  */
      for (e = node->callers; e; e = e->next_caller)
        {
          caller = e->caller;
          if (!is_tm_safe_or_pure (caller->decl)
              && !caller->local.tm_may_enter_irr)
            {
              d = get_cg_data (&caller, true);
              maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
            }
        }

      /* Propagate back to referring aliases as well.  */
      FOR_EACH_ALIAS (node, ref)
        {
          caller = dyn_cast <cgraph_node *> (ref->referring);
          if (!caller->local.tm_may_enter_irr)
            {
              /* ?? Do not traverse aliases here.  */
              d = get_cg_data (&caller, false);
              maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
            }
        }
    }

  /* Now validate all tm_safe functions, and all atomic regions in
     other functions.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
        && node->get_availability () >= AVAIL_INTERPOSABLE)
      {
        d = get_cg_data (&node, true);
        if (is_tm_safe (node->decl))
          ipa_tm_diagnose_tm_safe (node);
        else if (d->all_tm_regions)
          ipa_tm_diagnose_transaction (node, d->all_tm_regions);
      }

  /* Create clones.  Do those that are not irrevocable and have a
     positive call count.  Do those publicly visible functions that
     the user directed us to clone.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      bool doit = false;

      node = tm_callees[i];
      if (node->cpp_implicit_alias)
        continue;

      a = node->get_availability ();
      d = get_cg_data (&node, true);

      if (a <= AVAIL_NOT_AVAILABLE)
        doit = is_tm_callable (node->decl);
      else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
        doit = true;
      else if (!d->is_irrevocable
               && d->tm_callers_normal + d->tm_callers_clone > 0)
        doit = true;

      if (doit)
        ipa_tm_create_version (node);
    }

  /* Redirect calls to the new clones, and insert irrevocable marks.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (node->analyzed)
        {
          d = get_cg_data (&node, true);
          if (d->clone)
            ipa_tm_transform_clone (node);
        }
    }
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
        && node->get_availability () >= AVAIL_INTERPOSABLE)
      {
        d = get_cg_data (&node, true);
        if (d->all_tm_regions)
          ipa_tm_transform_transaction (node);
      }

  /* Free and clear all data structures.  */
  tm_callees.release ();
  irr_worklist.release ();
  bitmap_obstack_release (&tm_obstack);
  free_original_copy_tables ();

  FOR_EACH_FUNCTION (node)
    node->aux = NULL;

#ifdef ENABLE_CHECKING
  cgraph_node::verify_cgraph_nodes ();
#endif

  return 0;
}
namespace {

const pass_data pass_data_ipa_tm =
{
  SIMPLE_IPA_PASS, /* type */
  "tmipa", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_tm : public simple_ipa_opt_pass
{
public:
  pass_ipa_tm (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return ipa_tm_execute (); }

}; // class pass_ipa_tm

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_tm (gcc::context *ctxt)
{
  return new pass_ipa_tm (ctxt);
}
#include "gt-trans-mem.h"