/* Passes for transactional memory support.
   Copyright (C) 2008-2013 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hash-table.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "gimple-ssa.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
#include "tree-inline.h"
#include "diagnostic-core.h"
#include "trans-mem.h"
#include "langhooks.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-address.h"
#define PROB_VERY_UNLIKELY	(REG_BR_PROB_BASE / 2000 - 1)
#define PROB_VERY_LIKELY	(PROB_ALWAYS - PROB_VERY_UNLIKELY)
#define PROB_UNLIKELY		(REG_BR_PROB_BASE / 5 - 1)
#define PROB_LIKELY		(PROB_ALWAYS - PROB_VERY_LIKELY)
#define PROB_ALWAYS		(REG_BR_PROB_BASE)

#define A_RUNINSTRUMENTEDCODE	0x0001
#define A_RUNUNINSTRUMENTEDCODE	0x0002
#define A_SAVELIVEVARIABLES	0x0004
#define A_RESTORELIVEVARIABLES	0x0008
#define A_ABORTTRANSACTION	0x0010

#define AR_USERABORT		0x0001
#define AR_USERRETRY		0x0002
#define AR_TMCONFLICT		0x0004
#define AR_EXCEPTIONBLOCKABORT	0x0008
#define AR_OUTERABORT		0x0010

#define MODE_SERIALIRREVOCABLE	0x0000
/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction_atomic {
	  local++;
	  if (++global == 10)
	    __transaction_cancel;
	}

   During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
   trivially replaced with a GIMPLE_TRANSACTION node.

   During pass_lower_tm, we examine the body of transactions looking
   for aborts.  Transactions that do not contain an abort may be
   merged into an outer transaction.  We also add a TRY-FINALLY node
   to arrange for the transaction to be committed on any exit.

   [??? Think about how this arrangement affects throw-with-commit
   and throw-with-abort operations.  In this case we want the TRY to
   handle gotos, but not to catch any exceptions because the transaction
   will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    local = local + 1;
	    t0 = global;
	    t1 = t0 + 1;
	    global = t1;
	    if (t1 == 10)
	      __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

   During pass_lower_eh, we create EH regions for the transactions,
   intermixed with the regular EH stuff.  This gives us a nice persistent
   mapping (all the way through rtl) from transactional memory operation
   back to the transaction, which allows us to get the abnormal edges
   correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	local = local + 1;
	t0 = global;
	t1 = t0 + 1;
	global = t1;
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:

   This is the end of all_lowering_passes, and so is what is present
   during the IPA passes, and through all of the optimization passes.

   During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
   functions and mark functions for cloning.

   At the end of gimple optimization, before exiting SSA form,
   pass_tm_edges replaces statements that perform transactional
   memory operations with the appropriate TM builtins, and swaps
   out function calls with their transactional clones.  At this
   point we introduce the abnormal transaction restart edges and
   complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	eh_label:
	if (x & abort_transaction)
	  goto over;
	local = local + 1;
	t0 = __builtin___tm_load (global);
	t1 = t0 + 1;
	__builtin___tm_store (&global, t1);
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:
*/
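/* As a rough illustration (not part of this file's logic), user code such as
   the following is what the front end represents as TRANSACTION_EXPR and what
   ultimately becomes the __builtin___tm_* sequence shown above; the names
   "global" and "x" are hypothetical:

	__transaction_atomic
	{
	  if (++global == 10)
	    __transaction_cancel;
	  x = global;
	}
*/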
static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);
/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));

    default:
      if (TYPE_P (x))
	return NULL;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}
/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}
/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is, by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}
/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}
/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}
/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}
/* Return true for built in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}

/* Return true if STMT is a built in function call that "ends" a
   transaction.  */

bool
is_tm_ending (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl != NULL_TREE
	  && is_tm_ending_fndecl (fndecl));
}
/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}
/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}
/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}
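/* A hedged illustration of the flags this produces (front-end usage is
   hypothetical): a plain __transaction_cancel is built as
   build_tm_abort_call (loc, false), giving a BUILT_IN_TM_ABORT call whose
   argument is AR_USERABORT, while __transaction_cancel [[outer]] uses
   build_tm_abort_call (loc, true) and ORs in AR_OUTERABORT as well:

	build_tm_abort_call (loc, false)  ==> tm_abort (AR_USERABORT)
	build_tm_abort_call (loc, true)   ==> tm_abort (AR_USERABORT | AR_OUTERABORT)
*/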
/* Common gating function for several of the TM passes.  */

static bool
gate_tm (void)
{
  return flag_tm;
}

/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
     htab_t tm_wrap_map;
void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);

  h = ggc_alloc_tree_map ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = (struct tree_map **)
    htab_find_slot_with_hash (tm_wrap_map, h, h->hash, INSERT);
  *slot = h;
}
/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = (struct tree_map *) htab_find_with_hash (tm_wrap_map, &in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we already have these two defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here.  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	break;
      }

  return NULL_TREE;
}
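/* A hedged sketch of how the replacement map is used; the function decls are
   hypothetical.  A front end that wants calls to foo redirected to foo_txn
   inside transactions would do

	record_tm_replacement (foo_decl, foo_txn_decl);

   after which find_tm_replacement_function (foo_decl) returns foo_txn_decl,
   and the call expansion below can substitute it when instrumenting the
   call.  */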
/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */
void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing.  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}
/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple stmt;
};
/* Return true if T is a volatile variable of some kind.  */

static bool
volatile_var_p (tree t)
{
  return (SSA_VAR_P (t)
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}

/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (volatile_var_p (*tp)
      && d->block_flags & DIAG_TM_SAFE
      && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      error_at (gimple_location (d->stmt),
		"invalid volatile use of %qD inside transaction",
		*tp);
    }

  return NULL_TREE;
}

static inline bool
is_tm_safe_or_pure (const_tree x)
{
  return is_tm_safe (x) || is_tm_pure (x);
}
622 diagnose_tm_1 (gimple_stmt_iterator
*gsi
, bool *handled_ops_p
,
623 struct walk_stmt_info
*wi
)
625 gimple stmt
= gsi_stmt (*gsi
);
626 struct diagnose_tm
*d
= (struct diagnose_tm
*) wi
->info
;
628 /* Save stmt for use in leaf analysis. */
631 switch (gimple_code (stmt
))
635 tree fn
= gimple_call_fn (stmt
);
637 if ((d
->summary_flags
& DIAG_TM_OUTER
) == 0
638 && is_tm_may_cancel_outer (fn
))
639 error_at (gimple_location (stmt
),
640 "%<transaction_may_cancel_outer%> function call not within"
641 " outer transaction or %<transaction_may_cancel_outer%>");
643 if (d
->summary_flags
& DIAG_TM_SAFE
)
645 bool is_safe
, direct_call_p
;
648 if (TREE_CODE (fn
) == ADDR_EXPR
649 && TREE_CODE (TREE_OPERAND (fn
, 0)) == FUNCTION_DECL
)
651 direct_call_p
= true;
652 replacement
= TREE_OPERAND (fn
, 0);
653 replacement
= find_tm_replacement_function (replacement
);
659 direct_call_p
= false;
660 replacement
= NULL_TREE
;
663 if (is_tm_safe_or_pure (fn
))
665 else if (is_tm_callable (fn
) || is_tm_irrevocable (fn
))
667 /* A function explicitly marked transaction_callable as
668 opposed to transaction_safe is being defined to be
669 unsafe as part of its ABI, regardless of its contents. */
672 else if (direct_call_p
)
674 if (flags_from_decl_or_type (fn
) & ECF_TM_BUILTIN
)
676 else if (replacement
)
678 /* ??? At present we've been considering replacements
679 merely transaction_callable, and therefore might
680 enter irrevocable. The tm_wrap attribute has not
681 yet made it into the new language spec. */
686 /* ??? Diagnostics for unmarked direct calls moved into
687 the IPA pass. Section 3.2 of the spec details how
688 functions not marked should be considered "implicitly
689 safe" based on having examined the function body. */
695 /* An unmarked indirect call. Consider it unsafe even
696 though optimization may yet figure out how to inline. */
702 if (TREE_CODE (fn
) == ADDR_EXPR
)
703 fn
= TREE_OPERAND (fn
, 0);
704 if (d
->block_flags
& DIAG_TM_SAFE
)
707 error_at (gimple_location (stmt
),
708 "unsafe function call %qD within "
709 "atomic transaction", fn
);
712 if (!DECL_P (fn
) || DECL_NAME (fn
))
713 error_at (gimple_location (stmt
),
714 "unsafe function call %qE within "
715 "atomic transaction", fn
);
717 error_at (gimple_location (stmt
),
718 "unsafe indirect function call within "
719 "atomic transaction");
725 error_at (gimple_location (stmt
),
726 "unsafe function call %qD within "
727 "%<transaction_safe%> function", fn
);
730 if (!DECL_P (fn
) || DECL_NAME (fn
))
731 error_at (gimple_location (stmt
),
732 "unsafe function call %qE within "
733 "%<transaction_safe%> function", fn
);
735 error_at (gimple_location (stmt
),
736 "unsafe indirect function call within "
737 "%<transaction_safe%> function");
746 /* ??? We ought to come up with a way to add attributes to
747 asm statements, and then add "transaction_safe" to it.
748 Either that or get the language spec to resurrect __tm_waiver. */
749 if (d
->block_flags
& DIAG_TM_SAFE
)
750 error_at (gimple_location (stmt
),
751 "asm not allowed in atomic transaction");
752 else if (d
->func_flags
& DIAG_TM_SAFE
)
753 error_at (gimple_location (stmt
),
754 "asm not allowed in %<transaction_safe%> function");
757 case GIMPLE_TRANSACTION
:
759 unsigned char inner_flags
= DIAG_TM_SAFE
;
761 if (gimple_transaction_subcode (stmt
) & GTMA_IS_RELAXED
)
763 if (d
->block_flags
& DIAG_TM_SAFE
)
764 error_at (gimple_location (stmt
),
765 "relaxed transaction in atomic transaction");
766 else if (d
->func_flags
& DIAG_TM_SAFE
)
767 error_at (gimple_location (stmt
),
768 "relaxed transaction in %<transaction_safe%> function");
769 inner_flags
= DIAG_TM_RELAXED
;
771 else if (gimple_transaction_subcode (stmt
) & GTMA_IS_OUTER
)
774 error_at (gimple_location (stmt
),
775 "outer transaction in transaction");
776 else if (d
->func_flags
& DIAG_TM_OUTER
)
777 error_at (gimple_location (stmt
),
778 "outer transaction in "
779 "%<transaction_may_cancel_outer%> function");
780 else if (d
->func_flags
& DIAG_TM_SAFE
)
781 error_at (gimple_location (stmt
),
782 "outer transaction in %<transaction_safe%> function");
783 inner_flags
|= DIAG_TM_OUTER
;
786 *handled_ops_p
= true;
787 if (gimple_transaction_body (stmt
))
789 struct walk_stmt_info wi_inner
;
790 struct diagnose_tm d_inner
;
792 memset (&d_inner
, 0, sizeof (d_inner
));
793 d_inner
.func_flags
= d
->func_flags
;
794 d_inner
.block_flags
= d
->block_flags
| inner_flags
;
795 d_inner
.summary_flags
= d_inner
.func_flags
| d_inner
.block_flags
;
797 memset (&wi_inner
, 0, sizeof (wi_inner
));
798 wi_inner
.info
= &d_inner
;
800 walk_gimple_seq (gimple_transaction_body (stmt
),
801 diagnose_tm_1
, diagnose_tm_1_op
, &wi_inner
);
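/* A hedged illustration of the kind of code the checks above reject
   (user-level sketch, hypothetical names):

	__transaction_atomic
	{
	  (*fnptr) ();	// "unsafe indirect function call within atomic transaction"
	  __asm__ ("");	// "asm not allowed in atomic transaction"
	  x = vol_var;	// "invalid volatile use of ... inside transaction"
	}

   whereas the same statements are accepted inside __transaction_relaxed,
   which is allowed to go irrevocable.  */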
814 diagnose_tm_blocks (void)
816 struct walk_stmt_info wi
;
817 struct diagnose_tm d
;
819 memset (&d
, 0, sizeof (d
));
820 if (is_tm_may_cancel_outer (current_function_decl
))
821 d
.func_flags
= DIAG_TM_OUTER
| DIAG_TM_SAFE
;
822 else if (is_tm_safe (current_function_decl
))
823 d
.func_flags
= DIAG_TM_SAFE
;
824 d
.summary_flags
= d
.func_flags
;
826 memset (&wi
, 0, sizeof (wi
));
829 walk_gimple_seq (gimple_body (current_function_decl
),
830 diagnose_tm_1
, diagnose_tm_1_op
, &wi
);
837 const pass_data pass_data_diagnose_tm_blocks
=
839 GIMPLE_PASS
, /* type */
840 "*diagnose_tm_blocks", /* name */
841 OPTGROUP_NONE
, /* optinfo_flags */
843 true, /* has_execute */
844 TV_TRANS_MEM
, /* tv_id */
845 PROP_gimple_any
, /* properties_required */
846 0, /* properties_provided */
847 0, /* properties_destroyed */
848 0, /* todo_flags_start */
849 0, /* todo_flags_finish */
852 class pass_diagnose_tm_blocks
: public gimple_opt_pass
855 pass_diagnose_tm_blocks (gcc::context
*ctxt
)
856 : gimple_opt_pass (pass_data_diagnose_tm_blocks
, ctxt
)
859 /* opt_pass methods: */
860 bool gate () { return gate_tm (); }
861 unsigned int execute () { return diagnose_tm_blocks (); }
863 }; // class pass_diagnose_tm_blocks
868 make_pass_diagnose_tm_blocks (gcc::context
*ctxt
)
870 return new pass_diagnose_tm_blocks (ctxt
);
/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

       struct large { int x[1000]; };
       struct large lala = { 0 };
       __transaction_atomic {
	 lala.x[i] = 123;
	 ...
       }

   We can either save/restore:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       if (trxn & a_saveLiveVariables)
	 tmp_lala1 = lala.x[i];
       else if (a & a_restoreLiveVariables)
	 lala.x[i] = tmp_lala1;

   or use the logging functions:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       _ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree to shadow all of the writes to a given
   location (thus reducing the total number of logging calls), but not
   so high as to be called on a path that does not perform a
   write.  */
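/* A hedged sketch of the dominator placement described above (hypothetical
   user code).  For

	__transaction_atomic
	{
	  if (cond)
	    lala.x[i] = 1;
	  else
	    lala.x[i] = 2;
	}

   a single logging call emitted in the block dominating both stores (right
   after the transaction starts) covers both writes, but we must not hoist it
   so far that it would execute on a path that never writes lala.x[i].  */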
/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
typedef struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  vec<gimple> stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
} *tm_log_entry_t;
/* Log entry hashtable helpers.  */

struct log_entry_hasher
{
  typedef tm_log_entry value_type;
  typedef tm_log_entry compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};
/* Htab support.  Return hash value for a `tm_log_entry'.  */
inline hashval_t
log_entry_hasher::hash (const value_type *log)
{
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
inline bool
log_entry_hasher::equal (const value_type *log1, const compare_type *log2)
{
  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
inline void
log_entry_hasher::remove (value_type *lp)
{
  lp->stmts.release ();
  free (lp);
}
/* The actual log.  */
static hash_table <log_entry_hasher> tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static vec<tree> tm_log_save_addresses;
enum thread_memory_type
  {
    mem_non_local = 0,
    mem_thread_local,
    mem_transaction_local,
    mem_max
  };

typedef struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
} tm_new_mem_map_t;
/* Hashtable helpers.  */

struct tm_mem_map_hasher : typed_free_remove <tm_new_mem_map_t>
{
  typedef tm_new_mem_map_t value_type;
  typedef tm_new_mem_map_t compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};
inline hashval_t
tm_mem_map_hasher::hash (const value_type *v)
{
  return (intptr_t)v->val >> 4;
}

inline bool
tm_mem_map_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->val == c->val;
}
/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc).  */
static hash_table <tm_mem_map_hasher> tm_new_mem_hash;
/* Initialize logging data structures.  */
static void
tm_log_init (void)
{
  tm_log.create (10);
  tm_new_mem_hash.create (5);
  tm_log_save_addresses.create (5);
}

/* Free logging data structures.  */
static void
tm_log_delete (void)
{
  tm_log.dispose ();
  tm_new_mem_hash.dispose ();
  tm_log_save_addresses.release ();
}
1052 /* Return true if MEM is a transaction invariant memory for the TM
1053 region starting at REGION_ENTRY_BLOCK. */
1055 transaction_invariant_address_p (const_tree mem
, basic_block region_entry_block
)
1057 if ((TREE_CODE (mem
) == INDIRECT_REF
|| TREE_CODE (mem
) == MEM_REF
)
1058 && TREE_CODE (TREE_OPERAND (mem
, 0)) == SSA_NAME
)
1062 def_bb
= gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem
, 0)));
1063 return def_bb
!= region_entry_block
1064 && dominated_by_p (CDI_DOMINATORS
, region_entry_block
, def_bb
);
1067 mem
= strip_invariant_refs (mem
);
1068 return mem
&& (CONSTANT_CLASS_P (mem
) || decl_address_invariant_p (mem
));
/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry_block for the transaction.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.

   If known, ENTRY_BLOCK is the entry block for the region, otherwise
   NULL.  */
static void
tm_log_add (basic_block entry_block, tree addr, gimple stmt)
1089 tm_log_entry
**slot
;
1090 struct tm_log_entry l
, *lp
;
1093 slot
= tm_log
.find_slot (&l
, INSERT
);
1096 tree type
= TREE_TYPE (addr
);
1098 lp
= XNEW (struct tm_log_entry
);
1102 /* Small invariant addresses can be handled as save/restores. */
1104 && transaction_invariant_address_p (lp
->addr
, entry_block
)
1105 && TYPE_SIZE_UNIT (type
) != NULL
1106 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
))
1107 && ((HOST_WIDE_INT
) tree_to_uhwi (TYPE_SIZE_UNIT (type
))
1108 < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE
))
1109 /* We must be able to copy this type normally. I.e., no
1110 special constructors and the like. */
1111 && !TREE_ADDRESSABLE (type
))
1113 lp
->save_var
= create_tmp_reg (TREE_TYPE (lp
->addr
), "tm_save");
1114 lp
->stmts
.create (0);
1115 lp
->entry_block
= entry_block
;
	  /* Save addresses separately in dominator order so we don't
	     get confused by overlapping addresses in the save/restore
	     sequence.  */
1119 tm_log_save_addresses
.safe_push (lp
->addr
);
1123 /* Use the logging functions. */
1124 lp
->stmts
.create (5);
1125 lp
->stmts
.quick_push (stmt
);
1126 lp
->save_var
= NULL
;
1136 /* If we're generating a save/restore sequence, we don't care
1137 about statements. */
1141 for (i
= 0; lp
->stmts
.iterate (i
, &oldstmt
); ++i
)
1143 if (stmt
== oldstmt
)
1145 /* We already have a store to the same address, higher up the
1146 dominator tree. Nothing to do. */
1147 if (dominated_by_p (CDI_DOMINATORS
,
1148 gimple_bb (stmt
), gimple_bb (oldstmt
)))
1150 /* We should be processing blocks in dominator tree order. */
1151 gcc_assert (!dominated_by_p (CDI_DOMINATORS
,
1152 gimple_bb (oldstmt
), gimple_bb (stmt
)));
1154 /* Store is on a different code path. */
1155 lp
->stmts
.safe_push (stmt
);
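/* A hedged example of what tm_log_add records (hypothetical code).  If two
   stores to the same address sit on different paths,

	if (cond)
	  *p = 1;	// stmt A
	else
	  *p = 2;	// stmt B

   neither dominates the other, so the log entry for *p keeps both A and B in
   its STMTS vector; if instead A dominated B, only A would be kept.  */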
1159 /* Gimplify the address of a TARGET_MEM_REF. Return the SSA_NAME
1160 result, insert the new statements before GSI. */
1163 gimplify_addr (gimple_stmt_iterator
*gsi
, tree x
)
1165 if (TREE_CODE (x
) == TARGET_MEM_REF
)
1166 x
= tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x
)), x
);
1168 x
= build_fold_addr_expr (x
);
1169 return force_gimple_operand_gsi (gsi
, x
, true, NULL
, true, GSI_SAME_STMT
);
1172 /* Instrument one address with the logging functions.
1173 ADDR is the address to save.
1174 STMT is the statement before which to place it. */
1176 tm_log_emit_stmt (tree addr
, gimple stmt
)
1178 tree type
= TREE_TYPE (addr
);
1179 tree size
= TYPE_SIZE_UNIT (type
);
1180 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt
);
1182 enum built_in_function code
= BUILT_IN_TM_LOG
;
1184 if (type
== float_type_node
)
1185 code
= BUILT_IN_TM_LOG_FLOAT
;
1186 else if (type
== double_type_node
)
1187 code
= BUILT_IN_TM_LOG_DOUBLE
;
1188 else if (type
== long_double_type_node
)
1189 code
= BUILT_IN_TM_LOG_LDOUBLE
;
1190 else if (tree_fits_uhwi_p (size
))
1192 unsigned int n
= tree_to_uhwi (size
);
1196 code
= BUILT_IN_TM_LOG_1
;
1199 code
= BUILT_IN_TM_LOG_2
;
1202 code
= BUILT_IN_TM_LOG_4
;
1205 code
= BUILT_IN_TM_LOG_8
;
1208 code
= BUILT_IN_TM_LOG
;
1209 if (TREE_CODE (type
) == VECTOR_TYPE
)
1211 if (n
== 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64
))
1212 code
= BUILT_IN_TM_LOG_M64
;
1213 else if (n
== 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128
))
1214 code
= BUILT_IN_TM_LOG_M128
;
1215 else if (n
== 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256
))
1216 code
= BUILT_IN_TM_LOG_M256
;
1222 addr
= gimplify_addr (&gsi
, addr
);
1223 if (code
== BUILT_IN_TM_LOG
)
1224 log
= gimple_build_call (builtin_decl_explicit (code
), 2, addr
, size
);
1226 log
= gimple_build_call (builtin_decl_explicit (code
), 1, addr
);
1227 gsi_insert_before (&gsi
, log
, GSI_SAME_STMT
);
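/* A hedged illustration of the builtin selection above (sizes assumed):

	int i;        ==> BUILT_IN_TM_LOG_4 (&i)
	float f;      ==> BUILT_IN_TM_LOG_FLOAT (&f)
	char buf[3];  ==> BUILT_IN_TM_LOG (&buf, 3)

   i.e. the generic two-argument BUILT_IN_TM_LOG is the fallback when no
   size- or type-specific variant applies.  */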
/* Go through the log and instrument addresses that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   later.  */
1236 hash_table
<log_entry_hasher
>::iterator hi
;
1237 struct tm_log_entry
*lp
;
1239 FOR_EACH_HASH_TABLE_ELEMENT (tm_log
, lp
, tm_log_entry_t
, hi
)
1246 fprintf (dump_file
, "TM thread private mem logging: ");
1247 print_generic_expr (dump_file
, lp
->addr
, 0);
1248 fprintf (dump_file
, "\n");
1254 fprintf (dump_file
, "DUMPING to variable\n");
1260 fprintf (dump_file
, "DUMPING with logging functions\n");
1261 for (i
= 0; lp
->stmts
.iterate (i
, &stmt
); ++i
)
1262 tm_log_emit_stmt (lp
->addr
, stmt
);
1267 /* Emit the save sequence for the corresponding addresses in the log.
1268 ENTRY_BLOCK is the entry block for the transaction.
1269 BB is the basic block to insert the code in. */
1271 tm_log_emit_saves (basic_block entry_block
, basic_block bb
)
1274 gimple_stmt_iterator gsi
= gsi_last_bb (bb
);
1276 struct tm_log_entry l
, *lp
;
1278 for (i
= 0; i
< tm_log_save_addresses
.length (); ++i
)
1280 l
.addr
= tm_log_save_addresses
[i
];
1281 lp
= *(tm_log
.find_slot (&l
, NO_INSERT
));
1282 gcc_assert (lp
->save_var
!= NULL
);
1284 /* We only care about variables in the current transaction. */
1285 if (lp
->entry_block
!= entry_block
)
1288 stmt
= gimple_build_assign (lp
->save_var
, unshare_expr (lp
->addr
));
1290 /* Make sure we can create an SSA_NAME for this type. For
1291 instance, aggregates aren't allowed, in which case the system
1292 will create a VOP for us and everything will just work. */
1293 if (is_gimple_reg_type (TREE_TYPE (lp
->save_var
)))
1295 lp
->save_var
= make_ssa_name (lp
->save_var
, stmt
);
1296 gimple_assign_set_lhs (stmt
, lp
->save_var
);
1299 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
1303 /* Emit the restore sequence for the corresponding addresses in the log.
1304 ENTRY_BLOCK is the entry block for the transaction.
1305 BB is the basic block to insert the code in. */
1307 tm_log_emit_restores (basic_block entry_block
, basic_block bb
)
1310 struct tm_log_entry l
, *lp
;
1311 gimple_stmt_iterator gsi
;
1314 for (i
= tm_log_save_addresses
.length () - 1; i
>= 0; i
--)
1316 l
.addr
= tm_log_save_addresses
[i
];
1317 lp
= *(tm_log
.find_slot (&l
, NO_INSERT
));
1318 gcc_assert (lp
->save_var
!= NULL
);
1320 /* We only care about variables in the current transaction. */
1321 if (lp
->entry_block
!= entry_block
)
      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
1326 gsi
= gsi_start_bb (bb
);
1328 stmt
= gimple_build_assign (unshare_expr (lp
->addr
), lp
->save_var
);
1329 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
1334 static tree
lower_sequence_tm (gimple_stmt_iterator
*, bool *,
1335 struct walk_stmt_info
*);
1336 static tree
lower_sequence_no_tm (gimple_stmt_iterator
*, bool *,
1337 struct walk_stmt_info
*);
/* Evaluate an address X being dereferenced and determine if it
   originally points to a non-aliased new chunk of memory (malloc,
   alloca, etc).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
1353 enum tree_code code
;
1354 tm_new_mem_map_t
**slot
;
1355 tm_new_mem_map_t elt
, *elt_p
;
1357 enum thread_memory_type retval
= mem_transaction_local
;
1360 || TREE_CODE (x
) != SSA_NAME
1361 /* Possible uninitialized use, or a function argument. In
1362 either case, we don't care. */
1363 || SSA_NAME_IS_DEFAULT_DEF (x
))
1364 return mem_non_local
;
1366 /* Look in cache first. */
1368 slot
= tm_new_mem_hash
.find_slot (&elt
, INSERT
);
1371 return elt_p
->local_new_memory
;
1373 /* Optimistically assume the memory is transaction local during
1374 processing. This catches recursion into this variable. */
1375 *slot
= elt_p
= XNEW (tm_new_mem_map_t
);
1377 elt_p
->local_new_memory
= mem_transaction_local
;
1379 /* Search DEF chain to find the original definition of this address. */
1382 if (ptr_deref_may_alias_global_p (x
))
1384 /* Address escapes. This is not thread-private. */
1385 retval
= mem_non_local
;
1386 goto new_memory_ret
;
1389 stmt
= SSA_NAME_DEF_STMT (x
);
      /* If the malloc call is outside the transaction, this is
	 thread-local.  */
1393 if (retval
!= mem_thread_local
1394 && !dominated_by_p (CDI_DOMINATORS
, gimple_bb (stmt
), entry_block
))
1395 retval
= mem_thread_local
;
1397 if (is_gimple_assign (stmt
))
1399 code
= gimple_assign_rhs_code (stmt
);
1400 /* x = foo ==> foo */
1401 if (code
== SSA_NAME
)
1402 x
= gimple_assign_rhs1 (stmt
);
1403 /* x = foo + n ==> foo */
1404 else if (code
== POINTER_PLUS_EXPR
)
1405 x
= gimple_assign_rhs1 (stmt
);
1406 /* x = (cast*) foo ==> foo */
1407 else if (code
== VIEW_CONVERT_EXPR
|| code
== NOP_EXPR
)
1408 x
= gimple_assign_rhs1 (stmt
);
1409 /* x = c ? op1 : op2 == > op1 or op2 just like a PHI */
1410 else if (code
== COND_EXPR
)
1412 tree op1
= gimple_assign_rhs2 (stmt
);
1413 tree op2
= gimple_assign_rhs3 (stmt
);
1414 enum thread_memory_type mem
;
1415 retval
= thread_private_new_memory (entry_block
, op1
);
1416 if (retval
== mem_non_local
)
1417 goto new_memory_ret
;
1418 mem
= thread_private_new_memory (entry_block
, op2
);
1419 retval
= MIN (retval
, mem
);
1420 goto new_memory_ret
;
1424 retval
= mem_non_local
;
1425 goto new_memory_ret
;
1430 if (gimple_code (stmt
) == GIMPLE_PHI
)
1433 enum thread_memory_type mem
;
1434 tree phi_result
= gimple_phi_result (stmt
);
1436 /* If any of the ancestors are non-local, we are sure to
1437 be non-local. Otherwise we can avoid doing anything
1438 and inherit what has already been generated. */
1440 for (i
= 0; i
< gimple_phi_num_args (stmt
); ++i
)
1442 tree op
= PHI_ARG_DEF (stmt
, i
);
1444 /* Exclude self-assignment. */
1445 if (phi_result
== op
)
1448 mem
= thread_private_new_memory (entry_block
, op
);
1449 if (mem
== mem_non_local
)
1452 goto new_memory_ret
;
1454 retval
= MIN (retval
, mem
);
1456 goto new_memory_ret
;
1461 while (TREE_CODE (x
) == SSA_NAME
);
1463 if (stmt
&& is_gimple_call (stmt
) && gimple_call_flags (stmt
) & ECF_MALLOC
)
1464 /* Thread-local or transaction-local. */
1467 retval
= mem_non_local
;
1470 elt_p
->local_new_memory
= retval
;
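/* A hedged example of the classification above (hypothetical code):

	__transaction_atomic
	{
	  p = malloc (n);	// mem_transaction_local: a restart frees it
	  *p = 1;
	}

   while a pointer returned by a malloc call that dominates the transaction
   entry is only mem_thread_local, and anything that may alias global memory
   is mem_non_local.  */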
/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region where STMT resides;
   NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
static bool
requires_barrier (basic_block entry_block, tree x, gimple stmt)
1487 while (handled_component_p (x
))
1488 x
= TREE_OPERAND (x
, 0);
1490 switch (TREE_CODE (x
))
1495 enum thread_memory_type ret
;
1497 ret
= thread_private_new_memory (entry_block
, TREE_OPERAND (x
, 0));
1498 if (ret
== mem_non_local
)
1500 if (stmt
&& ret
== mem_thread_local
)
1501 /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */
1502 tm_log_add (entry_block
, orig
, stmt
);
      /* Transaction-locals require nothing at all.  For malloc, a
	 transaction restart frees the memory and we reallocate.
	 For alloca, the stack pointer gets reset by the retry and
	 we reuse the same address.  */
1511 case TARGET_MEM_REF
:
1512 if (TREE_CODE (TMR_BASE (x
)) != ADDR_EXPR
)
1514 x
= TREE_OPERAND (TMR_BASE (x
), 0);
1515 if (TREE_CODE (x
) == PARM_DECL
)
1517 gcc_assert (TREE_CODE (x
) == VAR_DECL
);
1523 if (DECL_BY_REFERENCE (x
))
1525 /* ??? This value is a pointer, but aggregate_value_p has been
1526 jigged to return true which confuses needs_to_live_in_memory.
1527 This ought to be cleaned up generically.
1529 FIXME: Verify this still happens after the next mainline
1530 merge. Testcase ie g++.dg/tm/pr47554.C.
1535 if (is_global_var (x
))
1536 return !TREE_READONLY (x
);
1537 if (/* FIXME: This condition should actually go below in the
1538 tm_log_add() call, however is_call_clobbered() depends on
1539 aliasing info which is not available during
1540 gimplification. Since requires_barrier() gets called
1541 during lower_sequence_tm/gimplification, leave the call
1542 to needs_to_live_in_memory until we eliminate
1543 lower_sequence_tm altogether. */
1544 needs_to_live_in_memory (x
))
      /* For local memory that doesn't escape (aka thread private
	 memory), we can either save the value at the beginning of
	 the transaction and restore on restart, or call a tm
	 function to dynamically save and restore on restart
	 (ITM_L*).  */
1554 tm_log_add (entry_block
, orig
, stmt
);
1563 /* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
1564 a transaction region. */
1567 examine_assign_tm (unsigned *state
, gimple_stmt_iterator
*gsi
)
1569 gimple stmt
= gsi_stmt (*gsi
);
1571 if (requires_barrier (/*entry_block=*/NULL
, gimple_assign_rhs1 (stmt
), NULL
))
1572 *state
|= GTMA_HAVE_LOAD
;
1573 if (requires_barrier (/*entry_block=*/NULL
, gimple_assign_lhs (stmt
), NULL
))
1574 *state
|= GTMA_HAVE_STORE
;
1577 /* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */
1580 examine_call_tm (unsigned *state
, gimple_stmt_iterator
*gsi
)
1582 gimple stmt
= gsi_stmt (*gsi
);
1585 if (is_tm_pure_call (stmt
))
1588 /* Check if this call is a transaction abort. */
1589 fn
= gimple_call_fndecl (stmt
);
1590 if (is_tm_abort (fn
))
1591 *state
|= GTMA_HAVE_ABORT
;
1593 /* Note that something may happen. */
1594 *state
|= GTMA_HAVE_LOAD
| GTMA_HAVE_STORE
;
1597 /* Lower a GIMPLE_TRANSACTION statement. */
1600 lower_transaction (gimple_stmt_iterator
*gsi
, struct walk_stmt_info
*wi
)
1602 gimple g
, stmt
= gsi_stmt (*gsi
);
1603 unsigned int *outer_state
= (unsigned int *) wi
->info
;
1604 unsigned int this_state
= 0;
1605 struct walk_stmt_info this_wi
;
1607 /* First, lower the body. The scanning that we do inside gives
1608 us some idea of what we're dealing with. */
1609 memset (&this_wi
, 0, sizeof (this_wi
));
1610 this_wi
.info
= (void *) &this_state
;
1611 walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt
),
1612 lower_sequence_tm
, NULL
, &this_wi
);
1614 /* If there was absolutely nothing transaction related inside the
1615 transaction, we may elide it. Likewise if this is a nested
1616 transaction and does not contain an abort. */
1618 || (!(this_state
& GTMA_HAVE_ABORT
) && outer_state
!= NULL
))
1621 *outer_state
|= this_state
;
1623 gsi_insert_seq_before (gsi
, gimple_transaction_body (stmt
),
1625 gimple_transaction_set_body (stmt
, NULL
);
1627 gsi_remove (gsi
, true);
1628 wi
->removed_stmt
= true;
1632 /* Wrap the body of the transaction in a try-finally node so that
1633 the commit call is always properly called. */
1634 g
= gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT
), 0);
1635 if (flag_exceptions
)
1638 gimple_seq n_seq
, e_seq
;
1640 n_seq
= gimple_seq_alloc_with_stmt (g
);
1643 g
= gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER
),
1644 1, integer_zero_node
);
1645 ptr
= create_tmp_var (ptr_type_node
, NULL
);
1646 gimple_call_set_lhs (g
, ptr
);
1647 gimple_seq_add_stmt (&e_seq
, g
);
1649 g
= gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH
),
1651 gimple_seq_add_stmt (&e_seq
, g
);
1653 g
= gimple_build_eh_else (n_seq
, e_seq
);
1656 g
= gimple_build_try (gimple_transaction_body (stmt
),
1657 gimple_seq_alloc_with_stmt (g
), GIMPLE_TRY_FINALLY
);
1658 gsi_insert_after (gsi
, g
, GSI_CONTINUE_LINKING
);
1660 gimple_transaction_set_body (stmt
, NULL
);
1662 /* If the transaction calls abort or if this is an outer transaction,
1663 add an "over" label afterwards. */
1664 if ((this_state
& (GTMA_HAVE_ABORT
))
1665 || (gimple_transaction_subcode (stmt
) & GTMA_IS_OUTER
))
1667 tree label
= create_artificial_label (UNKNOWN_LOCATION
);
1668 gimple_transaction_set_label (stmt
, label
);
1669 gsi_insert_after (gsi
, gimple_build_label (label
), GSI_CONTINUE_LINKING
);
1672 /* Record the set of operations found for use later. */
1673 this_state
|= gimple_transaction_subcode (stmt
) & GTMA_DECLARATION_MASK
;
1674 gimple_transaction_set_subcode (stmt
, this_state
);
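/* A hedged sketch of what lower_transaction produces for a body that cannot
   be elided (GIMPLE pseudo-dump):

	GIMPLE_TRANSACTION [label=over]	  <-- body moved out; the label is
					      added only if the body aborts
					      or the transaction is "outer"
	try {
	  ... original transaction body ...
	} finally {
	  __builtin___tm_commit ();	  <-- an EH_ELSE branch calling
	}				      __builtin___tm_commit_eh is
	over:				      added when exceptions are on
*/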
1677 /* Iterate through the statements in the sequence, lowering them all
1678 as appropriate for being in a transaction. */
1681 lower_sequence_tm (gimple_stmt_iterator
*gsi
, bool *handled_ops_p
,
1682 struct walk_stmt_info
*wi
)
1684 unsigned int *state
= (unsigned int *) wi
->info
;
1685 gimple stmt
= gsi_stmt (*gsi
);
1687 *handled_ops_p
= true;
1688 switch (gimple_code (stmt
))
1691 /* Only memory reads/writes need to be instrumented. */
1692 if (gimple_assign_single_p (stmt
))
1693 examine_assign_tm (state
, gsi
);
1697 examine_call_tm (state
, gsi
);
1701 *state
|= GTMA_MAY_ENTER_IRREVOCABLE
;
1704 case GIMPLE_TRANSACTION
:
1705 lower_transaction (gsi
, wi
);
1709 *handled_ops_p
= !gimple_has_substatements (stmt
);
1716 /* Iterate through the statements in the sequence, lowering them all
1717 as appropriate for being outside of a transaction. */
1720 lower_sequence_no_tm (gimple_stmt_iterator
*gsi
, bool *handled_ops_p
,
1721 struct walk_stmt_info
* wi
)
1723 gimple stmt
= gsi_stmt (*gsi
);
1725 if (gimple_code (stmt
) == GIMPLE_TRANSACTION
)
1727 *handled_ops_p
= true;
1728 lower_transaction (gsi
, wi
);
1731 *handled_ops_p
= !gimple_has_substatements (stmt
);
1736 /* Main entry point for flattening GIMPLE_TRANSACTION constructs. After
1737 this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
1738 been moved out, and all the data required for constructing a proper
1739 CFG has been recorded. */
1742 execute_lower_tm (void)
1744 struct walk_stmt_info wi
;
1747 /* Transactional clones aren't created until a later pass. */
1748 gcc_assert (!decl_is_tm_clone (current_function_decl
));
1750 body
= gimple_body (current_function_decl
);
1751 memset (&wi
, 0, sizeof (wi
));
1752 walk_gimple_seq_mod (&body
, lower_sequence_no_tm
, NULL
, &wi
);
1753 gimple_set_body (current_function_decl
, body
);
1760 const pass_data pass_data_lower_tm
=
1762 GIMPLE_PASS
, /* type */
1763 "tmlower", /* name */
1764 OPTGROUP_NONE
, /* optinfo_flags */
1765 true, /* has_gate */
1766 true, /* has_execute */
1767 TV_TRANS_MEM
, /* tv_id */
1768 PROP_gimple_lcf
, /* properties_required */
1769 0, /* properties_provided */
1770 0, /* properties_destroyed */
1771 0, /* todo_flags_start */
1772 0, /* todo_flags_finish */
1775 class pass_lower_tm
: public gimple_opt_pass
1778 pass_lower_tm (gcc::context
*ctxt
)
1779 : gimple_opt_pass (pass_data_lower_tm
, ctxt
)
1782 /* opt_pass methods: */
1783 bool gate () { return gate_tm (); }
1784 unsigned int execute () { return execute_lower_tm (); }
1786 }; // class pass_lower_tm
1791 make_pass_lower_tm (gcc::context
*ctxt
)
1793 return new pass_lower_tm (ctxt
);
1796 /* Collect region information for each transaction. */
1800 /* Link to the next unnested transaction. */
1801 struct tm_region
*next
;
1803 /* Link to the next inner transaction. */
1804 struct tm_region
*inner
;
1806 /* Link to the next outer transaction. */
1807 struct tm_region
*outer
;
1809 /* The GIMPLE_TRANSACTION statement beginning this transaction.
1810 After TM_MARK, this gets replaced by a call to
1811 BUILT_IN_TM_START. */
1812 gimple transaction_stmt
;
1814 /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
1815 BUILT_IN_TM_START, this field is true if the transaction is an
1816 outer transaction. */
1817 bool original_transaction_was_outer
;
1819 /* Return value from BUILT_IN_TM_START. */
1822 /* The entry block to this region. This will always be the first
1823 block of the body of the transaction. */
1824 basic_block entry_block
;
1826 /* The first block after an expanded call to _ITM_beginTransaction. */
1827 basic_block restart_block
;
1829 /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
1830 These blocks are still a part of the region (i.e., the border is
1831 inclusive). Note that this set is only complete for paths in the CFG
1832 starting at ENTRY_BLOCK, and that there is no exit block recorded for
1833 the edge to the "over" label. */
1836 /* The set of all blocks that have an TM_IRREVOCABLE call. */
1840 typedef struct tm_region
*tm_region_p
;
1842 /* True if there are pending edge statements to be committed for the
1843 current function being scanned in the tmmark pass. */
1844 bool pending_edge_inserts_p
;
1846 static struct tm_region
*all_tm_regions
;
1847 static bitmap_obstack tm_obstack
;
1850 /* A subroutine of tm_region_init. Record the existence of the
1851 GIMPLE_TRANSACTION statement in a tree of tm_region elements. */
1853 static struct tm_region
*
1854 tm_region_init_0 (struct tm_region
*outer
, basic_block bb
, gimple stmt
)
1856 struct tm_region
*region
;
1858 region
= (struct tm_region
*)
1859 obstack_alloc (&tm_obstack
.obstack
, sizeof (struct tm_region
));
1863 region
->next
= outer
->inner
;
1864 outer
->inner
= region
;
1868 region
->next
= all_tm_regions
;
1869 all_tm_regions
= region
;
1871 region
->inner
= NULL
;
1872 region
->outer
= outer
;
1874 region
->transaction_stmt
= stmt
;
1875 region
->original_transaction_was_outer
= false;
1876 region
->tm_state
= NULL
;
1878 /* There are either one or two edges out of the block containing
1879 the GIMPLE_TRANSACTION, one to the actual region and one to the
1880 "over" label if the region contains an abort. The former will
1881 always be the one marked FALLTHRU. */
1882 region
->entry_block
= FALLTHRU_EDGE (bb
)->dest
;
1884 region
->exit_blocks
= BITMAP_ALLOC (&tm_obstack
);
1885 region
->irr_blocks
= BITMAP_ALLOC (&tm_obstack
);
1890 /* A subroutine of tm_region_init. Record all the exit and
1891 irrevocable blocks in BB into the region's exit_blocks and
1892 irr_blocks bitmaps. Returns the new region being scanned. */
1894 static struct tm_region
*
1895 tm_region_init_1 (struct tm_region
*region
, basic_block bb
)
1897 gimple_stmt_iterator gsi
;
1901 || (!region
->irr_blocks
&& !region
->exit_blocks
))
1904 /* Check to see if this is the end of a region by seeing if it
1905 contains a call to __builtin_tm_commit{,_eh}. Note that the
1906 outermost region for DECL_IS_TM_CLONE need not collect this. */
1907 for (gsi
= gsi_last_bb (bb
); !gsi_end_p (gsi
); gsi_prev (&gsi
))
1910 if (gimple_code (g
) == GIMPLE_CALL
)
1912 tree fn
= gimple_call_fndecl (g
);
1913 if (fn
&& DECL_BUILT_IN_CLASS (fn
) == BUILT_IN_NORMAL
)
1915 if ((DECL_FUNCTION_CODE (fn
) == BUILT_IN_TM_COMMIT
1916 || DECL_FUNCTION_CODE (fn
) == BUILT_IN_TM_COMMIT_EH
)
1917 && region
->exit_blocks
)
1919 bitmap_set_bit (region
->exit_blocks
, bb
->index
);
1920 region
= region
->outer
;
1923 if (DECL_FUNCTION_CODE (fn
) == BUILT_IN_TM_IRREVOCABLE
)
1924 bitmap_set_bit (region
->irr_blocks
, bb
->index
);
1931 /* Collect all of the transaction regions within the current function
1932 and record them in ALL_TM_REGIONS. The REGION parameter may specify
1933 an "outermost" region for use by tm clones. */
1936 tm_region_init (struct tm_region
*region
)
1942 vec
<basic_block
> queue
= vNULL
;
1943 bitmap visited_blocks
= BITMAP_ALLOC (NULL
);
1944 struct tm_region
*old_region
;
1945 vec
<tm_region_p
> bb_regions
= vNULL
;
1947 all_tm_regions
= region
;
1948 bb
= single_succ (ENTRY_BLOCK_PTR
);
  /* We could store this information in bb->aux, but we may get called
     through get_all_tm_blocks() from another pass that may be already
     using bb->aux.  */
1953 bb_regions
.safe_grow_cleared (last_basic_block
);
1955 queue
.safe_push (bb
);
1956 bb_regions
[bb
->index
] = region
;
1960 region
= bb_regions
[bb
->index
];
1961 bb_regions
[bb
->index
] = NULL
;
1963 /* Record exit and irrevocable blocks. */
1964 region
= tm_region_init_1 (region
, bb
);
1966 /* Check for the last statement in the block beginning a new region. */
1968 old_region
= region
;
1969 if (g
&& gimple_code (g
) == GIMPLE_TRANSACTION
)
1970 region
= tm_region_init_0 (region
, bb
, g
);
1972 /* Process subsequent blocks. */
1973 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1974 if (!bitmap_bit_p (visited_blocks
, e
->dest
->index
))
1976 bitmap_set_bit (visited_blocks
, e
->dest
->index
);
1977 queue
.safe_push (e
->dest
);
1979 /* If the current block started a new region, make sure that only
1980 the entry block of the new region is associated with this region.
1981 Other successors are still part of the old region. */
1982 if (old_region
!= region
&& e
->dest
!= region
->entry_block
)
1983 bb_regions
[e
->dest
->index
] = old_region
;
1985 bb_regions
[e
->dest
->index
] = region
;
1988 while (!queue
.is_empty ());
1990 BITMAP_FREE (visited_blocks
);
1991 bb_regions
.release ();
1994 /* The "gate" function for all transactional memory expansion and optimization
1995 passes. We collect region information for each top-level transaction, and
1996 if we don't find any, we skip all of the TM passes. Each region will have
1997 all of the exit blocks recorded, and the originating statement. */
2005 calculate_dominance_info (CDI_DOMINATORS
);
2006 bitmap_obstack_initialize (&tm_obstack
);
2008 /* If the function is a TM_CLONE, then the entire function is the region. */
2009 if (decl_is_tm_clone (current_function_decl
))
2011 struct tm_region
*region
= (struct tm_region
*)
2012 obstack_alloc (&tm_obstack
.obstack
, sizeof (struct tm_region
));
2013 memset (region
, 0, sizeof (*region
));
2014 region
->entry_block
= single_succ (ENTRY_BLOCK_PTR
);
2015 /* For a clone, the entire function is the region. But even if
2016 we don't need to record any exit blocks, we may need to
2017 record irrevocable blocks. */
2018 region
->irr_blocks
= BITMAP_ALLOC (&tm_obstack
);
2020 tm_region_init (region
);
2024 tm_region_init (NULL
);
2026 /* If we didn't find any regions, cleanup and skip the whole tree
2027 of tm-related optimizations. */
2028 if (all_tm_regions
== NULL
)
2030 bitmap_obstack_release (&tm_obstack
);
2040 const pass_data pass_data_tm_init
=
2042 GIMPLE_PASS
, /* type */
2043 "*tminit", /* name */
2044 OPTGROUP_NONE
, /* optinfo_flags */
2045 true, /* has_gate */
2046 false, /* has_execute */
2047 TV_TRANS_MEM
, /* tv_id */
2048 ( PROP_ssa
| PROP_cfg
), /* properties_required */
2049 0, /* properties_provided */
2050 0, /* properties_destroyed */
2051 0, /* todo_flags_start */
2052 0, /* todo_flags_finish */
2055 class pass_tm_init
: public gimple_opt_pass
2058 pass_tm_init (gcc::context
*ctxt
)
2059 : gimple_opt_pass (pass_data_tm_init
, ctxt
)
2062 /* opt_pass methods: */
2063 bool gate () { return gate_tm_init (); }
2065 }; // class pass_tm_init
2070 make_pass_tm_init (gcc::context
*ctxt
)
2072 return new pass_tm_init (ctxt
);
2075 /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
2076 represented by STATE. */
2079 transaction_subcode_ior (struct tm_region
*region
, unsigned flags
)
2081 if (region
&& region
->transaction_stmt
)
2083 flags
|= gimple_transaction_subcode (region
->transaction_stmt
);
2084 gimple_transaction_set_subcode (region
->transaction_stmt
, flags
);
2088 /* Construct a memory load in a transactional context. Return the
2089 gimple statement performing the load, or NULL if there is no
2090 TM_LOAD builtin of the appropriate size to do the load.
2092 LOC is the location to use for the new statement(s). */
2095 build_tm_load (location_t loc
, tree lhs
, tree rhs
, gimple_stmt_iterator
*gsi
)
2097 enum built_in_function code
= END_BUILTINS
;
2098 tree t
, type
= TREE_TYPE (rhs
), decl
;
2101 if (type
== float_type_node
)
2102 code
= BUILT_IN_TM_LOAD_FLOAT
;
2103 else if (type
== double_type_node
)
2104 code
= BUILT_IN_TM_LOAD_DOUBLE
;
2105 else if (type
== long_double_type_node
)
2106 code
= BUILT_IN_TM_LOAD_LDOUBLE
;
2107 else if (TYPE_SIZE_UNIT (type
) != NULL
2108 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
)))
2110 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type
)))
2113 code
= BUILT_IN_TM_LOAD_1
;
2116 code
= BUILT_IN_TM_LOAD_2
;
2119 code
= BUILT_IN_TM_LOAD_4
;
2122 code
= BUILT_IN_TM_LOAD_8
;
2127 if (code
== END_BUILTINS
)
2129 decl
= targetm
.vectorize
.builtin_tm_load (type
);
2134 decl
= builtin_decl_explicit (code
);
2136 t
= gimplify_addr (gsi
, rhs
);
2137 gcall
= gimple_build_call (decl
, 1, t
);
2138 gimple_set_location (gcall
, loc
);
2140 t
= TREE_TYPE (TREE_TYPE (decl
));
2141 if (useless_type_conversion_p (type
, t
))
2143 gimple_call_set_lhs (gcall
, lhs
);
2144 gsi_insert_before (gsi
, gcall
, GSI_SAME_STMT
);
2151 temp
= create_tmp_reg (t
, NULL
);
2152 gimple_call_set_lhs (gcall
, temp
);
2153 gsi_insert_before (gsi
, gcall
, GSI_SAME_STMT
);
2155 t
= fold_build1 (VIEW_CONVERT_EXPR
, type
, temp
);
2156 g
= gimple_build_assign (lhs
, t
);
2157 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
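/* A hedged example of build_tm_load (types assumed): a load "x = g" of a
   4-byte int g inside a transaction becomes roughly

	x = BUILT_IN_TM_LOAD_4 (&g);

   and when the builtin's return type does not match the original type, the
   result goes through a temporary and a VIEW_CONVERT_EXPR as above.  */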
2164 /* Similarly for storing TYPE in a transactional context. */
2167 build_tm_store (location_t loc
, tree lhs
, tree rhs
, gimple_stmt_iterator
*gsi
)
2169 enum built_in_function code
= END_BUILTINS
;
2170 tree t
, fn
, type
= TREE_TYPE (rhs
), simple_type
;
2173 if (type
== float_type_node
)
2174 code
= BUILT_IN_TM_STORE_FLOAT
;
2175 else if (type
== double_type_node
)
2176 code
= BUILT_IN_TM_STORE_DOUBLE
;
2177 else if (type
== long_double_type_node
)
2178 code
= BUILT_IN_TM_STORE_LDOUBLE
;
2179 else if (TYPE_SIZE_UNIT (type
) != NULL
2180 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
)))
2182 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type
)))
2185 code
= BUILT_IN_TM_STORE_1
;
2188 code
= BUILT_IN_TM_STORE_2
;
2191 code
= BUILT_IN_TM_STORE_4
;
2194 code
= BUILT_IN_TM_STORE_8
;
2199 if (code
== END_BUILTINS
)
2201 fn
= targetm
.vectorize
.builtin_tm_store (type
);
2206 fn
= builtin_decl_explicit (code
);
2208 simple_type
= TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn
))));
2210 if (TREE_CODE (rhs
) == CONSTRUCTOR
)
2212 /* Handle the easy initialization to zero. */
2213 if (!CONSTRUCTOR_ELTS (rhs
))
2214 rhs
= build_int_cst (simple_type
, 0);
	/* ...otherwise punt to the caller and probably use
	   BUILT_IN_TM_MEMMOVE, because we can't wrap a
	   VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
	   valid gimple.  */
2224 else if (!useless_type_conversion_p (simple_type
, type
))
2229 temp
= create_tmp_reg (simple_type
, NULL
);
2230 t
= fold_build1 (VIEW_CONVERT_EXPR
, simple_type
, rhs
);
2231 g
= gimple_build_assign (temp
, t
);
2232 gimple_set_location (g
, loc
);
2233 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
2238 t
= gimplify_addr (gsi
, lhs
);
2239 gcall
= gimple_build_call (fn
, 2, t
, rhs
);
2240 gimple_set_location (gcall
, loc
);
2241 gsi_insert_before (gsi
, gcall
, GSI_SAME_STMT
);
/* Expand an assignment statement into transactional builtins.  */

static void
expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  bool store_p = requires_barrier (region->entry_block, lhs, NULL);
  bool load_p = requires_barrier (region->entry_block, rhs, NULL);
  gimple gcall = NULL;

  if (!load_p && !store_p)
    {
      /* Add thread private addresses to log if applicable.  */
      requires_barrier (region->entry_block, lhs, stmt);
      gsi_next (gsi);
      return;
    }

  // Remove original load/store statement.
  gsi_remove (gsi, true);

  if (load_p && !store_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      gcall = build_tm_load (loc, lhs, rhs, gsi);
    }
  else if (store_p && !load_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
      gcall = build_tm_store (loc, lhs, rhs, gsi);
    }
  if (!gcall)
    {
      tree lhs_addr, rhs_addr, tmp;

      if (load_p)
	transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      if (store_p)
	transaction_subcode_ior (region, GTMA_HAVE_STORE);

      /* ??? Figure out if there's any possible overlap between the LHS
	 and the RHS and if not, use MEMCPY.  */

      if (load_p && is_gimple_reg (lhs))
	{
	  tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
	  lhs_addr = build_fold_addr_expr (tmp);
	}
      else
	{
	  tmp = NULL_TREE;
	  lhs_addr = gimplify_addr (gsi, lhs);
	}
      rhs_addr = gimplify_addr (gsi, rhs);
      gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
				 3, lhs_addr, rhs_addr,
				 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
      gimple_set_location (gcall, loc);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

      if (tmp)
	{
	  gcall = gimple_build_assign (lhs, tmp);
	  gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
	}
    }

  /* Now that we have the load/store in its instrumented form, add
     thread private addresses to the log if applicable.  */
  if (!store_p)
    requires_barrier (region->entry_block, lhs, gcall);

  // The calls to build_tm_{store,load} above inserted the instrumented
  // call into the stream.
  // gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
}
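
/* Hedged example of the expansion above (builtin names are illustrative,
   taken from the libitm ABI conventions):

     y = g;    // only the read of G needs a barrier
	       //   -> y = _ITM_RU4 (&g);
     g = y;    // only the write to G needs a barrier
	       //   -> _ITM_WU4 (&g, y);
     s1 = s2;  // both sides need barriers, or no sized builtin exists
	       //   -> _ITM_memmoveRtWt (&s1, &s2, sizeof (s1));

   The last case is the BUILT_IN_TM_MEMMOVE fallback generated when
   GCALL is still NULL after the sized load/store attempts.  */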
/* Expand a call statement as appropriate for a transaction.  That is,
   either verify that the call does not affect the transaction, or
   redirect the call to a clone that handles transactions, or change
   the transaction state to IRREVOCABLE.  Return true if the call is
   one of the builtins that end a transaction.  */

static bool
expand_call_tm (struct tm_region *region,
		gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree lhs = gimple_call_lhs (stmt);
  tree fn_decl;
  struct cgraph_node *node;
  bool retval = false;

  fn_decl = gimple_call_fndecl (stmt);

  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
      || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
    transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
    transaction_subcode_ior (region, GTMA_HAVE_STORE);

  if (is_tm_pure_call (stmt))
    return false;

  if (fn_decl)
    retval = is_tm_ending_fndecl (fn_decl);
  if (!retval)
    {
      /* Assume all non-const/pure calls write to memory, except
	 transaction ending builtins.  */
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
    }

  /* For indirect calls, we already generated a call into the runtime.  */
  if (!fn_decl)
    {
      tree fn = gimple_call_fn (stmt);

      /* We are guaranteed never to go irrevocable on a safe or pure
	 call, and the pure call was handled above.  */
      if (is_tm_safe (fn))
	return false;
      else
	transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

      return false;
    }

  node = cgraph_get_node (fn_decl);
  /* All calls should have cgraph here.  */
  if (!node)
    {
      /* We can have a nodeless call here if some pass after IPA-tm
	 added uninstrumented calls.  For example, loop distribution
	 can transform certain loop constructs into __builtin_mem*
	 calls.  In this case, see if we have a suitable TM
	 replacement and fill in the gaps.  */
      gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
      enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
      gcc_assert (code == BUILT_IN_MEMCPY
		  || code == BUILT_IN_MEMMOVE
		  || code == BUILT_IN_MEMSET);

      tree repl = find_tm_replacement_function (fn_decl);
      if (repl)
	{
	  gimple_call_set_fndecl (stmt, repl);
	  node = cgraph_create_node (repl);
	  node->local.tm_may_enter_irr = false;
	  return expand_call_tm (region, gsi);
	}
      gcc_unreachable ();
    }
  if (node->local.tm_may_enter_irr)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  if (is_tm_abort (fn_decl))
    {
      transaction_subcode_ior (region, GTMA_HAVE_ABORT);
      retval = true;
    }

  /* Instrument the store if needed.

     If the assignment happens inside the function call (return slot
     optimization), there is no instrumentation to be done, since
     the callee should have done the right thing.  */
  if (lhs && requires_barrier (region->entry_block, lhs, stmt)
      && !gimple_call_return_slot_opt_p (stmt))
    {
      tree tmp = create_tmp_reg (TREE_TYPE (lhs), NULL);
      location_t loc = gimple_location (stmt);
      edge fallthru_edge = NULL;

      /* Remember if the call was going to throw.  */
      if (stmt_can_throw_internal (stmt))
	{
	  edge_iterator ei;
	  edge e;
	  basic_block bb = gimple_bb (stmt);

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (e->flags & EDGE_FALLTHRU)
	      {
		fallthru_edge = e;
		break;
	      }
	}

      gimple_call_set_lhs (stmt, tmp);
      stmt = gimple_build_assign (lhs, tmp);
      gimple_set_location (stmt, loc);

      /* We cannot throw in the middle of a BB.  If the call was going
	 to throw, place the instrumentation on the fallthru edge, so
	 the call remains the last statement in the block.  */
      if (fallthru_edge)
	{
	  gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (stmt);
	  gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
	  expand_assign_tm (region, &fallthru_gsi);
	  gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
	  pending_edge_inserts_p = true;
	}
      else
	{
	  gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
	  expand_assign_tm (region, gsi);
	}

      transaction_subcode_ior (region, GTMA_HAVE_STORE);
    }

  return retval;
}
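
/* Illustrative example of the LHS handling above (not a literal dump):

     __transaction_atomic { y = foo (x); }

   when Y needs a barrier and the return-slot optimization does not
   apply, the call is rewritten so that the store goes through
   expand_assign_tm:

     tmp = foo (x);
     y = tmp;        // this assignment is then instrumented

   If FOO can throw internally, the "y = tmp" instrumentation is queued
   on the fallthru edge instead, so the call stays last in its block.  */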
/* Expand all statements in BB as appropriate for being inside
   a transaction.  */

static void
expand_block_tm (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
	{
	case GIMPLE_ASSIGN:
	  /* Only memory reads/writes need to be instrumented.  */
	  if (gimple_assign_single_p (stmt)
	      && !gimple_clobber_p (stmt))
	    {
	      expand_assign_tm (region, &gsi);
	      continue;
	    }
	  break;

	case GIMPLE_CALL:
	  if (expand_call_tm (region, &gsi))
	    return;
	  break;

	default:
	  break;
	}
      if (!gsi_end_p (gsi))
	gsi_next (&gsi);
    }
}
/* Return the list of basic-blocks in REGION.

   STOP_AT_IRREVOCABLE_P is true if caller is uninterested in blocks
   following a TM_IRREVOCABLE call.

   INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the
   uninstrumented code path blocks in the list of basic blocks
   returned, false otherwise.  */

static vec<basic_block>
get_tm_region_blocks (basic_block entry_block,
		      bitmap exit_blocks,
		      bitmap irr_blocks,
		      bitmap all_region_blocks,
		      bool stop_at_irrevocable_p,
		      bool include_uninstrumented_p = true)
{
  vec<basic_block> bbs = vNULL;
  unsigned i;
  edge e;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  i = 0;
  bbs.safe_push (entry_block);
  bitmap_set_bit (visited_blocks, entry_block->index);

  do
    {
      basic_block bb = bbs[i++];

      if (exit_blocks
	  && bitmap_bit_p (exit_blocks, bb->index))
	continue;

      if (stop_at_irrevocable_p
	  && irr_blocks
	  && bitmap_bit_p (irr_blocks, bb->index))
	continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if ((include_uninstrumented_p
	     || !(e->flags & EDGE_TM_UNINSTRUMENTED))
	    && !bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    bbs.safe_push (e->dest);
	  }
    }
  while (i < bbs.length ());

  if (all_region_blocks)
    bitmap_ior_into (all_region_blocks, visited_blocks);

  BITMAP_FREE (visited_blocks);
  return bbs;
}
// Callback data for collect_bb2reg.
struct bb2reg_stuff
{
  vec<tm_region_p> *bb2reg;
  bool include_uninstrumented_p;
};

// Callback for expand_regions, collect innermost region data for each bb.
static void *
collect_bb2reg (struct tm_region *region, void *data)
{
  struct bb2reg_stuff *stuff = (struct bb2reg_stuff *) data;
  vec<tm_region_p> *bb2reg = stuff->bb2reg;
  vec<basic_block> queue;
  unsigned int i;
  basic_block bb;

  queue = get_tm_region_blocks (region->entry_block,
				region->exit_blocks,
				region->irr_blocks,
				NULL,
				/*stop_at_irr_p=*/true,
				stuff->include_uninstrumented_p);

  // We expect expand_region to perform a post-order traversal of the region
  // tree.  Therefore the last region seen for any bb is the innermost.
  FOR_EACH_VEC_ELT (queue, i, bb)
    (*bb2reg)[bb->index] = region;

  queue.release ();
  return NULL;
}

// Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
// which a basic block belongs.  Note that we only consider the instrumented
// code paths for the region; the uninstrumented code paths are ignored if
// INCLUDE_UNINSTRUMENTED_P is false.
//
// ??? This data is very similar to the bb_regions array that is collected
// during tm_region_init.  Or, rather, this data is similar to what could
// be used within tm_region_init.  The actual computation in tm_region_init
// begins and ends with bb_regions entirely full of NULL pointers, due to
// the way in which pointers are swapped in and out of the array.
//
// ??? Our callers expect that blocks are not shared between transactions.
// When the optimizers get too smart, and blocks are shared, then during
// the tm_mark phase we'll add log entries to only one of the two transactions,
// and in the tm_edge phase we'll add edges to the CFG that create invalid
// cycles.  The symptom being SSA defs that do not dominate their uses.
// Note that the optimizers were locally correct with their transformation,
// as we have no info within the program that suggests that the blocks cannot
// be shared.
//
// ??? There is currently a hack inside tree-ssa-pre.c to work around the
// only known instance of this block sharing.

static vec<tm_region_p>
get_bb_regions_instrumented (bool traverse_clones,
			     bool include_uninstrumented_p)
{
  unsigned n = last_basic_block;
  struct bb2reg_stuff stuff;
  vec<tm_region_p> ret;

  ret.create (n);
  ret.safe_grow_cleared (n);
  stuff.bb2reg = &ret;
  stuff.include_uninstrumented_p = include_uninstrumented_p;
  expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);

  return ret;
}
/* Set the IN_TRANSACTION for all gimple statements that appear in a
   transaction.  */

void
compute_transaction_bits (void)
{
  struct tm_region *region;
  vec<basic_block> queue;
  unsigned int i;
  basic_block bb;

  /* ?? Perhaps we need to abstract gate_tm_init further, because we
     certainly don't need it to calculate CDI_DOMINATOR info.  */
  gate_tm_init ();

  FOR_EACH_BB (bb)
    bb->flags &= ~BB_IN_TRANSACTION;

  for (region = all_tm_regions; region; region = region->next)
    {
      queue = get_tm_region_blocks (region->entry_block,
				    region->exit_blocks,
				    region->irr_blocks,
				    NULL,
				    /*stop_at_irr_p=*/true);
      for (i = 0; queue.iterate (i, &bb); ++i)
	bb->flags |= BB_IN_TRANSACTION;
      queue.release ();
    }

  if (all_tm_regions)
    bitmap_obstack_release (&tm_obstack);
}
/* Replace the GIMPLE_TRANSACTION in this region with the corresponding
   call to BUILT_IN_TM_START.  */

static void *
expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
{
  tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
  basic_block transaction_bb = gimple_bb (region->transaction_stmt);
  tree tm_state = region->tm_state;
  tree tm_state_type = TREE_TYPE (tm_state);
  edge abort_edge = NULL;
  edge inst_edge = NULL;
  edge uninst_edge = NULL;
  edge fallthru_edge = NULL;

  // Identify the various successors of the transaction start.
  {
    edge_iterator i;
    edge e;
    FOR_EACH_EDGE (e, i, transaction_bb->succs)
      {
	if (e->flags & EDGE_TM_ABORT)
	  abort_edge = e;
	else if (e->flags & EDGE_TM_UNINSTRUMENTED)
	  uninst_edge = e;
	else
	  inst_edge = e;
	if (e->flags & EDGE_FALLTHRU)
	  fallthru_edge = e;
      }
  }

  /* ??? There are plenty of bits here we're not computing.  */
  {
    int subcode = gimple_transaction_subcode (region->transaction_stmt);
    int flags = 0;
    if (subcode & GTMA_DOES_GO_IRREVOCABLE)
      flags |= PR_DOESGOIRREVOCABLE;
    if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
      flags |= PR_HASNOIRREVOCABLE;
    /* If the transaction does not have an abort in lexical scope and is not
       marked as an outer transaction, then it will never abort.  */
    if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
      flags |= PR_HASNOABORT;
    if ((subcode & GTMA_HAVE_STORE) == 0)
      flags |= PR_READONLY;
    if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
      flags |= PR_INSTRUMENTEDCODE;
    if (uninst_edge)
      flags |= PR_UNINSTRUMENTEDCODE;
    if (subcode & GTMA_IS_OUTER)
      region->original_transaction_was_outer = true;
    tree t = build_int_cst (tm_state_type, flags);
    gimple call = gimple_build_call (tm_start, 1, t);
    gimple_call_set_lhs (call, tm_state);
    gimple_set_location (call, gimple_location (region->transaction_stmt));

    // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
    gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
    gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
    gsi_insert_before (&gsi, call, GSI_SAME_STMT);
    gsi_remove (&gsi, true);
    region->transaction_stmt = call;
  }
  // Generate log saves.
  if (!tm_log_save_addresses.is_empty ())
    tm_log_emit_saves (region->entry_block, transaction_bb);

  // In the beginning, we've no tests to perform on transaction restart.
  // Note that after this point, transaction_bb becomes the "most recent
  // block containing tests for the transaction".
  region->restart_block = region->entry_block;

  // Generate log restores.
  if (!tm_log_save_addresses.is_empty ())
    {
      basic_block test_bb = create_empty_bb (transaction_bb);
      basic_block code_bb = create_empty_bb (test_bb);
      basic_block join_bb = create_empty_bb (code_bb);
      if (current_loops && transaction_bb->loop_father)
	{
	  add_bb_to_loop (test_bb, transaction_bb->loop_father);
	  add_bb_to_loop (code_bb, transaction_bb->loop_father);
	  add_bb_to_loop (join_bb, transaction_bb->loop_father);
	}
      if (region->restart_block == region->entry_block)
	region->restart_block = test_bb;

      tree t1 = create_tmp_reg (tm_state_type, NULL);
      tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
      gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
						  tm_state, t2);
      gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (tm_state_type, 0);
      stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      tm_log_emit_restores (region->entry_block, code_bb);

      edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
      edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
      edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
      redirect_edge_pred (fallthru_edge, join_bb);

      join_bb->frequency = test_bb->frequency = transaction_bb->frequency;
      join_bb->count = test_bb->count = transaction_bb->count;

      ei->probability = PROB_ALWAYS;
      et->probability = PROB_LIKELY;
      ef->probability = PROB_UNLIKELY;
      et->count = apply_probability (test_bb->count, et->probability);
      ef->count = apply_probability (test_bb->count, ef->probability);

      code_bb->count = et->count;
      code_bb->frequency = EDGE_FREQUENCY (et);

      transaction_bb = join_bb;
    }
  // If we have an ABORT edge, create a test to perform the abort.
  if (abort_edge)
    {
      basic_block test_bb = create_empty_bb (transaction_bb);
      if (current_loops && transaction_bb->loop_father)
	add_bb_to_loop (test_bb, transaction_bb->loop_father);
      if (region->restart_block == region->entry_block)
	region->restart_block = test_bb;

      tree t1 = create_tmp_reg (tm_state_type, NULL);
      tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
      gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
						  tm_state, t2);
      gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (tm_state_type, 0);
      stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
      test_bb->frequency = transaction_bb->frequency;
      test_bb->count = transaction_bb->count;
      ei->probability = PROB_ALWAYS;

      // Not abort edge.  If both are live, chose one at random as we'll
      // be fixing that up below.
      redirect_edge_pred (fallthru_edge, test_bb);
      fallthru_edge->flags = EDGE_FALSE_VALUE;
      fallthru_edge->probability = PROB_VERY_LIKELY;
      fallthru_edge->count
	= apply_probability (test_bb->count, fallthru_edge->probability);

      redirect_edge_pred (abort_edge, test_bb);
      abort_edge->flags = EDGE_TRUE_VALUE;
      abort_edge->probability = PROB_VERY_UNLIKELY;
      abort_edge->count
	= apply_probability (test_bb->count, abort_edge->probability);

      transaction_bb = test_bb;
    }
  // If we have both instrumented and uninstrumented code paths, select one.
  if (inst_edge && uninst_edge)
    {
      basic_block test_bb = create_empty_bb (transaction_bb);
      if (current_loops && transaction_bb->loop_father)
	add_bb_to_loop (test_bb, transaction_bb->loop_father);
      if (region->restart_block == region->entry_block)
	region->restart_block = test_bb;

      tree t1 = create_tmp_reg (tm_state_type, NULL);
      tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);

      gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
						  tm_state, t2);
      gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (tm_state_type, 0);
      stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      // Create the edge into test_bb first, as we want to copy values
      // out of the fallthru edge.
      edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
      e->probability = fallthru_edge->probability;
      test_bb->count = e->count = fallthru_edge->count;
      test_bb->frequency = EDGE_FREQUENCY (e);

      // Now update the edges to the inst/uninst implementations.
      // For now assume that the paths are equally likely.  When using HTM,
      // we'll try the uninst path first and fallback to inst path if htm
      // buffers are exceeded.  Without HTM we start with the inst path and
      // use the uninst path when falling back to serial mode.
      redirect_edge_pred (inst_edge, test_bb);
      inst_edge->flags = EDGE_FALSE_VALUE;
      inst_edge->probability = REG_BR_PROB_BASE / 2;
      inst_edge->count
	= apply_probability (test_bb->count, inst_edge->probability);

      redirect_edge_pred (uninst_edge, test_bb);
      uninst_edge->flags = EDGE_TRUE_VALUE;
      uninst_edge->probability = REG_BR_PROB_BASE / 2;
      uninst_edge->count
	= apply_probability (test_bb->count, uninst_edge->probability);
    }
  // If we have no previous special cases, and we have PHIs at the beginning
  // of the atomic region, this means we have a loop at the beginning of the
  // atomic region that shares the first block.  This can cause problems with
  // the transaction restart abnormal edges to be added in the tm_edges pass.
  // Solve this by adding a new empty block to receive the abnormal edges.
  if (region->restart_block == region->entry_block
      && phi_nodes (region->entry_block))
    {
      basic_block empty_bb = create_empty_bb (transaction_bb);
      region->restart_block = empty_bb;
      if (current_loops && transaction_bb->loop_father)
	add_bb_to_loop (empty_bb, transaction_bb->loop_father);

      redirect_edge_pred (fallthru_edge, empty_bb);
      make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
    }

  return NULL;
}
/* Generate the temporary to be used for the return value of
   BUILT_IN_TM_START.  */

static void *
generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
{
  tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
  region->tm_state =
    create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");

  // Reset the subcode, post optimizations.  We'll fill this in
  // again as we process blocks.
  if (region->exit_blocks)
    {
      unsigned int subcode
	= gimple_transaction_subcode (region->transaction_stmt);

      if (subcode & GTMA_DOES_GO_IRREVOCABLE)
	subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
		    | GTMA_MAY_ENTER_IRREVOCABLE
		    | GTMA_HAS_NO_INSTRUMENTATION);
      else
	subcode &= GTMA_DECLARATION_MASK;
      gimple_transaction_set_subcode (region->transaction_stmt, subcode);
    }

  return NULL;
}
// Propagate flags from inner transactions outwards.
static void
propagate_tm_flags_out (struct tm_region *region)
{
  if (region == NULL)
    return;
  propagate_tm_flags_out (region->inner);

  if (region->outer && region->outer->transaction_stmt)
    {
      unsigned s = gimple_transaction_subcode (region->transaction_stmt);
      s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
	    | GTMA_MAY_ENTER_IRREVOCABLE);
      s |= gimple_transaction_subcode (region->outer->transaction_stmt);
      gimple_transaction_set_subcode (region->outer->transaction_stmt, s);
    }

  propagate_tm_flags_out (region->next);
}
/* Entry point to the MARK phase of TM expansion.  Here we replace
   transactional memory statements with calls to builtins, and function
   calls with their transactional clones (if available).  But we don't
   yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges.  */

static unsigned int
execute_tm_mark (void)
{
  pending_edge_inserts_p = false;

  expand_regions (all_tm_regions, generate_tm_state, NULL,
		  /*traverse_clones=*/true);

  vec<tm_region_p> bb_regions
    = get_bb_regions_instrumented (/*traverse_clones=*/true,
				   /*include_uninstrumented_p=*/false);
  struct tm_region *r;
  unsigned i;

  // Expand memory operations into calls into the runtime.
  // This collects log entries as well.
  FOR_EACH_VEC_ELT (bb_regions, i, r)
    if (r != NULL)
      {
	if (r->transaction_stmt)
	  {
	    unsigned sub = gimple_transaction_subcode (r->transaction_stmt);

	    /* If we're sure to go irrevocable, there won't be
	       anything to expand, since the run-time will go
	       irrevocable right away.  */
	    if (sub & GTMA_DOES_GO_IRREVOCABLE
		&& sub & GTMA_MAY_ENTER_IRREVOCABLE)
	      continue;
	  }
	expand_block_tm (r, BASIC_BLOCK (i));
      }

  bb_regions.release ();

  // Propagate flags from inner transactions outwards.
  propagate_tm_flags_out (all_tm_regions);

  // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
  expand_regions (all_tm_regions, expand_transaction, NULL,
		  /*traverse_clones=*/false);

  if (pending_edge_inserts_p)
    gsi_commit_edge_inserts ();
  free_dominance_info (CDI_DOMINATORS);
  return 0;
}
namespace {

const pass_data pass_data_tm_mark =
{
  GIMPLE_PASS, /* type */
  "tmmark", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  false, /* has_gate */
  true, /* has_execute */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_update_ssa | TODO_verify_ssa ), /* todo_flags_finish */
};

class pass_tm_mark : public gimple_opt_pass
{
public:
  pass_tm_mark (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_mark, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute () { return execute_tm_mark (); }

}; // class pass_tm_mark

} // anon namespace

gimple_opt_pass *
make_pass_tm_mark (gcc::context *ctxt)
{
  return new pass_tm_mark (ctxt);
}
/* Create an abnormal edge from STMT at iter, splitting the block
   as necessary.  Adjust *PNEXT as needed for the split block.  */

static void
split_bb_make_tm_edge (gimple stmt, basic_block dest_bb,
		       gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
{
  basic_block bb = gimple_bb (stmt);
  if (!gsi_one_before_end_p (iter))
    {
      edge e = split_block (bb, stmt);
      *pnext = gsi_start_bb (e->dest);
    }
  make_edge (bb, dest_bb, EDGE_ABNORMAL);

  // Record the need for the edge for the benefit of the rtl passes.
  if (cfun->gimple_df->tm_restart == NULL)
    cfun->gimple_df->tm_restart = htab_create_ggc (31, struct_ptr_hash,
						   struct_ptr_eq, ggc_free);

  struct tm_restart_node dummy;
  dummy.stmt = stmt;
  dummy.label_or_list = gimple_block_label (dest_bb);

  void **slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, INSERT);
  struct tm_restart_node *n = (struct tm_restart_node *) *slot;
  if (n == NULL)
    {
      n = ggc_alloc_tm_restart_node ();
      *n = dummy;
    }
  else
    {
      tree old = n->label_or_list;
      if (TREE_CODE (old) == LABEL_DECL)
	old = tree_cons (NULL, old, NULL);
      n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
    }
  *slot = n;
}
/* Split block BB as necessary for every builtin function we added, and
   wire up the abnormal back edges implied by the transaction restart.  */

static void
expand_block_edges (struct tm_region *const region, basic_block bb)
{
  gimple_stmt_iterator gsi, next_gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
    {
      gimple stmt = gsi_stmt (gsi);

      next_gsi = gsi;
      gsi_next (&next_gsi);

      // ??? Shouldn't we split for any non-pure, non-irrevocable function?
      if (gimple_code (stmt) != GIMPLE_CALL
	  || (gimple_call_flags (stmt) & ECF_TM_BUILTIN) == 0)
	continue;

      if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)) == BUILT_IN_TM_ABORT)
	{
	  // If we have a ``_transaction_cancel [[outer]]'', there is only
	  // one abnormal edge: to the transaction marked OUTER.
	  // All compiler-generated instances of BUILT_IN_TM_ABORT have a
	  // constant argument, which we can examine here.  Users invoking
	  // TM_ABORT directly get what they deserve.
	  tree arg = gimple_call_arg (stmt, 0);
	  if (TREE_CODE (arg) == INTEGER_CST
	      && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
	      && !decl_is_tm_clone (current_function_decl))
	    {
	      // Find the GTMA_IS_OUTER transaction.
	      for (struct tm_region *o = region; o; o = o->outer)
		if (o->original_transaction_was_outer)
		  {
		    split_bb_make_tm_edge (stmt, o->restart_block,
					   gsi, &next_gsi);
		    break;
		  }

	      // Otherwise, the front-end should have semantically checked
	      // outer aborts, but in either case the target region is not
	      // within this function.
	      continue;
	    }

	  // Non-outer, TM aborts have an abnormal edge to the inner-most
	  // transaction, the one being aborted;
	  split_bb_make_tm_edge (stmt, region->restart_block, gsi, &next_gsi);
	}

      // All TM builtins have an abnormal edge to the outer-most transaction.
      // We never restart inner transactions.  For tm clones, we know a-priori
      // that the outer-most transaction is outside the function.
      if (decl_is_tm_clone (current_function_decl))
	continue;

      if (cfun->gimple_df->tm_restart == NULL)
	cfun->gimple_df->tm_restart
	  = htab_create_ggc (31, struct_ptr_hash, struct_ptr_eq, ggc_free);

      // All TM builtins have an abnormal edge to the outer-most transaction.
      // We never restart inner transactions.
      for (struct tm_region *o = region; o; o = o->outer)
	if (!o->outer)
	  {
	    split_bb_make_tm_edge (stmt, o->restart_block, gsi, &next_gsi);
	    break;
	  }

      // Delete any tail-call annotation that may have been added.
      // The tail-call pass may have mis-identified the commit as being
      // a candidate because we had not yet added this restart edge.
      gimple_call_set_tail (stmt, false);
    }
}
/* Entry point to the final expansion of transactional nodes.  */

static unsigned int
execute_tm_edges (void)
{
  vec<tm_region_p> bb_regions
    = get_bb_regions_instrumented (/*traverse_clones=*/false,
				   /*include_uninstrumented_p=*/true);
  struct tm_region *r;
  unsigned i;

  FOR_EACH_VEC_ELT (bb_regions, i, r)
    if (r != NULL)
      expand_block_edges (r, BASIC_BLOCK (i));

  bb_regions.release ();

  /* We've got to release the dominance info now, to indicate that it
     must be rebuilt completely.  Otherwise we'll crash trying to update
     the SSA web in the TODO section following this pass.  */
  free_dominance_info (CDI_DOMINATORS);
  bitmap_obstack_release (&tm_obstack);
  all_tm_regions = NULL;

  return 0;
}
namespace {

const pass_data pass_data_tm_edges =
{
  GIMPLE_PASS, /* type */
  "tmedge", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  false, /* has_gate */
  true, /* has_execute */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_update_ssa | TODO_verify_ssa ), /* todo_flags_finish */
};

class pass_tm_edges : public gimple_opt_pass
{
public:
  pass_tm_edges (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_edges, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute () { return execute_tm_edges (); }

}; // class pass_tm_edges

} // anon namespace

gimple_opt_pass *
make_pass_tm_edges (gcc::context *ctxt)
{
  return new pass_tm_edges (ctxt);
}
/* Helper function for expand_regions.  Expand REGION and recurse to
   the inner region.  Call CALLBACK on each region.  CALLBACK returns
   NULL to continue the traversal, otherwise a non-null value which
   this function will return as well.  TRAVERSE_CLONES is true if we
   should traverse transactional clones.  */

static void *
expand_regions_1 (struct tm_region *region,
		  void *(*callback)(struct tm_region *, void *),
		  void *data,
		  bool traverse_clones)
{
  void *retval = NULL;
  if (region->exit_blocks
      || (traverse_clones && decl_is_tm_clone (current_function_decl)))
    {
      retval = callback (region, data);
      if (retval)
	return retval;
    }
  if (region->inner)
    {
      retval = expand_regions (region->inner, callback, data, traverse_clones);
      if (retval)
	return retval;
    }
  return retval;
}

/* Traverse the regions enclosed and including REGION.  Execute
   CALLBACK for each region, passing DATA.  CALLBACK returns NULL to
   continue the traversal, otherwise a non-null value which this
   function will return as well.  TRAVERSE_CLONES is true if we should
   traverse transactional clones.  */

static void *
expand_regions (struct tm_region *region,
		void *(*callback)(struct tm_region *, void *),
		void *data,
		bool traverse_clones)
{
  void *retval = NULL;
  while (region)
    {
      retval = expand_regions_1 (region, callback, data, traverse_clones);
      if (retval)
	return retval;

      region = region->next;
    }
  return retval;
}
/* A unique TM memory operation.  */
typedef struct tm_memop
{
  /* Unique ID that all memory operations to the same location have.  */
  unsigned int value_id;
  /* Address of load/store.  */
  tree addr;
} tm_memop_t;

/* TM memory operation hashtable helpers.  */

struct tm_memop_hasher : typed_free_remove <tm_memop>
{
  typedef tm_memop value_type;
  typedef tm_memop compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Htab support.  Return a hash value for a `tm_memop'.  */
inline hashval_t
tm_memop_hasher::hash (const value_type *mem)
{
  tree addr = mem->addr;
  /* We drill down to the SSA_NAME/DECL for the hash, but equality is
     actually done with operand_equal_p (see tm_memop_eq).  */
  if (TREE_CODE (addr) == ADDR_EXPR)
    addr = TREE_OPERAND (addr, 0);
  return iterative_hash_expr (addr, 0);
}

/* Htab support.  Return true if two tm_memop's are the same.  */
inline bool
tm_memop_hasher::equal (const value_type *mem1, const compare_type *mem2)
{
  return operand_equal_p (mem1->addr, mem2->addr, 0);
}

/* Sets for solving data flow equations in the memory optimization pass.  */
struct tm_memopt_bitmaps
{
  /* Stores available to this BB upon entry.  Basically, stores that
     dominate this BB.  */
  bitmap store_avail_in;
  /* Stores available at the end of this BB.  */
  bitmap store_avail_out;
  bitmap store_antic_in;
  bitmap store_antic_out;
  /* Reads available to this BB upon entry.  Basically, reads that
     dominate this BB.  */
  bitmap read_avail_in;
  /* Reads available at the end of this BB.  */
  bitmap read_avail_out;
  /* Reads performed in this BB.  */
  bitmap read_local;
  /* Writes performed in this BB.  */
  bitmap store_local;

  /* Temporary storage for pass.  */
  /* Is the current BB in the worklist?  */
  bool avail_in_worklist_p;
  /* Have we visited this BB?  */
  bool visited_p;
};

static bitmap_obstack tm_memopt_obstack;

/* Unique counter for TM loads and stores.  Loads and stores of the
   same address get the same ID.  */
static unsigned int tm_memopt_value_id;
static hash_table <tm_memop_hasher> tm_memopt_value_numbers;

#define STORE_AVAIL_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
#define STORE_AVAIL_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
#define STORE_ANTIC_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
#define STORE_ANTIC_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
#define READ_AVAIL_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
#define READ_AVAIL_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
#define READ_LOCAL(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
#define STORE_LOCAL(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
#define AVAIL_IN_WORKLIST_P(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
#define BB_VISITED_P(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
/* Given a TM load/store in STMT, return the value number for the address
   it accesses.  */

static unsigned int
tm_memopt_value_number (gimple stmt, enum insert_option op)
{
  struct tm_memop tmpmem, *mem;
  tm_memop **slot;

  gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
  tmpmem.addr = gimple_call_arg (stmt, 0);
  slot = tm_memopt_value_numbers.find_slot (&tmpmem, op);
  if (*slot)
    mem = *slot;
  else if (op == INSERT)
    {
      mem = XNEW (struct tm_memop);
      *slot = mem;
      mem->value_id = tm_memopt_value_id++;
      mem->addr = tmpmem.addr;
    }
  else
    gcc_unreachable ();
  return mem->value_id;
}
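
/* Illustrative example (builtin names are an assumption): two TM
   operations on the same address,

     _ITM_WU4 (&x, 1);       // value number -> N
     ... = _ITM_RU4 (&x);    // same address, value number -> N again

   share a single value id, which is what allows the AVAIL/ANTIC sets
   below to recognize them as touching the same location.  */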
/* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL.  */

static void
tm_memopt_accumulate_memops (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      bitmap bits;
      unsigned int loc;

      if (is_tm_store (stmt))
	bits = STORE_LOCAL (bb);
      else if (is_tm_load (stmt))
	bits = READ_LOCAL (bb);
      else
	continue;

      loc = tm_memopt_value_number (stmt, INSERT);
      bitmap_set_bit (bits, loc);
      if (dump_file)
	{
	  fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
		   is_tm_load (stmt) ? "LOAD" : "STORE", loc,
		   gimple_bb (stmt)->index);
	  print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
	  fprintf (dump_file, "\n");
	}
    }
}
/* Prettily dump one of the memopt sets.  BITS is the bitmap to dump.  */

static void
dump_tm_memopt_set (const char *set_name, bitmap bits)
{
  unsigned i;
  bitmap_iterator bi;
  const char *comma = "";

  fprintf (dump_file, "TM memopt: %s: [", set_name);
  EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
    {
      hash_table <tm_memop_hasher>::iterator hi;
      struct tm_memop *mem = NULL;

      /* Yeah, yeah, yeah.  Whatever.  This is just for debugging.  */
      FOR_EACH_HASH_TABLE_ELEMENT (tm_memopt_value_numbers, mem, tm_memop_t, hi)
	if (mem->value_id == i)
	  break;
      gcc_assert (mem->value_id == i);
      fprintf (dump_file, "%s", comma);
      comma = ", ";
      print_generic_expr (dump_file, mem->addr, 0);
    }
  fprintf (dump_file, "]\n");
}

/* Prettily dump all of the memopt sets in BLOCKS.  */

static void
dump_tm_memopt_sets (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    {
      fprintf (dump_file, "------------BB %d---------\n", bb->index);
      dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
      dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
      dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
      dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
      dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
      dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
    }
}
/* Compute {STORE,READ}_AVAIL_IN for the basic block BB.  */

static void
tm_memopt_compute_avin (basic_block bb)
{
  edge e;
  unsigned ix;

  /* Seed with the AVOUT of any predecessor.  */
  for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
    {
      e = EDGE_PRED (bb, ix);
      /* Make sure we have already visited this BB, and is thus
	 initialized.

	 If e->src->aux is NULL, this predecessor is actually on an
	 enclosing transaction.  We only care about the current
	 transaction, so ignore it.  */
      if (e->src->aux && BB_VISITED_P (e->src))
	{
	  bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
	  bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
	  break;
	}
    }

  for (; ix < EDGE_COUNT (bb->preds); ix++)
    {
      e = EDGE_PRED (bb, ix);
      if (e->src->aux && BB_VISITED_P (e->src))
	{
	  bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
	  bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
	}
    }

  BB_VISITED_P (bb) = true;
}
/* Compute the STORE_ANTIC_IN for the basic block BB.  */

static void
tm_memopt_compute_antin (basic_block bb)
{
  edge e;
  unsigned ix;

  /* Seed with the ANTIC_OUT of any successor.  */
  for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
    {
      e = EDGE_SUCC (bb, ix);
      /* Make sure we have already visited this BB, and is thus
	 initialized.  */
      if (BB_VISITED_P (e->dest))
	{
	  bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
	  break;
	}
    }

  for (; ix < EDGE_COUNT (bb->succs); ix++)
    {
      e = EDGE_SUCC (bb, ix);
      if (BB_VISITED_P (e->dest))
	bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
    }

  BB_VISITED_P (bb) = true;
}
/* Compute the AVAIL sets for every basic block in BLOCKS.

   We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:

     AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
     AVAIL_IN[bb]  = intersect (AVAIL_OUT[predecessors])

   This is basically what we do in lcm's compute_available(), but here
   we calculate two sets of sets (one for STOREs and one for READs),
   and we work on a region instead of the entire CFG.

   REGION is the TM region.
   BLOCKS are the basic blocks in the region.  */
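
/* Worked example of the equations above (illustrative only): for a diamond

	  bb1 (stores X)
	 /              \
     bb2 (stores Y)    bb3
	 \              /
	  bb4

   AVAIL_OUT[bb1] = {X}, AVAIL_OUT[bb2] = {X, Y}, AVAIL_OUT[bb3] = {X},
   so AVAIL_IN[bb4] = {X, Y} intersect {X} = {X}: only X is known to have
   been written on every path reaching bb4.  */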
static void
tm_memopt_compute_available (struct tm_region *region,
			     vec<basic_block> blocks)
{
  edge e;
  basic_block *worklist, *qin, *qout, *qend, bb;
  unsigned int qlen, i;
  edge_iterator ei;
  bool changed;

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks in the region.  */
  qlen = blocks.length () - 1;
  qin = qout = worklist =
    XNEWVEC (basic_block, qlen);

  /* Put every block in the region on the worklist.  */
  for (i = 0; blocks.iterate (i, &bb); ++i)
    {
      /* Seed AVAIL_OUT with the LOCAL set.  */
      bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
      bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));

      AVAIL_IN_WORKLIST_P (bb) = true;
      /* No need to insert the entry block, since it has an AVIN of
	 null, and an AVOUT that has already been seeded in.  */
      if (bb != region->entry_block)
	*qin++ = bb;
    }

  /* The entry block has been initialized with the local sets.  */
  BB_VISITED_P (region->entry_block) = true;

  qin = worklist;
  qend = &worklist[qlen];

  /* Iterate until the worklist is empty.  */
  while (qlen)
    {
      /* Take the first entry off the worklist.  */
      bb = *qout++;
      qlen--;

      if (qout >= qend)
	qout = worklist;

      /* This block can be added to the worklist again if necessary.  */
      AVAIL_IN_WORKLIST_P (bb) = false;
      tm_memopt_compute_avin (bb);

      /* Note: We do not add the LOCAL sets here because we already
	 seeded the AVAIL_OUT sets with them.  */
      changed  = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
      changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));

      if (changed
	  && (region->exit_blocks == NULL
	      || !bitmap_bit_p (region->exit_blocks, bb->index)))
	/* If the out state of this block changed, then we need to add
	   its successors to the worklist if they are not already in.  */
	FOR_EACH_EDGE (e, ei, bb->succs)
	  if (!AVAIL_IN_WORKLIST_P (e->dest) && e->dest != EXIT_BLOCK_PTR)
	    {
	      *qin++ = e->dest;
	      AVAIL_IN_WORKLIST_P (e->dest) = true;
	      qlen++;

	      if (qin >= qend)
		qin = worklist;
	    }
    }

  free (worklist);

  if (dump_file)
    dump_tm_memopt_sets (blocks);
}
/* Compute ANTIC sets for every basic block in BLOCKS.

   We compute STORE_ANTIC_OUT as follows:

     STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
     STORE_ANTIC_IN[bb]  = intersect(STORE_ANTIC_OUT[successors])

   REGION is the TM region.
   BLOCKS are the basic blocks in the region.  */

static void
tm_memopt_compute_antic (struct tm_region *region,
			 vec<basic_block> blocks)
{
  edge e;
  basic_block *worklist, *qin, *qout, *qend, bb;
  unsigned int qlen;
  int i;
  edge_iterator ei;

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks in the region.  */
  qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());

  for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
    {
      bb = blocks[i];

      /* Seed ANTIC_OUT with the LOCAL set.  */
      bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));

      /* Put every block in the region on the worklist.  */
      AVAIL_IN_WORKLIST_P (bb) = true;
      /* No need to insert exit blocks, since their ANTIC_IN is NULL,
	 and their ANTIC_OUT has already been seeded in.  */
      if (region->exit_blocks
	  && !bitmap_bit_p (region->exit_blocks, bb->index))
	{
	  qlen++;
	  *qin++ = bb;
	}
    }

  /* The exit blocks have been initialized with the local sets.  */
  if (region->exit_blocks)
    {
      unsigned int i;
      bitmap_iterator bi;
      EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
	BB_VISITED_P (BASIC_BLOCK (i)) = true;
    }

  qin = worklist;
  qend = &worklist[qlen];

  /* Iterate until the worklist is empty.  */
  while (qlen)
    {
      /* Take the first entry off the worklist.  */
      bb = *qout++;
      qlen--;

      if (qout >= qend)
	qout = worklist;

      /* This block can be added to the worklist again if necessary.  */
      AVAIL_IN_WORKLIST_P (bb) = false;
      tm_memopt_compute_antin (bb);

      /* Note: We do not add the LOCAL sets here because we already
	 seeded the ANTIC_OUT sets with them.  */
      if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
	  && bb != region->entry_block)
	/* If the out state of this block changed, then we need to add
	   its predecessors to the worklist if they are not already in.  */
	FOR_EACH_EDGE (e, ei, bb->preds)
	  if (!AVAIL_IN_WORKLIST_P (e->src))
	    {
	      *qin++ = e->src;
	      AVAIL_IN_WORKLIST_P (e->src) = true;
	      qlen++;

	      if (qin >= qend)
		qin = worklist;
	    }
    }

  free (worklist);

  if (dump_file)
    dump_tm_memopt_sets (blocks);
}
/* Offsets of load variants from TM_LOAD.  For example,
   BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
   See gtm-builtins.def.  */
#define TRANSFORM_RAR 1
#define TRANSFORM_RAW 2
#define TRANSFORM_RFW 3
/* Offsets of store variants from TM_STORE.  */
#define TRANSFORM_WAR 1
#define TRANSFORM_WAW 2
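
/* Example of the offset trick (illustrative; exact entry-point names come
   from gtm-builtins.def and the libitm ABI): if a load is proved to be a
   read-after-write, applying TRANSFORM_RAW replaces the BUILT_IN_TM_LOAD_4
   call with the builtin TRANSFORM_RAW entries further down the table, so
   a plain _ITM_RU4 read becomes the cheaper read-after-write variant
   (_ITM_RaWU4 in libitm's naming).  */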
/* Inform about a load/store optimization.  */

static void
dump_tm_memopt_transform (gimple stmt)
{
  if (dump_file)
    {
      fprintf (dump_file, "TM memopt: transforming: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
      fprintf (dump_file, "\n");
    }
}

/* Perform a read/write optimization.  Replaces the TM builtin in STMT
   by a builtin that is OFFSET entries down in the builtins table in
   gtm-builtins.def.  */

static void
tm_memopt_transform_stmt (unsigned int offset,
			  gimple stmt,
			  gimple_stmt_iterator *gsi)
{
  tree fn = gimple_call_fn (stmt);
  gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
  TREE_OPERAND (fn, 0)
    = builtin_decl_explicit ((enum built_in_function)
			     (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
			      + offset));
  gimple_call_set_fn (stmt, fn);
  gsi_replace (gsi, stmt, true);
  dump_tm_memopt_transform (stmt);
}
/* Perform the actual TM memory optimization transformations in the
   basic blocks in BLOCKS.  */

static void
tm_memopt_transform_blocks (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;
  gimple_stmt_iterator gsi;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  bitmap read_avail = READ_AVAIL_IN (bb);
	  bitmap store_avail = STORE_AVAIL_IN (bb);
	  bitmap store_antic = STORE_ANTIC_OUT (bb);
	  unsigned int loc;

	  if (is_tm_simple_load (stmt))
	    {
	      loc = tm_memopt_value_number (stmt, NO_INSERT);
	      if (store_avail && bitmap_bit_p (store_avail, loc))
		tm_memopt_transform_stmt (TRANSFORM_RAW, stmt, &gsi);
	      else if (store_antic && bitmap_bit_p (store_antic, loc))
		{
		  tm_memopt_transform_stmt (TRANSFORM_RFW, stmt, &gsi);
		  bitmap_set_bit (store_avail, loc);
		}
	      else if (read_avail && bitmap_bit_p (read_avail, loc))
		tm_memopt_transform_stmt (TRANSFORM_RAR, stmt, &gsi);
	      else
		bitmap_set_bit (read_avail, loc);
	    }
	  else if (is_tm_simple_store (stmt))
	    {
	      loc = tm_memopt_value_number (stmt, NO_INSERT);
	      if (store_avail && bitmap_bit_p (store_avail, loc))
		tm_memopt_transform_stmt (TRANSFORM_WAW, stmt, &gsi);
	      else
		{
		  if (read_avail && bitmap_bit_p (read_avail, loc))
		    tm_memopt_transform_stmt (TRANSFORM_WAR, stmt, &gsi);
		  bitmap_set_bit (store_avail, loc);
		}
	    }
	}
    }
}
/* Return a new set of bitmaps for a BB.  */

static struct tm_memopt_bitmaps *
tm_memopt_init_sets (void)
{
  struct tm_memopt_bitmaps *b
    = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
  b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
  return b;
}

/* Free sets computed for each BB.  */

static void
tm_memopt_free_sets (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    bb->aux = NULL;
}

/* Clear the visited bit for every basic block in BLOCKS.  */

static void
tm_memopt_clear_visited (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    BB_VISITED_P (bb) = false;
}
/* Replace TM load/stores with hints for the runtime.  We handle
   things like read-after-write, write-after-read, read-after-read,
   read-for-write, etc.  */

static unsigned int
execute_tm_memopt (void)
{
  struct tm_region *region;
  vec<basic_block> bbs;

  tm_memopt_value_id = 0;
  tm_memopt_value_numbers.create (10);

  for (region = all_tm_regions; region; region = region->next)
    {
      /* All the TM stores/loads in the current region.  */
      size_t i;
      basic_block bb;

      bitmap_obstack_initialize (&tm_memopt_obstack);

      /* Save all BBs for the current region.  */
      bbs = get_tm_region_blocks (region->entry_block,
				  region->exit_blocks,
				  region->irr_blocks,
				  NULL,
				  false);

      /* Collect all the memory operations.  */
      for (i = 0; bbs.iterate (i, &bb); ++i)
	{
	  bb->aux = tm_memopt_init_sets ();
	  tm_memopt_accumulate_memops (bb);
	}

      /* Solve data flow equations and transform each block accordingly.  */
      tm_memopt_clear_visited (bbs);
      tm_memopt_compute_available (region, bbs);
      tm_memopt_clear_visited (bbs);
      tm_memopt_compute_antic (region, bbs);
      tm_memopt_transform_blocks (bbs);

      tm_memopt_free_sets (bbs);
      bbs.release ();
      bitmap_obstack_release (&tm_memopt_obstack);
      tm_memopt_value_numbers.empty ();
    }

  tm_memopt_value_numbers.dispose ();
  return 0;
}

static bool
gate_tm_memopt (void)
{
  return flag_tm && optimize > 0;
}
namespace {

const pass_data pass_data_tm_memopt =
{
  GIMPLE_PASS, /* type */
  "tmmemopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_tm_memopt : public gimple_opt_pass
{
public:
  pass_tm_memopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_memopt, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_tm_memopt (); }
  unsigned int execute () { return execute_tm_memopt (); }

}; // class pass_tm_memopt

} // anon namespace

gimple_opt_pass *
make_pass_tm_memopt (gcc::context *ctxt)
{
  return new pass_tm_memopt (ctxt);
}
/* Interprocedural analysis for the creation of transactional clones.
   The aim of this pass is to find which functions are referenced in
   a non-irrevocable transaction context, and for those over which
   we have control (or user directive), create a version of the
   function which uses only the transactional interface to reference
   protected memories.  This analysis proceeds in several steps:

     (1) Collect the set of all possible transactional clones:

	(a) For all local public functions marked tm_callable, push
	    it onto the tm_callee queue.

	(b) For all local functions, scan for calls in transaction blocks.
	    Push the caller and callee onto the tm_caller and tm_callee
	    queues.  Count the number of callers for each callee.

	(c) For each local function on the callee list, assume we will
	    create a transactional clone.  Push *all* calls onto the
	    callee queues; count the number of clone callers separately
	    to the number of original callers.

     (2) Propagate irrevocable status up the dominator tree:

	(a) Any external function on the callee list that is not marked
	    tm_callable is irrevocable.  Push all callers of such onto
	    the worklist.

	(b) For each function on the worklist, mark each block that
	    contains an irrevocable call.  Use the AND operator to
	    propagate that mark up the dominator tree.

	(c) If we reach the entry block for a possible transactional
	    clone, then the transactional clone is irrevocable, and
	    we should not create the clone after all.  Push all
	    callers onto the worklist.

	(d) Place tm_irrevocable calls at the beginning of the relevant
	    blocks.  Special case here is the entry block for the entire
	    transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
	    the library to begin the region in serial mode.  Decrement
	    the call count for all callees in the irrevocable region.

     (3) Create the transactional clones:

	Any tm_callee that still has a non-zero call count is cloned.
*/
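
/* Small illustrative example of the analysis above:

     static int g;
     static void helper (void) { g++; }
     void f (void) { __transaction_atomic { helper (); } }

   Step (1b) queues HELPER as a tm_callee with one normal caller; since
   HELPER contains nothing irrevocable, step (2) leaves it alone, and
   step (3) emits a transactional clone of HELPER to which the call
   inside the transaction is redirected.  */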
/* This structure is stored in the AUX field of each cgraph_node.  */
struct tm_ipa_cg_data
{
  /* The clone of the function that got created.  */
  struct cgraph_node *clone;

  /* The tm regions in the normal function.  */
  struct tm_region *all_tm_regions;

  /* The blocks of the normal/clone functions that contain irrevocable
     calls, or blocks that are post-dominated by irrevocable calls.  */
  bitmap irrevocable_blocks_normal;
  bitmap irrevocable_blocks_clone;

  /* The blocks of the normal function that are involved in transactions.  */
  bitmap transaction_blocks_normal;

  /* The number of callers to the transactional clone of this function
     from normal and transactional clones respectively.  */
  unsigned tm_callers_normal;
  unsigned tm_callers_clone;

  /* True if all calls to this function's transactional clone
     are irrevocable.  Also automatically true if the function
     has no transactional clone.  */
  bool is_irrevocable;

  /* Flags indicating the presence of this function in various queues.  */
  bool in_callee_queue;
  bool in_worklist;

  /* Flags indicating the kind of scan desired while in the worklist.  */
  bool want_irr_scan_normal;
};

typedef vec<cgraph_node_ptr> cgraph_node_queue;
/* Return the ipa data associated with NODE, allocating zeroed memory
   if necessary.  TRAVERSE_ALIASES is true if we must traverse aliases
   and set *NODE accordingly.  */

static struct tm_ipa_cg_data *
get_cg_data (struct cgraph_node **node, bool traverse_aliases)
{
  struct tm_ipa_cg_data *d;

  if (traverse_aliases && (*node)->alias)
    *node = cgraph_alias_target (*node);

  d = (struct tm_ipa_cg_data *) (*node)->aux;

  if (d == NULL)
    {
      d = (struct tm_ipa_cg_data *)
	obstack_alloc (&tm_obstack.obstack, sizeof (*d));
      (*node)->aux = (void *) d;
      memset (d, 0, sizeof (*d));
    }

  return d;
}
/* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
   it is already present.  */

static void
maybe_push_queue (struct cgraph_node *node,
		  cgraph_node_queue *queue_p, bool *in_queue_p)
{
  if (!*in_queue_p)
    {
      *in_queue_p = true;
      queue_p->safe_push (node);
    }
}
/* Duplicate the basic blocks in QUEUE for use in the uninstrumented
   code path.  QUEUE are the basic blocks inside the transaction
   represented in REGION.

   Later in split_code_paths() we will add the conditional to choose
   between the two alternatives.  */

static void
ipa_uninstrument_transaction (struct tm_region *region,
			      vec<basic_block> queue)
{
  gimple transaction = region->transaction_stmt;
  basic_block transaction_bb = gimple_bb (transaction);
  int n = queue.length ();
  basic_block *new_bbs = XNEWVEC (basic_block, n);

  copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb,
	    false);
  edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED);
  add_phi_args_after_copy (new_bbs, n, e);

  // Now we will have a GIMPLE_ATOMIC with 3 possible edges out of it.
  //   a) EDGE_FALLTHRU into the transaction
  //   b) EDGE_TM_ABORT out of the transaction
  //   c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks.

  free (new_bbs);
}
/* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
   Queue all callees within block BB.  */

static void
ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
			 basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl)
	    {
	      struct tm_ipa_cg_data *d;
	      unsigned *pcallers;
	      struct cgraph_node *node;

	      if (is_tm_ending_fndecl (fndecl))
		continue;
	      if (find_tm_replacement_function (fndecl))
		continue;

	      node = cgraph_get_node (fndecl);
	      gcc_assert (node != NULL);
	      d = get_cg_data (&node, true);

	      pcallers = (for_clone ? &d->tm_callers_clone
			  : &d->tm_callers_normal);
	      *pcallers += 1;

	      maybe_push_queue (node, callees_p, &d->in_callee_queue);
	    }
	}
    }
}
/* Scan all calls in NODE that are within a transaction region,
   and push the resulting nodes into the callee queue.  */

static void
ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
			       cgraph_node_queue *callees_p)
{
  struct tm_region *r;

  d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
  d->all_tm_regions = all_tm_regions;

  for (r = all_tm_regions; r; r = r->next)
    {
      vec<basic_block> bbs;
      basic_block bb;
      unsigned i;

      bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
				  d->transaction_blocks_normal, false);

      // Generate the uninstrumented code path for this transaction.
      ipa_uninstrument_transaction (r, bbs);

      FOR_EACH_VEC_ELT (bbs, i, bb)
	ipa_tm_scan_calls_block (callees_p, bb, false);

      bbs.release ();
    }

  // ??? copy_bbs should maintain cgraph edges for the blocks as it is
  // copying them, rather than forcing us to do this externally.
  rebuild_cgraph_edges ();

  // ??? In ipa_uninstrument_transaction we don't try to update dominators
  // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects.
  // Instead, just release dominators here so update_ssa recomputes them.
  free_dominance_info (CDI_DOMINATORS);

  // When building the uninstrumented code path, copy_bbs will have invoked
  // create_new_def_for starting an "ssa update context".  There is only one
  // instance of this context, so resolve ssa updates before moving on to
  // the next function.
  update_ssa (TODO_update_ssa);
}
/* Scan all calls in NODE as if this is the transactional clone,
   and push the destinations into the callee queue.  */

static void
ipa_tm_scan_calls_clone (struct cgraph_node *node,
			 cgraph_node_queue *callees_p)
{
  struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
  basic_block bb;

  FOR_EACH_BB_FN (bb, fn)
    ipa_tm_scan_calls_block (callees_p, bb, true);
}
/* The function NODE has been detected to be irrevocable.  Push all
   of its callers onto WORKLIST for the purpose of re-scanning them.  */

static void
ipa_tm_note_irrevocable (struct cgraph_node *node,
			 cgraph_node_queue *worklist_p)
{
  struct tm_ipa_cg_data *d = get_cg_data (&node, true);
  struct cgraph_edge *e;

  d->is_irrevocable = true;

  for (e = node->callers; e; e = e->next_caller)
    {
      basic_block bb;
      struct cgraph_node *caller;

      /* Don't examine recursive calls.  */
      if (e->caller == node)
	continue;
      /* Even if we think we can go irrevocable, believe the user
	 above all.  */
      if (is_tm_safe_or_pure (e->caller->decl))
	continue;

      caller = e->caller;
      d = get_cg_data (&caller, true);

      /* Check if the callee is in a transactional region.  If so,
	 schedule the function for normal re-scan as well.  */
      bb = gimple_bb (e->call_stmt);
      gcc_assert (bb != NULL);
      if (d->transaction_blocks_normal
	  && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
	d->want_irr_scan_normal = true;

      maybe_push_queue (caller, worklist_p, &d->in_worklist);
    }
}
/* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
   within the block is irrevocable.  */

static bool
ipa_tm_scan_irr_block (basic_block bb)
{
  gimple_stmt_iterator gsi;
  tree fn;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
	{
	case GIMPLE_ASSIGN:
	  if (gimple_assign_single_p (stmt))
	    {
	      tree lhs = gimple_assign_lhs (stmt);
	      tree rhs = gimple_assign_rhs1 (stmt);
	      if (volatile_var_p (lhs) || volatile_var_p (rhs))
		return true;
	    }
	  break;

	case GIMPLE_CALL:
	  {
	    tree lhs = gimple_call_lhs (stmt);
	    if (lhs && volatile_var_p (lhs))
	      return true;

	    if (is_tm_pure_call (stmt))
	      break;

	    fn = gimple_call_fn (stmt);

	    /* Functions with the attribute are by definition irrevocable.  */
	    if (is_tm_irrevocable (fn))
	      return true;

	    /* For direct function calls, go ahead and check for replacement
	       functions, or transitive irrevocable functions.  For indirect
	       functions, we'll ask the runtime.  */
	    if (TREE_CODE (fn) == ADDR_EXPR)
	      {
		struct tm_ipa_cg_data *d;
		struct cgraph_node *node;

		fn = TREE_OPERAND (fn, 0);
		if (is_tm_ending_fndecl (fn))
		  break;
		if (find_tm_replacement_function (fn))
		  break;

		node = cgraph_get_node (fn);
		d = get_cg_data (&node, true);

		/* Return true if irrevocable, but above all, believe
		   the user.  */
		if (d->is_irrevocable
		    && !is_tm_safe_or_pure (fn))
		  return true;
	      }
	    break;
	  }

	case GIMPLE_ASM:
	  /* ??? The Approved Method of indicating that an inline
	     assembly statement is not relevant to the transaction
	     is to wrap it in a __tm_waiver block.  This is not
	     yet implemented, so we can't check for it.  */
	  if (is_tm_safe (current_function_decl))
	    {
	      tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
	      SET_EXPR_LOCATION (t, gimple_location (stmt));
	      error ("%Kasm not allowed in %<transaction_safe%> function", t);
	    }
	  return true;

	default:
	  break;
	}
    }

  return false;
}
/* For each of the blocks seeded within PQUEUE, walk the CFG looking
   for new irrevocable blocks, marking them in NEW_IRR.  Don't bother
   scanning past OLD_IRR or EXIT_BLOCKS.  */

static bool
ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
			bitmap old_irr, bitmap exit_blocks)
{
  bool any_new_irr = false;
  edge e;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  do
    {
      basic_block bb = pqueue->pop ();

      /* Don't re-scan blocks we know already are irrevocable.  */
      if (old_irr && bitmap_bit_p (old_irr, bb->index))
	continue;

      if (ipa_tm_scan_irr_block (bb))
	{
	  bitmap_set_bit (new_irr, bb->index);
	  any_new_irr = true;
	}
      else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
	{
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (!bitmap_bit_p (visited_blocks, e->dest->index))
	      {
		bitmap_set_bit (visited_blocks, e->dest->index);
		pqueue->safe_push (e->dest);
	      }
	}
    }
  while (!pqueue->is_empty ());

  BITMAP_FREE (visited_blocks);

  return any_new_irr;
}
/* Propagate the irrevocable property both up and down the dominator tree.
   BB is the current block being scanned; EXIT_BLOCKS are the edges of the
   TM regions; OLD_IRR are the results of a previous scan of the dominator
   tree which has been fully propagated; NEW_IRR is the set of new blocks
   which are gaining the irrevocable property during the current scan.  */

static void
ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
		      bitmap old_irr, bitmap exit_blocks)
{
  vec<basic_block> bbs;
  bitmap all_region_blocks;

  /* If this block is in the old set, no need to rescan.  */
  if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
    return;

  all_region_blocks = BITMAP_ALLOC (&tm_obstack);
  bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
			      all_region_blocks, false);
  do
    {
      basic_block bb = bbs.pop ();
      bool this_irr = bitmap_bit_p (new_irr, bb->index);
      bool all_son_irr = false;
      edge_iterator ei;
      edge e;

      /* Propagate up.  If my children are, I am too, but we must have
	 at least one child that is.  */
      if (!this_irr)
	{
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if (!bitmap_bit_p (new_irr, e->dest->index))
		{
		  all_son_irr = false;
		  break;
		}
	      else
		all_son_irr = true;
	    }
	  if (all_son_irr)
	    {
	      /* Add block to new_irr if it hasn't already been processed.  */
	      if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
		{
		  bitmap_set_bit (new_irr, bb->index);
		  this_irr = true;
		}
	    }
	}

      /* Propagate down to everyone we immediately dominate.  */
      if (this_irr)
	{
	  basic_block son;
	  for (son = first_dom_son (CDI_DOMINATORS, bb);
	       son;
	       son = next_dom_son (CDI_DOMINATORS, son))
	    {
	      /* Make sure block is actually in a TM region, and it
		 isn't already in old_irr.  */
	      if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
		  && bitmap_bit_p (all_region_blocks, son->index))
		bitmap_set_bit (new_irr, son->index);
	    }
	}
    }
  while (!bbs.is_empty ());

  BITMAP_FREE (all_region_blocks);
  bbs.release ();
}
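/* Illustrative note on the propagation rules above (hypothetical block
   numbers, added for clarity): for a simple diamond

	BB2 (region entry)
       /    \
     BB3    BB4
       \    /
	BB5

   the upward rule marks BB2 only if, when BB2 is visited, both of its
   successors BB3 and BB4 are already in NEW_IRR; the downward rule then
   marks every block BB2 immediately dominates (here BB3, BB4 and BB5),
   provided the block belongs to the region and is not already in OLD_IRR.  */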
static void
ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl)
	    {
	      struct tm_ipa_cg_data *d;
	      unsigned *pcallers;
	      struct cgraph_node *tnode;

	      if (is_tm_ending_fndecl (fndecl))
		continue;
	      if (find_tm_replacement_function (fndecl))
		continue;

	      tnode = cgraph_get_node (fndecl);
	      d = get_cg_data (&tnode, true);

	      pcallers = (for_clone ? &d->tm_callers_clone
			  : &d->tm_callers_normal);

	      gcc_assert (*pcallers > 0);
	      *pcallers -= 1;
	    }
	}
    }
}
/* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
   as well as other irrevocable actions such as inline assembly.  Mark all
   such blocks as irrevocable and decrement the number of calls to
   transactional clones.  Return true if, for the transactional clone, the
   entire function is irrevocable.  */

static bool
ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
{
  struct tm_ipa_cg_data *d;
  bitmap new_irr, old_irr;
  vec<basic_block> queue;
  bool ret = false;

  /* Builtin operators (operator new, and such).  */
  if (DECL_STRUCT_FUNCTION (node->decl) == NULL
      || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
    return false;

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  d = get_cg_data (&node, true);
  queue.create (10);
  new_irr = BITMAP_ALLOC (&tm_obstack);

  /* Scan each tm region, propagating irrevocable status through the tree.  */
  if (for_clone)
    {
      old_irr = d->irrevocable_blocks_clone;
      queue.quick_push (single_succ (ENTRY_BLOCK_PTR));
      if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
	{
	  ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR), new_irr,
				old_irr, NULL);
	  ret = bitmap_bit_p (new_irr, single_succ (ENTRY_BLOCK_PTR)->index);
	}
    }
  else
    {
      struct tm_region *region;

      old_irr = d->irrevocable_blocks_normal;
      for (region = d->all_tm_regions; region; region = region->next)
	{
	  queue.quick_push (region->entry_block);
	  if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
				      region->exit_blocks))
	    ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
				  region->exit_blocks);
	}
    }

  /* If we found any new irrevocable blocks, reduce the call count for
     transactional clones within the irrevocable blocks.  Save the new
     set of irrevocable blocks for next time.  */
  if (!bitmap_empty_p (new_irr))
    {
      bitmap_iterator bmi;
      unsigned i;

      EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
	ipa_tm_decrement_clone_counts (BASIC_BLOCK (i), for_clone);

      if (old_irr)
	{
	  bitmap_ior_into (old_irr, new_irr);
	  BITMAP_FREE (new_irr);
	}
      else if (for_clone)
	d->irrevocable_blocks_clone = new_irr;
      else
	d->irrevocable_blocks_normal = new_irr;

      if (dump_file && new_irr)
	{
	  const char *dname;
	  bitmap_iterator bmi;
	  unsigned i;

	  dname = lang_hooks.decl_printable_name (current_function_decl, 2);
	  EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
	    fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
	}
    }
  else
    BITMAP_FREE (new_irr);

  queue.release ();
  pop_cfun ();

  return ret;
}
/* Return true if, for the transactional clone of NODE, any call
   may enter irrevocable mode.  */

static bool
ipa_tm_mayenterirr_function (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  tree decl;
  unsigned flags;

  d = get_cg_data (&node, true);
  decl = node->decl;
  flags = flags_from_decl_or_type (decl);

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags & ECF_TM_BUILTIN)
    return false;

  /* Filter out all functions that are marked.  */
  if (flags & ECF_TM_PURE)
    return false;
  if (is_tm_safe (decl))
    return false;
  if (is_tm_irrevocable (decl))
    return true;
  if (is_tm_callable (decl))
    return true;
  if (find_tm_replacement_function (decl))
    return true;

  /* If we aren't seeing the final version of the function we don't
     know what it will contain at runtime.  */
  if (cgraph_function_body_availability (node) < AVAIL_AVAILABLE)
    return true;

  /* If the function must go irrevocable, then of course true.  */
  if (d->is_irrevocable)
    return true;

  /* If there are any blocks marked irrevocable, then the function
     as a whole may enter irrevocable.  */
  if (d->irrevocable_blocks_clone)
    return true;

  /* We may have previously marked this function as tm_may_enter_irr;
     see pass_diagnose_tm_blocks.  */
  if (node->local.tm_may_enter_irr)
    return true;

  /* Recurse on the main body for aliases.  In general, this will
     result in one of the bits above being set so that we will not
     have to recurse next time.  */
  if (node->alias)
    return ipa_tm_mayenterirr_function (cgraph_get_node (node->thunk.alias));

  /* What remains is unmarked local functions without items that force
     the function to go irrevocable.  */
  return false;
}
/* Diagnose calls from transaction_safe functions to unmarked
   functions that are determined to not be safe.  */

static void
ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    if (!is_tm_callable (e->callee->decl)
	&& e->callee->local.tm_may_enter_irr)
      error_at (gimple_location (e->call_stmt),
		"unsafe function call %qD within "
		"%<transaction_safe%> function", e->callee->decl);
}
/* Diagnose calls from atomic transactions to unmarked functions
   that are determined to not be safe.  */

static void
ipa_tm_diagnose_transaction (struct cgraph_node *node,
			     struct tm_region *all_tm_regions)
{
  struct tm_region *r;

  for (r = all_tm_regions; r; r = r->next)
    if (gimple_transaction_subcode (r->transaction_stmt) & GTMA_IS_RELAXED)
      {
	/* Atomic transactions can be nested inside relaxed.  */
	if (r->inner)
	  ipa_tm_diagnose_transaction (node, r->inner);
      }
    else
      {
	vec<basic_block> bbs;
	gimple_stmt_iterator gsi;
	basic_block bb;
	size_t i;

	bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
				    r->irr_blocks, NULL, false);

	for (i = 0; bbs.iterate (i, &bb); ++i)
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);
	      tree fndecl;

	      if (gimple_code (stmt) == GIMPLE_ASM)
		{
		  error_at (gimple_location (stmt),
			    "asm not allowed in atomic transaction");
		  continue;
		}

	      if (!is_gimple_call (stmt))
		continue;
	      fndecl = gimple_call_fndecl (stmt);

	      /* Indirect function calls have been diagnosed already.  */
	      if (!fndecl)
		continue;

	      /* Stop at the end of the transaction.  */
	      if (is_tm_ending_fndecl (fndecl))
		{
		  if (bitmap_bit_p (r->exit_blocks, bb->index))
		    break;
		  continue;
		}

	      /* Marked functions have been diagnosed already.  */
	      if (is_tm_pure_call (stmt))
		continue;
	      if (is_tm_callable (fndecl))
		continue;

	      if (cgraph_local_info (fndecl)->tm_may_enter_irr)
		error_at (gimple_location (stmt),
			  "unsafe function call %qD within "
			  "atomic transaction", fndecl);
	    }

	bbs.release ();
      }
}
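/* Illustrative example of the diagnostic above (hypothetical user code):

     extern void unmarked_fn (void);
     void f (void) { __transaction_atomic { unmarked_fn (); } }

   Here unmarked_fn is neither transaction_safe, transaction_callable, pure,
   nor a TM builtin, so its tm_may_enter_irr bit is set and the loop reports
   "unsafe function call 'unmarked_fn' within atomic transaction".  A
   __transaction_relaxed block takes the GTMA_IS_RELAXED path instead and is
   allowed to go serial-irrevocable at runtime.  */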
/* Return a transactional mangled name for the DECL_ASSEMBLER_NAME in
   OLD_DECL.  The returned value is a new IDENTIFIER_NODE; the scratch
   buffer used to build it is freed here.  */

static tree
tm_mangle (tree old_asm_id)
{
  const char *old_asm_name;
  char *tm_name;
  void *alloc = NULL;
  struct demangle_component *dc;
  tree new_asm_id;

  /* Determine if the symbol is already a valid C++ mangled name.  Do this
     even for C, which might be interfacing with C++ code via appropriately
     ugly identifiers.  */
  /* ??? We could probably do just as well checking for "_Z" and be done.  */
  old_asm_name = IDENTIFIER_POINTER (old_asm_id);
  dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);

  if (dc == NULL)
    {
      char length[8];

    do_unencoded:
      sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
      tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
    }
  else
    {
      old_asm_name += 2;	/* Skip _Z */

      switch (dc->type)
	{
	case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
	case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
	  /* Don't play silly games, you!  */
	  goto do_unencoded;

	case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
	  /* I'd really like to know if we can ever be passed one of
	     these from the C++ front end.  The Logical Thing would
	     seem that hidden-alias should be outer-most, so that we
	     get hidden-alias of a transaction-clone and not vice-versa.  */
	  old_asm_name += 2;
	  break;

	default:
	  break;
	}

      tm_name = concat ("_ZGTt", old_asm_name, NULL);
    }
  free (alloc);

  new_asm_id = get_identifier (tm_name);
  free (tm_name);

  return new_asm_id;
}
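/* Worked examples for the mangling above (illustrative symbol names):
   a plain C identifier "foo" is not a valid C++ mangled name, so it is
   wrapped as a length-prefixed transaction clone, "_ZGTt3foo"; a C++
   mangled name such as "_Z3foov" already carries the "_Z" prefix, so the
   transaction-clone marker is spliced in after it, giving "_ZGTt3foov".  */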
static void
ipa_tm_mark_force_output_node (struct cgraph_node *node)
{
  cgraph_mark_force_output_node (node);
  node->analyzed = true;
}

static void
ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
{
  node->forced_by_abi = true;
  node->analyzed = true;
}
/* Callback data for ipa_tm_create_version_alias.  */
struct create_version_alias_info
{
  struct cgraph_node *old_node;
  tree new_decl;
};
/* A subroutine of ipa_tm_create_version, called via
   cgraph_for_node_and_aliases.  Create new tm clones for each of
   the existing aliases.  */
static bool
ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
{
  struct create_version_alias_info *info
    = (struct create_version_alias_info *) data;
  tree old_decl, new_decl, tm_name;
  struct cgraph_node *new_node;

  if (!node->cpp_implicit_alias)
    return false;

  old_decl = node->decl;
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
			 TREE_CODE (old_decl), tm_name,
			 TREE_TYPE (old_decl));

  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);

  /* Based loosely on C++'s make_alias_for().  */
  TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
  DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
  DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
  TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
  DECL_EXTERNAL (new_decl) = 0;
  DECL_ARTIFICIAL (new_decl) = 1;
  TREE_ADDRESSABLE (new_decl) = 1;
  TREE_USED (new_decl) = 1;
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));

  new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl);
  new_node->tm_clone = true;
  new_node->externally_visible = info->old_node->externally_visible;
  /* ?? Do not traverse aliases here.  */
  get_cg_data (&node, false)->clone = new_node;

  record_tm_clone_pair (old_decl, new_decl);

  if (info->old_node->force_output
      || ipa_ref_list_first_referring (&info->old_node->ref_list))
    ipa_tm_mark_force_output_node (new_node);
  if (info->old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);
  return false;
}
/* Create a copy of the function (possibly declaration only) of OLD_NODE,
   appropriate for the transactional clone.  */

static void
ipa_tm_create_version (struct cgraph_node *old_node)
{
  tree new_decl, old_decl, tm_name;
  struct cgraph_node *new_node;

  old_decl = old_node->decl;
  new_decl = copy_node (old_decl);

  /* DECL_ASSEMBLER_NAME needs to be set before we call
     cgraph_copy_node_for_versioning below, because cgraph_node will
     fill the assembler_name_hash.  */
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));

  new_node = cgraph_copy_node_for_versioning (old_node, new_decl, vNULL, NULL);
  new_node->local.local = false;
  new_node->externally_visible = old_node->externally_visible;
  new_node->lowered = true;
  new_node->tm_clone = 1;
  get_cg_data (&old_node, true)->clone = new_node;

  if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE)
    {
      /* Remap extern inline to static inline.  */
      /* ??? Is it worth trying to use make_decl_one_only?  */
      if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
	{
	  DECL_EXTERNAL (new_decl) = 0;
	  TREE_PUBLIC (new_decl) = 0;
	  DECL_WEAK (new_decl) = 0;
	}

      tree_function_versioning (old_decl, new_decl,
				NULL, false, NULL,
				false, NULL, NULL);
    }

  record_tm_clone_pair (old_decl, new_decl);

  cgraph_call_function_insertion_hooks (new_node);
  if (old_node->force_output
      || ipa_ref_list_first_referring (&old_node->ref_list))
    ipa_tm_mark_force_output_node (new_node);
  if (old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);

  /* Do the same thing, but for any aliases of the original node.  */
  {
    struct create_version_alias_info data;
    data.old_node = old_node;
    data.new_decl = new_decl;
    cgraph_for_node_and_aliases (old_node, ipa_tm_create_version_alias,
				 &data, true);
  }
}
/* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB.  */

static void
ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
			basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple g;

  transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
			 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));

  split_block_after_labels (bb);
  gsi = gsi_after_labels (bb);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);

  cgraph_create_edge (node,
		      cgraph_get_create_node
		        (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
		      g, 0,
		      compute_call_stmt_bb_frequency (node->decl,
						      gimple_bb (g)));
}
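/* Illustrative sketch of the insertion above (hypothetical GIMPLE): a block
   found in IRR_BLOCKS that used to begin with

     x_1 = foo ();

   is split after its labels and gains a call to the BUILT_IN_TM_IRREVOCABLE
   builtin with argument MODE_SERIALIRREVOCABLE, roughly

     _ITM_changeTransactionMode (0);
     x_1 = foo ();

   so the runtime switches the transaction to serial-irrevocable mode before
   any of the block's uninstrumented code executes.  */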
/* Construct a call to TM_GETTMCLONE and insert it before GSI.  */

static bool
ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
			       struct tm_region *region,
			       gimple_stmt_iterator *gsi, gimple stmt)
{
  tree gettm_fn, ret, old_fn, callfn;
  gimple g, g2;
  bool safe;

  old_fn = gimple_call_fn (stmt);

  if (TREE_CODE (old_fn) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (old_fn, 0);
      tree clone = get_tm_clone_pair (fndecl);

      /* By transforming the call into a TM_GETTMCLONE, we are
	 technically taking the address of the original function and
	 its clone.  Explain this so inlining will know this function
	 is needed.  */
      cgraph_mark_address_taken_node (cgraph_get_node (fndecl));
      if (clone)
	cgraph_mark_address_taken_node (cgraph_get_node (clone));
    }

  safe = is_tm_safe (TREE_TYPE (old_fn));
  gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
				    : BUILT_IN_TM_GETTMCLONE_IRR);
  ret = create_tmp_var (ptr_type_node, NULL);

  if (!safe)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  /* Discard OBJ_TYPE_REF, since we weren't able to fold it.  */
  if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
    old_fn = OBJ_TYPE_REF_EXPR (old_fn);

  g = gimple_build_call (gettm_fn, 1, old_fn);
  ret = make_ssa_name (ret, g);
  gimple_call_set_lhs (g, ret);

  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  cgraph_create_edge (node, cgraph_get_create_node (gettm_fn), g, 0,
		      compute_call_stmt_bb_frequency (node->decl,
						      gimple_bb (g)));

  /* Cast return value from tm_gettmclone* into appropriate function
     pointer.  */
  callfn = create_tmp_var (TREE_TYPE (old_fn), NULL);
  g2 = gimple_build_assign (callfn,
			    fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
  callfn = make_ssa_name (callfn, g2);
  gimple_assign_set_lhs (g2, callfn);
  gsi_insert_before (gsi, g2, GSI_SAME_STMT);

  /* ??? This is a hack to preserve the NOTHROW bit on the call,
     which we would have derived from the decl.  Failure to save
     this bit means we might have to split the basic block.  */
  if (gimple_call_nothrow_p (stmt))
    gimple_call_set_nothrow (stmt, true);

  gimple_call_set_fn (stmt, callfn);

  /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
     for a call statement.  Fix it.  */
  {
    tree lhs = gimple_call_lhs (stmt);
    tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
    if (lhs
	&& !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
      {
	tree temp;

	temp = create_tmp_reg (rettype, 0);
	gimple_call_set_lhs (stmt, temp);

	g2 = gimple_build_assign (lhs,
				  fold_build1 (VIEW_CONVERT_EXPR,
					       TREE_TYPE (lhs), temp));
	gsi_insert_after (gsi, g2, GSI_SAME_STMT);
      }
  }

  return true;
}
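/* Illustrative sketch of the transformation above (hypothetical GIMPLE
   names): an indirect call inside a transaction such as

     x_2 = fnptr_1 (arg);

   is roughly rewritten into

     tmp_3 = <BUILT_IN_TM_GETTMCLONE_IRR or _SAFE> (fnptr_1);
     callfn_4 = (fntype) tmp_3;
     x_2 = callfn_4 (arg);

   so that the runtime looks up the registered transactional clone (or
   arranges to go irrevocable) for a target that is only known at run time.  */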
/* Helper function for ipa_tm_transform_calls*.  Given a call
   statement in GSI which resides inside transaction REGION, redirect
   the call to either its wrapper function, or its clone.  */

static void
ipa_tm_transform_calls_redirect (struct cgraph_node *node,
				 struct tm_region *region,
				 gimple_stmt_iterator *gsi,
				 bool *need_ssa_rename_p)
{
  gimple stmt = gsi_stmt (*gsi);
  struct cgraph_node *new_node;
  struct cgraph_edge *e = cgraph_edge (node, stmt);
  tree fndecl = gimple_call_fndecl (stmt);

  /* For indirect calls, pass the address through the runtime.  */
  if (fndecl == NULL)
    {
      *need_ssa_rename_p |=
	ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
      return;
    }

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
    return;

  /* Fixup recursive calls inside clones.  */
  /* ??? Why did cgraph_copy_node_for_versioning update the call edges
     for recursion but not update the call statements themselves?  */
  if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
    {
      gimple_call_set_fndecl (stmt, current_function_decl);
      return;
    }

  /* If there is a replacement, use it.  */
  fndecl = find_tm_replacement_function (fndecl);
  if (fndecl)
    {
      new_node = cgraph_get_create_node (fndecl);

      /* ??? Mark all transaction_wrap functions tm_may_enter_irr.

	 We can't do this earlier in record_tm_replacement because
	 cgraph_remove_unreachable_nodes is called before we inject
	 references to the node.  Further, we can't do this in some
	 nice central place in ipa_tm_execute because we don't have
	 the exact list of wrapper functions that would be used.
	 Marking more wrappers than necessary results in the creation
	 of unnecessary cgraph_nodes, which can cause some of the
	 other IPA passes to crash.

	 We do need to mark these nodes so that we get the proper
	 result in expand_call_tm.  */
      /* ??? This seems broken.  How is it that we're marking the
	 CALLEE as may_enter_irr?  Surely we should be marking the
	 CALLER.  Also note that find_tm_replacement_function also
	 contains mappings into the TM runtime, e.g. memcpy.  These
	 we know won't go irrevocable.  */
      new_node->local.tm_may_enter_irr = 1;
    }
  else
    {
      struct tm_ipa_cg_data *d;
      struct cgraph_node *tnode = e->callee;

      d = get_cg_data (&tnode, true);
      new_node = d->clone;

      /* As we've already skipped pure calls and appropriate builtins,
	 and we've already marked irrevocable blocks, if we can't come
	 up with a static replacement, then ask the runtime.  */
      if (new_node == NULL)
	{
	  *need_ssa_rename_p |=
	    ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
	  return;
	}

      fndecl = new_node->decl;
    }

  cgraph_redirect_edge_callee (e, new_node);
  gimple_call_set_fndecl (stmt, fndecl);
}
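/* Illustrative sketch of the redirection above (hypothetical names): a
   direct call inside a transaction,

     bar ();

   where bar has a transactional clone created by ipa_tm_create_version is
   rewritten, via the callgraph edge and the call statement, to roughly

     _ZGTt3bar ();

   whereas a call with a registered wrapper (see record_tm_replacement) is
   redirected to that wrapper, and anything left without a static target
   falls back to the runtime lookup in ipa_tm_insert_gettmclone_call.  */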
/* Helper function for ipa_tm_transform_calls.  For a given BB,
   install calls to tm_irrevocable when IRR_BLOCKS are reached,
   redirect other calls to the generated transactional clone.  */

static bool
ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
			  basic_block bb, bitmap irr_blocks)
{
  gimple_stmt_iterator gsi;
  bool need_ssa_rename = false;

  if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
    {
      ipa_tm_insert_irr_call (node, region, bb);
      return true;
    }

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (!is_gimple_call (stmt))
	continue;
      if (is_tm_pure_call (stmt))
	continue;

      /* Redirect edges to the appropriate replacement or clone.  */
      ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
    }

  return need_ssa_rename;
}
/* Walk the CFG for REGION, beginning at BB.  Install calls to
   tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
   the generated transactional clone.  */

static bool
ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
			basic_block bb, bitmap irr_blocks)
{
  bool need_ssa_rename = false;
  edge e;
  edge_iterator ei;
  vec<basic_block> queue = vNULL;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  queue.safe_push (bb);
  do
    {
      bb = queue.pop ();

      need_ssa_rename |=
	ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);

      if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
	continue;

      if (region && bitmap_bit_p (region->exit_blocks, bb->index))
	continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    queue.safe_push (e->dest);
	  }
    }
  while (!queue.is_empty ());

  queue.release ();
  BITMAP_FREE (visited_blocks);

  return need_ssa_rename;
}
/* Transform the calls within the TM regions within NODE.  */

static void
ipa_tm_transform_transaction (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  struct tm_region *region;
  bool need_ssa_rename = false;

  d = get_cg_data (&node, true);

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  for (region = d->all_tm_regions; region; region = region->next)
    {
      /* If we're sure to go irrevocable, don't transform anything.  */
      if (d->irrevocable_blocks_normal
	  && bitmap_bit_p (d->irrevocable_blocks_normal,
			   region->entry_block->index))
	{
	  transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
				   | GTMA_MAY_ENTER_IRREVOCABLE
				   | GTMA_HAS_NO_INSTRUMENTATION);
	  continue;
	}

      need_ssa_rename |=
	ipa_tm_transform_calls (node, region, region->entry_block,
				d->irrevocable_blocks_normal);
    }

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}
/* Transform the calls within the transactional clone of NODE.  */

static void
ipa_tm_transform_clone (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  bool need_ssa_rename;

  d = get_cg_data (&node, true);

  /* If this function makes no calls and has no irrevocable blocks,
     then there's nothing to do.  */
  /* ??? Remove non-aborting top-level transactions.  */
  if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
    return;

  push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  need_ssa_rename =
    ipa_tm_transform_calls (d->clone, NULL, single_succ (ENTRY_BLOCK_PTR),
			    d->irrevocable_blocks_clone);

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}
/* Main entry point for the transactional memory IPA pass.  */

static unsigned int
ipa_tm_execute (void)
{
  cgraph_node_queue tm_callees = cgraph_node_queue ();
  /* List of functions that will go irrevocable.  */
  cgraph_node_queue irr_worklist = cgraph_node_queue ();

  struct cgraph_node *node;
  struct tm_ipa_cg_data *d;
  enum availability a;
  unsigned int i;

#ifdef ENABLE_CHECKING
  verify_cgraph ();
#endif

  bitmap_obstack_initialize (&tm_obstack);
  initialize_original_copy_tables ();

  /* For all local functions marked tm_callable, queue them.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (is_tm_callable (node->decl)
	&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
      {
	d = get_cg_data (&node, true);
	maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
      }

  /* For all local reachable functions...  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
	&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
      {
	/* ... marked tm_pure, record that fact for the runtime by
	   indicating that the pure function is its own tm_callable.
	   No need to do this if the function's address can't be taken.  */
	if (is_tm_pure (node->decl))
	  {
	    if (!node->local.local)
	      record_tm_clone_pair (node->decl, node->decl);
	    continue;
	  }

	push_cfun (DECL_STRUCT_FUNCTION (node->decl));
	calculate_dominance_info (CDI_DOMINATORS);

	tm_region_init (NULL);
	if (all_tm_regions)
	  {
	    d = get_cg_data (&node, true);

	    /* Scan for calls that are in each transaction, and
	       generate the uninstrumented code path.  */
	    ipa_tm_scan_calls_transaction (d, &tm_callees);

	    /* Put it in the worklist so we can scan the function
	       later (ipa_tm_scan_irr_function) and mark the
	       irrevocable blocks.  */
	    maybe_push_queue (node, &irr_worklist, &d->in_worklist);
	    d->want_irr_scan_normal = true;
	  }

	pop_cfun ();
      }

  /* For every local function on the callee list, scan as if we will be
     creating a transactional clone, queueing all new functions we find
     along the way.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      a = cgraph_function_body_availability (node);
      d = get_cg_data (&node, true);

      /* Put it in the worklist so we can scan the function later
	 (ipa_tm_scan_irr_function) and mark the irrevocable
	 blocks.  */
      maybe_push_queue (node, &irr_worklist, &d->in_worklist);

      /* Some callees cannot be arbitrarily cloned.  These will always be
	 irrevocable.  Mark these now, so that we need not scan them.  */
      if (is_tm_irrevocable (node->decl))
	ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a <= AVAIL_NOT_AVAILABLE
	       && !is_tm_safe_or_pure (node->decl))
	ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a >= AVAIL_OVERWRITABLE)
	{
	  if (!tree_versionable_function_p (node->decl))
	    ipa_tm_note_irrevocable (node, &irr_worklist);
	  else if (!d->is_irrevocable)
	    {
	      /* If this is an alias, make sure its base is queued as well.
		 We need not scan the callees now, as the base will do.  */
	      if (node->alias)
		{
		  node = cgraph_get_node (node->thunk.alias);
		  d = get_cg_data (&node, true);
		  maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
		  continue;
		}

	      /* Add all nodes called by this function into
		 tm_callees as well.  */
	      ipa_tm_scan_calls_clone (node, &tm_callees);
	    }
	}
    }

  /* Iterate scans until no more work to be done.  Prefer not to use
     vec::pop because the worklist tends to follow a breadth-first
     search of the callgraph, which should allow convergence with a
     minimum number of scans.  But we also don't want the worklist
     array to grow without bound, so we shift the array up periodically.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      if (i > 256 && i == irr_worklist.length () / 8)
	{
	  irr_worklist.block_remove (0, i);
	  i = 0;
	}

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;

      if (d->want_irr_scan_normal)
	{
	  d->want_irr_scan_normal = false;
	  ipa_tm_scan_irr_function (node, false);
	}
      if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
	ipa_tm_note_irrevocable (node, &irr_worklist);
    }

  /* For every function on the callee list, collect the tm_may_enter_irr
     bit on the node.  */
  irr_worklist.truncate (0);
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (ipa_tm_mayenterirr_function (node))
	{
	  d = get_cg_data (&node, true);
	  gcc_assert (d->in_worklist == false);
	  maybe_push_queue (node, &irr_worklist, &d->in_worklist);
	}
    }

  /* Propagate the tm_may_enter_irr bit to callers until stable.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      struct cgraph_node *caller;
      struct cgraph_edge *e;
      struct ipa_ref *ref;
      unsigned j;

      if (i > 256 && i == irr_worklist.length () / 8)
	{
	  irr_worklist.block_remove (0, i);
	  i = 0;
	}

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;
      node->local.tm_may_enter_irr = true;

      /* Propagate back to normal callers.  */
      for (e = node->callers; e; e = e->next_caller)
	{
	  caller = e->caller;
	  if (!is_tm_safe_or_pure (caller->decl)
	      && !caller->local.tm_may_enter_irr)
	    {
	      d = get_cg_data (&caller, true);
	      maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
	    }
	}

      /* Propagate back to referring aliases as well.  */
      for (j = 0; ipa_ref_list_referring_iterate (&node->ref_list, j, ref); j++)
	{
	  caller = cgraph (ref->referring);
	  if (ref->use == IPA_REF_ALIAS
	      && !caller->local.tm_may_enter_irr)
	    {
	      /* ?? Do not traverse aliases here.  */
	      d = get_cg_data (&caller, false);
	      maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
	    }
	}
    }

  /* Now validate all tm_safe functions, and all atomic regions in
     other functions.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
	&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
      {
	d = get_cg_data (&node, true);
	if (is_tm_safe (node->decl))
	  ipa_tm_diagnose_tm_safe (node);
	else if (d->all_tm_regions)
	  ipa_tm_diagnose_transaction (node, d->all_tm_regions);
      }

  /* Create clones.  Do those that are not irrevocable and have a
     positive call count.  Do those publicly visible functions that
     the user directed us to clone.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      bool doit = false;

      node = tm_callees[i];
      if (node->cpp_implicit_alias)
	continue;

      a = cgraph_function_body_availability (node);
      d = get_cg_data (&node, true);

      if (a <= AVAIL_NOT_AVAILABLE)
	doit = is_tm_callable (node->decl);
      else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
	doit = true;
      else if (!d->is_irrevocable
	       && d->tm_callers_normal + d->tm_callers_clone > 0)
	doit = true;

      if (doit)
	ipa_tm_create_version (node);
    }

  /* Redirect calls to the new clones, and insert irrevocable marks.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (node->analyzed)
	{
	  d = get_cg_data (&node, true);
	  if (d->clone)
	    ipa_tm_transform_clone (node);
	}
    }
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
	&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
      {
	d = get_cg_data (&node, true);
	if (d->all_tm_regions)
	  ipa_tm_transform_transaction (node);
      }

  /* Free and clear all data structures.  */
  tm_callees.release ();
  irr_worklist.release ();
  bitmap_obstack_release (&tm_obstack);
  free_original_copy_tables ();

  FOR_EACH_FUNCTION (node)
    node->aux = NULL;

#ifdef ENABLE_CHECKING
  verify_cgraph ();
#endif

  return 0;
}
namespace {

const pass_data pass_data_ipa_tm =
{
  SIMPLE_IPA_PASS, /* type */
  "tmipa", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_tm : public simple_ipa_opt_pass
{
public:
  pass_ipa_tm (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_tm (); }
  unsigned int execute () { return ipa_tm_execute (); }

}; // class pass_ipa_tm

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_tm (gcc::context *ctxt)
{
  return new pass_ipa_tm (ctxt);
}

#include "gt-trans-mem.h"