/* Loop autoparallelization.
   Copyright (C) 2006-2019 Free Software Foundation, Inc.
   Contributed by Sebastian Pop <pop@cri.ensmp.fr>
   Zdenek Dvorak <dvorakz@suse.cz> and Razya Ladelsky <razya@il.ibm.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "stor-layout.h"
#include "tree-nested.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-scalar-evolution.h"
#include "langhooks.h"
#include "tree-vectorizer.h"
#include "tree-hasher.h"
#include "tree-parloops.h"
#include "omp-general.h"
#include "tree-ssa-alias.h"
#include "gomp-constants.h"
#include "stringpool.h"
/* This pass tries to distribute the iterations of loops into several
   threads.  The implementation is straightforward -- for each loop we test
   whether its iterations are independent, and if it is the case (and some
   additional conditions regarding profitability and correctness are
   satisfied), we add GIMPLE_OMP_PARALLEL and GIMPLE_OMP_FOR codes and let
   the omp expansion machinery do its job.

   Most of the complexity is in bringing the code into the shape expected
   by the omp expanders:
   -- for GIMPLE_OMP_FOR, ensuring that the loop has only one induction
      variable and that the exit test is at the start of the loop body
   -- for GIMPLE_OMP_PARALLEL, replacing the references to local addressable
      variables by accesses through pointers, and breaking up ssa chains
      by storing the values incoming to the parallelized loop to a structure
      passed to the new function as an argument (something similar is done
      in omp gimplification, unfortunately only a small part of the code
      can be shared).

   TODO:
   -- if there are several parallelizable loops in a function, it may be
      possible to generate the threads just once (using synchronization to
      ensure that cross-loop dependences are obeyed).
   -- handling of common reduction patterns for outer loops.

   More info can also be found at http://gcc.gnu.org/wiki/AutoParInGCC  */
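
/* An added illustration (not part of the original sources): a loop of the
   shape

     void
     scale (int n, float *restrict a, float *restrict b)
     {
       for (int i = 0; i < n; i++)
	 a[i] = 2.0f * b[i];
     }

   has independent iterations, so with -ftree-parallelize-loops=N its body
   can be outlined into a new function and driven by GIMPLE_OMP_PARALLEL /
   GIMPLE_OMP_FOR, exactly as described above (subject to the profitability
   checks, e.g. a large enough iteration count).  */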
/* Reduction handling:
   currently we use code inspired by vect_force_simple_reduction to detect
   reduction patterns.
   The code transformation will be introduced by an example.

parloop
{
  int sum = 1;

  for (i = 0; i < N; i++)
    ...
}

gimple-like code (only relevant parts):

  # sum_29 = PHI <sum_11(5), 1(3)>
  # i_28 = PHI <i_12(5), 0(3)>
  ...
  sum_11 = D.1795_8 + sum_29;
  ...

  # sum_21 = PHI <sum_11(4)>
  printf (&"%d"[0], sum_21);


after reduction transformation (only relevant parts):

  # Storing the initial value given by the user.  #

  .paral_data_store.32.sum.27 = 1;

  #pragma omp parallel num_threads(4)

  #pragma omp for schedule(static)

  # The neutral element corresponding to the particular
    reduction's operation, e.g. 0 for PLUS_EXPR,
    1 for MULT_EXPR, etc. replaces the user's initial value.  #

  # sum.27_29 = PHI <sum.27_11, 0>

  sum.27_11 = D.1827_8 + sum.27_29;
  ...

  # Adding this reduction phi is done at create_phi_for_local_result()  #
  # sum.27_56 = PHI <sum.27_11, 0>
  ...

  # Creating the atomic operation is done at
    create_call_for_reduction_1()  #

  #pragma omp atomic_load
  D.1839_59 = *&.paral_data_load.33_51->reduction.23;
  D.1840_60 = sum.27_56 + D.1839_59;
  #pragma omp atomic_store (D.1840_60);
  ...

  # Collecting the result after the join of the threads is done at
    create_loads_for_reductions().
    The value computed by the threads is loaded from the
    shared struct.  #

  .paral_data_load.33_52 = &.paral_data_store.32;
  sum_37 = .paral_data_load.33_52->sum.27;
  sum_43 = D.1795_41 + sum_37;

  # sum_21 = PHI <sum_43, sum_26>
  printf (&"%d"[0], sum_21);
*/
/* Error reporting helper for parloops_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_ploop_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
{
  dump_printf_loc (msg_type, vect_location, "%s%G", msg, stmt);
}

/* DEF_STMT_INFO occurs in a loop that contains a potential reduction
   operation.  Return true if the results of DEF_STMT_INFO are something
   that can be accumulated by such a reduction.  */

static bool
parloops_valid_reduction_input_p (stmt_vec_info def_stmt_info)
{
  return (is_gimple_assign (def_stmt_info->stmt)
	  || is_gimple_call (def_stmt_info->stmt)
	  || STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_induction_def
	  || (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
	      && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def
	      && !is_loop_header_bb_p (gimple_bb (def_stmt_info->stmt))));
}
/* Detect SLP reduction of the form:

   #a1 = phi <a5, a0>
   a2 = operation (a1)
   a3 = operation (a2)
   a4 = operation (a3)
   a5 = operation (a4)

   #a = phi <a5>

   PHI is the reduction phi node (#a1 = phi <a5, a0> above)
   FIRST_STMT is the first reduction stmt in the chain
   (a2 = operation (a1)).

   Return TRUE if a reduction chain was detected.  */

static bool
parloops_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
			   gimple *first_stmt)
{
  class loop *loop = (gimple_bb (phi))->loop_father;
  class loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  gimple *loop_use_stmt = NULL;
  stmt_vec_info use_stmt_info;
  imm_use_iterator imm_iter;
  int nloop_uses, size = 0, n_out_of_loop_uses;

  if (loop != vect_loop)
    return false;

  auto_vec<stmt_vec_info, 8> reduc_chain;
  lhs = PHI_RESULT (phi);
  code = gimple_assign_rhs_code (first_stmt);

      n_out_of_loop_uses = 0;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  /* Check if we got back to the reduction phi.  */
	      loop_use_stmt = use_stmt;

	  if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
	    loop_use_stmt = use_stmt;
	  else
	    n_out_of_loop_uses++;

	  /* There can be either a single use in the loop or two uses in
	     phi nodes.  */
	  if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
	    return false;
	}

      /* We reached a statement with no loop uses.  */

      /* This is a loop exit phi, and we haven't reached the reduction phi.  */
      if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
	return false;

      if (!is_gimple_assign (loop_use_stmt)
	  || code != gimple_assign_rhs_code (loop_use_stmt)
	  || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
	return false;

      /* Insert USE_STMT into reduction chain.  */
      use_stmt_info = loop_info->lookup_stmt (loop_use_stmt);
      reduc_chain.safe_push (use_stmt_info);

      lhs = gimple_assign_lhs (loop_use_stmt);

  if (!found || loop_use_stmt != phi || size < 2)
    return false;

  /* Swap the operands, if needed, to make the reduction operand be the second
     operand.  */
  lhs = PHI_RESULT (phi);
  for (unsigned i = 0; i < reduc_chain.length (); ++i)
    {
      gassign *next_stmt = as_a <gassign *> (reduc_chain[i]->stmt);
      if (gimple_assign_rhs2 (next_stmt) == lhs)
	{
	  tree op = gimple_assign_rhs1 (next_stmt);
	  stmt_vec_info def_stmt_info = loop_info->lookup_def (op);

	  /* Check that the other def is either defined in the loop
	     ("vect_internal_def"), or it's an induction (defined by a
	     loop-header phi-node).  */
	  if (def_stmt_info
	      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
	      && parloops_valid_reduction_input_p (def_stmt_info))
	    {
	      lhs = gimple_assign_lhs (next_stmt);
	      continue;
	    }

	  return false;
	}
      else
	{
	  tree op = gimple_assign_rhs2 (next_stmt);
	  stmt_vec_info def_stmt_info = loop_info->lookup_def (op);

	  /* Check that the other def is either defined in the loop
	     ("vect_internal_def"), or it's an induction (defined by a
	     loop-header phi-node).  */
	  if (def_stmt_info
	      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
	      && parloops_valid_reduction_input_p (def_stmt_info))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "swapping oprnds: %G", next_stmt);

	      swap_ssa_operands (next_stmt,
				 gimple_assign_rhs1_ptr (next_stmt),
				 gimple_assign_rhs2_ptr (next_stmt));
	      update_stmt (next_stmt);
	    }
	  else
	    return false;
	}

      lhs = gimple_assign_lhs (next_stmt);
    }

  /* Build up the actual chain.  */
  for (unsigned i = 0; i < reduc_chain.length () - 1; ++i)
    {
      REDUC_GROUP_FIRST_ELEMENT (reduc_chain[i]) = reduc_chain[0];
      REDUC_GROUP_NEXT_ELEMENT (reduc_chain[i]) = reduc_chain[i+1];
    }
  REDUC_GROUP_FIRST_ELEMENT (reduc_chain.last ()) = reduc_chain[0];
  REDUC_GROUP_NEXT_ELEMENT (reduc_chain.last ()) = NULL;

  /* Save the chain for further analysis in SLP detection.  */
  LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (reduc_chain[0]);
  REDUC_GROUP_SIZE (reduc_chain[0]) = size;

  return true;
}
/* Return true if we need an in-order reduction for operation CODE
   on type TYPE.  NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
   overflow must wrap.  */

static bool
parloops_needs_fold_left_reduction_p (tree type, tree_code code,
				      bool need_wrapping_integral_overflow)
{
  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type))
    return !flag_associative_math;

  if (INTEGRAL_TYPE_P (type))
    {
      if (!operation_no_trapping_overflow (type, code))
	return true;
      if (need_wrapping_integral_overflow
	  && !TYPE_OVERFLOW_WRAPS (type)
	  && operation_can_overflow (code))
	return true;
      return false;
    }

  if (SAT_FIXED_POINT_TYPE_P (type))
    return true;

  return false;
}
/* Function parloops_is_simple_reduction

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

     loop_header:
       a1 = phi < a0, a2 >
       a3 = ...
       a2 = operation (a3, a1)

   or

     a3 = ...
     loop_header:
       a1 = phi < a0, a2 >
       a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation
   4. no uses of a1 outside the loop.

   Conditions 1,4 are tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

     a1 = phi < a0, a2 >
     inner loop (def of a3)
     a2 = phi < a3 >

   (4) Detect condition expressions, ie:
     for (int i = 0; i < N; i++)
       if (a[i] < val)
	 ret_val = a[i];

*/

static stmt_vec_info
parloops_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
			      bool *double_reduc,
			      bool need_wrapping_integral_overflow,
			      enum vect_reduction_type *v_reduc_type)
{
  gphi *phi = as_a <gphi *> (phi_info->stmt);
  class loop *loop = (gimple_bb (phi))->loop_father;
  class loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
  gimple *phi_use_stmt = NULL;
  enum tree_code orig_code, code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree name, type;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  *double_reduc = false;
  *v_reduc_type = TREE_CODE_REDUCTION;

  tree phi_name = PHI_RESULT (phi);
  /* ???  If there are no uses of the PHI result the inner loop reduction
     won't be detected as possibly double-reduction by vectorizable_reduction
     because that tries to walk the PHI arg from the preheader edge which
     can be constant.  See PR60382.  */
  if (has_zero_uses (phi_name))
    return NULL;

  unsigned nphi_def_loop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;

      if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "intermediate value used outside loop.\n");
	  return NULL;
	}

      nphi_def_loop_uses++;
      phi_use_stmt = use_stmt;
    }

  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "reduction: not ssa_name: %T\n", loop_arg);
      return NULL;
    }

  stmt_vec_info def_stmt_info = loop_info->lookup_def (loop_arg);
  if (!def_stmt_info
      || !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt)))
    return NULL;

  if (gassign *def_stmt = dyn_cast <gassign *> (def_stmt_info->stmt))
    name = gimple_assign_lhs (def_stmt);
  else if (gphi *def_stmt = dyn_cast <gphi *> (def_stmt_info->stmt))
    name = PHI_RESULT (def_stmt);
  else
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "reduction: unhandled reduction operation: %G",
			 def_stmt_info->stmt);
      return NULL;
    }

  unsigned nlatch_def_loop_uses = 0;
  auto_vec<gphi *, 3> lcphis;
  bool inner_loop_of_double_reduc = false;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
	nlatch_def_loop_uses++;
      else
	{
	  /* We can have more than one loop-closed PHI.  */
	  lcphis.safe_push (as_a <gphi *> (use_stmt));
	  if (nested_in_vect_loop
	      && (STMT_VINFO_DEF_TYPE (loop_info->lookup_stmt (use_stmt))
		  == vect_double_reduction_def))
	    inner_loop_of_double_reduc = true;
	}
    }

  /* If this isn't a nested cycle or if the nested cycle reduction value
     is used outside of the inner loop we cannot handle uses of the reduction
     value.  */
  if ((!nested_in_vect_loop || inner_loop_of_double_reduc)
      && (nlatch_def_loop_uses > 1 || nphi_def_loop_uses > 1))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "reduction used in loop.\n");
      return NULL;
    }

  /* If DEF_STMT is a phi node itself, we expect it to have a single argument
     defined in the inner loop.  */
  if (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI)
    {
      gphi *def_stmt = as_a <gphi *> (def_stmt_info->stmt);
      op1 = PHI_ARG_DEF (def_stmt, 0);

      if (gimple_phi_num_args (def_stmt) != 1
	  || TREE_CODE (op1) != SSA_NAME)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unsupported phi node definition.\n");
	  return NULL;
	}

      gimple *def1 = SSA_NAME_DEF_STMT (op1);
      if (gimple_bb (def1)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
	  && loop->inner
	  && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
	  && is_gimple_assign (def1)
	  && is_a <gphi *> (phi_use_stmt)
	  && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
	{
	  if (dump_enabled_p ())
	    report_ploop_op (MSG_NOTE, def_stmt,
			     "detected double reduction: ");

	  *double_reduc = true;
	  return def_stmt_info;
	}

      return NULL;
    }

  /* If we are vectorizing an inner reduction we are executing that
     in the original order only in case we are not dealing with a
     double reduction.  */
  bool check_reduction = true;
  if (flow_loop_nested_p (vect_loop, loop))
    {
      unsigned i;
      gphi *lcphi;
      check_reduction = false;
      FOR_EACH_VEC_ELT (lcphis, i, lcphi)
	FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
	  {
	    gimple *use_stmt = USE_STMT (use_p);
	    if (is_gimple_debug (use_stmt))
	      continue;
	    if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
	      check_reduction = true;
	  }
    }

  gassign *def_stmt = as_a <gassign *> (def_stmt_info->stmt);
  code = orig_code = gimple_assign_rhs_code (def_stmt);

  if (nested_in_vect_loop && !check_reduction)
    {
      /* FIXME: Even for non-reductions code generation is funneled
	 through vectorizable_reduction for the stmt defining the
	 PHI latch value.  So we have to artificially restrict ourselves
	 for the supported operations.  */
      switch (get_gimple_rhs_class (code))
	{
	case GIMPLE_BINARY_RHS:
	case GIMPLE_TERNARY_RHS:
	  break;
	default:
	  /* Not supported by vectorizable_reduction.  */
	  if (dump_enabled_p ())
	    report_ploop_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			     "nested cycle: not handled operation: ");
	  return NULL;
	}
      if (dump_enabled_p ())
	report_ploop_op (MSG_NOTE, def_stmt, "detected nested cycle: ");
      return def_stmt_info;
    }

  /* We can handle "res -= x[i]", which is non-associative by
     simply rewriting this into "res += -x[i]".  Avoid changing
     gimple instruction for the first simple tests and only do this
     if we're allowed to change code at all.  */
  if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name)
    code = PLUS_EXPR;

  if (code == COND_EXPR)
    {
      if (! nested_in_vect_loop)
	*v_reduc_type = COND_REDUCTION;

      op3 = gimple_assign_rhs1 (def_stmt);
      if (COMPARISON_CLASS_P (op3))
	{
	  op4 = TREE_OPERAND (op3, 1);
	  op3 = TREE_OPERAND (op3, 0);
	}
      if (op3 == phi_name || op4 == phi_name)
	{
	  if (dump_enabled_p ())
	    report_ploop_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			     "reduction: condition depends on previous"
			     " iteration: ");
	  return NULL;
	}

      op1 = gimple_assign_rhs2 (def_stmt);
      op2 = gimple_assign_rhs3 (def_stmt);
    }
  else if (!commutative_tree_code (code) || !associative_tree_code (code))
    {
      if (dump_enabled_p ())
	report_ploop_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			 "reduction: not commutative/associative: ");
      return NULL;
    }
  else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
    {
      op1 = gimple_assign_rhs1 (def_stmt);
      op2 = gimple_assign_rhs2 (def_stmt);
    }
  else
    {
      if (dump_enabled_p ())
	report_ploop_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			 "reduction: not handled operation: ");
      return NULL;
    }

  if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
    {
      if (dump_enabled_p ())
	report_ploop_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			 "reduction: both uses not ssa_names: ");
      return NULL;
    }

  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if ((TREE_CODE (op1) == SSA_NAME
       && !types_compatible_p (type, TREE_TYPE (op1)))
      || (TREE_CODE (op2) == SSA_NAME
	  && !types_compatible_p (type, TREE_TYPE (op2)))
      || (op3 && TREE_CODE (op3) == SSA_NAME
	  && !types_compatible_p (type, TREE_TYPE (op3)))
      || (op4 && TREE_CODE (op4) == SSA_NAME
	  && !types_compatible_p (type, TREE_TYPE (op4))))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "reduction: multiple types: operation type: "
			   "%T, operands types: %T,%T",
			   type, TREE_TYPE (op1), TREE_TYPE (op2));
	  if (op3)
	    dump_printf (MSG_NOTE, ",%T", TREE_TYPE (op3));
	  if (op4)
	    dump_printf (MSG_NOTE, ",%T", TREE_TYPE (op4));
	  dump_printf (MSG_NOTE, "\n");
	}
      return NULL;
    }

  /* Check whether it's ok to change the order of the computation.
     Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */
  if (check_reduction
      && *v_reduc_type == TREE_CODE_REDUCTION
      && parloops_needs_fold_left_reduction_p (type, code,
					       need_wrapping_integral_overflow))
    *v_reduc_type = FOLD_LEFT_REDUCTION;

  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this optimization
     3) nested cycle (i.e., outer loop vectorization).  */
  stmt_vec_info def1_info = loop_info->lookup_def (op1);
  stmt_vec_info def2_info = loop_info->lookup_def (op2);
  if (code != COND_EXPR && !def1_info && !def2_info)
    {
      if (dump_enabled_p ())
	report_ploop_op (MSG_NOTE, def_stmt,
			 "reduction: no defs for operands: ");
      return NULL;
    }

  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_internal_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2_info
      && def2_info->stmt == phi
      && (code == COND_EXPR
	  || !def1_info
	  || !flow_bb_inside_loop_p (loop, gimple_bb (def1_info->stmt))
	  || parloops_valid_reduction_input_p (def1_info)))
    {
      if (dump_enabled_p ())
	report_ploop_op (MSG_NOTE, def_stmt, "detected reduction: ");
      return def_stmt_info;
    }

  if (def1_info
      && def1_info->stmt == phi
      && (code == COND_EXPR
	  || !def2_info
	  || !flow_bb_inside_loop_p (loop, gimple_bb (def2_info->stmt))
	  || parloops_valid_reduction_input_p (def2_info)))
    {
      if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
	{
	  /* Check if we can swap operands (just for simplicity - so that
	     the rest of the code can assume that the reduction variable
	     is always the last (second) argument).  */
	  if (code == COND_EXPR)
	    {
	      /* Swap cond_expr by inverting the condition.  */
	      tree cond_expr = gimple_assign_rhs1 (def_stmt);
	      enum tree_code invert_code = ERROR_MARK;
	      enum tree_code cond_code = TREE_CODE (cond_expr);

	      if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
		{
		  bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
		  invert_code = invert_tree_comparison (cond_code, honor_nans);
		}
	      if (invert_code != ERROR_MARK)
		{
		  TREE_SET_CODE (cond_expr, invert_code);
		  swap_ssa_operands (def_stmt,
				     gimple_assign_rhs2_ptr (def_stmt),
				     gimple_assign_rhs3_ptr (def_stmt));
		}
	      else
		{
		  if (dump_enabled_p ())
		    report_ploop_op (MSG_NOTE, def_stmt,
				     "detected reduction: cannot swap operands "
				     "for cond_expr");
		  return NULL;
		}
	    }
	  else
	    swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
			       gimple_assign_rhs2_ptr (def_stmt));

	  if (dump_enabled_p ())
	    report_ploop_op (MSG_NOTE, def_stmt,
			     "detected reduction: need to swap operands: ");
	}
      else
	{
	  if (dump_enabled_p ())
	    report_ploop_op (MSG_NOTE, def_stmt, "detected reduction: ");
	}

      return def_stmt_info;
    }

  /* Try to find SLP reduction chain.  */
  if (! nested_in_vect_loop
      && code != COND_EXPR
      && orig_code != MINUS_EXPR
      && parloops_is_slp_reduction (loop_info, phi, def_stmt))
    {
      if (dump_enabled_p ())
	report_ploop_op (MSG_NOTE, def_stmt,
			 "reduction: detected reduction chain: ");

      return def_stmt_info;
    }

  /* Look for the expression computing loop_arg from loop PHI result.  */
  if (check_reduction_path (vect_location, loop, phi, loop_arg, code))
    return def_stmt_info;

  if (dump_enabled_p ())
    report_ploop_op (MSG_MISSED_OPTIMIZATION, def_stmt,
		     "reduction: unknown pattern: ");

  return NULL;
}
/* Wrapper around vect_is_simple_reduction, which will modify code
   in-place if it enables detection of more reductions.  Arguments
   as there.  */

stmt_vec_info
parloops_force_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
				 bool *double_reduc,
				 bool need_wrapping_integral_overflow)
{
  enum vect_reduction_type v_reduc_type;
  stmt_vec_info def_info
    = parloops_is_simple_reduction (loop_info, phi_info, double_reduc,
				    need_wrapping_integral_overflow,
				    &v_reduc_type);

  STMT_VINFO_REDUC_TYPE (phi_info) = v_reduc_type;
  STMT_VINFO_REDUC_DEF (phi_info) = def_info;
  STMT_VINFO_REDUC_TYPE (def_info) = v_reduc_type;
  STMT_VINFO_REDUC_DEF (def_info) = phi_info;
  return def_info;
}
/* Minimal number of iterations of a loop that should be executed in each
   thread.  */
#define MIN_PER_THREAD param_parloops_min_per_thread
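
/* A hypothetical worked example (added, not part of the original sources):
   with param_parloops_min_per_thread == 100 and 4 threads requested, a loop
   must run at least 4 * 100 = 400 iterations before this pass considers
   splitting it, since anything smaller is unlikely to amortize the cost of
   spawning and joining the threads.  */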
/* Element of the hashtable, representing a
   reduction in the current loop.  */
struct reduction_info
{
  gimple *reduc_stmt;		/* reduction statement.  */
  gimple *reduc_phi;		/* The phi node defining the reduction.  */
  enum tree_code reduction_code;/* code for the reduction operation.  */
  unsigned reduc_version;	/* SSA_NAME_VERSION of original reduc_phi
				   result.  */
  gphi *keep_res;		/* The PHI_RESULT of this phi is the resulting value
				   of the reduction variable when exiting the loop.  */
  tree initial_value;		/* The initial value of the reduction var before entering the loop.  */
  tree field;			/* the name of the field in the parloop data structure intended for reduction.  */
  tree reduc_addr;		/* The address of the reduction variable for
				   openacc reductions.  */
  tree init;			/* reduction initialization value.  */
  gphi *new_phi;		/* (helper field) Newly created phi node whose result
				   will be passed to the atomic operation.  Represents
				   the local result each thread computed for the reduction
				   operation.  */
};

/* Reduction info hashtable helpers.  */

struct reduction_hasher : free_ptr_hash <reduction_info>
{
  static inline hashval_t hash (const reduction_info *);
  static inline bool equal (const reduction_info *, const reduction_info *);
};

/* Equality and hash functions for hashtab code.  */

inline bool
reduction_hasher::equal (const reduction_info *a, const reduction_info *b)
{
  return (a->reduc_phi == b->reduc_phi);
}

inline hashval_t
reduction_hasher::hash (const reduction_info *a)
{
  return a->reduc_version;
}

typedef hash_table<reduction_hasher> reduction_info_table_type;


static struct reduction_info *
reduction_phi (reduction_info_table_type *reduction_list, gimple *phi)
{
  struct reduction_info tmpred, *red;

  if (reduction_list->is_empty () || phi == NULL)
    return NULL;

  if (gimple_uid (phi) == (unsigned int)-1
      || gimple_uid (phi) == 0)
    return NULL;

  tmpred.reduc_phi = phi;
  tmpred.reduc_version = gimple_uid (phi);
  red = reduction_list->find (&tmpred);
  gcc_assert (red == NULL || red->reduc_phi == phi);

  return red;
}

/* Element of hashtable of names to copy.  */

struct name_to_copy_elt
{
  unsigned version;	/* The version of the name to copy.  */
  tree new_name;	/* The new name used in the copy.  */
  tree field;		/* The field of the structure used to pass the
			   value.  */
};

/* Name copies hashtable helpers.  */

struct name_to_copy_hasher : free_ptr_hash <name_to_copy_elt>
{
  static inline hashval_t hash (const name_to_copy_elt *);
  static inline bool equal (const name_to_copy_elt *, const name_to_copy_elt *);
};

/* Equality and hash functions for hashtab code.  */

inline bool
name_to_copy_hasher::equal (const name_to_copy_elt *a, const name_to_copy_elt *b)
{
  return a->version == b->version;
}

inline hashval_t
name_to_copy_hasher::hash (const name_to_copy_elt *a)
{
  return (hashval_t) a->version;
}

typedef hash_table<name_to_copy_hasher> name_to_copy_table_type;
/* A transformation matrix, which is a self-contained ROWSIZE x COLSIZE
   matrix.  Rather than use floats, we simply keep a single DENOMINATOR that
   represents the denominator for every element in the matrix.  */
typedef struct lambda_trans_matrix_s
{
  lambda_matrix matrix;
  int rowsize;
  int colsize;
  int denominator;
} *lambda_trans_matrix;
#define LTM_MATRIX(T) ((T)->matrix)
#define LTM_ROWSIZE(T) ((T)->rowsize)
#define LTM_COLSIZE(T) ((T)->colsize)
#define LTM_DENOMINATOR(T) ((T)->denominator)

/* Allocate a new transformation matrix.  */

static lambda_trans_matrix
lambda_trans_matrix_new (int colsize, int rowsize,
			 struct obstack * lambda_obstack)
{
  lambda_trans_matrix ret;

  ret = (lambda_trans_matrix)
    obstack_alloc (lambda_obstack, sizeof (struct lambda_trans_matrix_s));
  LTM_MATRIX (ret) = lambda_matrix_new (rowsize, colsize, lambda_obstack);
  LTM_ROWSIZE (ret) = rowsize;
  LTM_COLSIZE (ret) = colsize;
  LTM_DENOMINATOR (ret) = 1;
  return ret;
}
/* Multiply a vector VEC by a matrix MAT.
   MAT is an M*N matrix, and VEC is a vector with length N.  The result
   is stored in DEST which must be a vector of length M.  */

static void
lambda_matrix_vector_mult (lambda_matrix matrix, int m, int n,
			   lambda_vector vec, lambda_vector dest)
{
  int i, j;

  lambda_vector_clear (dest, m);
  for (i = 0; i < m; i++)
    for (j = 0; j < n; j++)
      dest[i] += matrix[i][j] * vec[j];
}

/* Return true if TRANS is a legal transformation matrix that respects
   the dependence vectors in DISTS and DIRS.  The conservative answer
   is false.

   "Wolfe proves that a unimodular transformation represented by the
   matrix T is legal when applied to a loop nest with a set of
   lexicographically non-negative distance vectors RDG if and only if
   for each vector d in RDG, (T.d >= 0) is lexicographically positive.
   i.e.: if and only if it transforms the lexicographically positive
   distance vectors to lexicographically positive vectors.  Note that
   a unimodular matrix must transform the zero vector (and only it) to
   the zero vector." S.Muchnick.  */

static bool
lambda_transform_legal_p (lambda_trans_matrix trans,
			  int nb_loops,
			  vec<ddr_p> dependence_relations)
{
  unsigned int i, j;
  lambda_vector distres;
  struct data_dependence_relation *ddr;

  gcc_assert (LTM_COLSIZE (trans) == nb_loops
	      && LTM_ROWSIZE (trans) == nb_loops);

  /* When there are no dependences, the transformation is correct.  */
  if (dependence_relations.length () == 0)
    return true;

  ddr = dependence_relations[0];
  if (ddr == NULL)
    return true;

  /* When there is an unknown relation in the dependence_relations, we
     know that it is not worth looking at this loop nest: give up.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    return false;

  distres = lambda_vector_new (nb_loops);

  /* For each distance vector in the dependence graph.  */
  FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
    {
      /* Don't care about relations for which we know that there is no
	 dependence, nor about read-read (aka. input-dependences):
	 these data accesses can happen in any order.  */
      if (DDR_ARE_DEPENDENT (ddr) == chrec_known
	  || (DR_IS_READ (DDR_A (ddr)) && DR_IS_READ (DDR_B (ddr))))
	continue;

      /* Conservatively answer: "this transformation is not valid".  */
      if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
	return false;

      /* If the dependence could not be captured by a distance vector,
	 conservatively answer that the transform is not valid.  */
      if (DDR_NUM_DIST_VECTS (ddr) == 0)
	return false;

      /* Compute trans.dist_vect.  */
      for (j = 0; j < DDR_NUM_DIST_VECTS (ddr); j++)
	{
	  lambda_matrix_vector_mult (LTM_MATRIX (trans), nb_loops, nb_loops,
				     DDR_DIST_VECT (ddr, j), distres);

	  if (!lambda_vector_lexico_pos (distres, nb_loops))
	    return false;
	}
    }
  return true;
}
/* Data dependency analysis.  Returns true if the iterations of LOOP
   are independent of each other (that is, if we can execute them
   in parallel).  */

static bool
loop_parallel_p (class loop *loop, struct obstack * parloop_obstack)
{
  vec<ddr_p> dependence_relations;
  vec<data_reference_p> datarefs;
  lambda_trans_matrix trans;
  bool ret = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Considering loop %d\n", loop->num);
      if (!loop->inner)
	fprintf (dump_file, "loop is innermost\n");
      else
	fprintf (dump_file, "loop NOT innermost\n");
    }

  /* Check for problems with dependences.  If the loop can be reversed,
     the iterations are independent.  */
  auto_vec<loop_p, 3> loop_nest;
  datarefs.create (10);
  dependence_relations.create (100);
  if (! compute_data_dependences_for_loop (loop, true, &loop_nest, &datarefs,
					   &dependence_relations))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  FAILED: cannot analyze data dependencies\n");
      ret = false;
      goto end;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_data_dependence_relations (dump_file, dependence_relations);

  trans = lambda_trans_matrix_new (1, 1, parloop_obstack);
  LTM_MATRIX (trans)[0][0] = -1;

  if (lambda_transform_legal_p (trans, 1, dependence_relations))
    {
      ret = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  SUCCESS: may be parallelized\n");
    }
  else if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "  FAILED: data dependencies exist across iterations\n");

 end:
  free_dependence_relations (dependence_relations);
  free_data_refs (datarefs);

  return ret;
}
/* Return true when LOOP contains basic blocks marked with the
   BB_IRREDUCIBLE_LOOP flag.  */

static inline bool
loop_has_blocks_with_irreducible_flag (class loop *loop)
{
  unsigned i;
  basic_block *bbs = get_loop_body_in_dom_order (loop);
  bool res = false;

  for (i = 0; i < loop->num_nodes; i++)
    if (bbs[i]->flags & BB_IRREDUCIBLE_LOOP)
      {
	res = true;
	break;
      }

  free (bbs);
  return res;
}
/* Assigns the address of OBJ in TYPE to an ssa name, and returns this name.
   The assignment statement is placed on edge ENTRY.  DECL_ADDRESS maps decls
   to their addresses that can be reused.  The address of OBJ is known to
   be invariant in the whole function.  Other needed statements are placed
   right before GSI.  */

static tree
take_address_of (tree obj, tree type, edge entry,
		 int_tree_htab_type *decl_address, gimple_stmt_iterator *gsi)
{
  int uid;
  tree *var_p, name, addr;
  gassign *stmt;
  gimple_seq stmts;

  /* Since the address of OBJ is invariant, the trees may be shared.
     Avoid rewriting unrelated parts of the code.  */
  obj = unshare_expr (obj);
  for (var_p = &obj;
       handled_component_p (*var_p);
       var_p = &TREE_OPERAND (*var_p, 0))
    ;

  /* Canonicalize the access to base on a MEM_REF.  */
  if (DECL_P (*var_p))
    *var_p = build_simple_mem_ref (build_fold_addr_expr (*var_p));

  /* Assign a canonical SSA name to the address of the base decl used
     in the address and share it for all accesses and addresses based
     on it.  */
  uid = DECL_UID (TREE_OPERAND (TREE_OPERAND (*var_p, 0), 0));
  int_tree_map elt;
  elt.uid = uid;
  int_tree_map *slot = decl_address->find_slot (elt, INSERT);
  if (!slot->to)
    {
      if (gsi == NULL)
	return NULL;
      addr = TREE_OPERAND (*var_p, 0);
      const char *obj_name
	= get_name (TREE_OPERAND (TREE_OPERAND (*var_p, 0), 0));
      if (obj_name)
	name = make_temp_ssa_name (TREE_TYPE (addr), NULL, obj_name);
      else
	name = make_ssa_name (TREE_TYPE (addr));
      stmt = gimple_build_assign (name, addr);
      gsi_insert_on_edge_immediate (entry, stmt);

      slot->uid = uid;
      slot->to = name;
    }
  else
    name = slot->to;

  /* Express the address in terms of the canonical SSA name.  */
  TREE_OPERAND (*var_p, 0) = name;
  if (gsi == NULL)
    return build_fold_addr_expr_with_type (obj, type);

  name = force_gimple_operand (build_addr (obj),
			       &stmts, true, NULL_TREE);
  if (!gimple_seq_empty_p (stmts))
    gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);

  if (!useless_type_conversion_p (type, TREE_TYPE (name)))
    {
      name = force_gimple_operand (fold_convert (type, name), &stmts, true,
				   NULL_TREE);
      if (!gimple_seq_empty_p (stmts))
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
    }

  return name;
}
/* Return the SSA name holding the result of the reduction statement STMT:
   the PHI result if STMT is a phi, the assignment lhs otherwise.  */

static tree
reduc_stmt_res (gimple *stmt)
{
  return (gimple_code (stmt) == GIMPLE_PHI
	  ? gimple_phi_result (stmt)
	  : gimple_assign_lhs (stmt));
}

/* Callback for htab_traverse.  Create the initialization statement
   for reduction described in SLOT, and place it at the preheader of
   the loop described in DATA.  */

static int
initialize_reductions (reduction_info **slot, class loop *loop)
{
  tree init;
  tree type, arg;
  edge e;

  struct reduction_info *const reduc = *slot;

  /* Create initialization in preheader:
     reduction_variable = initialization value of reduction.  */

  /* In the phi node at the header, replace the argument coming
     from the preheader with the reduction initialization value.  */

  /* Initialize the reduction.  */
  type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
  init = omp_reduction_init_op (gimple_location (reduc->reduc_stmt),
				reduc->reduction_code, type);
  reduc->init = init;

  /* Replace the argument representing the initialization value
     with the initialization value for the reduction (neutral
     element for the particular operation, e.g. 0 for PLUS_EXPR,
     1 for MULT_EXPR, etc).
     Keep the old value in a new variable "reduction_initial",
     that will be taken in consideration after the parallel
     computing is done.  */

  e = loop_preheader_edge (loop);
  arg = PHI_ARG_DEF_FROM_EDGE (reduc->reduc_phi, e);
  /* Create new variable to hold the initial value.  */

  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE
	   (reduc->reduc_phi, loop_preheader_edge (loop)), init);
  reduc->initial_value = arg;

  return 1;
}

/* Data passed to eliminate_local_variables_1.  */

struct elv_data
{
  struct walk_stmt_info info;
  edge entry;
  int_tree_htab_type *decl_address;
  gimple_stmt_iterator *gsi;
  bool changed;
  bool reset;
};
/* Eliminates references to local variables in *TP out of the single
   entry single exit region starting at DTA->ENTRY.
   DECL_ADDRESS contains addresses of the references that had their
   address taken already.  If the expression is changed, CHANGED is
   set to true.  Callback for walk_tree.  */

static tree
eliminate_local_variables_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct elv_data *const dta = (struct elv_data *) data;
  tree t = *tp, var, addr, addr_type, type, obj;

  if (DECL_P (t))
    {
      *walk_subtrees = 0;

      if (!SSA_VAR_P (t) || DECL_EXTERNAL (t))
	return NULL_TREE;

      type = TREE_TYPE (t);
      addr_type = build_pointer_type (type);
      addr = take_address_of (t, addr_type, dta->entry, dta->decl_address,
			      dta->gsi);
      if (dta->gsi == NULL && addr == NULL_TREE)
	{
	  dta->reset = true;
	  return NULL_TREE;
	}

      *tp = build_simple_mem_ref (addr);

      dta->changed = true;
      return NULL_TREE;
    }

  if (TREE_CODE (t) == ADDR_EXPR)
    {
      /* ADDR_EXPR may appear in two contexts:
	 -- as a gimple operand, when the address taken is a function invariant
	 -- as gimple rhs, when the resulting address is not a function
	    invariant
	 We do not need to do anything special in the latter case (the base of
	 the memory reference whose address is taken may be replaced in the
	 DECL_P case).  The former case is more complicated, as we need to
	 ensure that the new address is still a gimple operand.  Thus, it
	 is not sufficient to replace just the base of the memory reference --
	 we need to move the whole computation of the address out of the
	 region.  */
      if (!is_gimple_val (t))
	return NULL_TREE;

      obj = TREE_OPERAND (t, 0);
      var = get_base_address (obj);
      if (!var || !SSA_VAR_P (var) || DECL_EXTERNAL (var))
	return NULL_TREE;

      addr_type = TREE_TYPE (t);
      addr = take_address_of (obj, addr_type, dta->entry, dta->decl_address,
			      dta->gsi);
      if (dta->gsi == NULL && addr == NULL_TREE)
	{
	  dta->reset = true;
	  return NULL_TREE;
	}
      *tp = addr;

      dta->changed = true;
    }

  return NULL_TREE;
}
1405 entry single exit region starting at ENTRY. DECL_ADDRESS contains
1406 addresses of the references that had their address taken
1410 eliminate_local_variables_stmt (edge entry
, gimple_stmt_iterator
*gsi
,
1411 int_tree_htab_type
*decl_address
)
1413 struct elv_data dta
;
1414 gimple
*stmt
= gsi_stmt (*gsi
);
1416 memset (&dta
.info
, '\0', sizeof (dta
.info
));
1418 dta
.decl_address
= decl_address
;
1419 dta
.changed
= false;
1422 if (gimple_debug_bind_p (stmt
))
1425 walk_tree (gimple_debug_bind_get_value_ptr (stmt
),
1426 eliminate_local_variables_1
, &dta
.info
, NULL
);
1429 gimple_debug_bind_reset_value (stmt
);
1433 else if (gimple_clobber_p (stmt
))
1435 unlink_stmt_vdef (stmt
);
1436 stmt
= gimple_build_nop ();
1437 gsi_replace (gsi
, stmt
, false);
1443 walk_gimple_op (stmt
, eliminate_local_variables_1
, &dta
.info
);
/* Eliminates the references to local variables from the single entry
   single exit region between the ENTRY and EXIT edges.

   This includes:
   1) Taking address of a local variable -- these are moved out of the
      region (and temporary variable is created to hold the address if
      necessary).
   2) Dereferencing a local variable -- these are replaced with indirect
      references.  */

static void
eliminate_local_variables (edge entry, edge exit)
{
  basic_block bb;
  auto_vec<basic_block, 3> body;
  unsigned i;
  gimple_stmt_iterator gsi;
  bool has_debug_stmt = false;
  int_tree_htab_type decl_address (10);
  basic_block entry_bb = entry->src;
  basic_block exit_bb = exit->dest;

  gather_blocks_in_sese_region (entry_bb, exit_bb, &body);

  FOR_EACH_VEC_ELT (body, i, bb)
    if (bb != entry_bb && bb != exit_bb)
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	if (is_gimple_debug (gsi_stmt (gsi)))
	  {
	    if (gimple_debug_bind_p (gsi_stmt (gsi)))
	      has_debug_stmt = true;
	  }
	else
	  eliminate_local_variables_stmt (entry, &gsi, &decl_address);

  if (has_debug_stmt)
    FOR_EACH_VEC_ELT (body, i, bb)
      if (bb != entry_bb && bb != exit_bb)
	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	  if (gimple_debug_bind_p (gsi_stmt (gsi)))
	    eliminate_local_variables_stmt (entry, &gsi, &decl_address);
}
/* Returns true if expression EXPR is not defined between ENTRY and
   EXIT, i.e. if all its operands are defined outside of the region.  */

static bool
expr_invariant_in_region_p (edge entry, edge exit, tree expr)
{
  basic_block entry_bb = entry->src;
  basic_block exit_bb = exit->dest;
  basic_block def_bb;

  if (is_gimple_min_invariant (expr))
    return true;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
      if (def_bb
	  && dominated_by_p (CDI_DOMINATORS, def_bb, entry_bb)
	  && !dominated_by_p (CDI_DOMINATORS, def_bb, exit_bb))
	return false;

      return true;
    }

  return false;
}
/* If COPY_NAME_P is true, creates and returns a duplicate of NAME.
   The copies are stored to NAME_COPIES, if NAME was already duplicated,
   its duplicate stored in NAME_COPIES is returned.

   Regardless of COPY_NAME_P, the decl used as a base of the ssa name is also
   duplicated, storing the copies in DECL_COPIES.  */

static tree
separate_decls_in_region_name (tree name, name_to_copy_table_type *name_copies,
			       int_tree_htab_type *decl_copies,
			       bool copy_name_p)
{
  tree copy, var, var_copy;
  unsigned idx, uid, nuid;
  struct int_tree_map ielt;
  struct name_to_copy_elt elt, *nelt;
  name_to_copy_elt **slot;
  int_tree_map *dslot;

  if (TREE_CODE (name) != SSA_NAME)
    return name;

  idx = SSA_NAME_VERSION (name);
  elt.version = idx;
  slot = name_copies->find_slot_with_hash (&elt, idx,
					   copy_name_p ? INSERT : NO_INSERT);
  if (slot && *slot)
    return (*slot)->new_name;

  if (copy_name_p)
    {
      copy = duplicate_ssa_name (name, NULL);
      nelt = XNEW (struct name_to_copy_elt);
      nelt->version = idx;
      nelt->new_name = copy;
      nelt->field = NULL_TREE;
      *slot = nelt;
    }
  else
    copy = name;

  var = SSA_NAME_VAR (name);
  if (!var)
    return copy;

  uid = DECL_UID (var);
  ielt.uid = uid;
  dslot = decl_copies->find_slot_with_hash (ielt, uid, INSERT);
  if (!dslot->to)
    {
      var_copy = create_tmp_var (TREE_TYPE (var), get_name (var));
      DECL_GIMPLE_REG_P (var_copy) = DECL_GIMPLE_REG_P (var);
      dslot->uid = uid;
      dslot->to = var_copy;

      /* Ensure that when we meet this decl next time, we won't duplicate
	 it again.  */
      nuid = DECL_UID (var_copy);
      ielt.uid = nuid;
      dslot = decl_copies->find_slot_with_hash (ielt, nuid, INSERT);
      gcc_assert (!dslot->to);
      dslot->uid = nuid;
      dslot->to = var_copy;
    }
  else
    var_copy = dslot->to;

  replace_ssa_name_symbol (copy, var_copy);
  return copy;
}
/* Finds the ssa names used in STMT that are defined outside the
   region between ENTRY and EXIT and replaces such ssa names with
   their duplicates.  The duplicates are stored to NAME_COPIES.  Base
   decls of all ssa names used in STMT (including those defined in
   LOOP) are replaced with the new temporary variables; the
   replacement decls are stored in DECL_COPIES.  */

static void
separate_decls_in_region_stmt (edge entry, edge exit, gimple *stmt,
			       name_to_copy_table_type *name_copies,
			       int_tree_htab_type *decl_copies)
{
  use_operand_p use;
  def_operand_p def;
  ssa_op_iter oi;
  tree name, copy;
  bool copy_name_p;

  FOR_EACH_PHI_OR_STMT_DEF (def, stmt, oi, SSA_OP_DEF)
    {
      name = DEF_FROM_PTR (def);
      gcc_assert (TREE_CODE (name) == SSA_NAME);
      copy = separate_decls_in_region_name (name, name_copies, decl_copies,
					    false);
      gcc_assert (copy == name);
    }

  FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE)
    {
      name = USE_FROM_PTR (use);
      if (TREE_CODE (name) != SSA_NAME)
	continue;

      copy_name_p = expr_invariant_in_region_p (entry, exit, name);
      copy = separate_decls_in_region_name (name, name_copies, decl_copies,
					    copy_name_p);
      SET_USE (use, copy);
    }
}
/* Finds the ssa names used in STMT that are defined outside the
   region between ENTRY and EXIT and replaces such ssa names with
   their duplicates.  The duplicates are stored to NAME_COPIES.  Base
   decls of all ssa names used in STMT (including those defined in
   LOOP) are replaced with the new temporary variables; the
   replacement decls are stored in DECL_COPIES.  */

static bool
separate_decls_in_region_debug (gimple *stmt,
				name_to_copy_table_type *name_copies,
				int_tree_htab_type *decl_copies)
{
  use_operand_p use;
  ssa_op_iter oi;
  tree var, name;
  struct int_tree_map ielt;
  struct name_to_copy_elt elt;
  name_to_copy_elt **slot;
  int_tree_map *dslot;

  if (gimple_debug_bind_p (stmt))
    var = gimple_debug_bind_get_var (stmt);
  else if (gimple_debug_source_bind_p (stmt))
    var = gimple_debug_source_bind_get_var (stmt);
  else
    return true;

  if (TREE_CODE (var) == DEBUG_EXPR_DECL || TREE_CODE (var) == LABEL_DECL)
    return true;

  gcc_assert (DECL_P (var) && SSA_VAR_P (var));
  ielt.uid = DECL_UID (var);
  dslot = decl_copies->find_slot_with_hash (ielt, ielt.uid, NO_INSERT);
  if (!dslot)
    return true;

  if (gimple_debug_bind_p (stmt))
    gimple_debug_bind_set_var (stmt, dslot->to);
  else if (gimple_debug_source_bind_p (stmt))
    gimple_debug_source_bind_set_var (stmt, dslot->to);

  FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE)
    {
      name = USE_FROM_PTR (use);
      if (TREE_CODE (name) != SSA_NAME)
	continue;

      elt.version = SSA_NAME_VERSION (name);
      slot = name_copies->find_slot_with_hash (&elt, elt.version, NO_INSERT);
      if (!slot)
	{
	  gimple_debug_bind_reset_value (stmt);
	  update_stmt (stmt);
	  break;
	}

      SET_USE (use, (*slot)->new_name);
    }

  return false;
}
/* Callback for htab_traverse.  Adds a field corresponding to the reduction
   specified in SLOT.  The type is passed in DATA.  */

static int
add_field_for_reduction (reduction_info **slot, tree type)
{
  struct reduction_info *const red = *slot;
  tree var = reduc_stmt_res (red->reduc_stmt);
  tree field = build_decl (gimple_location (red->reduc_stmt), FIELD_DECL,
			   SSA_NAME_IDENTIFIER (var), TREE_TYPE (var));

  insert_field_into_struct (type, field);

  red->field = field;

  return 1;
}

/* Callback for htab_traverse.  Adds a field corresponding to a ssa name
   described in SLOT.  The type is passed in DATA.  */

static int
add_field_for_name (name_to_copy_elt **slot, tree type)
{
  struct name_to_copy_elt *const elt = *slot;
  tree name = ssa_name (elt->version);
  tree field = build_decl (UNKNOWN_LOCATION,
			   FIELD_DECL, SSA_NAME_IDENTIFIER (name),
			   TREE_TYPE (name));

  insert_field_into_struct (type, field);
  elt->field = field;

  return 1;
}
/* Callback for htab_traverse.  A local result is the intermediate result
   computed by a single
   thread, or the initial value in case no iteration was executed.
   This function creates a phi node reflecting these values.
   The phi's result will be stored in NEW_PHI field of the
   reduction's data structure.  */

static int
create_phi_for_local_result (reduction_info **slot, class loop *loop)
{
  struct reduction_info *const reduc = *slot;
  edge e;
  gphi *new_phi;
  basic_block store_bb, continue_bb;
  tree local_res;
  location_t locus;

  /* STORE_BB is the block where the phi
     should be stored.  It is the destination of the loop exit.
     (Find the fallthru edge from GIMPLE_OMP_CONTINUE).  */
  continue_bb = single_pred (loop->latch);
  store_bb = FALLTHRU_EDGE (continue_bb)->dest;

  /* STORE_BB has two predecessors.  One coming from the loop
     (the reduction's result is computed at the loop),
     and another coming from a block preceding the loop,
     when no iterations
     are executed (the initial value should be taken).  */
  if (EDGE_PRED (store_bb, 0) == FALLTHRU_EDGE (continue_bb))
    e = EDGE_PRED (store_bb, 1);
  else
    e = EDGE_PRED (store_bb, 0);
  tree lhs = reduc_stmt_res (reduc->reduc_stmt);
  local_res = copy_ssa_name (lhs);
  locus = gimple_location (reduc->reduc_stmt);
  new_phi = create_phi_node (local_res, store_bb);
  add_phi_arg (new_phi, reduc->init, e, locus);
  add_phi_arg (new_phi, lhs, FALLTHRU_EDGE (continue_bb), locus);
  reduc->new_phi = new_phi;

  return 1;
}
struct clsn_data
{
  tree store;
  tree load;

  basic_block store_bb;
  basic_block load_bb;
};

/* Callback for htab_traverse.  Create an atomic instruction for the
   reduction described in SLOT.
   DATA annotates the place in memory the atomic operation relates to,
   and the basic block it needs to be generated in.  */

static int
create_call_for_reduction_1 (reduction_info **slot, struct clsn_data *clsn_data)
{
  struct reduction_info *const reduc = *slot;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
  tree load_struct;
  basic_block bb;
  basic_block new_bb;
  edge e;
  tree t, addr, ref, x;
  tree tmp_load, name;
  gimple *load;

  if (reduc->reduc_addr == NULL_TREE)
    {
      load_struct = build_simple_mem_ref (clsn_data->load);
      t = build3 (COMPONENT_REF, type, load_struct, reduc->field, NULL_TREE);

      addr = build_addr (t);
    }
  else
    {
      /* Set the address for the atomic store.  */
      addr = reduc->reduc_addr;

      /* Remove the non-atomic store '*addr = sum'.  */
      tree res = PHI_RESULT (reduc->keep_res);
      use_operand_p use_p;
      gimple *stmt;
      bool single_use_p = single_imm_use (res, &use_p, &stmt);
      gcc_assert (single_use_p);
      replace_uses_by (gimple_vdef (stmt),
		       gimple_vuse (stmt));
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gsi_remove (&gsi, true);
    }

  /* Create phi node.  */
  bb = clsn_data->load_bb;

  gsi = gsi_last_bb (bb);
  e = split_block (bb, gsi_stmt (gsi));
  new_bb = e->dest;

  tmp_load = create_tmp_var (TREE_TYPE (TREE_TYPE (addr)));
  tmp_load = make_ssa_name (tmp_load);
  load = gimple_build_omp_atomic_load (tmp_load, addr,
				       OMP_MEMORY_ORDER_RELAXED);
  SSA_NAME_DEF_STMT (tmp_load) = load;
  gsi = gsi_start_bb (new_bb);
  gsi_insert_after (&gsi, load, GSI_NEW_STMT);

  e = split_block (new_bb, load);
  new_bb = e->dest;
  gsi = gsi_start_bb (new_bb);
  ref = tmp_load;
  x = fold_build2 (reduc->reduction_code,
		   TREE_TYPE (PHI_RESULT (reduc->new_phi)), ref,
		   PHI_RESULT (reduc->new_phi));

  name = force_gimple_operand_gsi (&gsi, x, true, NULL_TREE, true,
				   GSI_CONTINUE_LINKING);

  gimple *store = gimple_build_omp_atomic_store (name,
						 OMP_MEMORY_ORDER_RELAXED);
  gsi_insert_after (&gsi, store, GSI_NEW_STMT);

  return 1;
}
/* Create the atomic operation at the join point of the threads.
   REDUCTION_LIST describes the reductions in the LOOP.
   LD_ST_DATA describes the shared data structure where
   shared data is stored in and loaded from.  */
static void
create_call_for_reduction (class loop *loop,
			   reduction_info_table_type *reduction_list,
			   struct clsn_data *ld_st_data)
{
  reduction_list->traverse <class loop *, create_phi_for_local_result> (loop);
  /* Find the fallthru edge from GIMPLE_OMP_CONTINUE.  */
  basic_block continue_bb = single_pred (loop->latch);
  ld_st_data->load_bb = FALLTHRU_EDGE (continue_bb)->dest;
  reduction_list
    ->traverse <struct clsn_data *, create_call_for_reduction_1> (ld_st_data);
}
/* Callback for htab_traverse.  Loads the final reduction value at the
   join point of all threads, and inserts it in the right place.  */

static int
create_loads_for_reductions (reduction_info **slot, struct clsn_data *clsn_data)
{
  struct reduction_info *const red = *slot;
  gimple *stmt;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (reduc_stmt_res (red->reduc_stmt));
  tree load_struct;
  tree name;
  tree x;

  /* If there's no exit phi, the result of the reduction is unused.  */
  if (red->keep_res == NULL)
    return 1;

  gsi = gsi_after_labels (clsn_data->load_bb);
  load_struct = build_simple_mem_ref (clsn_data->load);
  load_struct = build3 (COMPONENT_REF, type, load_struct, red->field,
			NULL_TREE);

  x = load_struct;
  name = PHI_RESULT (red->keep_res);
  stmt = gimple_build_assign (name, x);

  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  for (gsi = gsi_start_phis (gimple_bb (red->keep_res));
       !gsi_end_p (gsi); gsi_next (&gsi))
    if (gsi_stmt (gsi) == red->keep_res)
      {
	remove_phi_node (&gsi, false);
	return 1;
      }
  gcc_unreachable ();
}
/* Load the reduction result that was stored in LD_ST_DATA.
   REDUCTION_LIST describes the list of reductions that the
   loads should be generated for.  */
static void
create_final_loads_for_reduction (reduction_info_table_type *reduction_list,
				  struct clsn_data *ld_st_data)
{
  gimple_stmt_iterator gsi;
  tree t;
  gimple *stmt;

  gsi = gsi_after_labels (ld_st_data->load_bb);
  t = build_fold_addr_expr (ld_st_data->store);
  stmt = gimple_build_assign (ld_st_data->load, t);

  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);

  reduction_list
    ->traverse <struct clsn_data *, create_loads_for_reductions> (ld_st_data);
}
/* Callback for htab_traverse.  Store the neutral value for the
   particular reduction's operation, e.g. 0 for PLUS_EXPR,
   1 for MULT_EXPR, etc. into the reduction field.
   The reduction is specified in SLOT.  The store information is
   passed in DATA.  */

static int
create_stores_for_reduction (reduction_info **slot, struct clsn_data *clsn_data)
{
  struct reduction_info *const red = *slot;
  tree t;
  gimple *stmt;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (reduc_stmt_res (red->reduc_stmt));

  gsi = gsi_last_bb (clsn_data->store_bb);
  t = build3 (COMPONENT_REF, type, clsn_data->store, red->field, NULL_TREE);
  stmt = gimple_build_assign (t, red->initial_value);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  return 1;
}
/* Callback for htab_traverse.  Creates loads to a field of LOAD in LOAD_BB and
   store to a field of STORE in STORE_BB for the ssa name and its duplicate
   specified in SLOT.  */

static int
create_loads_and_stores_for_name (name_to_copy_elt **slot,
				  struct clsn_data *clsn_data)
{
  struct name_to_copy_elt *const elt = *slot;
  tree t;
  gimple *stmt;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (elt->new_name);
  tree load_struct;

  gsi = gsi_last_bb (clsn_data->store_bb);
  t = build3 (COMPONENT_REF, type, clsn_data->store, elt->field, NULL_TREE);
  stmt = gimple_build_assign (t, ssa_name (elt->version));
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  gsi = gsi_last_bb (clsn_data->load_bb);
  load_struct = build_simple_mem_ref (clsn_data->load);
  t = build3 (COMPONENT_REF, type, load_struct, elt->field, NULL_TREE);
  stmt = gimple_build_assign (elt->new_name, t);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  return 1;
}
/* Moves all the variables used in LOOP and defined outside of it (including
   the initial values of loop phi nodes, and *PER_THREAD if it is a ssa
   name) to a structure created for this purpose.  The code

   while (1)
     {
       use (a);
       use (b);
     }

   is transformed this way:

   bb0:
   old.a = a;
   old.b = b;

   bb1:
   a' = new->a;
   b' = new->b;
   while (1)
     {
       use (a');
       use (b');
     }

   `old' is stored to *ARG_STRUCT and `new' is stored to NEW_ARG_STRUCT.  The
   pointer `new' is intentionally not initialized (the loop will be split to a
   separate function later, and `new' will be initialized from its arguments).
   LD_ST_DATA holds information about the shared data structure used to pass
   information among the threads.  It is initialized here, and
   gen_parallel_loop will pass it to create_call_for_reduction that
   needs this information.  REDUCTION_LIST describes the reductions
   in LOOP.  */

static void
separate_decls_in_region (edge entry, edge exit,
			  reduction_info_table_type *reduction_list,
			  tree *arg_struct, tree *new_arg_struct,
			  struct clsn_data *ld_st_data)
{
  basic_block bb1 = split_edge (entry);
  basic_block bb0 = single_pred (bb1);
  name_to_copy_table_type name_copies (10);
  int_tree_htab_type decl_copies (10);
  unsigned i;
  tree type, type_name, nvar;
  gimple_stmt_iterator gsi;
  struct clsn_data clsn_data;
  auto_vec<basic_block, 3> body;
  basic_block bb;
  basic_block entry_bb = bb1;
  basic_block exit_bb = exit->dest;
  bool has_debug_stmt = false;

  entry = single_succ_edge (entry_bb);
  gather_blocks_in_sese_region (entry_bb, exit_bb, &body);

  FOR_EACH_VEC_ELT (body, i, bb)
    {
      if (bb != entry_bb && bb != exit_bb)
	{
	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    separate_decls_in_region_stmt (entry, exit, gsi_stmt (gsi),
					   &name_copies, &decl_copies);

	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple *stmt = gsi_stmt (gsi);

	      if (is_gimple_debug (stmt))
		has_debug_stmt = true;
	      else
		separate_decls_in_region_stmt (entry, exit, stmt,
					       &name_copies, &decl_copies);
	    }
	}
    }

  /* Now process debug bind stmts.  We must not create decls while
     processing debug stmts, so we defer their processing so as to
     make sure we will have debug info for as many variables as
     possible (all of those that were dealt with in the loop above),
     and discard those for which we know there's nothing we can
     do.  */
  if (has_debug_stmt)
    FOR_EACH_VEC_ELT (body, i, bb)
      if (bb != entry_bb && bb != exit_bb)
	{
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
	    {
	      gimple *stmt = gsi_stmt (gsi);

	      if (is_gimple_debug (stmt))
		{
		  if (separate_decls_in_region_debug (stmt, &name_copies,
						      &decl_copies))
		    {
		      gsi_remove (&gsi, true);
		      continue;
		    }
		}

	      gsi_next (&gsi);
	    }
	}

  if (name_copies.is_empty () && reduction_list->is_empty ())
    {
      /* It may happen that there is nothing to copy (if there are only
	 loop carried and external variables in the loop).  */
      *arg_struct = NULL;
      *new_arg_struct = NULL;
    }
  else
    {
      /* Create the type for the structure to store the ssa names to.  */
      type = lang_hooks.types.make_type (RECORD_TYPE);
      type_name = build_decl (UNKNOWN_LOCATION,
			      TYPE_DECL, create_tmp_var_name (".paral_data"),
			      type);
      TYPE_NAME (type) = type_name;

      name_copies.traverse <tree, add_field_for_name> (type);
      if (reduction_list && !reduction_list->is_empty ())
	{
	  /* Create the fields for reductions.  */
	  reduction_list->traverse <tree, add_field_for_reduction> (type);
	}
      layout_type (type);

      /* Create the loads and stores.  */
      *arg_struct = create_tmp_var (type, ".paral_data_store");
      nvar = create_tmp_var (build_pointer_type (type), ".paral_data_load");
      *new_arg_struct = make_ssa_name (nvar);

      ld_st_data->store = *arg_struct;
      ld_st_data->load = *new_arg_struct;
      ld_st_data->store_bb = bb0;
      ld_st_data->load_bb = bb1;

      name_copies
	.traverse <struct clsn_data *, create_loads_and_stores_for_name>
		  (ld_st_data);

      /* Load the calculation from memory (after the join of the threads).  */

      if (reduction_list && !reduction_list->is_empty ())
	{
	  reduction_list
	    ->traverse <struct clsn_data *, create_stores_for_reduction>
			(ld_st_data);
	  clsn_data.load = make_ssa_name (nvar);
	  clsn_data.load_bb = exit->dest;
	  clsn_data.store = ld_st_data->store;
	  create_final_loads_for_reduction (reduction_list, &clsn_data);
	}
    }
}
2150 /* Returns true if FN was created to run in parallel. */
2153 parallelized_function_p (tree fndecl)
2155 cgraph_node *node = cgraph_node::get (fndecl);
2156 gcc_assert (node != NULL);
2157 return node->parallelized_function;
2160 /* Creates and returns an empty function that will receive the body of
2161 a parallelized loop. */
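/* Illustration (a hedged sketch, not the tree-building code itself): the
   function created here has, at the source level, roughly this shape:

     static void
     foo_loopfn (void *paral_data_param)   // real name uses "%s.$loopfn"
     {
       // the loop body is moved here later; paral_data_param points at
       // the .paral_data_store structure built by separate_decls_in_region
     }

   It is static, artificial, uninlinable, and takes a single pointer
   argument, matching the build_function_type_list call below.  */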
2164 create_loop_fn (location_t loc)
2168 tree decl, type, name, t;
2169 struct function *act_cfun = cfun;
2170 static unsigned loopfn_num;
2172 loc = LOCATION_LOCUS (loc);
2173 snprintf (buf, 100, "%s.$loopfn", current_function_name ());
2174 ASM_FORMAT_PRIVATE_NAME (tname, buf, loopfn_num++);
2175 clean_symbol_name (tname);
2176 name = get_identifier (tname);
2177 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
2179 decl = build_decl (loc, FUNCTION_DECL, name, type);
2180 TREE_STATIC (decl) = 1;
2181 TREE_USED (decl) = 1;
2182 DECL_ARTIFICIAL (decl) = 1;
2183 DECL_IGNORED_P (decl) = 0;
2184 TREE_PUBLIC (decl) = 0;
2185 DECL_UNINLINABLE (decl) = 1;
2186 DECL_EXTERNAL (decl) = 0;
2187 DECL_CONTEXT (decl) = NULL_TREE;
2188 DECL_INITIAL (decl) = make_node (BLOCK);
2189 BLOCK_SUPERCONTEXT (DECL_INITIAL (decl)) = decl;
2191 t = build_decl (loc, RESULT_DECL, NULL_TREE, void_type_node);
2192 DECL_ARTIFICIAL (t) = 1;
2193 DECL_IGNORED_P (t) = 1;
2194 DECL_RESULT (decl) = t;
2196 t = build_decl (loc, PARM_DECL, get_identifier (".paral_data_param"),
2197 ptr_type_node);
2198 DECL_ARTIFICIAL (t) = 1;
2199 DECL_ARG_TYPE (t) = ptr_type_node;
2200 DECL_CONTEXT (t) = decl;
2202 DECL_ARGUMENTS (decl) = t;
2204 allocate_struct_function (decl, false);
2205 DECL_STRUCT_FUNCTION (decl)->last_clique = act_cfun->last_clique;
2207 /* The call to allocate_struct_function clobbers CFUN, so we need to restore
2208 it.  */
2209 set_cfun (act_cfun);
2214 /* Replace uses of NAME by VAL in block BB. */
2217 replace_uses_in_bb_by (tree name, tree val, basic_block bb)
2220 imm_use_iterator imm_iter;
2222 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, name)
2224 if (gimple_bb (use_stmt) != bb)
2225 continue;
2227 use_operand_p use_p;
2228 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
2229 SET_USE (use_p, val);
2233 /* Do transformation from:
2240 ivtmp_a = PHI <ivtmp_init (preheader), ivtmp_b (latch)>
2241 sum_a = PHI <sum_init (preheader), sum_b (latch)>
2245 sum_b = sum_a + sum_update
2253 ivtmp_b = ivtmp_a + 1;
2257 sum_z = PHI <sum_b (cond[1]), ...>
2259 [1] Where <bb cond> is single_pred (bb latch); In the simplest case,
2269 ivtmp_a = PHI <ivtmp_c (latch)>
2270 sum_a = PHI <sum_c (latch)>
2274 sum_b = sum_a + sum_update
2279 ivtmp_c = PHI <ivtmp_init (preheader), ivtmp_b (latch)>
2280 sum_c = PHI <sum_init (preheader), sum_b (latch)>
2281 if (ivtmp_c < n + 1)
2287 ivtmp_b = ivtmp_a + 1;
2291 sum_y = PHI <sum_c (newheader)>
2294 sum_z = PHI <sum_y (newexit), ...>
2297 In unified diff format:
2302 + goto <bb newheader>
2305 - ivtmp_a = PHI <ivtmp_init (preheader), ivtmp_b (latch)>
2306 - sum_a = PHI <sum_init (preheader), sum_b (latch)>
2307 + ivtmp_a = PHI <ivtmp_c (latch)>
2308 + sum_a = PHI <sum_c (latch)>
2312 sum_b = sum_a + sum_update
2319 + ivtmp_c = PHI <ivtmp_init (preheader), ivtmp_b (latch)>
2320 + sum_c = PHI <sum_init (preheader), sum_b (latch)>
2321 + if (ivtmp_c < n + 1)
2327 ivtmp_b = ivtmp_a + 1;
2329 + goto <bb newheader>
2332 + sum_y = PHI <sum_c (newheader)>
2335 - sum_z = PHI <sum_b (cond[1]), ...>
2336 + sum_z = PHI <sum_y (newexit), ...>
2338 Note: the example does not show any virtual phis, but these are handled more
2339 or less as reductions.
2342 Moves the exit condition of LOOP to the beginning of its header.
2343 REDUCTION_LIST describes the reductions in LOOP.  BOUND is the new loop bound.  */
2347 transform_to_exit_first_loop_alt (class loop *loop,
2348 reduction_info_table_type *reduction_list,
2349 tree bound)
2351 basic_block header = loop->header;
2352 basic_block latch = loop->latch;
2353 edge exit = single_dom_exit (loop);
2354 basic_block exit_block = exit->dest;
2355 gcond *cond_stmt = as_a <gcond *> (last_stmt (exit->src));
2356 tree control = gimple_cond_lhs (cond_stmt);
2359 /* Rewriting virtuals into loop-closed ssa normal form makes this
2360 transformation simpler.  It also ensures that the virtuals are in
2361 loop-closed ssa normal form after the transformation, which is required by
2362 create_parallel_loop.  */
2363 rewrite_virtuals_into_loop_closed_ssa (loop);
2365 /* Create the new_header block.  */
2366 basic_block new_header = split_block_before_cond_jump (exit->src);
2367 edge edge_at_split = single_pred_edge (new_header);
2369 /* Redirect entry edge to new_header.  */
2370 edge entry = loop_preheader_edge (loop);
2371 e = redirect_edge_and_branch (entry, new_header);
2372 gcc_assert (e == entry);
2374 /* Redirect post_inc_edge to new_header.  */
2375 edge post_inc_edge = single_succ_edge (latch);
2376 e = redirect_edge_and_branch (post_inc_edge, new_header);
2377 gcc_assert (e == post_inc_edge);
2379 /* Redirect post_cond_edge to header.  */
2380 edge post_cond_edge = single_pred_edge (latch);
2381 e = redirect_edge_and_branch (post_cond_edge, header);
2382 gcc_assert (e == post_cond_edge);
2384 /* Redirect edge_at_split to latch.  */
2385 e = redirect_edge_and_branch (edge_at_split, latch);
2386 gcc_assert (e == edge_at_split);
2388 /* Set the new loop bound.  */
2389 gimple_cond_set_rhs (cond_stmt, bound);
2390 update_stmt (cond_stmt);
2392 /* Repair the ssa.  */
2393 vec<edge_var_map> *v = redirect_edge_var_map_vector (post_inc_edge);
2397 for (gsi = gsi_start_phis (header), i = 0;
2398 !gsi_end_p (gsi) && v->iterate (i, &vm);
2399 gsi_next (&gsi), i++)
2401 gphi *phi = gsi.phi ();
2402 tree res_a = PHI_RESULT (phi);
2404 /* Create new phi.  */
2405 tree res_c = copy_ssa_name (res_a, phi);
2406 gphi *nphi = create_phi_node (res_c, new_header);
2408 /* Replace ivtmp_a with ivtmp_c in condition 'if (ivtmp_a < n)'.  */
2409 replace_uses_in_bb_by (res_a, res_c, new_header);
2411 /* Replace ivtmp/sum_b with ivtmp/sum_c in header phi.  */
2412 add_phi_arg (phi, res_c, post_cond_edge, UNKNOWN_LOCATION);
2414 /* Replace sum_b with sum_c in exit phi.  */
2415 tree res_b = redirect_edge_var_map_def (vm);
2416 replace_uses_in_bb_by (res_b, res_c, exit_block);
2418 struct reduction_info *red = reduction_phi (reduction_list, phi);
2419 gcc_assert (virtual_operand_p (res_a)
2425 /* Register the new reduction phi.  */
2426 red->reduc_phi = nphi;
2427 gimple_set_uid (red->reduc_phi, red->reduc_version);
2430 gcc_assert (gsi_end_p (gsi) && !v->iterate (i, &vm));
2432 /* Set the preheader argument of the new phis to ivtmp/sum_init.  */
2433 flush_pending_stmts (entry);
2435 /* Set the latch arguments of the new phis to ivtmp/sum_b.  */
2436 flush_pending_stmts (post_inc_edge);
2439 basic_block new_exit_block = NULL;
2440 if (!single_pred_p (exit->dest))
2442 /* Create a new empty exit block, inbetween the new loop header and the
2443 old exit block.  The function separate_decls_in_region needs this block
2444 to insert code that is active on loop exit, but not any other path.  */
2445 new_exit_block = split_edge (exit);
2448 /* Insert and register the reduction exit phis.  */
2449 for (gphi_iterator gsi = gsi_start_phis (exit_block);
2453 gphi *phi = gsi.phi ();
2455 tree res_z = PHI_RESULT (phi);
2458 if (new_exit_block != NULL)
2460 /* Now that we have a new exit block, duplicate the phi of the old
2461 exit block in the new exit block to preserve loop-closed ssa.  */
2462 edge succ_new_exit_block = single_succ_edge (new_exit_block);
2463 edge pred_new_exit_block = single_pred_edge (new_exit_block);
2464 tree res_y = copy_ssa_name (res_z, phi);
2465 nphi = create_phi_node (res_y, new_exit_block);
2466 res_c = PHI_ARG_DEF_FROM_EDGE (phi, succ_new_exit_block);
2467 add_phi_arg (nphi, res_c, pred_new_exit_block, UNKNOWN_LOCATION);
2468 add_phi_arg (phi, res_y, succ_new_exit_block, UNKNOWN_LOCATION);
2471 res_c = PHI_ARG_DEF_FROM_EDGE (phi, exit);
2473 if (virtual_operand_p (res_z))
2476 gimple *reduc_phi = SSA_NAME_DEF_STMT (res_c);
2477 struct reduction_info *red = reduction_phi (reduction_list, reduc_phi);
2479 red->keep_res = (nphi != NULL
2484 /* We're going to cancel the loop at the end of gen_parallel_loop, but until
2485 then we're still using some fields, so only bother about fields that are
2486 still used: header and latch.
2487 The loop has a new header bb, so we update it.  The latch bb stays the same.  */
2489 loop->header = new_header;
2491 /* Recalculate dominance info.  */
2492 free_dominance_info (CDI_DOMINATORS);
2493 calculate_dominance_info (CDI_DOMINATORS);
2495 checking_verify_ssa (true, true);
2498 /* Tries to move the exit condition of LOOP to the beginning of its header
2499 without duplication of the loop body. NIT is the number of iterations of the
2500 loop. REDUCTION_LIST describes the reductions in LOOP. Return true if
2501 transformation is successful. */
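/* For example (an illustrative sketch, not code from this pass): with an
   unsigned 8-bit number of iterations the alternative bound nit + 1 can wrap:

     unsigned char nit = 255;          // == TYPE_MAX_VALUE (nit_type)
     unsigned char bound = nit + 1;    // wraps around to 0

   so the transformation is only done when nit + 1 is known not to overflow,
   either because nit is a constant below the type maximum or because
   max_loop_iterations bounds it away from the maximum.  */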
2504 try_transform_to_exit_first_loop_alt (class loop *loop,
2505 reduction_info_table_type *reduction_list,
2506 tree nit)
2508 /* Check whether the latch contains a single statement.  */
2509 if (!gimple_seq_nondebug_singleton_p (bb_seq (loop->latch)))
2512 /* Check whether the latch contains no phis.  */
2513 if (phi_nodes (loop->latch) != NULL)
2516 /* Check whether the latch contains the loop iv increment.  */
2517 edge back = single_succ_edge (loop->latch);
2518 edge exit = single_dom_exit (loop);
2519 gcond *cond_stmt = as_a <gcond *> (last_stmt (exit->src));
2520 tree control = gimple_cond_lhs (cond_stmt);
2521 gphi *phi = as_a <gphi *> (SSA_NAME_DEF_STMT (control));
2522 tree inc_res = gimple_phi_arg_def (phi, back->dest_idx);
2523 if (gimple_bb (SSA_NAME_DEF_STMT (inc_res)) != loop->latch)
2526 /* Check whether there's no code between the loop condition and the latch.  */
2527 if (!single_pred_p (loop->latch)
2528 || single_pred (loop->latch) != exit->src)
2531 tree alt_bound = NULL_TREE;
2532 tree nit_type = TREE_TYPE (nit);
2534 /* Figure out whether nit + 1 overflows.  */
2535 if (TREE_CODE (nit) == INTEGER_CST)
2537 if (!tree_int_cst_equal (nit, TYPE_MAX_VALUE (nit_type)))
2539 alt_bound = fold_build2_loc (UNKNOWN_LOCATION, PLUS_EXPR, nit_type,
2540 nit, build_one_cst (nit_type));
2542 gcc_assert (TREE_CODE (alt_bound) == INTEGER_CST);
2543 transform_to_exit_first_loop_alt (loop, reduction_list, alt_bound);
2548 /* Todo: Figure out if we can trigger this, if it's worth to handle
2549 optimally, and if we can handle it optimally. */
2554 gcc_assert (TREE_CODE (nit) == SSA_NAME);
2556 /* Variable nit is the loop bound as returned by canonicalize_loop_ivs, for an
2557 iv with base 0 and step 1 that is incremented in the latch, like this:
2560 # iv_1 = PHI <0 (preheader), iv_2 (latch)>
2571 The range of iv_1 is [0, nit]. The latch edge is taken for
2572 iv_1 == [0, nit - 1] and the exit edge is taken for iv_1 == nit. So the
2573 number of latch executions is equal to nit.
2575 The function max_loop_iterations gives us the maximum number of latch
2576 executions, so it gives us the maximum value of nit. */
2578 if (!max_loop_iterations (loop, &nit_max))
2581 /* Check if nit + 1 overflows.  */
2582 widest_int type_max = wi::to_widest (TYPE_MAX_VALUE (nit_type));
2583 if (nit_max >= type_max)
2586 gimple *def = SSA_NAME_DEF_STMT (nit);
2588 /* Try to find nit + 1, in the form of n in an assignment nit = n - 1.  */
2590 && is_gimple_assign (def)
2591 && gimple_assign_rhs_code (def) == PLUS_EXPR)
2593 tree op1 = gimple_assign_rhs1 (def);
2594 tree op2 = gimple_assign_rhs2 (def);
2595 if (integer_minus_onep (op1))
2597 else if (integer_minus_onep (op2))
2601 /* If not found, insert nit + 1.  */
2602 if (alt_bound == NULL_TREE)
2604 alt_bound = fold_build2 (PLUS_EXPR, nit_type, nit,
2605 build_int_cst_type (nit_type, 1));
2607 gimple_stmt_iterator gsi = gsi_last_bb (loop_preheader_edge (loop)->src);
2610 alt_bound = force_gimple_operand_gsi (&gsi, alt_bound, true, NULL_TREE, false,
2611 GSI_CONTINUE_LINKING);
2614 transform_to_exit_first_loop_alt (loop, reduction_list, alt_bound);
2618 /* Moves the exit condition of LOOP to the beginning of its header.  NIT is the
2619 number of iterations of the loop.  REDUCTION_LIST describes the reductions in
2620 LOOP.  */
2623 transform_to_exit_first_loop (class loop *loop,
2624 reduction_info_table_type *reduction_list,
2625 tree nit)
2627 basic_block *bbs, *nbbs, ex_bb, orig_header;
2630 edge exit = single_dom_exit (loop), hpred;
2631 tree control, control_name, res, t;
2634 gcond *cond_stmt, *cond_nit;
2637 split_block_after_labels (loop->header);
2638 orig_header = single_succ (loop->header);
2639 hpred = single_succ_edge (loop->header);
2641 cond_stmt = as_a <gcond *> (last_stmt (exit->src));
2642 control = gimple_cond_lhs (cond_stmt);
2643 gcc_assert (gimple_cond_rhs (cond_stmt) == nit);
2645 /* Make sure that we have phi nodes on exit for all loop header phis
2646 (create_parallel_loop requires that). */
2647 for (gphi_iterator gsi = gsi_start_phis (loop->header);
2652 res = PHI_RESULT (phi);
2653 t = copy_ssa_name (res, phi);
2654 SET_PHI_RESULT (phi, t);
2655 nphi = create_phi_node (res, orig_header);
2656 add_phi_arg (nphi, t, hpred, UNKNOWN_LOCATION);
2660 gimple_cond_set_lhs (cond_stmt, t);
2661 update_stmt (cond_stmt);
2666 bbs = get_loop_body_in_dom_order (loop);
2668 for (n = 0; bbs[n] != exit->src; n++)
2670 nbbs = XNEWVEC (basic_block, n);
2671 ok = gimple_duplicate_sese_tail (single_succ_edge (loop->header), exit,
2678 /* Other than reductions, the only gimple reg that should be copied
2679 out of the loop is the control variable. */
2680 exit = single_dom_exit (loop);
2681 control_name = NULL_TREE;
2682 for (gphi_iterator gsi = gsi_start_phis (ex_bb);
2686 res = PHI_RESULT (phi);
2687 if (virtual_operand_p (res))
2693 /* Check if it is a part of reduction.  If it is,
2694 keep the phi at the reduction's keep_res field.  The
2695 PHI_RESULT of this phi is the resulting value of the reduction
2696 variable when exiting the loop.  */
2698 if (!reduction_list->is_empty ())
2700 struct reduction_info *red;
2702 tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);
2703 red = reduction_phi (reduction_list, SSA_NAME_DEF_STMT (val));
2706 red->keep_res = phi;
2711 gcc_assert (control_name == NULL_TREE
2712 && SSA_NAME_VAR (res) == SSA_NAME_VAR (control));
2714 remove_phi_node (&gsi, false);
2716 gcc_assert (control_name != NULL_TREE);
2718 /* Initialize the control variable to number of iterations
2719 according to the rhs of the exit condition. */
2720 gimple_stmt_iterator gsi = gsi_after_labels (ex_bb);
2721 cond_nit = as_a <gcond *> (last_stmt (exit->src));
2722 nit_1 = gimple_cond_rhs (cond_nit);
2723 nit_1 = force_gimple_operand_gsi (&gsi,
2724 fold_convert (TREE_TYPE (control_name), nit_1),
2725 false, NULL_TREE, false, GSI_SAME_STMT);
2726 stmt = gimple_build_assign (control_name, nit_1);
2727 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
2730 /* Create the parallel constructs for LOOP as described in gen_parallel_loop.
2731 LOOP_FN and DATA are the arguments of GIMPLE_OMP_PARALLEL.
2732 NEW_DATA is the variable that should be initialized from the argument
2733 of LOOP_FN. N_THREADS is the requested number of threads, which can be 0 if
2734 that number is to be determined later. */
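/* Sketch of the emitted structure (simplified illustration, following the
   description in gen_parallel_loop below):

     GIMPLE_OMP_PARALLEL (num_threads (N_THREADS), LOOP_FN, DATA)
       NEW_DATA = (cast) &DATA;
       GIMPLE_OMP_FOR (IV = INIT; COND; IV += STEP)
         ... loop body ...
         GIMPLE_OMP_CONTINUE (IV_NEXT, IV)
       GIMPLE_OMP_RETURN                  -- closes GIMPLE_OMP_FOR
     GIMPLE_OMP_RETURN                    -- closes GIMPLE_OMP_PARALLEL

   The omp expansion pass later lowers this into calls into libgomp.  */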
2737 create_parallel_loop (class loop *loop, tree loop_fn, tree data,
2738 tree new_data, unsigned n_threads, location_t loc,
2739 bool oacc_kernels_p)
2741 gimple_stmt_iterator gsi;
2742 basic_block for_bb, ex_bb, continue_bb;
2744 gomp_parallel *omp_par_stmt;
2745 gimple *omp_return_stmt1, *omp_return_stmt2;
2749 gomp_continue *omp_cont_stmt;
2750 tree cvar, cvar_init, initvar, cvar_next, cvar_base, type;
2751 edge exit, nexit, guard, end, e;
2755 gcc_checking_assert (lookup_attribute ("oacc kernels",
2756 DECL_ATTRIBUTES (cfun->decl)));
2757 /* Indicate to later processing that this is a parallelized OpenACC
2758 kernels construct.  */
2759 DECL_ATTRIBUTES (cfun->decl)
2760 = tree_cons (get_identifier ("oacc kernels parallelized"),
2761 NULL_TREE, DECL_ATTRIBUTES (cfun->decl));
2765 /* Prepare the GIMPLE_OMP_PARALLEL statement. */
2767 basic_block bb = loop_preheader_edge (loop)->src;
2768 basic_block paral_bb = single_pred (bb);
2769 gsi = gsi_last_bb (paral_bb);
2771 gcc_checking_assert (n_threads != 0);
2772 t = build_omp_clause (loc, OMP_CLAUSE_NUM_THREADS);
2773 OMP_CLAUSE_NUM_THREADS_EXPR (t)
2774 = build_int_cst (integer_type_node, n_threads);
2775 omp_par_stmt = gimple_build_omp_parallel (NULL, t, loop_fn, data);
2776 gimple_set_location (omp_par_stmt, loc);
2778 gsi_insert_after (&gsi, omp_par_stmt, GSI_NEW_STMT);
2780 /* Initialize NEW_DATA. */
2783 gassign *assign_stmt;
2785 gsi = gsi_after_labels (bb);
2787 param = make_ssa_name (DECL_ARGUMENTS (loop_fn));
2788 assign_stmt = gimple_build_assign (param, build_fold_addr_expr (data));
2789 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
2791 assign_stmt = gimple_build_assign (new_data,
2792 fold_convert (TREE_TYPE (new_data), param));
2793 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
2796 /* Emit GIMPLE_OMP_RETURN for GIMPLE_OMP_PARALLEL. */
2797 bb = split_loop_exit_edge (single_dom_exit (loop));
2798 gsi = gsi_last_bb (bb);
2799 omp_return_stmt1 = gimple_build_omp_return (false);
2800 gimple_set_location (omp_return_stmt1, loc);
2801 gsi_insert_after (&gsi, omp_return_stmt1, GSI_NEW_STMT);
2804 /* Extract data for GIMPLE_OMP_FOR. */
2805 gcc_assert (loop->header == single_dom_exit (loop)->src);
2806 cond_stmt = as_a <gcond *> (last_stmt (loop->header));
2808 cvar = gimple_cond_lhs (cond_stmt);
2809 cvar_base = SSA_NAME_VAR (cvar);
2810 phi = SSA_NAME_DEF_STMT (cvar);
2811 cvar_init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2812 initvar = copy_ssa_name (cvar);
2813 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, loop_preheader_edge (loop)),
2814 initvar);
2815 cvar_next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2817 gsi = gsi_last_nondebug_bb (loop->latch);
2818 gcc_assert (gsi_stmt (gsi) == SSA_NAME_DEF_STMT (cvar_next));
2819 gsi_remove (&gsi, true);
2822 for_bb = split_edge (loop_preheader_edge (loop));
2823 ex_bb = split_loop_exit_edge (single_dom_exit (loop));
2824 extract_true_false_edges_from_block (loop->header, &nexit, &exit);
2825 gcc_assert (exit == single_dom_exit (loop));
2827 guard = make_edge (for_bb, ex_bb, 0);
2828 /* FIXME: What is the probability?  */
2829 guard->probability = profile_probability::guessed_never ();
2830 /* Split the latch edge, so LOOPS_HAVE_SIMPLE_LATCHES is still valid.  */
2831 loop->latch = split_edge (single_succ_edge (loop->latch));
2832 single_pred_edge (loop->latch)->flags = 0;
2833 end = make_single_succ_edge (single_pred (loop->latch), ex_bb, EDGE_FALLTHRU);
2834 rescan_loop_exit (end, true, false);
2836 for (gphi_iterator gpi = gsi_start_phis (ex_bb);
2837 !gsi_end_p (gpi); gsi_next (&gpi))
2840 gphi *phi = gpi.phi ();
2841 tree def = PHI_ARG_DEF_FROM_EDGE (phi, exit);
2842 gimple *def_stmt = SSA_NAME_DEF_STMT (def);
2844 /* If the exit phi is not connected to a header phi in the same loop, this
2845 value is not modified in the loop, and we're done with this phi.  */
2846 if (!(gimple_code (def_stmt) == GIMPLE_PHI
2847 && gimple_bb (def_stmt) == loop->header))
2849 locus = gimple_phi_arg_location_from_edge (phi, exit);
2850 add_phi_arg (phi, def, guard, locus);
2851 add_phi_arg (phi, def, end, locus);
2855 gphi *stmt = as_a <gphi *> (def_stmt);
2856 def = PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop));
2857 locus = gimple_phi_arg_location_from_edge (stmt,
2858 loop_preheader_edge (loop));
2859 add_phi_arg (phi, def, guard, locus);
2861 def = PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (loop));
2862 locus = gimple_phi_arg_location_from_edge (stmt, loop_latch_edge (loop));
2863 add_phi_arg (phi, def, end, locus);
2865 e = redirect_edge_and_branch (exit, nexit->dest);
2866 PENDING_STMT (e) = NULL;
2868 /* Emit GIMPLE_OMP_FOR. */
2870 /* Parallelized OpenACC kernels constructs use gang parallelism. See also
2871 omp-offload.c:execute_oacc_device_lower. */
2872 t = build_omp_clause (loc, OMP_CLAUSE_GANG);
2875 t = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE);
2876 int chunk_size = param_parloops_chunk_size;
2877 switch (param_parloops_schedule)
2879 case PARLOOPS_SCHEDULE_STATIC:
2880 OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_STATIC;
2882 case PARLOOPS_SCHEDULE_DYNAMIC:
2883 OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
2885 case PARLOOPS_SCHEDULE_GUIDED:
2886 OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_GUIDED;
2888 case PARLOOPS_SCHEDULE_AUTO:
2889 OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_AUTO;
2892 case PARLOOPS_SCHEDULE_RUNTIME:
2893 OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_RUNTIME;
2899 if (chunk_size != 0)
2900 OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t)
2901 = build_int_cst (integer_type_node, chunk_size);
2904 for_stmt = gimple_build_omp_for (NULL,
2906 ? GF_OMP_FOR_KIND_OACC_LOOP
2907 : GF_OMP_FOR_KIND_FOR),
2910 gimple_cond_set_lhs (cond_stmt, cvar_base);
2911 type = TREE_TYPE (cvar);
2912 gimple_set_location (for_stmt, loc);
2913 gimple_omp_for_set_index (for_stmt, 0, initvar);
2914 gimple_omp_for_set_initial (for_stmt, 0, cvar_init);
2915 gimple_omp_for_set_final (for_stmt, 0, gimple_cond_rhs (cond_stmt));
2916 gimple_omp_for_set_cond (for_stmt, 0, gimple_cond_code (cond_stmt));
2917 gimple_omp_for_set_incr (for_stmt, 0, build2 (PLUS_EXPR, type,
2919 build_int_cst (type, 1)));
2921 gsi = gsi_last_bb (for_bb);
2922 gsi_insert_after (&gsi, for_stmt, GSI_NEW_STMT);
2923 SSA_NAME_DEF_STMT (initvar) = for_stmt;
2925 /* Emit GIMPLE_OMP_CONTINUE. */
2926 continue_bb = single_pred (loop->latch);
2927 gsi = gsi_last_bb (continue_bb);
2928 omp_cont_stmt = gimple_build_omp_continue (cvar_next, cvar);
2929 gimple_set_location (omp_cont_stmt, loc);
2930 gsi_insert_after (&gsi, omp_cont_stmt, GSI_NEW_STMT);
2931 SSA_NAME_DEF_STMT (cvar_next) = omp_cont_stmt;
2933 /* Emit GIMPLE_OMP_RETURN for GIMPLE_OMP_FOR. */
2934 gsi = gsi_last_bb (ex_bb);
2935 omp_return_stmt2 = gimple_build_omp_return (true);
2936 gimple_set_location (omp_return_stmt2, loc);
2937 gsi_insert_after (&gsi, omp_return_stmt2, GSI_NEW_STMT);
2939 /* After the above dom info is hosed.  Re-compute it.  */
2940 free_dominance_info (CDI_DOMINATORS);
2941 calculate_dominance_info (CDI_DOMINATORS);
2944 /* Return number of phis in bb.  If COUNT_VIRTUAL_P is false, don't count the
2945 virtual phi.  */
2948 num_phis (basic_block bb, bool count_virtual_p)
2950 unsigned int nr_phis = 0;
2952 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2954 if (!count_virtual_p && virtual_operand_p (PHI_RESULT (gsi.phi ())))
2963 /* Generates code to execute the iterations of LOOP in N_THREADS
2964 threads in parallel, which can be 0 if that number is to be determined later.
2967 NITER describes number of iterations of LOOP.
2968 REDUCTION_LIST describes the reductions existent in the LOOP. */
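/* For illustration (a hedged source-level analogue; the pass works on GIMPLE,
   not on C source): for

     for (i = 0; i < n; i++)
       a[i] = b[i] + c[i];

   the generated code behaves roughly like

     if (n >= MIN_PER_THREAD * n_threads - 1)
       {
     #pragma omp parallel num_threads (n_threads)
     #pragma omp for schedule (static)
         for (i = 0; i < n; i++)
           a[i] = b[i] + c[i];
       }
     else
       for (i = 0; i < n; i++)    // the "few iterations" copy of the loop
         a[i] = b[i] + c[i];
*/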
2971 gen_parallel_loop (class loop *loop,
2972 reduction_info_table_type *reduction_list,
2973 unsigned n_threads, class tree_niter_desc *niter,
2974 bool oacc_kernels_p)
2976 tree many_iterations_cond, type, nit;
2977 tree arg_struct, new_arg_struct;
2980 struct clsn_data clsn_data;
2983 unsigned int m_p_thread = 2;
2987 ---------------------------------------------------------------------
2990 IV = phi (INIT, IV + STEP)
2996 ---------------------------------------------------------------------
2998 with # of iterations NITER (possibly with MAY_BE_ZERO assumption),
2999 we generate the following code:
3001 ---------------------------------------------------------------------
3004 || NITER < MIN_PER_THREAD * N_THREADS)
3008 store all local loop-invariant variables used in body of the loop to DATA.
3009 GIMPLE_OMP_PARALLEL (OMP_CLAUSE_NUM_THREADS (N_THREADS), LOOPFN, DATA);
3010 load the variables from DATA.
3011 GIMPLE_OMP_FOR (IV = INIT; COND; IV += STEP) (OMP_CLAUSE_SCHEDULE (static))
3014 GIMPLE_OMP_CONTINUE;
3015 GIMPLE_OMP_RETURN -- GIMPLE_OMP_FOR
3016 GIMPLE_OMP_RETURN -- GIMPLE_OMP_PARALLEL
3022 IV = phi (INIT, IV + STEP)
3033 /* Create two versions of the loop -- in the old one, we know that the
3034 number of iterations is large enough, and we will transform it into the
3035 loop that will be split to loop_fn, the new one will be used for the
3036 remaining iterations. */
3038 /* We should compute a better number-of-iterations value for outer loops.
3041 for (i = 0; i < n; ++i)
3042 for (j = 0; j < m; ++j)
3045 we should compute nit = n * m, not nit = n.
3046 Also may_be_zero handling would need to be adjusted. */
3048 type = TREE_TYPE (niter->niter);
3049 nit = force_gimple_operand (unshare_expr (niter->niter), &stmts, true,
3052 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
3054 if (!oacc_kernels_p)
3059 m_p_thread = MIN_PER_THREAD;
3061 gcc_checking_assert (n_threads != 0);
3062 many_iterations_cond =
3063 fold_build2 (GE_EXPR, boolean_type_node,
3064 nit, build_int_cst (type, m_p_thread * n_threads - 1));
3066 many_iterations_cond
3067 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
3068 invert_truthvalue (unshare_expr (niter->may_be_zero)),
3069 many_iterations_cond);
3070 many_iterations_cond
3071 = force_gimple_operand (many_iterations_cond, &stmts, false, NULL_TREE);
3073 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
3074 if (!is_gimple_condexpr (many_iterations_cond))
3076 many_iterations_cond
3077 = force_gimple_operand (many_iterations_cond, &stmts,
3080 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop),
3084 initialize_original_copy_tables ();
3086 /* We assume that the loop usually iterates a lot. */
3087 loop_version (loop, many_iterations_cond, NULL,
3088 profile_probability::likely (),
3089 profile_probability::unlikely (),
3090 profile_probability::likely (),
3091 profile_probability::unlikely (), true);
3092 update_ssa (TODO_update_ssa);
3093 free_original_copy_tables ();
3096 /* Base all the induction variables in LOOP on a single control one. */
3097 canonicalize_loop_ivs (loop, &nit, true);
3098 if (num_phis (loop->header, false) != reduction_list->elements () + 1)
3100 /* The call to canonicalize_loop_ivs above failed to "base all the
3101 induction variables in LOOP on a single control one".  Do damage
3102 control.  */
3103 basic_block preheader = loop_preheader_edge (loop)->src;
3104 basic_block cond_bb = single_pred (preheader);
3105 gcond *cond = as_a <gcond *> (gsi_stmt (gsi_last_bb (cond_bb)));
3106 gimple_cond_make_true (cond);
3108 /* We've gotten rid of the duplicate loop created by loop_version, but
3109 we can't undo whatever canonicalize_loop_ivs has done.
3110 TODO: Fix this properly by ensuring that the call to
3111 canonicalize_loop_ivs succeeds.  */
3113 && (dump_flags & TDF_DETAILS))
3114 fprintf (dump_file, "canonicalize_loop_ivs failed for loop %d,"
3115 " aborting transformation\n", loop->num);
3119 /* Ensure that the exit condition is the first statement in the loop.
3120 The common case is that latch of the loop is empty (apart from the
3121 increment) and immediately follows the loop exit test. Attempt to move the
3122 entry of the loop directly before the exit check and increase the number of
3123 iterations of the loop by one. */
3124 if (try_transform_to_exit_first_loop_alt (loop, reduction_list, nit))
3127 && (dump_flags & TDF_DETAILS))
3129 "alternative exit-first loop transform succeeded"
3130 " for loop %d\n", loop->num);
3137 /* Fall back on the method that handles more cases, but duplicates the
3138 loop body: move the exit condition of LOOP to the beginning of its
3139 header, and duplicate the part of the last iteration that gets disabled
3140 to the exit of the loop.  */
3141 transform_to_exit_first_loop (loop, reduction_list, nit);
3144 /* Generate initializations for reductions. */
3145 if (!reduction_list->is_empty ())
3146 reduction_list->traverse <class loop *, initialize_reductions> (loop);
3148 /* Eliminate the references to local variables from the loop. */
3149 gcc_assert (single_exit (loop));
3150 entry = loop_preheader_edge (loop);
3151 exit = single_dom_exit (loop);
3153 /* This rewrites the body in terms of new variables. This has already
3154 been done for oacc_kernels_p in pass_lower_omp/lower_omp (). */
3155 if (!oacc_kernels_p)
3157 eliminate_local_variables (entry, exit);
3158 /* In the old loop, move all variables non-local to the loop to a
3159 structure and back, and create separate decls for the variables used in
3161 separate_decls_in_region (entry, exit, reduction_list, &arg_struct,
3162 &new_arg_struct, &clsn_data);
3166 arg_struct = NULL_TREE;
3167 new_arg_struct = NULL_TREE;
3168 clsn_data.load = NULL_TREE;
3169 clsn_data.load_bb = exit->dest;
3170 clsn_data.store = NULL_TREE;
3171 clsn_data.store_bb = NULL;
3174 /* Create the parallel constructs. */
3175 loc = UNKNOWN_LOCATION;
3176 cond_stmt = last_stmt (loop->header);
3177 if (cond_stmt)
3178 loc = gimple_location (cond_stmt);
3179 create_parallel_loop (loop, create_loop_fn (loc), arg_struct, new_arg_struct,
3180 n_threads, loc, oacc_kernels_p);
3181 if (!reduction_list->is_empty ())
3182 create_call_for_reduction (loop, reduction_list, &clsn_data);
3186 /* Free loop bound estimations that could contain references to
3187 removed statements.  */
3188 free_numbers_of_iterations_estimates (cfun);
3191 /* Returns true when LOOP contains vector phi nodes. */
3194 loop_has_vector_phi_nodes (class loop *loop ATTRIBUTE_UNUSED)
3197 basic_block *bbs = get_loop_body_in_dom_order (loop);
3201 for (i = 0; i < loop->num_nodes; i++)
3202 for (gsi = gsi_start_phis (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
3203 if (TREE_CODE (TREE_TYPE (PHI_RESULT (gsi.phi ()))) == VECTOR_TYPE)
3212 /* Create a reduction_info struct, initialize it with REDUC_STMT
3213 and PHI, insert it to the REDUCTION_LIST. */
3216 build_new_reduction (reduction_info_table_type *reduction_list,
3217 gimple *reduc_stmt, gphi *phi)
3219 reduction_info **slot;
3220 struct reduction_info *new_reduction;
3221 enum tree_code reduction_code;
3223 gcc_assert (reduc_stmt);
3225 if (gimple_code (reduc_stmt) == GIMPLE_PHI)
3227 tree op1 = PHI_ARG_DEF (reduc_stmt, 0);
3228 gimple *def1 = SSA_NAME_DEF_STMT (op1);
3229 reduction_code = gimple_assign_rhs_code (def1);
3232 reduction_code = gimple_assign_rhs_code (reduc_stmt);
3233 /* Check for OpenMP supported reduction.  */
3234 switch (reduction_code)
3244 case TRUTH_XOR_EXPR:
3245 case TRUTH_AND_EXPR:
3251 if (dump_file && (dump_flags & TDF_DETAILS))
3254 "Detected reduction. reduction stmt is:\n");
3255 print_gimple_stmt (dump_file, reduc_stmt, 0);
3256 fprintf (dump_file, "\n");
3259 new_reduction = XCNEW (struct reduction_info);
3261 new_reduction->reduc_stmt = reduc_stmt;
3262 new_reduction->reduc_phi = phi;
3263 new_reduction->reduc_version = SSA_NAME_VERSION (gimple_phi_result (phi));
3264 new_reduction->reduction_code = reduction_code;
3265 slot = reduction_list->find_slot (new_reduction, INSERT);
3266 *slot = new_reduction;
3269 /* Callback for htab_traverse. Sets gimple_uid of reduc_phi stmts. */
3272 set_reduc_phi_uids (reduction_info **slot, void *data ATTRIBUTE_UNUSED)
3274 struct reduction_info *const red = *slot;
3275 gimple_set_uid (red->reduc_phi, red->reduc_version);
3279 /* Return true if the type of reduction performed by STMT_INFO is suitable
3280 for this pass.  */
3283 valid_reduction_p (stmt_vec_info stmt_info)
3285 /* Parallelization would reassociate the operation, which isn't
3286 allowed for in-order reductions.  */
3287 vect_reduction_type reduc_type = STMT_VINFO_REDUC_TYPE (stmt_info);
3288 return reduc_type != FOLD_LEFT_REDUCTION;
3291 /* Detect all reductions in the LOOP, insert them into REDUCTION_LIST. */
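/* For example (an illustrative sketch): the kinds of loops whose reductions
   this function is meant to record are

     int sum = 0;
     for (int i = 0; i < n; i++)
       sum += a[i];                  // simple reduction on sum

     int total = 0;
     for (int i = 0; i < n; i++)
       for (int j = 0; j < m; j++)
         total += b[i][j];           // double reduction: the outer header phi
                                     // is only fed through the inner loop
*/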
3294 gather_scalar_reductions (loop_p loop, reduction_info_table_type *reduction_list)
3297 loop_vec_info simple_loop_info;
3298 auto_vec<gphi *, 4> double_reduc_phis;
3299 auto_vec<gimple *, 4> double_reduc_stmts;
3301 vec_info_shared shared;
3302 simple_loop_info = vect_analyze_loop_form (loop, &shared);
3303 if (simple_loop_info == NULL)
3306 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
3308 gphi *phi = gsi.phi ();
3310 tree res = PHI_RESULT (phi);
3313 if (virtual_operand_p (res))
3316 if (simple_iv (loop, loop, res, &iv, true))
3319 stmt_vec_info reduc_stmt_info
3320 = parloops_force_simple_reduction (simple_loop_info,
3321 simple_loop_info->lookup_stmt (phi),
3322 &double_reduc, true);
3323 if (!reduc_stmt_info || !valid_reduction_p (reduc_stmt_info))
3328 if (loop->inner->inner != NULL)
3331 double_reduc_phis.safe_push (phi);
3332 double_reduc_stmts.safe_push (reduc_stmt_info->stmt);
3336 build_new_reduction (reduction_list, reduc_stmt_info->stmt, phi);
3338 delete simple_loop_info;
3340 if (!double_reduc_phis.is_empty ())
3342 vec_info_shared shared;
3343 simple_loop_info = vect_analyze_loop_form (loop->inner, &shared);
3344 if (simple_loop_info)
3349 FOR_EACH_VEC_ELT (double_reduc_phis, i, phi)
3352 tree res = PHI_RESULT (phi);
3355 use_operand_p use_p;
3357 bool single_use_p = single_imm_use (res, &use_p, &inner_stmt);
3358 gcc_assert (single_use_p);
3359 if (gimple_code (inner_stmt) != GIMPLE_PHI)
3361 gphi *inner_phi = as_a <gphi *> (inner_stmt);
3362 if (simple_iv (loop->inner, loop->inner, PHI_RESULT (inner_phi),
3366 stmt_vec_info inner_phi_info
3367 = simple_loop_info->lookup_stmt (inner_phi);
3368 stmt_vec_info inner_reduc_stmt_info
3369 = parloops_force_simple_reduction (simple_loop_info,
3371 &double_reduc, true);
3372 gcc_assert (!double_reduc);
3373 if (!inner_reduc_stmt_info
3374 || !valid_reduction_p (inner_reduc_stmt_info))
3377 build_new_reduction (reduction_list, double_reduc_stmts[i], phi);
3379 delete simple_loop_info;
3384 if (reduction_list->is_empty ())
3387 /* As gimple_uid is used by the vectorizer in between vect_analyze_loop_form
3388 and delete simple_loop_info, we can set gimple_uid of reduc_phi stmts only
3389 now.  */
3391 FOR_EACH_BB_FN (bb, cfun)
3392 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3393 gimple_set_uid (gsi_stmt (gsi), (unsigned int) -1);
3394 reduction_list->traverse <void *, set_reduc_phi_uids> (NULL);
3397 /* Try to initialize NITER for code generation part. */
3400 try_get_loop_niter (loop_p loop, class tree_niter_desc *niter)
3402 edge exit = single_dom_exit (loop);
3406 /* We need to know # of iterations, and there should be no uses of values
3407 defined inside loop outside of it, unless the values are invariants of
3408 the loop.  */
3409 if (!number_of_iterations_exit (loop, exit, niter, false))
3411 if (dump_file && (dump_flags & TDF_DETAILS))
3412 fprintf (dump_file, " FAILED: number of iterations not known\n");
3419 /* Return the default def of the first function argument. */
3422 get_omp_data_i_param (void)
3424 tree decl = DECL_ARGUMENTS (cfun->decl);
3425 gcc_assert (DECL_CHAIN (decl) == NULL_TREE);
3426 return ssa_default_def (cfun, decl);
3429 /* For PHI in loop header of LOOP, look for pattern:
3432 .omp_data_i = &.omp_data_arr;
3433 addr = .omp_data_i->sum;
3437 sum_b = PHI <sum_a (preheader), sum_c (latch)>
3439 and return addr. Otherwise, return NULL_TREE. */
3442 find_reduc_addr (class loop *loop, gphi *phi)
3444 edge e = loop_preheader_edge (loop);
3445 tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e);
3446 gimple *stmt = SSA_NAME_DEF_STMT (arg);
3447 if (!gimple_assign_single_p (stmt))
3449 tree memref = gimple_assign_rhs1 (stmt);
3450 if (TREE_CODE (memref) != MEM_REF)
3452 tree addr = TREE_OPERAND (memref, 0);
3454 gimple *stmt2 = SSA_NAME_DEF_STMT (addr);
3455 if (!gimple_assign_single_p (stmt2))
3457 tree compref = gimple_assign_rhs1 (stmt2);
3458 if (TREE_CODE (compref) != COMPONENT_REF)
3460 tree addr2 = TREE_OPERAND (compref, 0);
3461 if (TREE_CODE (addr2) != MEM_REF)
3463 addr2 = TREE_OPERAND (addr2, 0);
3464 if (TREE_CODE (addr2) != SSA_NAME
3465 || addr2 != get_omp_data_i_param ())
3471 /* Try to initialize REDUCTION_LIST for code generation part.
3472 REDUCTION_LIST describes the reductions. */
3475 try_create_reduction_list (loop_p loop,
3476 reduction_info_table_type *reduction_list,
3477 bool oacc_kernels_p)
3479 edge exit = single_dom_exit (loop);
3484 /* Try to get rid of exit phis.  */
3485 final_value_replacement_loop (loop);
3487 gather_scalar_reductions (loop, reduction_list);
3490 for (gsi = gsi_start_phis (exit->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3492 gphi *phi = gsi.phi ();
3493 struct reduction_info *red;
3494 imm_use_iterator imm_iter;
3495 use_operand_p use_p;
3497 tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);
3499 if (!virtual_operand_p (val))
3501 if (TREE_CODE (val) != SSA_NAME)
3503 if (dump_file && (dump_flags & TDF_DETAILS))
3505 " FAILED: exit PHI argument invariant.\n");
3509 if (dump_file && (dump_flags & TDF_DETAILS))
3511 fprintf (dump_file, "phi is ");
3512 print_gimple_stmt (dump_file, phi, 0);
3513 fprintf (dump_file, "arg of phi to exit: value ");
3514 print_generic_expr (dump_file, val);
3515 fprintf (dump_file, " used outside loop\n");
3517 " checking if it is part of reduction pattern:\n");
3519 if (reduction_list->is_empty ())
3521 if (dump_file && (dump_flags & TDF_DETAILS))
3523 " FAILED: it is not a part of reduction.\n");
3527 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, val)
3529 if (!gimple_debug_bind_p (USE_STMT (use_p))
3530 && flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
3532 reduc_phi = USE_STMT (use_p);
3536 red = reduction_phi (reduction_list, reduc_phi);
3539 if (dump_file && (dump_flags & TDF_DETAILS))
3541 " FAILED: it is not a part of reduction.\n");
3544 if (red->keep_res != NULL)
3546 if (dump_file && (dump_flags & TDF_DETAILS))
3548 " FAILED: reduction has multiple exit phis.\n");
3551 red->keep_res = phi;
3552 if (dump_file && (dump_flags & TDF_DETAILS))
3554 fprintf (dump_file, "reduction phi is ");
3555 print_gimple_stmt (dump_file, red->reduc_phi, 0);
3556 fprintf (dump_file, "reduction stmt is ");
3557 print_gimple_stmt (dump_file, red->reduc_stmt, 0);
3562 /* The iterations of the loop may communicate only through bivs whose
3563 iteration space can be distributed efficiently. */
3564 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
3566 gphi *phi = gsi.phi ();
3567 tree def = PHI_RESULT (phi);
3570 if (!virtual_operand_p (def) && !simple_iv (loop, loop, def, &iv, true))
3572 struct reduction_info *red;
3574 red = reduction_phi (reduction_list, phi);
3577 if (dump_file && (dump_flags & TDF_DETAILS))
3579 " FAILED: scalar dependency between iterations\n");
3587 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi);
3590 gphi *phi = gsi.phi ();
3591 tree def = PHI_RESULT (phi);
3594 if (!virtual_operand_p (def)
3595 && !simple_iv (loop, loop, def, &iv, true))
3597 tree addr = find_reduc_addr (loop, phi);
3598 if (addr == NULL_TREE)
3600 struct reduction_info *red = reduction_phi (reduction_list, phi);
3601 red->reduc_addr = addr;
3609 /* Return true if LOOP contains phis with ADDR_EXPR in args. */
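/* For example (sketch): a phi with an ADDR_EXPR argument looks like

     # p_1 = PHI <&x(3), q_2(4)>

   i.e. one incoming value is the address of a local variable.  */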
3612 loop_has_phi_with_address_arg (class loop *loop)
3614 basic_block *bbs = get_loop_body (loop);
3619 for (i = 0; i < loop->num_nodes; i++)
3620 for (gsi = gsi_start_phis (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
3622 gphi *phi = gsi.phi ();
3623 for (j = 0; j < gimple_phi_num_args (phi); j++)
3625 tree arg = gimple_phi_arg_def (phi, j);
3626 if (TREE_CODE (arg) == ADDR_EXPR)
3628 /* This should be handled by eliminate_local_variables, but that
3629 function currently ignores phis. */
3641 /* Return true if memory ref REF (corresponding to the stmt at GSI in
3642 REGIONS_BB[I]) conflicts with the statements in REGIONS_BB[I] after gsi,
3643 or the statements in REGIONS_BB[I + n]. REF_IS_STORE indicates if REF is a
3644 store. Ignore conflicts with SKIP_STMT. */
3647 ref_conflicts_with_region (gimple_stmt_iterator gsi, ao_ref *ref,
3648 bool ref_is_store, vec<basic_block> region_bbs,
3649 unsigned int i, gimple *skip_stmt)
3651 basic_block bb = region_bbs[i];
3656 for (; !gsi_end_p (gsi);
3659 gimple *stmt = gsi_stmt (gsi);
3660 if (stmt == skip_stmt)
3664 fprintf (dump_file, "skipping reduction store: ");
3665 print_gimple_stmt (dump_file, stmt, 0);
3670 if (!gimple_vdef (stmt)
3671 && !gimple_vuse (stmt))
3674 if (gimple_code (stmt) == GIMPLE_RETURN)
3679 if (ref_maybe_used_by_stmt_p (stmt, ref))
3683 fprintf (dump_file, "Stmt ");
3684 print_gimple_stmt (dump_file, stmt, 0);
3691 if (stmt_may_clobber_ref_p_1 (stmt, ref))
3695 fprintf (dump_file, "Stmt ");
3696 print_gimple_stmt (dump_file, stmt, 0);
3703 if (i == region_bbs.length ())
3706 gsi = gsi_start_bb (bb);
3712 /* Return true if the bbs in REGION_BBS but not in in_loop_bbs can be executed
3713 in parallel with REGION_BBS containing the loop. Return the stores of
3714 reduction results in REDUCTION_STORES. */
3717 oacc_entry_exit_ok_1 (bitmap in_loop_bbs, vec<basic_block> region_bbs,
3718 reduction_info_table_type *reduction_list,
3719 bitmap reduction_stores)
3721 tree omp_data_i = get_omp_data_i_param ();
3725 FOR_EACH_VEC_ELT (region_bbs, i, bb)
3727 if (bitmap_bit_p (in_loop_bbs, bb->index))
3730 gimple_stmt_iterator gsi;
3731 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
3734 gimple *stmt = gsi_stmt (gsi);
3735 gimple *skip_stmt = NULL;
3737 if (is_gimple_debug (stmt)
3738 || gimple_code (stmt) == GIMPLE_COND)
3742 bool ref_is_store = false;
3743 if (gimple_assign_load_p (stmt))
3745 tree rhs = gimple_assign_rhs1 (stmt);
3746 tree base = get_base_address (rhs);
3747 if (TREE_CODE (base) == MEM_REF
3748 && operand_equal_p (TREE_OPERAND (base, 0), omp_data_i, 0))
3751 tree lhs = gimple_assign_lhs (stmt);
3752 if (TREE_CODE (lhs) == SSA_NAME
3753 && has_single_use (lhs))
3755 use_operand_p use_p;
3757 struct reduction_info *red;
3758 single_imm_use (lhs, &use_p, &use_stmt);
3759 if (gimple_code (use_stmt) == GIMPLE_PHI
3760 && (red = reduction_phi (reduction_list, use_stmt)))
3762 tree val = PHI_RESULT (red->keep_res);
3763 if (has_single_use (val))
3765 single_imm_use (val, &use_p, &use_stmt);
3766 if (gimple_store_p (use_stmt))
3769 = SSA_NAME_VERSION (gimple_vdef (use_stmt));
3770 bitmap_set_bit (reduction_stores, id);
3771 skip_stmt = use_stmt;
3774 fprintf (dump_file, "found reduction load: ");
3775 print_gimple_stmt (dump_file, stmt, 0);
3782 ao_ref_init (&ref, rhs);
3784 else if (gimple_store_p (stmt))
3786 ao_ref_init (&ref, gimple_assign_lhs (stmt));
3787 ref_is_store = true;
3789 else if (gimple_code (stmt) == GIMPLE_OMP_RETURN)
3791 else if (!gimple_has_side_effects (stmt)
3792 && !gimple_could_trap_p (stmt)
3793 && !stmt_could_throw_p (cfun, stmt)
3794 && !gimple_vdef (stmt)
3795 && !gimple_vuse (stmt))
3797 else if (gimple_call_internal_p (stmt, IFN_GOACC_DIM_POS))
3799 else if (gimple_code (stmt) == GIMPLE_RETURN)
3805 fprintf (dump_file, "Unhandled stmt in entry/exit: ");
3806 print_gimple_stmt (dump_file, stmt, 0);
3811 if (ref_conflicts_with_region (gsi, &ref, ref_is_store, region_bbs,
3816 fprintf (dump_file, "conflicts with entry/exit stmt: ");
3817 print_gimple_stmt (dump_file, stmt, 0);
3827 /* Find stores inside REGION_BBS and outside IN_LOOP_BBS, and guard them with
3828 gang_pos == 0, except when the stores are REDUCTION_STORES. Return true
3829 if any changes were made. */
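/* For illustration (a sketch in pseudo-source; the actual rewrite is done on
   GIMPLE): a store outside the loop such as

     *p = v;

   is guarded so that only gang 0 executes it:

     gang_pos = IFN_GOACC_DIM_POS (GOMP_DIM_GANG);
     if (gang_pos == 0)
       *p = v;

   with a virtual phi merging the two paths, as built below.  */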
3832 oacc_entry_exit_single_gang (bitmap in_loop_bbs, vec<basic_block> region_bbs,
3833 bitmap reduction_stores)
3835 tree gang_pos = NULL_TREE;
3836 bool changed = false;
3840 FOR_EACH_VEC_ELT (region_bbs, i, bb)
3842 if (bitmap_bit_p (in_loop_bbs, bb->index))
3845 gimple_stmt_iterator gsi;
3846 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
3848 gimple *stmt = gsi_stmt (gsi);
3850 if (!gimple_store_p (stmt))
3852 /* Update gsi to point to next stmt.  */
3857 if (bitmap_bit_p (reduction_stores,
3858 SSA_NAME_VERSION (gimple_vdef (stmt))))
3863 "skipped reduction store for single-gang"
3865 print_gimple_stmt (dump_file, stmt, 0);
3868 /* Update gsi to point to next stmt.  */
3875 if (gang_pos == NULL_TREE)
3877 tree arg = build_int_cst (integer_type_node, GOMP_DIM_GANG);
3879 gcall *gang_single = gimple_build_call_internal (IFN_GOACC_DIM_POS, 1, arg);
3880 gang_pos = make_ssa_name (integer_type_node);
3881 gimple_call_set_lhs (gang_single, gang_pos);
3882 gimple_stmt_iterator start
3883 = gsi_start_bb (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
3884 tree vuse = ssa_default_def (cfun, gimple_vop (cfun));
3885 gimple_set_vuse (gang_single, vuse);
3886 gsi_insert_before (&start, gang_single, GSI_SAME_STMT);
3892 "found store that needs single-gang neutering: ");
3893 print_gimple_stmt (dump_file, stmt, 0);
3897 /* Split block before store.  */
3898 gimple_stmt_iterator gsi2 = gsi;
3901 if (gsi_end_p (gsi2))
3903 e = split_block_after_labels (bb);
3904 gsi2 = gsi_last_bb (bb);
3907 e = split_block (bb, gsi_stmt (gsi2));
3908 basic_block bb2 = e->dest;
3910 /* Split block after store.  */
3911 gimple_stmt_iterator gsi3 = gsi_start_bb (bb2);
3912 edge e2 = split_block (bb2, gsi_stmt (gsi3));
3913 basic_block bb3 = e2->dest;
3916 = gimple_build_cond (EQ_EXPR, gang_pos, integer_zero_node,
3917 NULL_TREE, NULL_TREE);
3918 gsi_insert_after (&gsi2, cond, GSI_NEW_STMT);
3920 edge e3 = make_edge (bb, bb3, EDGE_FALSE_VALUE);
3921 /* FIXME: What is the probability?  */
3922 e3->probability = profile_probability::guessed_never ();
3923 e->flags = EDGE_TRUE_VALUE;
3925 tree vdef = gimple_vdef (stmt);
3926 tree vuse = gimple_vuse (stmt);
3928 tree phi_res = copy_ssa_name (vdef);
3929 gphi *new_phi = create_phi_node (phi_res, bb3);
3930 replace_uses_by (vdef, phi_res);
3931 add_phi_arg (new_phi, vuse, e3, UNKNOWN_LOCATION);
3932 add_phi_arg (new_phi, vdef, e2, UNKNOWN_LOCATION);
3934 /* Update gsi to point to next stmt.  */
3936 gsi = gsi_start_bb (bb);
3944 /* Return true if the statements before and after the LOOP can be executed in
3945 parallel with the function containing the loop. Resolve conflicting stores
3946 outside LOOP by guarding them such that only a single gang executes them. */
3949 oacc_entry_exit_ok (class loop *loop,
3950 reduction_info_table_type *reduction_list)
3952 basic_block *loop_bbs = get_loop_body_in_dom_order (loop);
3953 vec<basic_block> region_bbs
3954 = get_all_dominated_blocks (CDI_DOMINATORS, ENTRY_BLOCK_PTR_FOR_FN (cfun));
3956 bitmap in_loop_bbs = BITMAP_ALLOC (NULL);
3957 bitmap_clear (in_loop_bbs);
3958 for (unsigned int i = 0; i < loop->num_nodes; i++)
3959 bitmap_set_bit (in_loop_bbs, loop_bbs[i]->index);
3961 bitmap reduction_stores = BITMAP_ALLOC (NULL);
3962 bool res = oacc_entry_exit_ok_1 (in_loop_bbs, region_bbs, reduction_list,
3967 bool changed = oacc_entry_exit_single_gang (in_loop_bbs, region_bbs,
3971 free_dominance_info (CDI_DOMINATORS);
3972 calculate_dominance_info (CDI_DOMINATORS);
3976 region_bbs.release ();
3979 BITMAP_FREE (in_loop_bbs);
3980 BITMAP_FREE (reduction_stores);
3985 /* Detect parallel loops and generate parallel code using libgomp
3986 primitives.  Returns true if some loop was parallelized, false otherwise.  */
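/* For example (an illustrative sketch): the first loop below is a candidate,
   the second is rejected because of a loop-carried scalar dependence:

     for (i = 0; i < n; i++)
       a[i] = b[i] * 2;              // iterations independent: OK

     for (i = 1; i < n; i++)
       a[i] = a[i - 1] + b[i];       // each iteration needs the previous one
*/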
3990 parallelize_loops (bool oacc_kernels_p)
3993 bool changed = false;
3995 class loop *skip_loop = NULL;
3996 class tree_niter_desc niter_desc;
3997 struct obstack parloop_obstack;
3998 HOST_WIDE_INT estimated;
4000 /* Do not parallelize loops in the functions created by parallelization. */
4001 if (!oacc_kernels_p
4002 && parallelized_function_p (cfun->decl))
4005 /* Do not parallelize loops in offloaded functions.  */
4006 if (!oacc_kernels_p
4007 && oacc_get_fn_attrib (cfun->decl) != NULL)
4010 if (cfun->has_nonlocal_label)
4013 /* For OpenACC kernels, n_threads will be determined later; otherwise, it's
4014 the argument to -ftree-parallelize-loops. */
4018 n_threads = flag_tree_parallelize_loops;
4020 gcc_obstack_init (&parloop_obstack);
4021 reduction_info_table_type reduction_list (10);
4023 calculate_dominance_info (CDI_DOMINATORS);
4025 FOR_EACH_LOOP (loop, 0)
4027 if (loop == skip_loop)
4029 if (!loop->in_oacc_kernels_region
4030 && dump_file && (dump_flags & TDF_DETAILS))
4032 "Skipping loop %d as inner loop of parallelized loop\n",
4035 skip_loop = loop->inner;
4041 reduction_list.empty ();
4045 if (!loop->in_oacc_kernels_region)
4048 /* Don't try to parallelize inner loops in an oacc kernels region.  */
4050 skip_loop = loop->inner;
4052 if (dump_file && (dump_flags & TDF_DETAILS))
4054 "Trying loop %d with header bb %d in oacc kernels"
4055 " region\n", loop->num, loop->header->index);
4058 if (dump_file && (dump_flags & TDF_DETAILS))
4060 fprintf (dump_file, "Trying loop %d as candidate\n", loop->num);
4062 fprintf (dump_file, "loop %d is not innermost\n", loop->num);
4064 fprintf (dump_file, "loop %d is innermost\n", loop->num);
4067 if (!single_dom_exit (loop))
4070 if (dump_file && (dump_flags & TDF_DETAILS))
4071 fprintf (dump_file, "loop is !single_dom_exit\n");
4076 if (/* And of course, the loop must be parallelizable. */
4077 !can_duplicate_loop_p (loop)
4078 || loop_has_blocks_with_irreducible_flag (loop)
4079 || (loop_preheader_edge (loop)->src->flags & BB_IRREDUCIBLE_LOOP)
4080 /* FIXME: the check for vector phi nodes could be removed.  */
4081 || loop_has_vector_phi_nodes (loop))
4084 estimated = estimated_loop_iterations_int (loop);
4085 if (estimated == -1)
4086 estimated = get_likely_max_loop_iterations_int (loop);
4087 /* FIXME: Bypass this check as graphite doesn't update the
4088 count and frequency correctly now.  */
4089 if (!flag_loop_parallelize_all
4091 && ((estimated != -1
4093 < ((HOST_WIDE_INT) n_threads
4094 * (loop->inner ? 2 : MIN_PER_THREAD) - 1)))
4095 /* Do not bother with loops in cold areas.  */
4096 || optimize_loop_nest_for_size_p (loop)))
4099 if (!try_get_loop_niter (loop, &niter_desc))
4102 if (!try_create_reduction_list (loop, &reduction_list, oacc_kernels_p))
4105 if (loop_has_phi_with_address_arg (loop))
4108 if (!loop->can_be_parallel
4109 && !loop_parallel_p (loop, &parloop_obstack))
4113 && !oacc_entry_exit_ok (loop, &reduction_list))
4116 fprintf (dump_file, "entry/exit not ok: FAILED\n");
4121 skip_loop = loop->inner;
4123 if (dump_enabled_p ())
4125 dump_user_location_t loop_loc = find_loop_location (loop);
4127 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loop_loc,
4128 "parallelizing outer loop %d\n", loop->num);
4130 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loop_loc,
4131 "parallelizing inner loop %d\n", loop->num);
4134 gen_parallel_loop (loop, &reduction_list,
4135 n_threads, &niter_desc, oacc_kernels_p);
4138 obstack_free (&parloop_obstack, NULL);
4140 /* Parallelization will cause new function calls to be inserted through
4141 which local variables will escape.  Reset the points-to solution
4142 for ESCAPED.  */
4143 if (changed)
4144 pt_solution_reset (&cfun->gimple_df->escaped);
4149 /* Parallelization. */
4153 const pass_data pass_data_parallelize_loops =
4155 GIMPLE_PASS, /* type */
4156 "parloops", /* name */
4157 OPTGROUP_LOOP, /* optinfo_flags */
4158 TV_TREE_PARALLELIZE_LOOPS, /* tv_id */
4159 ( PROP_cfg | PROP_ssa ), /* properties_required */
4160 0, /* properties_provided */
4161 0, /* properties_destroyed */
4162 0, /* todo_flags_start */
4163 0, /* todo_flags_finish */
4166 class pass_parallelize_loops : public gimple_opt_pass
4169 pass_parallelize_loops (gcc::context *ctxt)
4170 : gimple_opt_pass (pass_data_parallelize_loops, ctxt),
4171 oacc_kernels_p (false)
4174 /* opt_pass methods: */
4175 virtual bool gate (function *)
4177 if (oacc_kernels_p)
4178 return flag_openacc;
4179 else
4180 return flag_tree_parallelize_loops > 1;
4182 virtual unsigned int execute (function *);
4183 opt_pass * clone () { return new pass_parallelize_loops (m_ctxt); }
4184 void set_pass_param (unsigned int n, bool param)
4186 gcc_assert (n == 0);
4187 oacc_kernels_p = param;
4191 bool oacc_kernels_p;
4192 }; // class pass_parallelize_loops
4195 pass_parallelize_loops::execute (function *fun)
4197 tree nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4198 if (nthreads == NULL_TREE)
4201 bool in_loop_pipeline = scev_initialized_p ();
4202 if (!in_loop_pipeline)
4203 loop_optimizer_init (LOOPS_NORMAL
4204 | LOOPS_HAVE_RECORDED_EXITS);
4206 if (number_of_loops (fun) <= 1)
4209 if (!in_loop_pipeline)
4211 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4215 unsigned int todo = 0;
4216 if (parallelize_loops (oacc_kernels_p))
4218 fun->curr_properties &= ~(PROP_gimple_eomp);
4220 checking_verify_loop_structure ();
4222 todo |= TODO_update_ssa;
4225 if (!in_loop_pipeline)
4228 loop_optimizer_finalize ();
4237 make_pass_parallelize_loops (gcc::context *ctxt)
4239 return new pass_parallelize_loops (ctxt);