/* Loop autoparallelization.
   Copyright (C) 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Sebastian Pop <pop@cri.ensmp.fr> and
   Zdenek Dvorak <dvorakz@suse.cz>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "gimple-pretty-print.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "tree-vectorizer.h"
/* This pass tries to distribute iterations of loops into several threads.
   The implementation is straightforward -- for each loop we test whether its
   iterations are independent, and if it is the case (and some additional
   conditions regarding profitability and correctness are satisfied), we
   add GIMPLE_OMP_PARALLEL and GIMPLE_OMP_FOR codes and let omp expansion
   machinery do its job.

   Most of the complexity is in bringing the code into the shape expected
   by the omp expanders:
   -- for GIMPLE_OMP_FOR, ensuring that the loop has only one induction
      variable and that the exit test is at the start of the loop body
   -- for GIMPLE_OMP_PARALLEL, replacing the references to local addressable
      variables by accesses through pointers, and breaking up ssa chains
      by storing the values incoming to the parallelized loop to a structure
      passed to the new function as an argument (something similar is done
      in omp gimplification, unfortunately only a small part of the code
      can be shared).

   TODO:
   -- if there are several parallelizable loops in a function, it may be
      possible to generate the threads just once (using synchronization to
      ensure that cross-loop dependences are obeyed).
   -- handling of common scalar dependence patterns (accumulation, ...)
   -- handling of non-innermost loops  */
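/* For example, a loop like

     for (i = 0; i < n; i++)
       a[i] = b[i] + c[i];

   has independent iterations (assuming a, b and c do not alias), so its
   iterations can be distributed among threads as if the user had written
   "#pragma omp parallel for" above it; the names a, b, c and n here are
   only illustrative.  */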
/*
  Reduction handling:
  currently we use vect_force_simple_reduction() to detect reduction patterns.
  The code transformation will be introduced by an example.


parloop
{
  int sum = 1;

  for (i = 0; i < N; i++)
   {
    x[i] = i + 3;
    sum += x[i];
   }
}

gimple-like code:
header_bb:

  # sum_29 = PHI <sum_11(5), 1(3)>
  # i_28 = PHI <i_12(5), 0(3)>
  D.1795_8 = i_28 + 3;
  x[i_28] = D.1795_8;
  sum_11 = D.1795_8 + sum_29;
  i_12 = i_28 + 1;
  if (N_6(D) > i_12)
    goto header_bb;


exit_bb:

  # sum_21 = PHI <sum_11(4)>
  printf (&"%d"[0], sum_21);


after reduction transformation (only relevant parts):

parloop
{

  ....

  # Storing the initial value given by the user.  #

  .paral_data_store.32.sum.27 = 1;

  #pragma omp parallel num_threads(4)

  #pragma omp for schedule(static)

  # The neutral element corresponding to the particular
  reduction's operation, e.g. 0 for PLUS_EXPR,
  1 for MULT_EXPR, etc. replaces the user's initial value.  #

  # sum.27_29 = PHI <sum.27_11, 0>

  sum.27_11 = D.1827_8 + sum.27_29;

  GIMPLE_OMP_CONTINUE

  # Adding this reduction phi is done at create_phi_for_local_result() #
  # sum.27_56 = PHI <sum.27_11, 0>

  GIMPLE_OMP_RETURN

  # Creating the atomic operation is done at
  create_call_for_reduction_1()  #

  #pragma omp atomic_load
  D.1839_59 = *&.paral_data_load.33_51->reduction.23;
  D.1840_60 = sum.27_56 + D.1839_59;
  #pragma omp atomic_store (D.1840_60);

  GIMPLE_OMP_RETURN

  # collecting the result after the join of the threads is done at
  create_loads_for_reductions().
  The value computed by the threads is loaded from the
  shared struct.  #

  .paral_data_load.33_52 = &.paral_data_store.32;
  sum_37 = .paral_data_load.33_52->sum.27;
  sum_43 = D.1795_41 + sum_37;

  exit bb:
  # sum_21 = PHI <sum_43, sum_26>
  printf (&"%d"[0], sum_21);

  ...

}

*/
/* Minimal number of iterations of a loop that should be executed in each
   thread.  */
#define MIN_PER_THREAD 100
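/* For instance, with MIN_PER_THREAD of 100 and four threads requested, a
   loop is only parallelized when it is expected to run at least
   100 * 4 = 400 iterations (see the NITER check in gen_parallel_loop and
   the estimate check in parallelize_loops).  */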
/* Element of the hashtable, representing a
   reduction in the current loop.  */
struct reduction_info
{
  gimple reduc_stmt;		/* reduction statement.  */
  gimple reduc_phi;		/* The phi node defining the reduction.  */
  enum tree_code reduction_code;/* code for the reduction operation.  */
  unsigned reduc_version;	/* SSA_NAME_VERSION of original reduc_phi
				   result.  */
  gimple keep_res;		/* The PHI_RESULT of this phi is the resulting value
				   of the reduction variable when exiting the loop.  */
  tree initial_value;		/* The initial value of the reduction var before entering the loop.  */
  tree field;			/* the name of the field in the parloop data structure intended for reduction.  */
  tree init;			/* reduction initialization value.  */
  gimple new_phi;		/* (helper field) Newly created phi node whose result
				   will be passed to the atomic operation.  Represents
				   the local result each thread computed for the reduction
				   operation.  */
};
/* Equality and hash functions for hashtab code.  */

static int
reduction_info_eq (const void *aa, const void *bb)
{
  const struct reduction_info *a = (const struct reduction_info *) aa;
  const struct reduction_info *b = (const struct reduction_info *) bb;

  return (a->reduc_phi == b->reduc_phi);
}

static hashval_t
reduction_info_hash (const void *aa)
{
  const struct reduction_info *a = (const struct reduction_info *) aa;

  return a->reduc_version;
}
static struct reduction_info *
reduction_phi (htab_t reduction_list, gimple phi)
{
  struct reduction_info tmpred, *red;

  if (htab_elements (reduction_list) == 0 || phi == NULL)
    return NULL;

  tmpred.reduc_phi = phi;
  tmpred.reduc_version = gimple_uid (phi);
  red = (struct reduction_info *) htab_find (reduction_list, &tmpred);

  return red;
}
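/* For example, transform_to_exit_first_loop looks up the reduction
   associated with a loop-exit phi roughly as

     red = reduction_phi (reduction_list, SSA_NAME_DEF_STMT (val));
     if (red)
       red->keep_res = phi;

   where val is the value flowing out of the loop (a sketch of the calls
   made later in this file).  */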
/* Element of hashtable of names to copy.  */

struct name_to_copy_elt
{
  unsigned version;	/* The version of the name to copy.  */
  tree new_name;	/* The new name used in the copy.  */
  tree field;		/* The field of the structure used to pass the
			   value.  */
};
/* Equality and hash functions for hashtab code.  */

static int
name_to_copy_elt_eq (const void *aa, const void *bb)
{
  const struct name_to_copy_elt *a = (const struct name_to_copy_elt *) aa;
  const struct name_to_copy_elt *b = (const struct name_to_copy_elt *) bb;

  return a->version == b->version;
}

static hashval_t
name_to_copy_elt_hash (const void *aa)
{
  const struct name_to_copy_elt *a = (const struct name_to_copy_elt *) aa;

  return (hashval_t) a->version;
}
/* A transformation matrix, which is a self-contained ROWSIZE x COLSIZE
   matrix.  Rather than use floats, we simply keep a single DENOMINATOR that
   represents the denominator for every element in the matrix.  */
typedef struct lambda_trans_matrix_s
{
  lambda_matrix matrix;
  int rowsize;
  int colsize;
  int denominator;
} *lambda_trans_matrix;
#define LTM_MATRIX(T) ((T)->matrix)
#define LTM_ROWSIZE(T) ((T)->rowsize)
#define LTM_COLSIZE(T) ((T)->colsize)
#define LTM_DENOMINATOR(T) ((T)->denominator)
/* Allocate a new transformation matrix.  */

static lambda_trans_matrix
lambda_trans_matrix_new (int colsize, int rowsize,
			 struct obstack * lambda_obstack)
{
  lambda_trans_matrix ret;

  ret = (lambda_trans_matrix)
    obstack_alloc (lambda_obstack, sizeof (struct lambda_trans_matrix_s));
  LTM_MATRIX (ret) = lambda_matrix_new (rowsize, colsize, lambda_obstack);
  LTM_ROWSIZE (ret) = rowsize;
  LTM_COLSIZE (ret) = colsize;
  LTM_DENOMINATOR (ret) = 1;
  return ret;
}
/* Multiply a vector VEC by a matrix MAT.
   MAT is an M*N matrix, and VEC is a vector with length N.  The result
   is stored in DEST which must be a vector of length M.  */

static void
lambda_matrix_vector_mult (lambda_matrix matrix, int m, int n,
			   lambda_vector vec, lambda_vector dest)
{
  int i, j;

  lambda_vector_clear (dest, m);
  for (i = 0; i < m; i++)
    for (j = 0; j < n; j++)
      dest[i] += matrix[i][j] * vec[j];
}
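/* For example, multiplying the 2x2 matrix {{1, 0}, {0, -1}} with the
   vector (2, 3) stores (1*2 + 0*3, 0*2 + (-1)*3) = (2, -3) in DEST.  */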
/* Return true if TRANS is a legal transformation matrix that respects
   the dependence vectors in DISTS and DIRS.  The conservative answer
   is false.

   "Wolfe proves that a unimodular transformation represented by the
   matrix T is legal when applied to a loop nest with a set of
   lexicographically non-negative distance vectors RDG if and only if
   for each vector d in RDG, (T.d >= 0) is lexicographically positive.
   i.e.: if and only if it transforms the lexicographically positive
   distance vectors to lexicographically positive vectors.  Note that
   a unimodular matrix must transform the zero vector (and only it) to
   the zero vector." S.Muchnick.  */
static bool
lambda_transform_legal_p (lambda_trans_matrix trans,
			  int nb_loops,
			  VEC (ddr_p, heap) *dependence_relations)
{
  unsigned int i, j;
  lambda_vector distres;
  struct data_dependence_relation *ddr;

  gcc_assert (LTM_COLSIZE (trans) == nb_loops
	      && LTM_ROWSIZE (trans) == nb_loops);

  /* When there are no dependences, the transformation is correct.  */
  if (VEC_length (ddr_p, dependence_relations) == 0)
    return true;

  ddr = VEC_index (ddr_p, dependence_relations, 0);
  if (ddr == NULL)
    return true;

  /* When there is an unknown relation in the dependence_relations, we
     know that it is not worth looking at this loop nest: give up.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    return false;

  distres = lambda_vector_new (nb_loops);

  /* For each distance vector in the dependence graph.  */
  FOR_EACH_VEC_ELT (ddr_p, dependence_relations, i, ddr)
    {
      /* Don't care about relations for which we know that there is no
	 dependence, nor about read-read (aka. input-dependences):
	 these data accesses can happen in any order.  */
      if (DDR_ARE_DEPENDENT (ddr) == chrec_known
	  || (DR_IS_READ (DDR_A (ddr)) && DR_IS_READ (DDR_B (ddr))))
	continue;

      /* Conservatively answer: "this transformation is not valid".  */
      if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
	return false;

      /* If the dependence could not be captured by a distance vector,
	 conservatively answer that the transform is not valid.  */
      if (DDR_NUM_DIST_VECTS (ddr) == 0)
	return false;

      /* Compute trans.dist_vect.  */
      for (j = 0; j < DDR_NUM_DIST_VECTS (ddr); j++)
	{
	  lambda_matrix_vector_mult (LTM_MATRIX (trans), nb_loops, nb_loops,
				     DDR_DIST_VECT (ddr, j), distres);

	  if (!lambda_vector_lexico_pos (distres, nb_loops))
	    return false;
	}
    }
  return true;
}
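/* For example, a loop nest of depth 1 whose only dependence has the
   distance vector (1) -- say a[i] is written and a[i-1] is read -- is
   mapped by the reversal matrix {{-1}} to (-1), which is not
   lexicographically positive, so the reversal is rejected; a loop with
   no carried dependences trivially passes.  */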
/* Data dependency analysis.  Returns true if the iterations of LOOP
   are independent of each other (that is, if we can execute them
   in parallel).  */

static bool
loop_parallel_p (struct loop *loop, struct obstack * parloop_obstack)
{
  VEC (loop_p, heap) *loop_nest;
  VEC (ddr_p, heap) *dependence_relations;
  VEC (data_reference_p, heap) *datarefs;
  lambda_trans_matrix trans;
  bool ret = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Considering loop %d\n", loop->num);
      if (!loop->inner)
	fprintf (dump_file, "loop is innermost\n");
      else
	fprintf (dump_file, "loop NOT innermost\n");
    }

  /* Check for problems with dependences.  If the loop can be reversed,
     the iterations are independent.  */
  datarefs = VEC_alloc (data_reference_p, heap, 10);
  dependence_relations = VEC_alloc (ddr_p, heap, 10 * 10);
  loop_nest = VEC_alloc (loop_p, heap, 3);
  compute_data_dependences_for_loop (loop, true, &loop_nest, &datarefs,
				     &dependence_relations);
  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_data_dependence_relations (dump_file, dependence_relations);

  trans = lambda_trans_matrix_new (1, 1, parloop_obstack);
  LTM_MATRIX (trans)[0][0] = -1;

  if (lambda_transform_legal_p (trans, 1, dependence_relations))
    {
      ret = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  SUCCESS: may be parallelized\n");
    }
  else if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "  FAILED: data dependencies exist across iterations\n");

  VEC_free (loop_p, heap, loop_nest);
  free_dependence_relations (dependence_relations);
  free_data_refs (datarefs);

  return ret;
}
/* Return true when LOOP contains basic blocks marked with the
   BB_IRREDUCIBLE_LOOP flag.  */

static inline bool
loop_has_blocks_with_irreducible_flag (struct loop *loop)
{
  unsigned i;
  basic_block *bbs = get_loop_body_in_dom_order (loop);
  bool res = true;

  for (i = 0; i < loop->num_nodes; i++)
    if (bbs[i]->flags & BB_IRREDUCIBLE_LOOP)
      goto end;

  res = false;
 end:
  free (bbs);
  return res;
}
/* Assigns the address of OBJ in TYPE to an ssa name, and returns this name.
   The assignment statement is placed on edge ENTRY.  DECL_ADDRESS maps decls
   to their addresses that can be reused.  The address of OBJ is known to
   be invariant in the whole function.  Other needed statements are placed
   right before GSI.  */

static tree
take_address_of (tree obj, tree type, edge entry, htab_t decl_address,
		 gimple_stmt_iterator *gsi)
{
  int uid;
  void **dslot;
  struct int_tree_map ielt, *nielt;
  tree *var_p, name, bvar, addr;
  gimple stmt;
  gimple_seq stmts;

  /* Since the address of OBJ is invariant, the trees may be shared.
     Avoid rewriting unrelated parts of the code.  */
  obj = unshare_expr (obj);
  for (var_p = &obj;
       handled_component_p (*var_p);
       var_p = &TREE_OPERAND (*var_p, 0))
    continue;

  /* Canonicalize the access to base on a MEM_REF.  */
  if (DECL_P (*var_p))
    *var_p = build_simple_mem_ref (build_fold_addr_expr (*var_p));

  /* Assign a canonical SSA name to the address of the base decl used
     in the address and share it for all accesses and addresses based
     on it.  */
  uid = DECL_UID (TREE_OPERAND (TREE_OPERAND (*var_p, 0), 0));
  ielt.uid = uid;
  dslot = htab_find_slot_with_hash (decl_address, &ielt, uid, INSERT);
  if (!*dslot)
    {
      if (gsi == NULL)
	return NULL;
      addr = TREE_OPERAND (*var_p, 0);
      bvar = create_tmp_var (TREE_TYPE (addr),
			     get_name (TREE_OPERAND
					 (TREE_OPERAND (*var_p, 0), 0)));
      add_referenced_var (bvar);
      stmt = gimple_build_assign (bvar, addr);
      name = make_ssa_name (bvar, stmt);
      gimple_assign_set_lhs (stmt, name);
      gsi_insert_on_edge_immediate (entry, stmt);

      nielt = XNEW (struct int_tree_map);
      nielt->uid = uid;
      nielt->to = name;
      *dslot = nielt;
    }
  else
    name = ((struct int_tree_map *) *dslot)->to;

  /* Express the address in terms of the canonical SSA name.  */
  TREE_OPERAND (*var_p, 0) = name;
  if (gsi == NULL)
    return build_fold_addr_expr_with_type (obj, type);

  name = force_gimple_operand (build_addr (obj, current_function_decl),
			       &stmts, true, NULL_TREE);
  if (!gimple_seq_empty_p (stmts))
    gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);

  if (!useless_type_conversion_p (type, TREE_TYPE (name)))
    {
      name = force_gimple_operand (fold_convert (type, name), &stmts, true,
				   NULL_TREE);
      if (!gimple_seq_empty_p (stmts))
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
    }

  return name;
}
/* Callback for htab_traverse.  Create the initialization statement
   for reduction described in SLOT, and place it at the preheader of
   the loop described in DATA.  */

static int
initialize_reductions (void **slot, void *data)
{
  tree init, c;
  tree bvar, type, arg;
  edge e;

  struct reduction_info *const reduc = (struct reduction_info *) *slot;
  struct loop *loop = (struct loop *) data;

  /* Create initialization in preheader:
     reduction_variable = initialization value of reduction.  */

  /* In the phi node at the header, replace the argument coming
     from the preheader with the reduction initialization value.  */

  /* Create a new variable to initialize the reduction.  */
  type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
  bvar = create_tmp_var (type, "reduction");
  add_referenced_var (bvar);

  c = build_omp_clause (gimple_location (reduc->reduc_stmt),
			OMP_CLAUSE_REDUCTION);
  OMP_CLAUSE_REDUCTION_CODE (c) = reduc->reduction_code;
  OMP_CLAUSE_DECL (c) = SSA_NAME_VAR (gimple_assign_lhs (reduc->reduc_stmt));

  init = omp_reduction_init (c, TREE_TYPE (bvar));
  reduc->init = init;

  /* Replace the argument representing the initialization value
     with the initialization value for the reduction (neutral
     element for the particular operation, e.g. 0 for PLUS_EXPR,
     1 for MULT_EXPR, etc).
     Keep the old value in a new variable "reduction_initial",
     that will be taken into consideration after the parallel
     computing is done.  */

  e = loop_preheader_edge (loop);
  arg = PHI_ARG_DEF_FROM_EDGE (reduc->reduc_phi, e);
  /* Create new variable to hold the initial value.  */

  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE
	   (reduc->reduc_phi, loop_preheader_edge (loop)), init);
  reduc->initial_value = arg;
  return 1;
}
struct elv_data
{
  struct walk_stmt_info info;
  edge entry;
  htab_t decl_address;
  gimple_stmt_iterator *gsi;
  bool changed;
  bool reset;
};
/* Eliminates references to local variables in *TP out of the single
   entry single exit region starting at DTA->ENTRY.
   DECL_ADDRESS contains addresses of the references that had their
   address taken already.  If the expression is changed, CHANGED is
   set to true.  Callback for walk_tree.  */

static tree
eliminate_local_variables_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct elv_data *const dta = (struct elv_data *) data;
  tree t = *tp, var, addr, addr_type, type, obj;

  if (DECL_P (t))
    {
      *walk_subtrees = 0;

      if (!SSA_VAR_P (t) || DECL_EXTERNAL (t))
	return NULL_TREE;

      type = TREE_TYPE (t);
      addr_type = build_pointer_type (type);
      addr = take_address_of (t, addr_type, dta->entry, dta->decl_address,
			      dta->gsi);
      if (dta->gsi == NULL && addr == NULL_TREE)
	{
	  dta->reset = true;
	  return NULL_TREE;
	}

      *tp = build_simple_mem_ref (addr);

      dta->changed = true;
      return NULL_TREE;
    }

  if (TREE_CODE (t) == ADDR_EXPR)
    {
      /* ADDR_EXPR may appear in two contexts:
	 -- as a gimple operand, when the address taken is a function invariant
	 -- as gimple rhs, when the resulting address is not a function
	    invariant
	 We do not need to do anything special in the latter case (the base of
	 the memory reference whose address is taken may be replaced in the
	 DECL_P case).  The former case is more complicated, as we need to
	 ensure that the new address is still a gimple operand.  Thus, it
	 is not sufficient to replace just the base of the memory reference --
	 we need to move the whole computation of the address out of the
	 loop.  */
      if (!is_gimple_val (t))
	return NULL_TREE;

      *walk_subtrees = 0;
      obj = TREE_OPERAND (t, 0);
      var = get_base_address (obj);
      if (!var || !SSA_VAR_P (var) || DECL_EXTERNAL (var))
	return NULL_TREE;

      addr_type = TREE_TYPE (t);
      addr = take_address_of (obj, addr_type, dta->entry, dta->decl_address,
			      dta->gsi);
      if (dta->gsi == NULL && addr == NULL_TREE)
	{
	  dta->reset = true;
	  return NULL_TREE;
	}
      *tp = addr;

      dta->changed = true;
      return NULL_TREE;
    }

  if (!EXPR_P (t))
    *walk_subtrees = 0;

  return NULL_TREE;
}
/* Moves the references to local variables in STMT at *GSI out of the single
   entry single exit region starting at ENTRY.  DECL_ADDRESS contains
   addresses of the references that had their address taken
   already.  */

static void
eliminate_local_variables_stmt (edge entry, gimple_stmt_iterator *gsi,
				htab_t decl_address)
{
  struct elv_data dta;
  gimple stmt = gsi_stmt (*gsi);

  memset (&dta.info, '\0', sizeof (dta.info));
  dta.entry = entry;
  dta.decl_address = decl_address;
  dta.changed = false;
  dta.reset = false;

  if (gimple_debug_bind_p (stmt))
    {
      dta.gsi = NULL;
      walk_tree (gimple_debug_bind_get_value_ptr (stmt),
		 eliminate_local_variables_1, &dta.info, NULL);
      if (dta.reset)
	{
	  gimple_debug_bind_reset_value (stmt);
	  dta.changed = true;
	}
    }
  else
    {
      dta.gsi = gsi;
      walk_gimple_op (stmt, eliminate_local_variables_1, &dta.info);
    }

  if (dta.changed)
    update_stmt (stmt);
}
/* Eliminates the references to local variables from the single entry
   single exit region between the ENTRY and EXIT edges.

   This includes:
   1) Taking address of a local variable -- these are moved out of the
      region (and temporary variable is created to hold the address if
      necessary).

   2) Dereferencing a local variable -- these are replaced with indirect
      references.  */

static void
eliminate_local_variables (edge entry, edge exit)
{
  basic_block bb;
  VEC (basic_block, heap) *body = VEC_alloc (basic_block, heap, 3);
  unsigned i;
  gimple_stmt_iterator gsi;
  bool has_debug_stmt = false;
  htab_t decl_address = htab_create (10, int_tree_map_hash, int_tree_map_eq,
				     free);
  basic_block entry_bb = entry->src;
  basic_block exit_bb = exit->dest;

  gather_blocks_in_sese_region (entry_bb, exit_bb, &body);

  FOR_EACH_VEC_ELT (basic_block, body, i, bb)
    if (bb != entry_bb && bb != exit_bb)
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	if (gimple_debug_bind_p (gsi_stmt (gsi)))
	  has_debug_stmt = true;
	else
	  eliminate_local_variables_stmt (entry, &gsi, decl_address);

  if (has_debug_stmt)
    FOR_EACH_VEC_ELT (basic_block, body, i, bb)
      if (bb != entry_bb && bb != exit_bb)
	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	  if (gimple_debug_bind_p (gsi_stmt (gsi)))
	    eliminate_local_variables_stmt (entry, &gsi, decl_address);

  htab_delete (decl_address);
  VEC_free (basic_block, heap, body);
}
/* Returns true if expression EXPR is not defined between ENTRY and
   EXIT, i.e. if all its operands are defined outside of the region.  */

static bool
expr_invariant_in_region_p (edge entry, edge exit, tree expr)
{
  basic_block entry_bb = entry->src;
  basic_block exit_bb = exit->dest;
  basic_block def_bb;

  if (is_gimple_min_invariant (expr))
    return true;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
      if (def_bb
	  && dominated_by_p (CDI_DOMINATORS, def_bb, entry_bb)
	  && !dominated_by_p (CDI_DOMINATORS, def_bb, exit_bb))
	return false;

      return true;
    }

  return false;
}
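/* For example, in the code from the header comment, N_6(D) is invariant
   in the loop region (it is defined before the loop), while sum_11 is
   not (it is defined by a statement between ENTRY and EXIT).  */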
/* If COPY_NAME_P is true, creates and returns a duplicate of NAME.
   The copies are stored to NAME_COPIES, if NAME was already duplicated,
   its duplicate stored in NAME_COPIES is returned.

   Regardless of COPY_NAME_P, the decl used as a base of the ssa name is also
   duplicated, storing the copies in DECL_COPIES.  */

static tree
separate_decls_in_region_name (tree name,
			       htab_t name_copies, htab_t decl_copies,
			       bool copy_name_p)
{
  tree copy, var, var_copy;
  unsigned idx, uid, nuid;
  struct int_tree_map ielt, *nielt;
  struct name_to_copy_elt elt, *nelt;
  void **slot, **dslot;

  if (TREE_CODE (name) != SSA_NAME)
    return name;

  idx = SSA_NAME_VERSION (name);
  elt.version = idx;
  slot = htab_find_slot_with_hash (name_copies, &elt, idx,
				   copy_name_p ? INSERT : NO_INSERT);
  if (slot && *slot)
    return ((struct name_to_copy_elt *) *slot)->new_name;

  var = SSA_NAME_VAR (name);
  uid = DECL_UID (var);
  ielt.uid = uid;
  dslot = htab_find_slot_with_hash (decl_copies, &ielt, uid, INSERT);
  if (!*dslot)
    {
      var_copy = create_tmp_var (TREE_TYPE (var), get_name (var));
      DECL_GIMPLE_REG_P (var_copy) = DECL_GIMPLE_REG_P (var);
      add_referenced_var (var_copy);
      nielt = XNEW (struct int_tree_map);
      nielt->uid = uid;
      nielt->to = var_copy;
      *dslot = nielt;

      /* Ensure that when we meet this decl next time, we won't duplicate
	 it again.  */
      nuid = DECL_UID (var_copy);
      ielt.uid = nuid;
      dslot = htab_find_slot_with_hash (decl_copies, &ielt, nuid, INSERT);
      gcc_assert (!*dslot);
      nielt = XNEW (struct int_tree_map);
      nielt->uid = nuid;
      nielt->to = var_copy;
      *dslot = nielt;
    }
  else
    var_copy = ((struct int_tree_map *) *dslot)->to;

  if (copy_name_p)
    {
      copy = duplicate_ssa_name (name, NULL);
      nelt = XNEW (struct name_to_copy_elt);
      nelt->version = idx;
      nelt->new_name = copy;
      nelt->field = NULL_TREE;
      *slot = nelt;
    }
  else
    {
      gcc_assert (!slot);
      copy = name;
    }

  SSA_NAME_VAR (copy) = var_copy;

  return copy;
}
/* Finds the ssa names used in STMT that are defined outside the
   region between ENTRY and EXIT and replaces such ssa names with
   their duplicates.  The duplicates are stored to NAME_COPIES.  Base
   decls of all ssa names used in STMT (including those defined in
   LOOP) are replaced with the new temporary variables; the
   replacement decls are stored in DECL_COPIES.  */

static void
separate_decls_in_region_stmt (edge entry, edge exit, gimple stmt,
			       htab_t name_copies, htab_t decl_copies)
{
  use_operand_p use;
  def_operand_p def;
  ssa_op_iter oi;
  tree name, copy;
  bool copy_name_p;

  mark_virtual_ops_for_renaming (stmt);

  FOR_EACH_PHI_OR_STMT_DEF (def, stmt, oi, SSA_OP_DEF)
    {
      name = DEF_FROM_PTR (def);
      gcc_assert (TREE_CODE (name) == SSA_NAME);
      copy = separate_decls_in_region_name (name, name_copies, decl_copies,
					    false);
      gcc_assert (copy == name);
    }

  FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE)
    {
      name = USE_FROM_PTR (use);
      if (TREE_CODE (name) != SSA_NAME)
	continue;

      copy_name_p = expr_invariant_in_region_p (entry, exit, name);
      copy = separate_decls_in_region_name (name, name_copies, decl_copies,
					    copy_name_p);
      SET_USE (use, copy);
    }
}
/* Finds the ssa names used in STMT that are defined outside the
   region between ENTRY and EXIT and replaces such ssa names with
   their duplicates.  The duplicates are stored to NAME_COPIES.  Base
   decls of all ssa names used in STMT (including those defined in
   LOOP) are replaced with the new temporary variables; the
   replacement decls are stored in DECL_COPIES.  */

static bool
separate_decls_in_region_debug_bind (gimple stmt,
				     htab_t name_copies, htab_t decl_copies)
{
  use_operand_p use;
  ssa_op_iter oi;
  tree var, name;
  struct int_tree_map ielt;
  struct name_to_copy_elt elt;
  void **slot, **dslot;

  var = gimple_debug_bind_get_var (stmt);
  if (TREE_CODE (var) == DEBUG_EXPR_DECL)
    return true;
  gcc_assert (DECL_P (var) && SSA_VAR_P (var));
  ielt.uid = DECL_UID (var);
  dslot = htab_find_slot_with_hash (decl_copies, &ielt, ielt.uid, NO_INSERT);
  if (!dslot)
    return true;
  gimple_debug_bind_set_var (stmt, ((struct int_tree_map *) *dslot)->to);

  FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE)
    {
      name = USE_FROM_PTR (use);
      if (TREE_CODE (name) != SSA_NAME)
	continue;

      elt.version = SSA_NAME_VERSION (name);
      slot = htab_find_slot_with_hash (name_copies, &elt, elt.version,
				       NO_INSERT);
      if (!slot)
	{
	  gimple_debug_bind_reset_value (stmt);
	  update_stmt (stmt);
	  break;
	}

      SET_USE (use, ((struct name_to_copy_elt *) *slot)->new_name);
    }

  return false;
}
/* Callback for htab_traverse.  Adds a field corresponding to the reduction
   specified in SLOT.  The type is passed in DATA.  */

static int
add_field_for_reduction (void **slot, void *data)
{
  struct reduction_info *const red = (struct reduction_info *) *slot;
  tree const type = (tree) data;
  tree var = SSA_NAME_VAR (gimple_assign_lhs (red->reduc_stmt));
  tree field = build_decl (gimple_location (red->reduc_stmt),
			   FIELD_DECL, DECL_NAME (var), TREE_TYPE (var));

  insert_field_into_struct (type, field);

  red->field = field;

  return 1;
}
/* Callback for htab_traverse.  Adds a field corresponding to a ssa name
   described in SLOT.  The type is passed in DATA.  */

static int
add_field_for_name (void **slot, void *data)
{
  struct name_to_copy_elt *const elt = (struct name_to_copy_elt *) *slot;
  tree type = (tree) data;
  tree name = ssa_name (elt->version);
  tree var = SSA_NAME_VAR (name);
  tree field = build_decl (DECL_SOURCE_LOCATION (var),
			   FIELD_DECL, DECL_NAME (var), TREE_TYPE (var));

  insert_field_into_struct (type, field);
  elt->field = field;

  return 1;
}
/* Callback for htab_traverse.  A local result is the intermediate result
   computed by a single
   thread, or the initial value in case no iteration was executed.
   This function creates a phi node reflecting these values.
   The phi's result will be stored in NEW_PHI field of the
   reduction's data structure.  */

static int
create_phi_for_local_result (void **slot, void *data)
{
  struct reduction_info *const reduc = (struct reduction_info *) *slot;
  const struct loop *const loop = (const struct loop *) data;
  edge e;
  gimple new_phi;
  basic_block store_bb;
  tree local_res;
  source_location locus;

  /* STORE_BB is the block where the phi
     should be stored.  It is the destination of the loop exit.
     (Find the fallthru edge from GIMPLE_OMP_CONTINUE).  */
  store_bb = FALLTHRU_EDGE (loop->latch)->dest;

  /* STORE_BB has two predecessors.  One coming from the loop
     (the reduction's result is computed at the loop),
     and another coming from a block preceding the loop,
     when no iterations
     are executed (the initial value should be taken).  */
  if (EDGE_PRED (store_bb, 0) == FALLTHRU_EDGE (loop->latch))
    e = EDGE_PRED (store_bb, 1);
  else
    e = EDGE_PRED (store_bb, 0);
  local_res
    = make_ssa_name (SSA_NAME_VAR (gimple_assign_lhs (reduc->reduc_stmt)),
		     NULL);
  locus = gimple_location (reduc->reduc_stmt);
  new_phi = create_phi_node (local_res, store_bb);
  SSA_NAME_DEF_STMT (local_res) = new_phi;
  add_phi_arg (new_phi, reduc->init, e, locus);
  add_phi_arg (new_phi, gimple_assign_lhs (reduc->reduc_stmt),
	       FALLTHRU_EDGE (loop->latch), locus);
  reduc->new_phi = new_phi;

  return 1;
}
struct clsn_data
{
  tree store;
  tree load;

  basic_block store_bb;
  basic_block load_bb;
};
/* Callback for htab_traverse.  Create an atomic instruction for the
   reduction described in SLOT.
   DATA annotates the place in memory the atomic operation relates to,
   and the basic block it needs to be generated in.  */

static int
create_call_for_reduction_1 (void **slot, void *data)
{
  struct reduction_info *const reduc = (struct reduction_info *) *slot;
  struct clsn_data *const clsn_data = (struct clsn_data *) data;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
  tree load_struct;
  basic_block bb;
  basic_block new_bb;
  edge e;
  tree t, addr, ref, x;
  tree tmp_load, name;
  gimple load;

  load_struct = build_simple_mem_ref (clsn_data->load);
  t = build3 (COMPONENT_REF, type, load_struct, reduc->field, NULL_TREE);

  addr = build_addr (t, current_function_decl);

  /* Create phi node.  */
  bb = clsn_data->load_bb;

  e = split_block (bb, t);
  new_bb = e->dest;

  tmp_load = create_tmp_var (TREE_TYPE (TREE_TYPE (addr)), NULL);
  add_referenced_var (tmp_load);
  tmp_load = make_ssa_name (tmp_load, NULL);
  load = gimple_build_omp_atomic_load (tmp_load, addr);
  SSA_NAME_DEF_STMT (tmp_load) = load;
  gsi = gsi_start_bb (new_bb);
  gsi_insert_after (&gsi, load, GSI_NEW_STMT);

  e = split_block (new_bb, load);
  new_bb = e->dest;
  gsi = gsi_start_bb (new_bb);
  ref = tmp_load;
  x = fold_build2 (reduc->reduction_code,
		   TREE_TYPE (PHI_RESULT (reduc->new_phi)), ref,
		   PHI_RESULT (reduc->new_phi));

  name = force_gimple_operand_gsi (&gsi, x, true, NULL_TREE, true,
				   GSI_CONTINUE_LINKING);

  gsi_insert_after (&gsi, gimple_build_omp_atomic_store (name), GSI_NEW_STMT);
  return 1;
}
/* Create the atomic operation at the join point of the threads.
   REDUCTION_LIST describes the reductions in the LOOP.
   LD_ST_DATA describes the shared data structure where
   shared data is stored in and loaded from.  */
static void
create_call_for_reduction (struct loop *loop, htab_t reduction_list,
			   struct clsn_data *ld_st_data)
{
  htab_traverse (reduction_list, create_phi_for_local_result, loop);
  /* Find the fallthru edge from GIMPLE_OMP_CONTINUE.  */
  ld_st_data->load_bb = FALLTHRU_EDGE (loop->latch)->dest;
  htab_traverse (reduction_list, create_call_for_reduction_1, ld_st_data);
}
/* Callback for htab_traverse.  Loads the final reduction value at the
   join point of all threads, and inserts it in the right place.  */

static int
create_loads_for_reductions (void **slot, void *data)
{
  struct reduction_info *const red = (struct reduction_info *) *slot;
  struct clsn_data *const clsn_data = (struct clsn_data *) data;
  gimple stmt;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (gimple_assign_lhs (red->reduc_stmt));
  tree load_struct;
  tree name;
  tree x;

  gsi = gsi_after_labels (clsn_data->load_bb);
  load_struct = build_simple_mem_ref (clsn_data->load);
  load_struct = build3 (COMPONENT_REF, type, load_struct, red->field,
			NULL_TREE);

  x = load_struct;
  name = PHI_RESULT (red->keep_res);
  stmt = gimple_build_assign (name, x);
  SSA_NAME_DEF_STMT (name) = stmt;

  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  for (gsi = gsi_start_phis (gimple_bb (red->keep_res));
       !gsi_end_p (gsi); gsi_next (&gsi))
    if (gsi_stmt (gsi) == red->keep_res)
      {
	remove_phi_node (&gsi, false);
	return 1;
      }
  gcc_unreachable ();
}
/* Load the reduction result that was stored in LD_ST_DATA.
   REDUCTION_LIST describes the list of reductions that the
   loads should be generated for.  */
static void
create_final_loads_for_reduction (htab_t reduction_list,
				  struct clsn_data *ld_st_data)
{
  gimple_stmt_iterator gsi;
  tree t;
  gimple stmt;

  gsi = gsi_after_labels (ld_st_data->load_bb);
  t = build_fold_addr_expr (ld_st_data->store);
  stmt = gimple_build_assign (ld_st_data->load, t);

  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
  SSA_NAME_DEF_STMT (ld_st_data->load) = stmt;

  htab_traverse (reduction_list, create_loads_for_reductions, ld_st_data);
}
/* Callback for htab_traverse.  Store the neutral value for the
   particular reduction's operation, e.g. 0 for PLUS_EXPR,
   1 for MULT_EXPR, etc. into the reduction field.
   The reduction is specified in SLOT.  The store information is
   passed in DATA.  */

static int
create_stores_for_reduction (void **slot, void *data)
{
  struct reduction_info *const red = (struct reduction_info *) *slot;
  struct clsn_data *const clsn_data = (struct clsn_data *) data;
  tree t;
  gimple stmt;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (gimple_assign_lhs (red->reduc_stmt));

  gsi = gsi_last_bb (clsn_data->store_bb);
  t = build3 (COMPONENT_REF, type, clsn_data->store, red->field, NULL_TREE);
  stmt = gimple_build_assign (t, red->initial_value);
  mark_virtual_ops_for_renaming (stmt);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  return 1;
}
/* Callback for htab_traverse.  Creates loads to a field of LOAD in LOAD_BB and
   store to a field of STORE in STORE_BB for the ssa name and its duplicate
   specified in SLOT.  */

static int
create_loads_and_stores_for_name (void **slot, void *data)
{
  struct name_to_copy_elt *const elt = (struct name_to_copy_elt *) *slot;
  struct clsn_data *const clsn_data = (struct clsn_data *) data;
  tree t;
  gimple stmt;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (elt->new_name);
  tree load_struct;

  gsi = gsi_last_bb (clsn_data->store_bb);
  t = build3 (COMPONENT_REF, type, clsn_data->store, elt->field, NULL_TREE);
  stmt = gimple_build_assign (t, ssa_name (elt->version));
  mark_virtual_ops_for_renaming (stmt);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  gsi = gsi_last_bb (clsn_data->load_bb);
  load_struct = build_simple_mem_ref (clsn_data->load);
  t = build3 (COMPONENT_REF, type, load_struct, elt->field, NULL_TREE);
  stmt = gimple_build_assign (elt->new_name, t);
  SSA_NAME_DEF_STMT (elt->new_name) = stmt;
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  return 1;
}
/* Moves all the variables used in LOOP and defined outside of it (including
   the initial values of loop phi nodes, and *PER_THREAD if it is a ssa
   name) to a structure created for this purpose.  The code

   while (1)
     {
       use (a);
       use (b);
     }

   is transformed this way:

   bb0:
   old.a = a;
   old.b = b;

   bb1:
   a' = new->a;
   b' = new->b;
   while (1)
     {
       use (a');
       use (b');
     }

   `old' is stored to *ARG_STRUCT and `new' is stored to NEW_ARG_STRUCT.  The
   pointer `new' is intentionally not initialized (the loop will be split to a
   separate function later, and `new' will be initialized from its arguments).
   LD_ST_DATA holds information about the shared data structure used to pass
   information among the threads.  It is initialized here, and
   gen_parallel_loop will pass it to create_call_for_reduction that
   needs this information.  REDUCTION_LIST describes the reductions
   in LOOP.  */

static void
separate_decls_in_region (edge entry, edge exit, htab_t reduction_list,
			  tree *arg_struct, tree *new_arg_struct,
			  struct clsn_data *ld_st_data)

{
  basic_block bb1 = split_edge (entry);
  basic_block bb0 = single_pred (bb1);
  htab_t name_copies = htab_create (10, name_to_copy_elt_hash,
				    name_to_copy_elt_eq, free);
  htab_t decl_copies = htab_create (10, int_tree_map_hash, int_tree_map_eq,
				    free);
  unsigned i;
  tree type, type_name, nvar;
  gimple_stmt_iterator gsi;
  struct clsn_data clsn_data;
  VEC (basic_block, heap) *body = VEC_alloc (basic_block, heap, 3);
  basic_block bb;
  basic_block entry_bb = bb1;
  basic_block exit_bb = exit->dest;
  bool has_debug_stmt = false;

  entry = single_succ_edge (entry_bb);
  gather_blocks_in_sese_region (entry_bb, exit_bb, &body);

  FOR_EACH_VEC_ELT (basic_block, body, i, bb)
    {
      if (bb != entry_bb && bb != exit_bb)
	{
	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    separate_decls_in_region_stmt (entry, exit, gsi_stmt (gsi),
					   name_copies, decl_copies);

	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);

	      if (is_gimple_debug (stmt))
		has_debug_stmt = true;
	      else
		separate_decls_in_region_stmt (entry, exit, stmt,
					       name_copies, decl_copies);
	    }
	}
    }

  /* Now process debug bind stmts.  We must not create decls while
     processing debug stmts, so we defer their processing so as to
     make sure we will have debug info for as many variables as
     possible (all of those that were dealt with in the loop above),
     and discard those for which we know there's nothing we can
     do.  */
  if (has_debug_stmt)
    FOR_EACH_VEC_ELT (basic_block, body, i, bb)
      if (bb != entry_bb && bb != exit_bb)
	{
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
	    {
	      gimple stmt = gsi_stmt (gsi);

	      if (gimple_debug_bind_p (stmt))
		{
		  if (separate_decls_in_region_debug_bind (stmt,
							   name_copies,
							   decl_copies))
		    {
		      gsi_remove (&gsi, true);
		      continue;
		    }
		}

	      gsi_next (&gsi);
	    }
	}

  VEC_free (basic_block, heap, body);

  if (htab_elements (name_copies) == 0 && htab_elements (reduction_list) == 0)
    {
      /* It may happen that there is nothing to copy (if there are only
	 loop carried and external variables in the loop).  */
      *arg_struct = NULL;
      *new_arg_struct = NULL;
    }
  else
    {
      /* Create the type for the structure to store the ssa names to.  */
      type = lang_hooks.types.make_type (RECORD_TYPE);
      type_name = build_decl (UNKNOWN_LOCATION,
			      TYPE_DECL, create_tmp_var_name (".paral_data"),
			      type);
      TYPE_NAME (type) = type_name;

      htab_traverse (name_copies, add_field_for_name, type);
      if (reduction_list && htab_elements (reduction_list) > 0)
	{
	  /* Create the fields for reductions.  */
	  htab_traverse (reduction_list, add_field_for_reduction,
			 type);
	}
      layout_type (type);

      /* Create the loads and stores.  */
      *arg_struct = create_tmp_var (type, ".paral_data_store");
      add_referenced_var (*arg_struct);
      nvar = create_tmp_var (build_pointer_type (type), ".paral_data_load");
      add_referenced_var (nvar);
      *new_arg_struct = make_ssa_name (nvar, NULL);

      ld_st_data->store = *arg_struct;
      ld_st_data->load = *new_arg_struct;
      ld_st_data->store_bb = bb0;
      ld_st_data->load_bb = bb1;

      htab_traverse (name_copies, create_loads_and_stores_for_name,
		     ld_st_data);

      /* Load the calculation from memory (after the join of the threads).  */

      if (reduction_list && htab_elements (reduction_list) > 0)
	{
	  htab_traverse (reduction_list, create_stores_for_reduction,
			 ld_st_data);
	  clsn_data.load = make_ssa_name (nvar, NULL);
	  clsn_data.load_bb = exit->dest;
	  clsn_data.store = ld_st_data->store;
	  create_final_loads_for_reduction (reduction_list, &clsn_data);
	}
    }

  htab_delete (decl_copies);
  htab_delete (name_copies);
}
/* Bitmap containing uids of functions created by parallelization.  We cannot
   allocate it from the default obstack, as it must live across compilation
   of several functions; we make it gc allocated instead.  */

static GTY(()) bitmap parallelized_functions;

/* Returns true if FN was created by create_loop_fn.  */

static bool
parallelized_function_p (tree fn)
{
  if (!parallelized_functions || !DECL_ARTIFICIAL (fn))
    return false;

  return bitmap_bit_p (parallelized_functions, DECL_UID (fn));
}
/* Creates and returns an empty function that will receive the body of
   a parallelized loop.  */

static tree
create_loop_fn (location_t loc)
{
  char buf[100];
  char *tname;
  tree decl, type, name, t;
  struct function *act_cfun = cfun;
  static unsigned loopfn_num;

  snprintf (buf, 100, "%s.$loopfn", current_function_name ());
  ASM_FORMAT_PRIVATE_NAME (tname, buf, loopfn_num++);
  clean_symbol_name (tname);
  name = get_identifier (tname);
  type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (loc, FUNCTION_DECL, name, type);
  if (!parallelized_functions)
    parallelized_functions = BITMAP_GGC_ALLOC ();
  bitmap_set_bit (parallelized_functions, DECL_UID (decl));

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (loc, RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  t = build_decl (loc, PARM_DECL, get_identifier (".paral_data_param"),
		  ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;

  allocate_struct_function (decl, false);

  /* The call to allocate_struct_function clobbers CFUN, so we need to restore
     it.  */
  set_cfun (act_cfun);

  return decl;
}
/* Moves the exit condition of LOOP to the beginning of its header, and
   duplicates the part of the last iteration that gets disabled to the
   exit of the loop.  NIT is the number of iterations of the loop
   (used to initialize the variables in the duplicated part).

   TODO: the common case is that latch of the loop is empty and immediately
   follows the loop exit.  In this case, it would be better not to copy the
   body of the loop, but only move the entry of the loop directly before the
   exit check and increase the number of iterations of the loop by one.
   This may need some additional preconditioning in case NIT = ~0.
   REDUCTION_LIST describes the reductions in LOOP.  */

static void
transform_to_exit_first_loop (struct loop *loop, htab_t reduction_list, tree nit)
{
  basic_block *bbs, *nbbs, ex_bb, orig_header;
  unsigned n;
  bool ok;
  edge exit = single_dom_exit (loop), hpred;
  tree control, control_name, res, t;
  gimple phi, nphi, cond_stmt, stmt, cond_nit;
  gimple_stmt_iterator gsi;
  tree nit_1;

  split_block_after_labels (loop->header);
  orig_header = single_succ (loop->header);
  hpred = single_succ_edge (loop->header);

  cond_stmt = last_stmt (exit->src);
  control = gimple_cond_lhs (cond_stmt);
  gcc_assert (gimple_cond_rhs (cond_stmt) == nit);

  /* Make sure that we have phi nodes on exit for all loop header phis
     (create_parallel_loop requires that).  */
  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      res = PHI_RESULT (phi);
      t = make_ssa_name (SSA_NAME_VAR (res), phi);
      SET_PHI_RESULT (phi, t);
      nphi = create_phi_node (res, orig_header);
      SSA_NAME_DEF_STMT (res) = nphi;
      add_phi_arg (nphi, t, hpred, UNKNOWN_LOCATION);

      if (res == control)
	{
	  gimple_cond_set_lhs (cond_stmt, t);
	  update_stmt (cond_stmt);
	  control = t;
	}
    }

  bbs = get_loop_body_in_dom_order (loop);

  for (n = 0; bbs[n] != loop->latch; n++)
    continue;
  nbbs = XNEWVEC (basic_block, n);
  ok = gimple_duplicate_sese_tail (single_succ_edge (loop->header), exit,
				   bbs + 1, n, nbbs);
  gcc_assert (ok);
  free (bbs);
  ex_bb = nbbs[0];
  free (nbbs);

  /* Other than reductions, the only gimple reg that should be copied
     out of the loop is the control variable.  */

  control_name = NULL_TREE;
  for (gsi = gsi_start_phis (ex_bb); !gsi_end_p (gsi); )
    {
      phi = gsi_stmt (gsi);
      res = PHI_RESULT (phi);
      if (!is_gimple_reg (res))
	{
	  gsi_next (&gsi);
	  continue;
	}

      /* Check if it is a part of reduction.  If it is,
	 keep the phi at the reduction's keep_res field.  The
	 PHI_RESULT of this phi is the resulting value of the reduction
	 variable when exiting the loop.  */

      exit = single_dom_exit (loop);

      if (htab_elements (reduction_list) > 0)
	{
	  struct reduction_info *red;

	  tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);
	  red = reduction_phi (reduction_list, SSA_NAME_DEF_STMT (val));
	  if (red)
	    {
	      red->keep_res = phi;
	      gsi_next (&gsi);
	      continue;
	    }
	}
      gcc_assert (control_name == NULL_TREE
		  && SSA_NAME_VAR (res) == SSA_NAME_VAR (control));
      control_name = res;
      remove_phi_node (&gsi, false);
    }
  gcc_assert (control_name != NULL_TREE);

  /* Initialize the control variable to number of iterations
     according to the rhs of the exit condition.  */
  gsi = gsi_after_labels (ex_bb);
  cond_nit = last_stmt (exit->src);
  nit_1 = gimple_cond_rhs (cond_nit);
  nit_1 = force_gimple_operand_gsi (&gsi,
				    fold_convert (TREE_TYPE (control_name), nit_1),
				    false, NULL_TREE, false, GSI_SAME_STMT);
  stmt = gimple_build_assign (control_name, nit_1);
  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
  SSA_NAME_DEF_STMT (control_name) = stmt;
}
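/* As a sketch (on source rather than GIMPLE), a loop of the form

     do { body; } while (i++ < n);

   becomes roughly

     while (i < n) { body; i++; }
     <copy of the part of the last iteration after the exit test>

   so that the exit test comes first in the loop body, as GIMPLE_OMP_FOR
   requires.  */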
/* Create the parallel constructs for LOOP as described in gen_parallel_loop.
   LOOP_FN and DATA are the arguments of GIMPLE_OMP_PARALLEL.
   NEW_DATA is the variable that should be initialized from the argument
   of LOOP_FN.  N_THREADS is the requested number of threads.  Returns the
   basic block containing GIMPLE_OMP_PARALLEL tree.  */

static basic_block
create_parallel_loop (struct loop *loop, tree loop_fn, tree data,
		      tree new_data, unsigned n_threads, location_t loc)
{
  gimple_stmt_iterator gsi;
  basic_block bb, paral_bb, for_bb, ex_bb;
  tree t, param;
  gimple stmt, for_stmt, phi, cond_stmt;
  tree cvar, cvar_init, initvar, cvar_next, cvar_base, type;
  edge exit, nexit, guard, end, e;

  /* Prepare the GIMPLE_OMP_PARALLEL statement.  */
  bb = loop_preheader_edge (loop)->src;
  paral_bb = single_pred (bb);
  gsi = gsi_last_bb (paral_bb);

  t = build_omp_clause (loc, OMP_CLAUSE_NUM_THREADS);
  OMP_CLAUSE_NUM_THREADS_EXPR (t)
    = build_int_cst (integer_type_node, n_threads);
  stmt = gimple_build_omp_parallel (NULL, t, loop_fn, data);
  gimple_set_location (stmt, loc);

  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  /* Initialize NEW_DATA.  */
  if (data)
    {
      gsi = gsi_after_labels (bb);

      param = make_ssa_name (DECL_ARGUMENTS (loop_fn), NULL);
      stmt = gimple_build_assign (param, build_fold_addr_expr (data));
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      SSA_NAME_DEF_STMT (param) = stmt;

      stmt = gimple_build_assign (new_data,
				  fold_convert (TREE_TYPE (new_data), param));
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      SSA_NAME_DEF_STMT (new_data) = stmt;
    }

  /* Emit GIMPLE_OMP_RETURN for GIMPLE_OMP_PARALLEL.  */
  bb = split_loop_exit_edge (single_dom_exit (loop));
  gsi = gsi_last_bb (bb);
  stmt = gimple_build_omp_return (false);
  gimple_set_location (stmt, loc);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  /* Extract data for GIMPLE_OMP_FOR.  */
  gcc_assert (loop->header == single_dom_exit (loop)->src);
  cond_stmt = last_stmt (loop->header);

  cvar = gimple_cond_lhs (cond_stmt);
  cvar_base = SSA_NAME_VAR (cvar);
  phi = SSA_NAME_DEF_STMT (cvar);
  cvar_init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
  initvar = make_ssa_name (cvar_base, NULL);
  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, loop_preheader_edge (loop)),
	   initvar);
  cvar_next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));

  gsi = gsi_last_nondebug_bb (loop->latch);
  gcc_assert (gsi_stmt (gsi) == SSA_NAME_DEF_STMT (cvar_next));
  gsi_remove (&gsi, true);

  /* Prepare cfg.  */
  for_bb = split_edge (loop_preheader_edge (loop));
  ex_bb = split_loop_exit_edge (single_dom_exit (loop));
  extract_true_false_edges_from_block (loop->header, &nexit, &exit);
  gcc_assert (exit == single_dom_exit (loop));

  guard = make_edge (for_bb, ex_bb, 0);
  single_succ_edge (loop->latch)->flags = 0;
  end = make_edge (loop->latch, ex_bb, EDGE_FALLTHRU);
  for (gsi = gsi_start_phis (ex_bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      source_location locus;
      tree def;
      phi = gsi_stmt (gsi);
      stmt = SSA_NAME_DEF_STMT (PHI_ARG_DEF_FROM_EDGE (phi, exit));

      def = PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop));
      locus = gimple_phi_arg_location_from_edge (stmt,
						 loop_preheader_edge (loop));
      add_phi_arg (phi, def, guard, locus);

      def = PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (loop));
      locus = gimple_phi_arg_location_from_edge (stmt, loop_latch_edge (loop));
      add_phi_arg (phi, def, end, locus);
    }
  e = redirect_edge_and_branch (exit, nexit->dest);
  PENDING_STMT (e) = NULL;

  /* Emit GIMPLE_OMP_FOR.  */
  gimple_cond_set_lhs (cond_stmt, cvar_base);
  type = TREE_TYPE (cvar);
  t = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE);
  OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_STATIC;

  for_stmt = gimple_build_omp_for (NULL, t, 1, NULL);
  gimple_set_location (for_stmt, loc);
  gimple_omp_for_set_index (for_stmt, 0, initvar);
  gimple_omp_for_set_initial (for_stmt, 0, cvar_init);
  gimple_omp_for_set_final (for_stmt, 0, gimple_cond_rhs (cond_stmt));
  gimple_omp_for_set_cond (for_stmt, 0, gimple_cond_code (cond_stmt));
  gimple_omp_for_set_incr (for_stmt, 0, build2 (PLUS_EXPR, type,
						cvar_base,
						build_int_cst (type, 1)));

  gsi = gsi_last_bb (for_bb);
  gsi_insert_after (&gsi, for_stmt, GSI_NEW_STMT);
  SSA_NAME_DEF_STMT (initvar) = for_stmt;

  /* Emit GIMPLE_OMP_CONTINUE.  */
  gsi = gsi_last_bb (loop->latch);
  stmt = gimple_build_omp_continue (cvar_next, cvar);
  gimple_set_location (stmt, loc);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
  SSA_NAME_DEF_STMT (cvar_next) = stmt;

  /* Emit GIMPLE_OMP_RETURN for GIMPLE_OMP_FOR.  */
  gsi = gsi_last_bb (ex_bb);
  stmt = gimple_build_omp_return (true);
  gimple_set_location (stmt, loc);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  return paral_bb;
}
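/* For the example at the top of the file and four threads, the emitted
   construct corresponds to

     #pragma omp parallel num_threads(4)
       #pragma omp for schedule(static)
         for (...) { ... }

   with GIMPLE_OMP_CONTINUE at the latch and GIMPLE_OMP_RETURNs closing
   the GIMPLE_OMP_FOR and GIMPLE_OMP_PARALLEL regions.  */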
/* Generates code to execute the iterations of LOOP in N_THREADS
   threads in parallel.

   NITER describes number of iterations of LOOP.
   REDUCTION_LIST describes the reductions existent in the LOOP.  */

static void
gen_parallel_loop (struct loop *loop, htab_t reduction_list,
		   unsigned n_threads, struct tree_niter_desc *niter)
{
  loop_iterator li;
  tree many_iterations_cond, type, nit;
  tree arg_struct, new_arg_struct;
  gimple_seq stmts;
  basic_block parallel_head;
  edge entry, exit;
  struct clsn_data clsn_data;
  unsigned prob;
  location_t loc;
  gimple cond_stmt;

  /* From

     ---------------------------------------------------------------------
     loop
       {
	 IV = phi (INIT, IV + STEP)
	 BODY1;
	 if (COND)
	   break;
	 BODY2;
       }
     ---------------------------------------------------------------------

     with # of iterations NITER (possibly with MAY_BE_ZERO assumption),
     we generate the following code:

     ---------------------------------------------------------------------

     if (MAY_BE_ZERO
     || NITER < MIN_PER_THREAD * N_THREADS)
       goto original;

     BODY1;
     store all local loop-invariant variables used in body of the loop to DATA.
     GIMPLE_OMP_PARALLEL (OMP_CLAUSE_NUM_THREADS (N_THREADS), LOOPFN, DATA);
     load the variables from DATA.
     GIMPLE_OMP_FOR (IV = INIT; COND; IV += STEP) (OMP_CLAUSE_SCHEDULE (static))
     BODY2;
     BODY1;
     GIMPLE_OMP_CONTINUE;
     GIMPLE_OMP_RETURN         -- GIMPLE_OMP_FOR
     GIMPLE_OMP_RETURN         -- GIMPLE_OMP_PARALLEL
     goto end;

     original:
     loop
       {
	 IV = phi (INIT, IV + STEP)
	 BODY1;
	 if (COND)
	   break;
	 BODY2;
       }

     end:

   */

  /* Create two versions of the loop -- in the old one, we know that the
     number of iterations is large enough, and we will transform it into the
     loop that will be split to loop_fn, the new one will be used for the
     remaining iterations.  */

  type = TREE_TYPE (niter->niter);
  nit = force_gimple_operand (unshare_expr (niter->niter), &stmts, true,
			      NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

  many_iterations_cond =
    fold_build2 (GE_EXPR, boolean_type_node,
		 nit, build_int_cst (type, MIN_PER_THREAD * n_threads));
  many_iterations_cond
    = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
		   invert_truthvalue (unshare_expr (niter->may_be_zero)),
		   many_iterations_cond);
  many_iterations_cond
    = force_gimple_operand (many_iterations_cond, &stmts, false, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
  if (!is_gimple_condexpr (many_iterations_cond))
    {
      many_iterations_cond
	= force_gimple_operand (many_iterations_cond, &stmts,
				true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
    }

  initialize_original_copy_tables ();

  /* We assume that the loop usually iterates a lot.  */
  prob = 4 * REG_BR_PROB_BASE / 5;
  loop_version (loop, many_iterations_cond, NULL,
		prob, prob, REG_BR_PROB_BASE - prob, true);
  update_ssa (TODO_update_ssa);
  free_original_copy_tables ();

  /* Base all the induction variables in LOOP on a single control one.  */
  canonicalize_loop_ivs (loop, &nit, true);

  /* Ensure that the exit condition is the first statement in the loop.  */
  transform_to_exit_first_loop (loop, reduction_list, nit);

  /* Generate initializations for reductions.  */
  if (htab_elements (reduction_list) > 0)
    htab_traverse (reduction_list, initialize_reductions, loop);

  /* Eliminate the references to local variables from the loop.  */
  gcc_assert (single_exit (loop));
  entry = loop_preheader_edge (loop);
  exit = single_dom_exit (loop);

  eliminate_local_variables (entry, exit);
  /* In the old loop, move all variables non-local to the loop to a structure
     and back, and create separate decls for the variables used in loop.  */
  separate_decls_in_region (entry, exit, reduction_list, &arg_struct,
			    &new_arg_struct, &clsn_data);

  /* Create the parallel constructs.  */
  loc = UNKNOWN_LOCATION;
  cond_stmt = last_stmt (loop->header);
  if (cond_stmt)
    loc = gimple_location (cond_stmt);
  parallel_head = create_parallel_loop (loop, create_loop_fn (loc), arg_struct,
					new_arg_struct, n_threads, loc);
  if (htab_elements (reduction_list) > 0)
    create_call_for_reduction (loop, reduction_list, &clsn_data);

  scev_reset ();

  /* Cancel the loop (it is simpler to do it here rather than to teach the
     expander to do it).  */
  cancel_loop_tree (loop);

  /* Free loop bound estimations that could contain references to
     removed statements.  */
  FOR_EACH_LOOP (li, loop, 0)
    free_numbers_of_iterations_estimates_loop (loop);

  /* Expand the parallel constructs.  We do it directly here instead of running
     a separate expand_omp pass, since it is more efficient, and less likely to
     cause troubles with further analyses not being able to deal with the
     OMP trees.  */

  omp_expand_local (parallel_head);
}
/* Returns true when LOOP contains vector phi nodes.  */

static bool
loop_has_vector_phi_nodes (struct loop *loop ATTRIBUTE_UNUSED)
{
  unsigned i;
  basic_block *bbs = get_loop_body_in_dom_order (loop);
  gimple_stmt_iterator gsi;
  bool res = true;

  for (i = 0; i < loop->num_nodes; i++)
    for (gsi = gsi_start_phis (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
      if (TREE_CODE (TREE_TYPE (PHI_RESULT (gsi_stmt (gsi)))) == VECTOR_TYPE)
	goto end;

  res = false;
 end:
  free (bbs);
  return res;
}
/* Create a reduction_info struct, initialize it with REDUC_STMT
   and PHI, insert it to the REDUCTION_LIST.  */

static void
build_new_reduction (htab_t reduction_list, gimple reduc_stmt, gimple phi)
{
  PTR *slot;
  struct reduction_info *new_reduction;

  gcc_assert (reduc_stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
	       "Detected reduction. reduction stmt is: \n");
      print_gimple_stmt (dump_file, reduc_stmt, 0, 0);
      fprintf (dump_file, "\n");
    }

  new_reduction = XCNEW (struct reduction_info);

  new_reduction->reduc_stmt = reduc_stmt;
  new_reduction->reduc_phi = phi;
  new_reduction->reduc_version = SSA_NAME_VERSION (gimple_phi_result (phi));
  new_reduction->reduction_code = gimple_assign_rhs_code (reduc_stmt);
  slot = htab_find_slot (reduction_list, new_reduction, INSERT);
  *slot = new_reduction;
}
/* Callback for htab_traverse.  Sets gimple_uid of reduc_phi stmts.  */

static int
set_reduc_phi_uids (void **slot, void *data ATTRIBUTE_UNUSED)
{
  struct reduction_info *const red = (struct reduction_info *) *slot;
  gimple_set_uid (red->reduc_phi, red->reduc_version);
  return 1;
}
/* Detect all reductions in the LOOP, insert them into REDUCTION_LIST.  */

static void
gather_scalar_reductions (loop_p loop, htab_t reduction_list)
{
  gimple_stmt_iterator gsi;
  loop_vec_info simple_loop_info;

  simple_loop_info = vect_analyze_loop_form (loop);

  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      affine_iv iv;
      tree res = PHI_RESULT (phi);
      bool double_reduc;

      if (!is_gimple_reg (res))
	continue;

      if (!simple_iv (loop, loop, res, &iv, true)
	  && simple_loop_info)
	{
	  gimple reduc_stmt = vect_force_simple_reduction (simple_loop_info,
							   phi, true,
							   &double_reduc);
	  if (reduc_stmt && !double_reduc)
	    build_new_reduction (reduction_list, reduc_stmt, phi);
	}
    }
  destroy_loop_vec_info (simple_loop_info, true);

  /* As gimple_uid is used by the vectorizer in between vect_analyze_loop_form
     and destroy_loop_vec_info, we can set gimple_uid of reduc_phi stmts
     only now.  */
  htab_traverse (reduction_list, set_reduc_phi_uids, NULL);
}
/* Try to initialize NITER for code generation part.  */

static bool
try_get_loop_niter (loop_p loop, struct tree_niter_desc *niter)
{
  edge exit = single_dom_exit (loop);

  gcc_assert (exit);

  /* We need to know # of iterations, and there should be no uses of values
     defined inside loop outside of it, unless the values are invariants of
     the loop.  */
  if (!number_of_iterations_exit (loop, exit, niter, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  FAILED: number of iterations not known\n");
      return false;
    }

  return true;
}
/* Try to initialize REDUCTION_LIST for code generation part.
   REDUCTION_LIST describes the reductions.  */

static bool
try_create_reduction_list (loop_p loop, htab_t reduction_list)
{
  edge exit = single_dom_exit (loop);
  gimple_stmt_iterator gsi;

  gcc_assert (exit);

  gather_scalar_reductions (loop, reduction_list);


  for (gsi = gsi_start_phis (exit->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      struct reduction_info *red;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      gimple reduc_phi;
      tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);

      if (is_gimple_reg (val))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "phi is ");
	      print_gimple_stmt (dump_file, phi, 0, 0);
	      fprintf (dump_file, "arg of phi to exit:   value ");
	      print_generic_expr (dump_file, val, 0);
	      fprintf (dump_file, " used outside loop\n");
	      fprintf (dump_file,
		       "  checking if it is a part of reduction pattern:  \n");
	    }
	  if (htab_elements (reduction_list) == 0)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "  FAILED: it is not a part of reduction.\n");
	      return false;
	    }
	  reduc_phi = NULL;
	  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, val)
	    {
	      if (!gimple_debug_bind_p (USE_STMT (use_p))
		  && flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
		{
		  reduc_phi = USE_STMT (use_p);
		  break;
		}
	    }
	  red = reduction_phi (reduction_list, reduc_phi);
	  if (red == NULL)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "  FAILED: it is not a part of reduction.\n");
	      return false;
	    }
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "reduction phi is  ");
	      print_gimple_stmt (dump_file, red->reduc_phi, 0, 0);
	      fprintf (dump_file, "reduction stmt is  ");
	      print_gimple_stmt (dump_file, red->reduc_stmt, 0, 0);
	    }
	}
    }

  /* The iterations of the loop may communicate only through bivs whose
     iteration space can be distributed efficiently.  */
  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree def = PHI_RESULT (phi);
      affine_iv iv;

      if (is_gimple_reg (def) && !simple_iv (loop, loop, def, &iv, true))
	{
	  struct reduction_info *red;

	  red = reduction_phi (reduction_list, phi);
	  if (red == NULL)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "  FAILED: scalar dependency between iterations\n");
	      return false;
	    }
	}
    }

  return true;
}
/* Detect parallel loops and generate parallel code using libgomp
   primitives.  Returns true if some loop was parallelized, false
   otherwise.  */

bool
parallelize_loops (void)
{
  unsigned n_threads = flag_tree_parallelize_loops;
  bool changed = false;
  struct loop *loop;
  struct tree_niter_desc niter_desc;
  loop_iterator li;
  htab_t reduction_list;
  struct obstack parloop_obstack;
  HOST_WIDE_INT estimated;
  LOC loop_loc;

  /* Do not parallelize loops in the functions created by parallelization.  */
  if (parallelized_function_p (cfun->decl))
    return false;
  if (cfun->has_nonlocal_label)
    return false;

  gcc_obstack_init (&parloop_obstack);
  reduction_list = htab_create (10, reduction_info_hash,
				reduction_info_eq, free);
  init_stmt_vec_info_vec ();

  FOR_EACH_LOOP (li, loop, 0)
    {
      htab_empty (reduction_list);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Trying loop %d as candidate\n", loop->num);
	  if (loop->inner)
	    fprintf (dump_file, "loop %d is not innermost\n", loop->num);
	  else
	    fprintf (dump_file, "loop %d is innermost\n", loop->num);
	}

      /* If we use autopar in graphite pass, we use its marked dependency
	 checking results.  */
      if (flag_loop_parallelize_all && !loop->can_be_parallel)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "loop is not parallel according to graphite\n");
	  continue;
	}

      if (!single_dom_exit (loop))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "loop is !single_dom_exit\n");

	  continue;
	}

      if (/* And of course, the loop must be parallelizable.  */
	  !can_duplicate_loop_p (loop)
	  || loop_has_blocks_with_irreducible_flag (loop)
	  || (loop_preheader_edge (loop)->src->flags & BB_IRREDUCIBLE_LOOP)
	  /* FIXME: the check for vector phi nodes could be removed.  */
	  || loop_has_vector_phi_nodes (loop))
	continue;
      estimated = estimated_loop_iterations_int (loop, false);
      /* FIXME: Bypass this check as graphite doesn't update the
	 count and frequency correctly now.  */
      if (!flag_loop_parallelize_all
	  && ((estimated != -1
	       && estimated <= (HOST_WIDE_INT) n_threads * MIN_PER_THREAD)
	      /* Do not bother with loops in cold areas.  */
	      || optimize_loop_nest_for_size_p (loop)))
	continue;

      if (!try_get_loop_niter (loop, &niter_desc))
	continue;

      if (!try_create_reduction_list (loop, reduction_list))
	continue;

      if (!flag_loop_parallelize_all
	  && !loop_parallel_p (loop, &parloop_obstack))
	continue;

      changed = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (loop->inner)
	    fprintf (dump_file, "parallelizing outer loop %d\n",
		     loop->header->index);
	  else
	    fprintf (dump_file, "parallelizing inner loop %d\n",
		     loop->header->index);
	  loop_loc = find_loop_location (loop);
	  if (loop_loc != UNKNOWN_LOC)
	    fprintf (dump_file, "\nloop at %s:%d: ",
		     LOC_FILE (loop_loc), LOC_LINE (loop_loc));
	}
      gen_parallel_loop (loop, reduction_list,
			 n_threads, &niter_desc);
      verify_flow_info ();
      verify_dominators (CDI_DOMINATORS);
      verify_loop_structure ();
      verify_loop_closed_ssa (true);
    }

  free_stmt_vec_info_vec ();
  htab_delete (reduction_list);
  obstack_free (&parloop_obstack, NULL);

  /* Parallelization will cause new function calls to be inserted through
     which local variables will escape.  Reset the points-to solution
     for ESCAPED.  */
  if (changed)
    pt_solution_reset (&cfun->gimple_df->escaped);

  return changed;
}

#include "gt-tree-parloops.h"