gcc/tree-parloops.c
1 /* Loop autoparallelization.
2 Copyright (C) 2006 Free Software Foundation, Inc.
3 Contributed by Sebastian Pop <pop@cri.ensmp.fr> and
4 Zdenek Dvorak <dvorakz@suse.cz>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "rtl.h"
29 #include "tree-flow.h"
30 #include "cfgloop.h"
31 #include "ggc.h"
32 #include "tree-data-ref.h"
33 #include "diagnostic.h"
34 #include "tree-pass.h"
35 #include "tree-scalar-evolution.h"
36 #include "hashtab.h"
37 #include "langhooks.h"
38 #include "tree-vectorizer.h"
40 /* This pass tries to distribute iterations of loops into several threads.
41 The implementation is straightforward -- for each loop we test whether its
42 iterations are independent, and if that is the case (and some additional
43 conditions regarding profitability and correctness are satisfied), we
44 add OMP_PARALLEL and OMP_FOR codes and let omp expansion machinery do
45 its job.
47 Most of the complexity is in bringing the code into the shape expected
48 by the omp expanders:
49 -- for OMP_FOR, ensuring that the loop has only one induction variable
50 and that the exit test is at the start of the loop body
51 -- for OMP_PARALLEL, replacing the references to local addressable
52 variables by accesses through pointers, and breaking up ssa chains
53 by storing the values incoming to the parallelized loop to a structure
54 passed to the new function as an argument (something similar is done
55 in omp gimplification, unfortunately only a small part of the code
56 can be shared).
58 TODO:
59 -- if there are several parallelizable loops in a function, it may be
60 possible to generate the threads just once (using synchronization to
61 ensure that cross-loop dependences are obeyed).
62 -- handling of common scalar dependence patterns (accumulation, ...)
63 -- handling of non-innermost loops */
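/* Illustration (not part of the original sources): a loop such as

     for (i = 0; i < n; i++)
       a[i] = b[i] + c[i];

   has independent iterations and is the kind of candidate this pass looks
   for when the -ftree-parallelize-loops= option requests more than one
   thread; loops whose iterations communicate through scalars that are not
   recognized reductions are left alone (see loop_parallel_p below).  */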
65 /*
66 Reduction handling:
67 currently we use vect_is_simple_reduction() to detect reduction patterns.
68 The code transformation will be introduced by an example.
71 parloop
73 int sum=1;
75 for (i = 0; i < N; i++)
77 x[i] = i + 3;
78 sum+=x[i];
82 gimple-like code:
83 header_bb:
85 # sum_29 = PHI <sum_11(5), 1(3)>
86 # i_28 = PHI <i_12(5), 0(3)>
87 D.1795_8 = i_28 + 3;
88 x[i_28] = D.1795_8;
89 sum_11 = D.1795_8 + sum_29;
90 i_12 = i_28 + 1;
91 if (N_6(D) > i_12)
92 goto header_bb;
95 exit_bb:
97 # sum_21 = PHI <sum_11(4)>
98 printf (&"%d"[0], sum_21);
101 after reduction transformation (only relevant parts):
103 parloop
106 ....
109 # A new variable is created for each reduction:
110 "reduction_initial" is the initial value given by the user.
111 It is kept and will be used after the parallel computation is done. #
113 reduction_initial.24_46 = 1;
115 # Storing the neutral value of the
116 particular reduction's operation, e.g. 0 for PLUS_EXPR,
117 1 for MULT_EXPR, etc. into the reduction field.
118 This is done in create_stores_for_reduction. #
120 .paral_data_store.32.sum.27 = 0;
122 #pragma omp parallel num_threads(4)
124 #pragma omp for schedule(static)
125 # sum.27_29 = PHI <sum.27_11, 0> # The neutral element replaces
126 the user's initial value. #
127 sum.27_11 = D.1827_8 + sum.27_29;
128 OMP_CONTINUE
130 # Adding this reduction phi is done at create_phi_for_local_result() #
131 # sum.27_56 = PHI <sum.27_11, 0>
132 OMP_RETURN
134 # Creating the atomic operation is done at
135 create_call_for_reduction_1() #
137 #pragma omp atomic_load
138 D.1839_59 = *&.paral_data_load.33_51->reduction.23;
139 D.1840_60 = sum.27_56 + D.1839_59;
140 #pragma omp atomic_store (D.1840_60);
142 OMP_RETURN
144 # collecting the result after the join of the threads is done at
145 create_loads_for_reductions().
146 A new variable "reduction_final" is created. It calculates the final
147 value from the initial value and the value computed by the threads #
149 .paral_data_load.33_52 = &.paral_data_store.32;
150 reduction_final.34_53 = .paral_data_load.33_52->sum.27;
151 sum_37 = reduction_initial.24_46 + reduction_final.34_53;
152 sum_43 = D.1795_41 + sum_37;
154 exit bb:
155 # sum_21 = PHI <sum_43, sum_26>
156 printf (&"%d"[0], sum_21);
*/
164 /* Minimal number of iterations of a loop that should be executed in each
165 thread. */
166 #define MIN_PER_THREAD 100
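/* For example (illustrative; the actual check is built in gen_parallel_loop
   below): with 4 threads, as in the example above, a loop is only executed
   in parallel at run time when it performs at least
   MIN_PER_THREAD * 4 = 400 iterations; shorter runs take the original
   sequential version of the loop.  */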
168 /* Element of the hashtable, representing a
169 reduction in the current loop. */
170 struct reduction_info
172 tree reduc_stmt; /* reduction statement. */
173 tree reduc_phi; /* The phi node defining the reduction. */
174 enum tree_code reduction_code; /* code for the reduction operation. */
175 tree keep_res; /* The PHI_RESULT of this phi is the resulting value
176 of the reduction variable when exiting the loop. */
177 tree initial_value; /* An ssa name representing a new variable holding
178 the initial value of the reduction var before entering the loop. */
179 tree field; /* the name of the field in the parloop data structure intended for reduction. */
180 tree init; /* reduction initialization value. */
181 tree new_phi; /* (helper field) Newly created phi node whose result
182 will be passed to the atomic operation. Represents
183 the local result each thread computed for the reduction
184 operation. */
187 /* Equality and hash functions for hashtab code. */
189 static int
190 reduction_info_eq (const void *aa, const void *bb)
192 const struct reduction_info *a = (const struct reduction_info *) aa;
193 const struct reduction_info *b = (const struct reduction_info *) bb;
195 return (a->reduc_phi == b->reduc_phi);
198 static hashval_t
199 reduction_info_hash (const void *aa)
201 const struct reduction_info *a = (const struct reduction_info *) aa;
203 return htab_hash_pointer (a->reduc_phi);
206 static struct reduction_info *
207 reduction_phi (htab_t reduction_list, tree phi)
209 struct reduction_info tmpred, *red;
211 if (htab_elements (reduction_list) == 0)
212 return NULL;
214 tmpred.reduc_phi = phi;
215 red = htab_find (reduction_list, &tmpred);
217 return red;
220 /* Element of hashtable of names to copy. */
222 struct name_to_copy_elt
224 unsigned version; /* The version of the name to copy. */
225 tree new_name; /* The new name used in the copy. */
226 tree field; /* The field of the structure used to pass the
227 value. */
230 /* Equality and hash functions for hashtab code. */
232 static int
233 name_to_copy_elt_eq (const void *aa, const void *bb)
235 const struct name_to_copy_elt *a = (const struct name_to_copy_elt *) aa;
236 const struct name_to_copy_elt *b = (const struct name_to_copy_elt *) bb;
238 return a->version == b->version;
241 static hashval_t
242 name_to_copy_elt_hash (const void *aa)
244 const struct name_to_copy_elt *a = (const struct name_to_copy_elt *) aa;
246 return (hashval_t) a->version;
249 /* Returns true if the iterations of LOOP are independent on each other (that
250 is, if we can execute them in parallel), and if LOOP satisfies other
251 conditions that we need to be able to parallelize it. Description of number
252 of iterations is stored to NITER. Reduction analysis is performed; if
253 reductions are found, they are inserted into REDUCTION_LIST. */
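/* Illustration (hypothetical example, not from the original sources):

     for (i = 0; i < n; i++)
       {
         t = t * c + a[i];      <- uses the value from the previous iteration
         b[i] = t;
       }

   carries a scalar dependency between iterations that is not a simple
   reduction, so the loop is rejected, whereas an accumulation such as
   "sum += a[i]" is recognized by vect_is_simple_reduction and recorded in
   REDUCTION_LIST.  */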
255 static bool
256 loop_parallel_p (struct loop *loop, htab_t reduction_list, struct tree_niter_desc *niter)
258 edge exit = single_dom_exit (loop);
259 VEC (ddr_p, heap) * dependence_relations;
260 VEC (data_reference_p, heap) * datarefs;
261 lambda_trans_matrix trans;
262 bool ret = false;
263 tree phi;
264 loop_vec_info simple_loop_info;
266 /* Only consider innermost loops with just one exit. The innermost-loop
267 restriction is not necessary, but it makes things simpler. */
268 if (loop->inner || !exit)
269 return false;
271 if (dump_file && (dump_flags & TDF_DETAILS))
272 fprintf (dump_file, "\nConsidering loop %d\n", loop->num);
274 /* We need to know # of iterations, and there should be no uses of values
275 defined inside loop outside of it, unless the values are invariants of
276 the loop. */
277 if (!number_of_iterations_exit (loop, exit, niter, false))
279 if (dump_file && (dump_flags & TDF_DETAILS))
280 fprintf (dump_file, " FAILED: number of iterations not known\n");
281 return false;
284 simple_loop_info = vect_analyze_loop_form (loop);
286 for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
288 tree reduc_stmt = NULL, operation;
290 /* ??? TODO: Change this into a generic function that
291 recognizes reductions. */
292 if (!is_gimple_reg (PHI_RESULT (phi)))
293 continue;
294 if (simple_loop_info)
295 reduc_stmt = vect_is_simple_reduction (simple_loop_info, phi);
297 /* Create a reduction_info struct, initialize it and insert it to
298 the reduction list. */
300 if (reduc_stmt)
302 PTR *slot;
303 struct reduction_info *new_reduction;
305 if (dump_file && (dump_flags & TDF_DETAILS))
307 fprintf (dump_file,
308 "Detected reduction. reduction stmt is: \n");
309 print_generic_stmt (dump_file, reduc_stmt, 0);
310 fprintf (dump_file, "\n");
313 new_reduction = XCNEW (struct reduction_info);
315 new_reduction->reduc_stmt = reduc_stmt;
316 new_reduction->reduc_phi = phi;
317 operation = GIMPLE_STMT_OPERAND (reduc_stmt, 1);
318 new_reduction->reduction_code = TREE_CODE (operation);
319 slot = htab_find_slot (reduction_list, new_reduction, INSERT);
320 *slot = new_reduction;
324 for (phi = phi_nodes (exit->dest); phi; phi = PHI_CHAIN (phi))
326 struct reduction_info *red;
327 imm_use_iterator imm_iter;
328 use_operand_p use_p;
329 tree reduc_phi;
331 tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);
333 if (is_gimple_reg (val))
335 if (dump_file && (dump_flags & TDF_DETAILS))
337 fprintf (dump_file, "phi is ");
338 print_generic_expr (dump_file, phi, 0);
339 fprintf (dump_file, "arg of phi to exit: value ");
340 print_generic_expr (dump_file, val, 0);
341 fprintf (dump_file, " used outside loop\n");
342 fprintf (dump_file,
343 " checking if it is a part of a reduction pattern: \n");
345 if (htab_elements (reduction_list) == 0)
347 if (dump_file && (dump_flags & TDF_DETAILS))
348 fprintf (dump_file,
349 " FAILED: it is not a part of reduction.\n");
350 return false;
352 reduc_phi = NULL;
353 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, val)
355 if (flow_bb_inside_loop_p (loop, bb_for_stmt (USE_STMT (use_p))))
357 reduc_phi = USE_STMT (use_p);
358 break;
361 red = reduction_phi (reduction_list, reduc_phi);
362 if (red == NULL)
364 if (dump_file && (dump_flags & TDF_DETAILS))
365 fprintf (dump_file,
366 " FAILED: it is not a part of reduction.\n");
367 return false;
369 if (dump_file && (dump_flags & TDF_DETAILS))
371 fprintf (dump_file, "reduction phi is ");
372 print_generic_expr (dump_file, red->reduc_phi, 0);
373 fprintf (dump_file, "reduction stmt is ");
374 print_generic_expr (dump_file, red->reduc_stmt, 0);
380 /* The iterations of the loop may communicate only through bivs whose
381 iteration space can be distributed efficiently. */
382 for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
384 tree def = PHI_RESULT (phi);
385 affine_iv iv;
387 if (is_gimple_reg (def) && !simple_iv (loop, phi, def, &iv, true))
389 struct reduction_info *red;
391 red = reduction_phi (reduction_list, phi);
392 if (red == NULL)
394 if (dump_file && (dump_flags & TDF_DETAILS))
395 fprintf (dump_file,
396 " FAILED: scalar dependency between iterations\n");
397 return false;
402 /* We need to version the loop to verify assumptions at runtime. */
403 if (!can_duplicate_loop_p (loop))
405 if (dump_file && (dump_flags & TDF_DETAILS))
406 fprintf (dump_file, " FAILED: cannot be duplicated\n");
407 return false;
410 /* Check for problems with dependences. If the loop can be reversed,
411 the iterations are independent. */
412 datarefs = VEC_alloc (data_reference_p, heap, 10);
413 dependence_relations = VEC_alloc (ddr_p, heap, 10 * 10);
414 compute_data_dependences_for_loop (loop, true, &datarefs,
415 &dependence_relations);
416 if (dump_file && (dump_flags & TDF_DETAILS))
417 dump_data_dependence_relations (dump_file, dependence_relations);
419 trans = lambda_trans_matrix_new (1, 1);
420 LTM_MATRIX (trans)[0][0] = -1;
422 if (lambda_transform_legal_p (trans, 1, dependence_relations))
424 ret = true;
425 if (dump_file && (dump_flags & TDF_DETAILS))
426 fprintf (dump_file, " SUCCESS: may be parallelized\n");
428 else if (dump_file && (dump_flags & TDF_DETAILS))
429 fprintf (dump_file,
430 " FAILED: data dependencies exist across iterations\n");
432 free_dependence_relations (dependence_relations);
433 free_data_refs (datarefs);
435 return ret;
438 /* Assigns the address of VAR in TYPE to an ssa name, and returns this name.
439 The assignment statement is placed before LOOP. DECL_ADDRESS maps decls
440 to their addresses that can be reused. */
442 static tree
443 take_address_of (tree var, tree type, struct loop *loop, htab_t decl_address)
445 int uid = DECL_UID (var);
446 void **dslot;
447 struct int_tree_map ielt, *nielt;
448 tree name, bvar, stmt;
449 edge entry = loop_preheader_edge (loop);
451 ielt.uid = uid;
452 dslot = htab_find_slot_with_hash (decl_address, &ielt, uid, INSERT);
453 if (!*dslot)
455 bvar = create_tmp_var (type, get_name (var));
456 add_referenced_var (bvar);
457 stmt = build_gimple_modify_stmt (bvar,
458 fold_convert (type,
459 build_addr (var,
460 current_function_decl)));
461 name = make_ssa_name (bvar, stmt);
462 GIMPLE_STMT_OPERAND (stmt, 0) = name;
463 bsi_insert_on_edge_immediate (entry, stmt);
465 nielt = XNEW (struct int_tree_map);
466 nielt->uid = uid;
467 nielt->to = name;
468 *dslot = nielt;
470 return name;
473 name = ((struct int_tree_map *) *dslot)->to;
474 if (TREE_TYPE (name) == type)
475 return name;
477 bvar = SSA_NAME_VAR (name);
478 stmt = build_gimple_modify_stmt (bvar, fold_convert (type, name));
479 name = make_ssa_name (bvar, stmt);
480 GIMPLE_STMT_OPERAND (stmt, 0) = name;
481 bsi_insert_on_edge_immediate (entry, stmt);
483 return name;
486 /* Callback for htab_traverse. Create the initialization statement
487 for reduction described in SLOT, and place it at the preheader of
488 the loop described in DATA. */
490 static int
491 initialize_reductions (void **slot, void *data)
493 tree stmt;
494 tree init, c;
495 tree name1;
496 tree bvar, type, arg;
497 edge e;
499 struct reduction_info *reduc = *slot;
500 struct loop *loop = (struct loop *) data;
502 /* Create initialization in preheader:
503 reduction_variable = initialization value of reduction. */
505 /* In the phi node at the header, replace the argument coming
506 from the preheader with the reduction initialization value. */
508 /* Create a new variable to initialize the reduction. */
509 type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
510 bvar = create_tmp_var (type, "reduction");
511 add_referenced_var (bvar);
513 c = build_omp_clause (OMP_CLAUSE_REDUCTION);
514 OMP_CLAUSE_REDUCTION_CODE (c) = reduc->reduction_code;
515 OMP_CLAUSE_DECL (c) =
516 SSA_NAME_VAR (GIMPLE_STMT_OPERAND (reduc->reduc_stmt, 0));
518 init = omp_reduction_init (c, TREE_TYPE (bvar));
519 reduc->init = init;
521 /* Replace the argument representing the initialization value
522 with the initialization value for the reduction (neutral
523 element for the particular operation, e.g. 0 for PLUS_EXPR,
524 1 for MULT_EXPR, etc).
525 Keep the old value in a new variable "reduction_initial",
526 that will be taken into consideration after the parallel
527 computation is done. */
529 e = loop_preheader_edge (loop);
530 arg = PHI_ARG_DEF_FROM_EDGE (reduc->reduc_phi, e);
531 /* Create new variable to hold the initial value. */
532 type = TREE_TYPE (bvar);
533 bvar = create_tmp_var (type, "reduction_initial");
534 add_referenced_var (bvar);
536 stmt = build_gimple_modify_stmt (bvar, arg);
537 name1 = make_ssa_name (bvar, stmt);
538 GIMPLE_STMT_OPERAND (stmt, 0) = name1;
539 SSA_NAME_DEF_STMT (name1) = stmt;
541 bsi_insert_on_edge_immediate (e, stmt);
542 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE
543 (reduc->reduc_phi, loop_preheader_edge (loop)), init);
544 reduc->initial_value = name1;
545 return 1;
548 struct elv_data
550 struct loop *loop;
551 htab_t decl_address;
552 bool changed;
555 /* Eliminates references to local variables in *TP out of LOOP. DECL_ADDRESS
556 contains addresses of the references that had their address taken already.
557 If the expression is changed, CHANGED is set to true. Callback for
558 walk_tree. */
560 static tree
561 eliminate_local_variables_1 (tree * tp, int *walk_subtrees, void *data)
563 struct elv_data *dta = data;
564 tree t = *tp, var, addr, addr_type, type;
566 if (DECL_P (t))
568 *walk_subtrees = 0;
570 if (!SSA_VAR_P (t) || DECL_EXTERNAL (t))
571 return NULL_TREE;
573 type = TREE_TYPE (t);
574 addr_type = build_pointer_type (type);
575 addr = take_address_of (t, addr_type, dta->loop, dta->decl_address);
576 *tp = build1 (INDIRECT_REF, TREE_TYPE (*tp), addr);
578 dta->changed = true;
579 return NULL_TREE;
582 if (TREE_CODE (t) == ADDR_EXPR)
584 var = TREE_OPERAND (t, 0);
585 if (!DECL_P (var))
586 return NULL_TREE;
588 *walk_subtrees = 0;
589 if (!SSA_VAR_P (var) || DECL_EXTERNAL (var))
590 return NULL_TREE;
592 addr_type = TREE_TYPE (t);
593 addr = take_address_of (var, addr_type, dta->loop, dta->decl_address);
594 *tp = addr;
596 dta->changed = true;
597 return NULL_TREE;
600 if (!EXPR_P (t) && !GIMPLE_STMT_P (t))
601 *walk_subtrees = 0;
603 return NULL_TREE;
606 /* Moves the references to local variables in STMT from LOOP. DECL_ADDRESS
607 contains the addresses of the references whose address has already
608 been taken. */
610 static void
611 eliminate_local_variables_stmt (struct loop *loop, tree stmt,
612 htab_t decl_address)
614 struct elv_data dta;
616 dta.loop = loop;
617 dta.decl_address = decl_address;
618 dta.changed = false;
620 walk_tree (&stmt, eliminate_local_variables_1, &dta, NULL);
622 if (dta.changed)
623 update_stmt (stmt);
626 /* Eliminates the references to local variables from LOOP.
627 This includes:
628 1) Taking address of a local variable -- these are moved out of the
629 loop (and temporary variable is created to hold the address if
630 necessary).
631 2) Dereferencing a local variable -- these are replaced with indirect
632 references. */
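/* Illustration (hypothetical names, not from the original sources): for a
   local variable a used in the loop body,

     x = a;        becomes      x = *p_a;
     q = &a;       becomes      q = p_a;

   where p_a is a new temporary initialized to &a on the loop preheader
   edge by take_address_of.  */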
634 static void
635 eliminate_local_variables (struct loop *loop)
637 basic_block bb, *body = get_loop_body (loop);
638 unsigned i;
639 block_stmt_iterator bsi;
640 htab_t decl_address = htab_create (10, int_tree_map_hash, int_tree_map_eq,
641 free);
643 /* Walk the statements of LOOP and eliminate references to local variables. */
644 for (i = 0; i < loop->num_nodes; i++)
646 bb = body[i];
648 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
649 eliminate_local_variables_stmt (loop, bsi_stmt (bsi), decl_address);
652 htab_delete (decl_address);
655 /* If COPY_NAME_P is true, creates and returns a duplicate of NAME.
656 The copies are stored to NAME_COPIES; if NAME was already duplicated,
657 its duplicate stored in NAME_COPIES is returned.
659 Regardless of COPY_NAME_P, the decl used as a base of the ssa name is also
660 duplicated, storing the copies in DECL_COPIES. */
662 static tree
663 separate_decls_in_loop_name (tree name,
664 htab_t name_copies, htab_t decl_copies,
665 bool copy_name_p)
667 tree copy, var, var_copy;
668 unsigned idx, uid, nuid;
669 struct int_tree_map ielt, *nielt;
670 struct name_to_copy_elt elt, *nelt;
671 void **slot, **dslot;
673 if (TREE_CODE (name) != SSA_NAME)
674 return name;
676 idx = SSA_NAME_VERSION (name);
677 elt.version = idx;
678 slot = htab_find_slot_with_hash (name_copies, &elt, idx,
679 copy_name_p ? INSERT : NO_INSERT);
680 if (slot && *slot)
681 return ((struct name_to_copy_elt *) *slot)->new_name;
683 var = SSA_NAME_VAR (name);
684 uid = DECL_UID (var);
685 ielt.uid = uid;
686 dslot = htab_find_slot_with_hash (decl_copies, &ielt, uid, INSERT);
687 if (!*dslot)
689 var_copy = create_tmp_var (TREE_TYPE (var), get_name (var));
690 add_referenced_var (var_copy);
691 nielt = XNEW (struct int_tree_map);
692 nielt->uid = uid;
693 nielt->to = var_copy;
694 *dslot = nielt;
696 /* Ensure that when we meet this decl next time, we won't duplicate
697 it again. */
698 nuid = DECL_UID (var_copy);
699 ielt.uid = nuid;
700 dslot = htab_find_slot_with_hash (decl_copies, &ielt, nuid, INSERT);
701 gcc_assert (!*dslot);
702 nielt = XNEW (struct int_tree_map);
703 nielt->uid = nuid;
704 nielt->to = var_copy;
705 *dslot = nielt;
707 else
708 var_copy = ((struct int_tree_map *) *dslot)->to;
710 if (copy_name_p)
712 copy = duplicate_ssa_name (name, NULL_TREE);
713 nelt = XNEW (struct name_to_copy_elt);
714 nelt->version = idx;
715 nelt->new_name = copy;
716 nelt->field = NULL_TREE;
717 *slot = nelt;
719 else
721 gcc_assert (!slot);
722 copy = name;
725 SSA_NAME_VAR (copy) = var_copy;
726 return copy;
729 /* Finds the ssa names used in STMT that are defined outside of LOOP and
730 replaces such ssa names with their duplicates. The duplicates are stored to
731 NAME_COPIES. Base decls of all ssa names used in STMT
732 (including those defined in LOOP) are replaced with the new temporary
733 variables; the replacement decls are stored in DECL_COPIES. */
735 static void
736 separate_decls_in_loop_stmt (struct loop *loop, tree stmt,
737 htab_t name_copies, htab_t decl_copies)
739 use_operand_p use;
740 def_operand_p def;
741 ssa_op_iter oi;
742 tree name, copy;
743 bool copy_name_p;
745 mark_virtual_ops_for_renaming (stmt);
747 FOR_EACH_PHI_OR_STMT_DEF (def, stmt, oi, SSA_OP_DEF)
749 name = DEF_FROM_PTR (def);
750 gcc_assert (TREE_CODE (name) == SSA_NAME);
751 copy = separate_decls_in_loop_name (name, name_copies, decl_copies,
752 false);
753 gcc_assert (copy == name);
756 FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE)
758 name = USE_FROM_PTR (use);
759 if (TREE_CODE (name) != SSA_NAME)
760 continue;
762 copy_name_p = expr_invariant_in_loop_p (loop, name);
763 copy = separate_decls_in_loop_name (name, name_copies, decl_copies,
764 copy_name_p);
765 SET_USE (use, copy);
769 /* Callback for htab_traverse. Adds a field corresponding to the reduction
770 specified in SLOT. The type is passed in DATA. */
772 static int
773 add_field_for_reduction (void **slot, void *data)
776 struct reduction_info *red = *slot;
777 tree type = data;
778 tree var = SSA_NAME_VAR (GIMPLE_STMT_OPERAND (red->reduc_stmt, 0));
779 tree field = build_decl (FIELD_DECL, DECL_NAME (var), TREE_TYPE (var));
781 insert_field_into_struct (type, field);
783 red->field = field;
785 return 1;
788 /* Callback for htab_traverse. Adds a field corresponding to a ssa name
789 described in SLOT. The type is passed in DATA. */
791 static int
792 add_field_for_name (void **slot, void *data)
794 struct name_to_copy_elt *elt = *slot;
795 tree type = data;
796 tree name = ssa_name (elt->version);
797 tree var = SSA_NAME_VAR (name);
798 tree field = build_decl (FIELD_DECL, DECL_NAME (var), TREE_TYPE (var));
800 insert_field_into_struct (type, field);
801 elt->field = field;
803 return 1;
806 /* Callback for htab_traverse. A local result is the intermediate result
807 computed by a single
808 thread, or the initial value in case no iteration was executed.
809 This function creates a phi node reflecting these values.
810 The phi's result will be stored in NEW_PHI field of the
811 reduction's data structure. */
813 static int
814 create_phi_for_local_result (void **slot, void *data)
816 struct reduction_info *reduc = *slot;
817 struct loop *loop = data;
818 edge e;
819 tree new_phi;
820 basic_block store_bb;
821 tree local_res;
823 /* STORE_BB is the block where the phi
824 should be stored. It is the destination of the loop exit.
825 (Find the fallthru edge from OMP_CONTINUE). */
826 store_bb = FALLTHRU_EDGE (loop->latch)->dest;
828 /* STORE_BB has two predecessors. One coming from the loop
829 (the reduction's result is computed inside the loop),
830 and another coming from a block preceding the loop,
831 when no iterations
832 are executed (the initial value should be taken). */
833 if (EDGE_PRED (store_bb, 0) == FALLTHRU_EDGE (loop->latch))
834 e = EDGE_PRED (store_bb, 1);
835 else
836 e = EDGE_PRED (store_bb, 0);
837 local_res = make_ssa_name (SSA_NAME_VAR (GIMPLE_STMT_OPERAND (reduc->reduc_stmt, 0)), NULL_TREE);
838 new_phi = create_phi_node (local_res, store_bb);
839 SSA_NAME_DEF_STMT (local_res) = new_phi;
840 add_phi_arg (new_phi, reduc->init, e);
841 add_phi_arg (new_phi, GIMPLE_STMT_OPERAND (reduc->reduc_stmt, 0),
842 FALLTHRU_EDGE (loop->latch));
843 reduc->new_phi = new_phi;
845 return 1;
848 struct clsn_data
850 tree store;
851 tree load;
853 basic_block store_bb;
854 basic_block load_bb;
857 /* Callback for htab_traverse. Create an atomic instruction for the
858 reduction described in SLOT.
859 DATA annotates the place in memory the atomic operation relates to,
860 and the basic block it needs to be generated in. */
862 static int
863 create_call_for_reduction_1 (void **slot, void *data)
865 struct reduction_info *reduc = *slot;
866 struct clsn_data *clsn_data = data;
867 block_stmt_iterator bsi;
868 tree type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
869 tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
870 tree load_struct;
871 basic_block bb;
872 basic_block new_bb;
873 edge e;
874 tree t, addr, addr_type, ref, x;
875 tree tmp_load, load, name;
877 load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
878 t = build3 (COMPONENT_REF, type, load_struct, reduc->field, NULL_TREE);
879 addr_type = build_pointer_type (type);
881 addr = build_addr (t, current_function_decl);
883 /* Create phi node. */
884 bb = clsn_data->load_bb;
886 e = split_block (bb, t);
887 new_bb = e->dest;
889 tmp_load = create_tmp_var (TREE_TYPE (TREE_TYPE (addr)), NULL);
890 add_referenced_var (tmp_load);
891 tmp_load = make_ssa_name (tmp_load, NULL);
892 load = build2 (OMP_ATOMIC_LOAD, void_type_node, tmp_load, addr);
893 SSA_NAME_DEF_STMT (tmp_load) = load;
894 bsi = bsi_start (new_bb);
895 bsi_insert_after (&bsi, load, BSI_NEW_STMT);
897 e = split_block (new_bb, load);
898 new_bb = e->dest;
899 bsi = bsi_start (new_bb);
900 ref = tmp_load;
902 x = fold_build2 (reduc->reduction_code,
903 TREE_TYPE (PHI_RESULT (reduc->new_phi)), ref,
904 PHI_RESULT (reduc->new_phi));
906 name =
907 force_gimple_operand_bsi (&bsi, x, true, NULL_TREE, true,
908 BSI_CONTINUE_LINKING);
910 x = build1 (OMP_ATOMIC_STORE, void_type_node, name);
912 bsi_insert_after (&bsi, x, BSI_NEW_STMT);
913 return 1;
916 /* Create the atomic operation at the join point of the threads.
917 REDUCTION_LIST describes the reductions in the LOOP.
918 LD_ST_DATA describes the shared data structure where
919 shared data is stored in and loaded from. */
920 static void
921 create_call_for_reduction (struct loop *loop, htab_t reduction_list,
922 struct clsn_data *ld_st_data)
924 htab_traverse (reduction_list, create_phi_for_local_result, loop);
925 /* Find the fallthru edge from OMP_CONTINUE. */
926 ld_st_data->load_bb = FALLTHRU_EDGE (loop->latch)->dest;
927 htab_traverse (reduction_list, create_call_for_reduction_1, ld_st_data);
930 /* Callback for htab_traverse. Create a new variable that loads the
931 final reduction value at the
932 join point of all threads, adds the initial value the reduction
933 variable had before the parallel computation started, and
934 inserts it in the right place. */
936 static int
937 create_loads_for_reductions (void **slot, void *data)
939 struct reduction_info *red = *slot;
940 struct clsn_data *clsn_data = data;
941 tree stmt;
942 block_stmt_iterator bsi;
943 tree type = TREE_TYPE (GIMPLE_STMT_OPERAND (red->reduc_stmt, 0));
944 tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
945 tree load_struct;
946 tree bvar, name;
947 tree x;
949 bsi = bsi_after_labels (clsn_data->load_bb);
950 load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
951 load_struct = build3 (COMPONENT_REF, type, load_struct, red->field,
952 NULL_TREE);
953 bvar = create_tmp_var (type, "reduction_final");
954 add_referenced_var (bvar);
956 /* Apply the operation between the new variable, which is the result
957 of the computation of all threads, and the initial value, which is kept
958 at reduction->initial_value. */
960 stmt = build_gimple_modify_stmt (bvar, load_struct);
961 name = make_ssa_name (bvar, stmt);
962 GIMPLE_STMT_OPERAND (stmt, 0) = name;
963 SSA_NAME_DEF_STMT (name) = stmt;
964 bsi_insert_after (&bsi, stmt, BSI_NEW_STMT);
966 x = fold_build2 (red->reduction_code, TREE_TYPE (load_struct),
967 name, red->initial_value);
968 name = PHI_RESULT (red->keep_res);
969 stmt = build_gimple_modify_stmt (name, x);
970 GIMPLE_STMT_OPERAND (stmt, 0) = name;
971 SSA_NAME_DEF_STMT (name) = stmt;
973 bsi_insert_after (&bsi, stmt, BSI_NEW_STMT);
975 remove_phi_node (red->keep_res, NULL_TREE, false);
977 return 1;
980 /* Load the reduction result that was stored in LD_ST_DATA.
981 REDUCTION_LIST describes the list of reductions that the
982 loads should be generated for. */
983 static void
984 create_final_loads_for_reduction (htab_t reduction_list,
985 struct clsn_data *ld_st_data)
987 block_stmt_iterator bsi;
988 tree t;
990 bsi = bsi_after_labels (ld_st_data->load_bb);
991 t = build_fold_addr_expr (ld_st_data->store);
993 t = build_gimple_modify_stmt (ld_st_data->load,
994 build_fold_addr_expr (ld_st_data->store));
996 bsi_insert_before (&bsi, t, BSI_NEW_STMT);
997 SSA_NAME_DEF_STMT (ld_st_data->load) = t;
998 GIMPLE_STMT_OPERAND (t, 0) = ld_st_data->load;
1000 htab_traverse (reduction_list, create_loads_for_reductions, ld_st_data);
1004 /* Callback for htab_traverse. Store the neutral value for the
1005 particular reduction's operation, e.g. 0 for PLUS_EXPR,
1006 1 for MULT_EXPR, etc. into the reduction field.
1007 The reduction is specified in SLOT. The store information is
1008 passed in DATA. */
1010 static int
1011 create_stores_for_reduction (void **slot, void *data)
1013 struct reduction_info *red = *slot;
1014 struct clsn_data *clsn_data = data;
1015 tree stmt;
1016 block_stmt_iterator bsi;
1017 tree type = TREE_TYPE (GIMPLE_STMT_OPERAND (red->reduc_stmt, 0));
1019 bsi = bsi_last (clsn_data->store_bb);
1020 stmt =
1021 build_gimple_modify_stmt (build3
1022 (COMPONENT_REF, type, clsn_data->store,
1023 red->field, NULL_TREE),
1024 red->init );
1025 mark_virtual_ops_for_renaming (stmt);
1026 bsi_insert_after (&bsi, stmt, BSI_NEW_STMT);
1028 return 1;
1031 /* Callback for htab_traverse. Creates loads to a field of LOAD in LOAD_BB and
1032 store to a field of STORE in STORE_BB for the ssa name and its duplicate
1033 specified in SLOT. */
1035 static int
1036 create_loads_and_stores_for_name (void **slot, void *data)
1038 struct name_to_copy_elt *elt = *slot;
1039 struct clsn_data *clsn_data = data;
1040 tree stmt;
1041 block_stmt_iterator bsi;
1042 tree type = TREE_TYPE (elt->new_name);
1043 tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
1044 tree load_struct;
1046 bsi = bsi_last (clsn_data->store_bb);
1047 stmt =
1048 build_gimple_modify_stmt (build3
1049 (COMPONENT_REF, type, clsn_data->store,
1050 elt->field, NULL_TREE),
1051 ssa_name (elt->version));
1052 mark_virtual_ops_for_renaming (stmt);
1053 bsi_insert_after (&bsi, stmt, BSI_NEW_STMT);
1055 bsi = bsi_last (clsn_data->load_bb);
1056 load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
1057 stmt = build_gimple_modify_stmt (elt->new_name,
1058 build3 (COMPONENT_REF, type, load_struct,
1059 elt->field, NULL_TREE));
1060 SSA_NAME_DEF_STMT (elt->new_name) = stmt;
1061 bsi_insert_after (&bsi, stmt, BSI_NEW_STMT);
1063 return 1;
1066 /* Moves all the variables used in LOOP and defined outside of it (including
1067 the initial values of loop phi nodes, and *PER_THREAD if it is a ssa
1068 name) to a structure created for this purpose. The code
1070 while (1)
1072 use (a);
1073 use (b);
1076 is transformed this way:
1078 bb0:
1079 old.a = a;
1080 old.b = b;
1082 bb1:
1083 a' = new->a;
1084 b' = new->b;
1085 while (1)
1087 use (a');
1088 use (b');
1091 `old' is stored to *ARG_STRUCT and `new' is stored to NEW_ARG_STRUCT. The
1092 pointer `new' is intentionally not initialized (the loop will be split to a
1093 separate function later, and `new' will be initialized from its arguments).
1094 LD_ST_DATA holds information about the shared data structure used to pass
1095 information among the threads. It is initialized here, and
1096 gen_parallel_loop will pass it to create_call_for_reduction that
1097 needs this information. REDUCTION_LIST describes the reductions
1098 in LOOP. */
1100 static void
1101 separate_decls_in_loop (struct loop *loop, htab_t reduction_list,
1102 tree * arg_struct, tree * new_arg_struct,
1103 struct clsn_data *ld_st_data)
1106 basic_block bb1 = split_edge (loop_preheader_edge (loop));
1107 basic_block bb0 = single_pred (bb1);
1108 htab_t name_copies = htab_create (10, name_to_copy_elt_hash,
1109 name_to_copy_elt_eq, free);
1110 htab_t decl_copies = htab_create (10, int_tree_map_hash, int_tree_map_eq,
1111 free);
1112 basic_block bb, *body = get_loop_body (loop);
1113 unsigned i;
1114 tree phi, type, type_name, nvar;
1115 block_stmt_iterator bsi;
1116 struct clsn_data clsn_data;
1118 /* Find and rename the ssa names defined outside of loop. */
1119 for (i = 0; i < loop->num_nodes; i++)
1121 bb = body[i];
1123 for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
1124 separate_decls_in_loop_stmt (loop, phi, name_copies, decl_copies);
1126 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
1127 separate_decls_in_loop_stmt (loop, bsi_stmt (bsi), name_copies,
1128 decl_copies);
1130 free (body);
1132 if (htab_elements (name_copies) == 0)
1134 /* It may happen that there is nothing to copy (if there are only
1135 loop carried and external variables in the loop). */
1136 *arg_struct = NULL;
1137 *new_arg_struct = NULL;
1139 else
1141 /* Create the type for the structure to store the ssa names to. */
1142 type = lang_hooks.types.make_type (RECORD_TYPE);
1143 type_name = build_decl (TYPE_DECL, create_tmp_var_name (".paral_data"),
1144 type);
1145 TYPE_NAME (type) = type_name;
1147 htab_traverse (name_copies, add_field_for_name, type);
1148 if (htab_elements (reduction_list) > 0)
1150 /* Create the fields for reductions. */
1151 htab_traverse (reduction_list, add_field_for_reduction,
1152 type);
1154 layout_type (type);
1156 /* Create the loads and stores. */
1157 *arg_struct = create_tmp_var (type, ".paral_data_store");
1158 add_referenced_var (*arg_struct);
1159 nvar = create_tmp_var (build_pointer_type (type), ".paral_data_load");
1160 add_referenced_var (nvar);
1161 *new_arg_struct = make_ssa_name (nvar, NULL_TREE);
1163 ld_st_data->store = *arg_struct;
1164 ld_st_data->load = *new_arg_struct;
1165 ld_st_data->store_bb = bb0;
1166 ld_st_data->load_bb = bb1;
1168 htab_traverse (name_copies, create_loads_and_stores_for_name,
1169 ld_st_data);
1171 /* Load the calculation from memory into a new
1172 reduction variable (after the join of the threads). */
1173 if (htab_elements (reduction_list) > 0)
1175 htab_traverse (reduction_list, create_stores_for_reduction,
1176 ld_st_data);
1177 clsn_data.load = make_ssa_name (nvar, NULL_TREE);
1178 clsn_data.load_bb = single_dom_exit (loop)->dest;
1179 clsn_data.store = ld_st_data->store;
1180 create_final_loads_for_reduction (reduction_list, &clsn_data);
1184 htab_delete (decl_copies);
1185 htab_delete (name_copies);
1188 /* Bitmap containing uids of functions created by parallelization. We cannot
1189 allocate it from the default obstack, as it must live across compilation
1190 of several functions; we make it gc allocated instead. */
1192 static GTY(()) bitmap parallelized_functions;
1194 /* Returns true if FN was created by create_loop_fn. */
1196 static bool
1197 parallelized_function_p (tree fn)
1199 if (!parallelized_functions || !DECL_ARTIFICIAL (fn))
1200 return false;
1202 return bitmap_bit_p (parallelized_functions, DECL_UID (fn));
1205 /* Creates and returns an empty function that will receive the body of
1206 a parallelized loop. */
1208 static tree
1209 create_loop_fn (void)
1211 char buf[100];
1212 char *tname;
1213 tree decl, type, name, t;
1214 struct function *act_cfun = cfun;
1215 static unsigned loopfn_num;
1217 snprintf (buf, 100, "%s.$loopfn", current_function_name ());
1218 ASM_FORMAT_PRIVATE_NAME (tname, buf, loopfn_num++);
1219 clean_symbol_name (tname);
1220 name = get_identifier (tname);
1221 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1223 decl = build_decl (FUNCTION_DECL, name, type);
1224 if (!parallelized_functions)
1225 parallelized_functions = BITMAP_GGC_ALLOC ();
1226 bitmap_set_bit (parallelized_functions, DECL_UID (decl));
1228 TREE_STATIC (decl) = 1;
1229 TREE_USED (decl) = 1;
1230 DECL_ARTIFICIAL (decl) = 1;
1231 DECL_IGNORED_P (decl) = 0;
1232 TREE_PUBLIC (decl) = 0;
1233 DECL_UNINLINABLE (decl) = 1;
1234 DECL_EXTERNAL (decl) = 0;
1235 DECL_CONTEXT (decl) = NULL_TREE;
1236 DECL_INITIAL (decl) = make_node (BLOCK);
1238 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1239 DECL_ARTIFICIAL (t) = 1;
1240 DECL_IGNORED_P (t) = 1;
1241 DECL_RESULT (decl) = t;
1243 t = build_decl (PARM_DECL, get_identifier (".paral_data_param"),
1244 ptr_type_node);
1245 DECL_ARTIFICIAL (t) = 1;
1246 DECL_ARG_TYPE (t) = ptr_type_node;
1247 DECL_CONTEXT (t) = decl;
1248 TREE_USED (t) = 1;
1249 DECL_ARGUMENTS (decl) = t;
1251 allocate_struct_function (decl);
1253 /* The call to allocate_struct_function clobbers CFUN, so we need to restore
1254 it. */
1255 cfun = act_cfun;
1257 return decl;
1260 /* Bases all the induction variables in LOOP on a single induction variable
1261 (unsigned with base 0 and step 1), whose final value is compared with
1262 NIT. The induction variable is incremented in the loop latch.
1263 REDUCTION_LIST describes the reductions in LOOP. */
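/* Illustration (a sketch, not from the original sources): if the loop has

     i = PHI <0, i + 1>          p = PHI <&a[0], p + 4>

   a single canonical variable (base 0, step 1, here called IV) is created
   in the latch, the other induction phis are removed, and their values are
   recomputed at the start of the header as

     i = 0 + 1 * IV;             p = &a[0] + 4 * IV;

   (with the appropriate conversions), while the exit condition becomes
   IV < NIT.  */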
1265 static void
1266 canonicalize_loop_ivs (struct loop *loop, htab_t reduction_list, tree nit)
1268 unsigned precision = TYPE_PRECISION (TREE_TYPE (nit));
1269 tree phi, prev, res, type, var_before, val, atype, t, next;
1270 block_stmt_iterator bsi;
1271 bool ok;
1272 affine_iv iv;
1273 edge exit = single_dom_exit (loop);
1274 struct reduction_info *red;
1276 for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
1278 res = PHI_RESULT (phi);
1280 if (is_gimple_reg (res) && TYPE_PRECISION (TREE_TYPE (res)) > precision)
1281 precision = TYPE_PRECISION (TREE_TYPE (res));
1284 type = lang_hooks.types.type_for_size (precision, 1);
1286 bsi = bsi_last (loop->latch);
1287 create_iv (build_int_cst_type (type, 0), build_int_cst (type, 1), NULL_TREE,
1288 loop, &bsi, true, &var_before, NULL);
1290 bsi = bsi_after_labels (loop->header);
1291 prev = NULL;
1292 for (phi = phi_nodes (loop->header); phi; phi = next)
1294 next = PHI_CHAIN (phi);
1295 res = PHI_RESULT (phi);
1297 if (!is_gimple_reg (res) || res == var_before)
1299 prev = phi;
1300 continue;
1303 ok = simple_iv (loop, phi, res, &iv, true);
1304 red = reduction_phi (reduction_list, phi);
1305 /* We preserve the reduction phi nodes. */
1306 if (!ok && red)
1308 prev = phi;
1309 continue;
1311 else
1312 gcc_assert (ok);
1313 remove_phi_node (phi, prev, false);
1315 atype = TREE_TYPE (res);
1316 val = fold_build2 (PLUS_EXPR, atype,
1317 unshare_expr (iv.base),
1318 fold_build2 (MULT_EXPR, atype,
1319 unshare_expr (iv.step),
1320 fold_convert (atype, var_before)));
1321 val = force_gimple_operand_bsi (&bsi, val, false, NULL_TREE, true,
1322 BSI_SAME_STMT);
1323 t = build_gimple_modify_stmt (res, val);
1324 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
1325 SSA_NAME_DEF_STMT (res) = t;
1328 t = last_stmt (exit->src);
1329 /* Make the loop exit if the control condition is not satisfied. */
1330 if (exit->flags & EDGE_TRUE_VALUE)
1332 edge te, fe;
1334 extract_true_false_edges_from_block (exit->src, &te, &fe);
1335 te->flags = EDGE_FALSE_VALUE;
1336 fe->flags = EDGE_TRUE_VALUE;
1338 COND_EXPR_COND (t) = build2 (LT_EXPR, boolean_type_node, var_before, nit);
1341 /* Moves the exit condition of LOOP to the beginning of its header, and
1342 duplicates the part of the last iteration that gets disabled to the
1343 exit of the loop. NIT is the number of iterations of the loop
1344 (used to initialize the variables in the duplicated part).
1346 TODO: the common case is that the latch of the loop is empty and immediately
1347 follows the loop exit. In this case, it would be better not to copy the
1348 body of the loop, but only move the entry of the loop directly before the
1349 exit check and increase the number of iterations of the loop by one.
1350 This may need some additional preconditioning in case NIT = ~0.
1351 REDUCTION_LIST describes the reductions in LOOP. */
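/* Schematically (illustration only, not from the original sources):

     header: i = PHI <0, i'>            header: i = PHI <0, i'>
             BODY                               if (i >= NIT) exit;
             i' = i + 1                 =>      BODY
             if (i' < NIT)                      i' = i + 1
               goto header                      goto header

   where the statements of BODY that would have been executed by the
   skipped part of the last iteration are duplicated after the new exit.  */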
1353 static void
1354 transform_to_exit_first_loop (struct loop *loop, htab_t reduction_list, tree nit)
1356 basic_block *bbs, *nbbs, ex_bb, orig_header;
1357 unsigned n;
1358 bool ok;
1359 edge exit = single_dom_exit (loop), hpred;
1360 tree phi, nphi, cond, control, control_name, res, t, cond_stmt;
1361 block_stmt_iterator bsi;
1363 split_block_after_labels (loop->header);
1364 orig_header = single_succ (loop->header);
1365 hpred = single_succ_edge (loop->header);
1367 cond_stmt = last_stmt (exit->src);
1368 cond = COND_EXPR_COND (cond_stmt);
1369 control = TREE_OPERAND (cond, 0);
1370 gcc_assert (TREE_OPERAND (cond, 1) == nit);
1372 /* Make sure that we have phi nodes on exit for all loop header phis
1373 (create_parallel_loop requires that). */
1374 for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
1376 res = PHI_RESULT (phi);
1377 t = make_ssa_name (SSA_NAME_VAR (res), phi);
1378 SET_PHI_RESULT (phi, t);
1380 nphi = create_phi_node (res, orig_header);
1381 SSA_NAME_DEF_STMT (res) = nphi;
1382 add_phi_arg (nphi, t, hpred);
1384 if (res == control)
1386 TREE_OPERAND (cond, 0) = t;
1387 update_stmt (cond_stmt);
1388 control = t;
1392 bbs = get_loop_body_in_dom_order (loop);
1393 for (n = 0; bbs[n] != exit->src; n++)
1394 continue;
1395 nbbs = XNEWVEC (basic_block, n);
1396 ok = tree_duplicate_sese_tail (single_succ_edge (loop->header), exit,
1397 bbs + 1, n, nbbs);
1398 gcc_assert (ok);
1399 free (bbs);
1400 ex_bb = nbbs[0];
1401 free (nbbs);
1403 /* Other than reductions, the only gimple reg that should be copied
1404 out of the loop is the control variable. */
1406 control_name = NULL_TREE;
1407 for (phi = phi_nodes (ex_bb); phi; phi = PHI_CHAIN (phi))
1409 res = PHI_RESULT (phi);
1410 if (!is_gimple_reg (res))
1411 continue;
1413 /* Check if it is part of a reduction. If it is,
1414 record the phi in the reduction's keep_res field. The
1415 PHI_RESULT of this phi is the resulting value of the reduction
1416 variable when exiting the loop. */
1418 exit = single_dom_exit (loop);
1420 if (htab_elements (reduction_list) > 0)
1422 struct reduction_info *red;
1424 tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);
1426 red = reduction_phi (reduction_list, SSA_NAME_DEF_STMT (val));
1427 if (red)
1428 red->keep_res = phi;
1430 else
1431 gcc_assert (control_name == NULL_TREE
1432 && SSA_NAME_VAR (res) == SSA_NAME_VAR (control));
1433 control_name = res;
1435 gcc_assert (control_name != NULL_TREE);
1436 phi = SSA_NAME_DEF_STMT (control_name);
1437 remove_phi_node (phi, NULL_TREE, false);
1439 /* Initialize the control variable to NIT. */
1440 bsi = bsi_after_labels (ex_bb);
1441 t = build_gimple_modify_stmt (control_name, nit);
1442 bsi_insert_before (&bsi, t, BSI_NEW_STMT);
1443 SSA_NAME_DEF_STMT (control_name) = t;
1446 /* Create the parallel constructs for LOOP as described in gen_parallel_loop.
1447 LOOP_FN and DATA are the arguments of OMP_PARALLEL.
1448 NEW_DATA is the variable that should be initialized from the argument
1449 of LOOP_FN. N_THREADS is the requested number of threads. Returns the
1450 basic block containing OMP_PARALLEL tree. */
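/* Roughly (illustration, not from the original sources; the exact CFG
   surgery is in the function body), the construct emitted around LOOP is

     #pragma omp parallel num_threads (N_THREADS)   [ LOOP_FN, DATA ]
       NEW_DATA = (cast) .paral_data_param;
       #pragma omp for schedule (static)
         for (IV = INIT; COND; IV += 1)
           ... loop body ...
       OMP_CONTINUE
       OMP_RETURN    -- closes OMP_FOR
     OMP_RETURN      -- closes OMP_PARALLEL

   which matches the schema given in the comment of gen_parallel_loop
   below.  */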
1452 static basic_block
1453 create_parallel_loop (struct loop *loop, tree loop_fn, tree data,
1454 tree new_data, unsigned n_threads)
1456 block_stmt_iterator bsi;
1457 basic_block bb, paral_bb, for_bb, ex_bb;
1458 tree t, param, res, for_stmt;
1459 tree cvar, cvar_init, initvar, cvar_next, cvar_base, cond, phi, type;
1460 edge exit, nexit, guard, end, e;
1462 /* Prepare the OMP_PARALLEL statement. */
1463 bb = loop_preheader_edge (loop)->src;
1464 paral_bb = single_pred (bb);
1465 bsi = bsi_last (paral_bb);
1467 t = build_omp_clause (OMP_CLAUSE_NUM_THREADS);
1468 OMP_CLAUSE_NUM_THREADS_EXPR (t)
1469 = build_int_cst (integer_type_node, n_threads);
1470 t = build4 (OMP_PARALLEL, void_type_node, NULL_TREE, t, loop_fn, data);
1472 bsi_insert_after (&bsi, t, BSI_NEW_STMT);
1474 /* Initialize NEW_DATA. */
1475 if (data)
1477 bsi = bsi_after_labels (bb);
1479 param = make_ssa_name (DECL_ARGUMENTS (loop_fn), NULL_TREE);
1480 t = build_gimple_modify_stmt (param, build_fold_addr_expr (data));
1481 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
1482 SSA_NAME_DEF_STMT (param) = t;
1484 t = build_gimple_modify_stmt (new_data,
1485 fold_convert (TREE_TYPE (new_data),
1486 param));
1487 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
1488 SSA_NAME_DEF_STMT (new_data) = t;
1491 /* Emit OMP_RETURN for OMP_PARALLEL. */
1492 bb = split_loop_exit_edge (single_dom_exit (loop));
1493 bsi = bsi_last (bb);
1494 bsi_insert_after (&bsi, make_node (OMP_RETURN), BSI_NEW_STMT);
1496 /* Extract data for OMP_FOR. */
1497 gcc_assert (loop->header == single_dom_exit (loop)->src);
1498 cond = COND_EXPR_COND (last_stmt (loop->header));
1500 cvar = TREE_OPERAND (cond, 0);
1501 cvar_base = SSA_NAME_VAR (cvar);
1502 phi = SSA_NAME_DEF_STMT (cvar);
1503 cvar_init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
1504 initvar = make_ssa_name (cvar_base, NULL_TREE);
1505 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, loop_preheader_edge (loop)),
1506 initvar);
1507 cvar_next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
1509 bsi = bsi_last (loop->latch);
1510 gcc_assert (bsi_stmt (bsi) == SSA_NAME_DEF_STMT (cvar_next));
1511 bsi_remove (&bsi, true);
1513 /* Prepare cfg. */
1514 for_bb = split_edge (loop_preheader_edge (loop));
1515 ex_bb = split_loop_exit_edge (single_dom_exit (loop));
1516 extract_true_false_edges_from_block (loop->header, &nexit, &exit);
1517 gcc_assert (exit == single_dom_exit (loop));
1519 guard = make_edge (for_bb, ex_bb, 0);
1520 single_succ_edge (loop->latch)->flags = 0;
1521 end = make_edge (loop->latch, ex_bb, EDGE_FALLTHRU);
1522 for (phi = phi_nodes (ex_bb); phi; phi = PHI_CHAIN (phi))
1524 res = PHI_RESULT (phi);
1525 gcc_assert (!is_gimple_reg (phi));
1526 t = SSA_NAME_DEF_STMT (PHI_ARG_DEF_FROM_EDGE (phi, exit));
1527 add_phi_arg (phi, PHI_ARG_DEF_FROM_EDGE (t, loop_preheader_edge (loop)),
1528 guard);
1529 add_phi_arg (phi, PHI_ARG_DEF_FROM_EDGE (t, loop_latch_edge (loop)),
1530 end);
1532 e = redirect_edge_and_branch (exit, nexit->dest);
1533 PENDING_STMT (e) = NULL;
1535 /* Emit OMP_FOR. */
1536 TREE_OPERAND (cond, 0) = cvar_base;
1537 type = TREE_TYPE (cvar);
1538 t = build_omp_clause (OMP_CLAUSE_SCHEDULE);
1539 OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_STATIC;
1541 for_stmt = make_node (OMP_FOR);
1542 TREE_TYPE (for_stmt) = void_type_node;
1543 OMP_FOR_CLAUSES (for_stmt) = t;
1544 OMP_FOR_INIT (for_stmt) = build_gimple_modify_stmt (initvar, cvar_init);
1545 OMP_FOR_COND (for_stmt) = cond;
1546 OMP_FOR_INCR (for_stmt) = build_gimple_modify_stmt (cvar_base,
1547 build2 (PLUS_EXPR, type,
1548 cvar_base,
1549 build_int_cst
1550 (type, 1)));
1551 OMP_FOR_BODY (for_stmt) = NULL_TREE;
1552 OMP_FOR_PRE_BODY (for_stmt) = NULL_TREE;
1554 bsi = bsi_last (for_bb);
1555 bsi_insert_after (&bsi, for_stmt, BSI_NEW_STMT);
1556 SSA_NAME_DEF_STMT (initvar) = for_stmt;
1558 /* Emit OMP_CONTINUE. */
1559 bsi = bsi_last (loop->latch);
1560 t = build2 (OMP_CONTINUE, void_type_node, cvar_next, cvar);
1561 bsi_insert_after (&bsi, t, BSI_NEW_STMT);
1562 SSA_NAME_DEF_STMT (cvar_next) = t;
1564 /* Emit OMP_RETURN for OMP_FOR. */
1565 bsi = bsi_last (ex_bb);
1566 bsi_insert_after (&bsi, make_node (OMP_RETURN), BSI_NEW_STMT);
1568 return paral_bb;
1571 /* Generates code to execute the iterations of LOOP in N_THREADS threads in
1572 parallel. NITER describes number of iterations of LOOP.
1573 REDUCTION_LIST describes the reductions existing in LOOP. */
1575 static void
1576 gen_parallel_loop (struct loop *loop, htab_t reduction_list,
1577 unsigned n_threads, struct tree_niter_desc *niter)
1579 struct loop *nloop;
1580 tree many_iterations_cond, type, nit;
1581 tree stmts, arg_struct, new_arg_struct;
1582 basic_block parallel_head;
1583 struct clsn_data clsn_data;
1584 unsigned prob;
1586 /* From
1588 ---------------------------------------------------------------------
1589 loop
1591 IV = phi (INIT, IV + STEP)
1592 BODY1;
1593 if (COND)
1594 break;
1595 BODY2;
1597 ---------------------------------------------------------------------
1599 with # of iterations NITER (possibly with MAY_BE_ZERO assumption),
1600 we generate the following code:
1602 ---------------------------------------------------------------------
1604 if (MAY_BE_ZERO
1605 || NITER < MIN_PER_THREAD * N_THREADS)
1606 goto original;
1608 BODY1;
1609 store all local loop-invariant variables used in the body of the loop to DATA.
1610 OMP_PARALLEL (OMP_CLAUSE_NUM_THREADS (N_THREADS), LOOPFN, DATA);
1611 load the variables from DATA.
1612 OMP_FOR (IV = INIT; COND; IV += STEP) (OMP_CLAUSE_SCHEDULE (static))
1613 BODY2;
1614 BODY1;
1615 OMP_CONTINUE;
1616 OMP_RETURN -- OMP_FOR
1617 OMP_RETURN -- OMP_PARALLEL
1618 goto end;
1620 original:
1621 loop
1623 IV = phi (INIT, IV + STEP)
1624 BODY1;
1625 if (COND)
1626 break;
1627 BODY2;
1630 end:
1634 /* Create two versions of the loop -- in the old one, we know that the
1635 number of iterations is large enough, and we will transform it into the
1636 loop that will be split to loop_fn; the new one will be used for the
1637 remaining iterations. */
1639 type = TREE_TYPE (niter->niter);
1640 nit = force_gimple_operand (unshare_expr (niter->niter), &stmts, true,
1641 NULL_TREE);
1642 if (stmts)
1643 bsi_insert_on_edge_immediate (loop_preheader_edge (loop), stmts);
1645 many_iterations_cond =
1646 fold_build2 (GE_EXPR, boolean_type_node,
1647 nit, build_int_cst (type, MIN_PER_THREAD * n_threads));
1648 many_iterations_cond
1649 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1650 invert_truthvalue (unshare_expr (niter->may_be_zero)),
1651 many_iterations_cond);
1652 many_iterations_cond
1653 = force_gimple_operand (many_iterations_cond, &stmts, false, NULL_TREE);
1654 if (stmts)
1655 bsi_insert_on_edge_immediate (loop_preheader_edge (loop), stmts);
1656 if (!is_gimple_condexpr (many_iterations_cond))
1658 many_iterations_cond
1659 = force_gimple_operand (many_iterations_cond, &stmts,
1660 true, NULL_TREE);
1661 if (stmts)
1662 bsi_insert_on_edge_immediate (loop_preheader_edge (loop), stmts);
1665 initialize_original_copy_tables ();
1667 /* We assume that the loop usually iterates a lot. */
1668 prob = 4 * REG_BR_PROB_BASE / 5;
1669 nloop = loop_version (loop, many_iterations_cond, NULL,
1670 prob, prob, REG_BR_PROB_BASE - prob, true);
1671 update_ssa (TODO_update_ssa);
1672 free_original_copy_tables ();
1674 /* Base all the induction variables in LOOP on a single control one. */
1675 canonicalize_loop_ivs (loop, reduction_list, nit);
1677 /* Ensure that the exit condition is the first statement in the loop. */
1678 transform_to_exit_first_loop (loop, reduction_list, nit);
1681 /* Generate initializations for reductions. */
1683 if (htab_elements (reduction_list) > 0)
1684 htab_traverse (reduction_list, initialize_reductions, loop);
1686 /* Eliminate the references to local variables from the loop. */
1687 eliminate_local_variables (loop);
1689 /* In the old loop, move all variables non-local to the loop to a structure
1690 and back, and create separate decls for the variables used in loop. */
1691 separate_decls_in_loop (loop, reduction_list, &arg_struct, &new_arg_struct, &clsn_data);
1693 /* Create the parallel constructs. */
1694 parallel_head = create_parallel_loop (loop, create_loop_fn (), arg_struct,
1695 new_arg_struct, n_threads);
1696 if (htab_elements (reduction_list) > 0)
1697 create_call_for_reduction (loop, reduction_list, &clsn_data);
1699 scev_reset ();
1701 /* Cancel the loop (it is simpler to do it here rather than to teach the
1702 expander to do it). */
1703 cancel_loop_tree (loop);
1705 /* Expand the parallel constructs. We do it directly here instead of running
1706 a separate expand_omp pass, since it is more efficient, and less likely to
1707 cause troubles with further analyses not being able to deal with the
1708 OMP trees. */
1710 omp_expand_local (parallel_head);
1713 /* Detect parallel loops and generate parallel code using libgomp
1714 primitives. Returns true if some loop was parallelized, false
1715 otherwise. */
1717 bool
1718 parallelize_loops (void)
1720 unsigned n_threads = flag_tree_parallelize_loops;
1721 bool changed = false;
1722 struct loop *loop;
1723 struct tree_niter_desc niter_desc;
1724 loop_iterator li;
1725 htab_t reduction_list;
1727 /* Do not parallelize loops in the functions created by parallelization. */
1728 if (parallelized_function_p (cfun->decl))
1729 return false;
1731 reduction_list = htab_create (10, reduction_info_hash,
1732 reduction_info_eq, free);
1734 FOR_EACH_LOOP (li, loop, 0)
1736 htab_empty (reduction_list);
1737 if (/* Do not bother with loops in cold areas. */
1738 !maybe_hot_bb_p (loop->header)
1739 /* Or loops that roll too little. */
1740 || expected_loop_iterations (loop) <= n_threads
1741 /* And of course, the loop must be parallelizable. */
1742 || !can_duplicate_loop_p (loop)
1743 || !loop_parallel_p (loop, reduction_list, &niter_desc))
1744 continue;
1746 changed = true;
1747 gen_parallel_loop (loop, reduction_list, n_threads, &niter_desc);
1748 verify_flow_info ();
1749 verify_dominators (CDI_DOMINATORS);
1750 verify_loop_structure ();
1751 verify_loop_closed_ssa ();
1754 htab_delete (reduction_list);
1755 return changed;
1758 #include "gt-tree-parloops.h"