gcc/graphite-sese-to-poly.c
1 /* Conversion of SESE regions to Polyhedra.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by Sebastian Pop <sebastian.pop@amd.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
23 #ifdef HAVE_isl
24 #include <isl/constraint.h>
25 #include <isl/set.h>
26 #include <isl/map.h>
27 #include <isl/union_map.h>
29 #include <isl/aff.h>
30 #include <isl/val.h>
32 /* Since ISL-0.13, the extern is in val_gmp.h. */
33 #if !defined(HAVE_ISL_SCHED_CONSTRAINTS_COMPUTE_SCHEDULE) && defined(__cplusplus)
34 extern "C" {
35 #endif
36 #include <isl/val_gmp.h>
37 #if !defined(HAVE_ISL_SCHED_CONSTRAINTS_COMPUTE_SCHEDULE) && defined(__cplusplus)
39 #endif
40 #endif
42 #include "system.h"
43 #include "coretypes.h"
44 #include "hash-set.h"
45 #include "machmode.h"
46 #include "vec.h"
47 #include "double-int.h"
48 #include "input.h"
49 #include "alias.h"
50 #include "symtab.h"
51 #include "options.h"
52 #include "wide-int.h"
53 #include "inchash.h"
54 #include "tree.h"
55 #include "fold-const.h"
56 #include "predict.h"
57 #include "tm.h"
58 #include "hard-reg-set.h"
59 #include "function.h"
60 #include "dominance.h"
61 #include "cfg.h"
62 #include "basic-block.h"
63 #include "tree-ssa-alias.h"
64 #include "internal-fn.h"
65 #include "gimple-expr.h"
66 #include "is-a.h"
67 #include "gimple.h"
68 #include "gimple-iterator.h"
69 #include "gimplify.h"
70 #include "gimplify-me.h"
71 #include "gimple-ssa.h"
72 #include "tree-cfg.h"
73 #include "tree-phinodes.h"
74 #include "ssa-iterators.h"
75 #include "stringpool.h"
76 #include "tree-ssanames.h"
77 #include "tree-ssa-loop-manip.h"
78 #include "tree-ssa-loop-niter.h"
79 #include "tree-ssa-loop.h"
80 #include "tree-into-ssa.h"
81 #include "tree-pass.h"
82 #include "cfgloop.h"
83 #include "tree-chrec.h"
84 #include "tree-data-ref.h"
85 #include "tree-scalar-evolution.h"
86 #include "domwalk.h"
87 #include "sese.h"
88 #include "tree-ssa-propagate.h"
90 #ifdef HAVE_isl
91 #include "hashtab.h"
92 #include "rtl.h"
93 #include "flags.h"
94 #include "statistics.h"
95 #include "real.h"
96 #include "fixed-value.h"
97 #include "insn-config.h"
98 #include "expmed.h"
99 #include "dojump.h"
100 #include "explow.h"
101 #include "calls.h"
102 #include "emit-rtl.h"
103 #include "varasm.h"
104 #include "stmt.h"
105 #include "expr.h"
106 #include "graphite-poly.h"
107 #include "graphite-sese-to-poly.h"
110 /* Assigns to RES the value of the INTEGER_CST T. */
112 static inline void
113 tree_int_to_gmp (tree t, mpz_t res)
115 wi::to_mpz (t, res, TYPE_SIGN (TREE_TYPE (t)));
118 /* Returns the index of the PHI argument defined in the outermost
119 loop. */
121 static size_t
122 phi_arg_in_outermost_loop (gphi *phi)
124 loop_p loop = gimple_bb (phi)->loop_father;
125 size_t i, res = 0;
127 for (i = 0; i < gimple_phi_num_args (phi); i++)
128 if (!flow_bb_inside_loop_p (loop, gimple_phi_arg_edge (phi, i)->src))
130 loop = gimple_phi_arg_edge (phi, i)->src->loop_father;
131 res = i;
134 return res;
137 /* Removes a simple copy phi node "RES = phi (INIT, RES)" at position
138 PSI by inserting on the loop ENTRY edge the assignment "RES = INIT". */
140 static void
141 remove_simple_copy_phi (gphi_iterator *psi)
143 gphi *phi = psi->phi ();
144 tree res = gimple_phi_result (phi);
145 size_t entry = phi_arg_in_outermost_loop (phi);
146 tree init = gimple_phi_arg_def (phi, entry);
147 gassign *stmt = gimple_build_assign (res, init);
148 edge e = gimple_phi_arg_edge (phi, entry);
150 remove_phi_node (psi, false);
151 gsi_insert_on_edge_immediate (e, stmt);
154 /* Removes an invariant phi node at position PSI by inserting on the
155 loop ENTRY edge the assignment RES = INIT. */
157 static void
158 remove_invariant_phi (sese region, gphi_iterator *psi)
160 gphi *phi = psi->phi ();
161 loop_p loop = loop_containing_stmt (phi);
162 tree res = gimple_phi_result (phi);
163 tree scev = scalar_evolution_in_region (region, loop, res);
164 size_t entry = phi_arg_in_outermost_loop (phi);
165 edge e = gimple_phi_arg_edge (phi, entry);
166 tree var;
167 gassign *stmt;
168 gimple_seq stmts = NULL;
170 if (tree_contains_chrecs (scev, NULL))
171 scev = gimple_phi_arg_def (phi, entry);
173 var = force_gimple_operand (scev, &stmts, true, NULL_TREE);
174 stmt = gimple_build_assign (res, var);
175 remove_phi_node (psi, false);
177 gimple_seq_add_stmt (&stmts, stmt);
178 gsi_insert_seq_on_edge (e, stmts);
179 gsi_commit_edge_inserts ();
180 SSA_NAME_DEF_STMT (res) = stmt;
183 /* Returns true when the phi node at PSI is of the form "a = phi (a, x)". */
185 static inline bool
186 simple_copy_phi_p (gphi *phi)
188 tree res;
190 if (gimple_phi_num_args (phi) != 2)
191 return false;
193 res = gimple_phi_result (phi);
194 return (res == gimple_phi_arg_def (phi, 0)
195 || res == gimple_phi_arg_def (phi, 1));
198 /* Returns true when the phi node at position PSI is a reduction phi
199 node in REGION. Otherwise moves the pointer PSI to the next phi to
200 be considered. */
202 static bool
203 reduction_phi_p (sese region, gphi_iterator *psi)
205 loop_p loop;
206 gphi *phi = psi->phi ();
207 tree res = gimple_phi_result (phi);
209 loop = loop_containing_stmt (phi);
211 if (simple_copy_phi_p (phi))
213 /* PRE introduces phi nodes like these; for example,
214 see id-5.f in the Fortran Graphite testsuite:
216 # prephitmp.85_265 = PHI <prephitmp.85_258(33), prephitmp.85_265(18)>
218 remove_simple_copy_phi (psi);
219 return false;
222 if (scev_analyzable_p (res, region))
224 tree scev = scalar_evolution_in_region (region, loop, res);
226 if (evolution_function_is_invariant_p (scev, loop->num))
227 remove_invariant_phi (region, psi);
228 else
229 gsi_next (psi);
231 return false;
234 /* All the other cases are considered reductions. */
235 return true;
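/* Illustrative example (not part of the original source): a source
   loop like the following gives rise to a reduction phi node for
   "sum" in the loop header, which reduction_phi_p classifies as a
   reduction, while the single-argument phi of "sum" after the loop
   is a loop close phi handled elsewhere.  The function name is
   hypothetical.  */
#if 0
int
example_reduction (int *a, int n)
{
  int sum = 0;                  /* Initial value feeding the phi.  */
  for (int i = 0; i < n; i++)
    sum = sum + a[i];           /* Cyclic self-dependence: reduction phi.  */
  return sum;                   /* Uses the loop close phi of sum.  */
}
#endif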
238 /* Store the GRAPHITE representation of BB. */
240 static gimple_bb_p
241 new_gimple_bb (basic_block bb, vec<data_reference_p> drs)
243 struct gimple_bb *gbb;
245 gbb = XNEW (struct gimple_bb);
246 bb->aux = gbb;
247 GBB_BB (gbb) = bb;
248 GBB_DATA_REFS (gbb) = drs;
249 GBB_CONDITIONS (gbb).create (0);
250 GBB_CONDITION_CASES (gbb).create (0);
252 return gbb;
255 static void
256 free_data_refs_aux (vec<data_reference_p> datarefs)
258 unsigned int i;
259 struct data_reference *dr;
261 FOR_EACH_VEC_ELT (datarefs, i, dr)
262 if (dr->aux)
264 base_alias_pair *bap = (base_alias_pair *)(dr->aux);
266 free (bap->alias_set);
268 free (bap);
269 dr->aux = NULL;
272 /* Frees GBB. */
274 static void
275 free_gimple_bb (struct gimple_bb *gbb)
277 free_data_refs_aux (GBB_DATA_REFS (gbb));
278 free_data_refs (GBB_DATA_REFS (gbb));
280 GBB_CONDITIONS (gbb).release ();
281 GBB_CONDITION_CASES (gbb).release ();
282 GBB_BB (gbb)->aux = 0;
283 XDELETE (gbb);
286 /* Deletes all gimple bbs in SCOP. */
288 static void
289 remove_gbbs_in_scop (scop_p scop)
291 int i;
292 poly_bb_p pbb;
294 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
295 free_gimple_bb (PBB_BLACK_BOX (pbb));
298 /* Deletes all scops in SCOPS. */
300 void
301 free_scops (vec<scop_p> scops)
303 int i;
304 scop_p scop;
306 FOR_EACH_VEC_ELT (scops, i, scop)
308 remove_gbbs_in_scop (scop);
309 free_sese (SCOP_REGION (scop));
310 free_scop (scop);
313 scops.release ();
316 /* Same as outermost_loop_in_sese: returns the outermost loop
317 containing BB in REGION, but also makes sure that the returned
318 loop belongs to REGION. When the loop containing BB is not part
319 of REGION, the first loop of REGION is returned instead. */
321 static loop_p
322 outermost_loop_in_sese_1 (sese region, basic_block bb)
324 loop_p nest = outermost_loop_in_sese (region, bb);
326 if (loop_in_sese_p (nest, region))
327 return nest;
329 /* When the basic block BB does not belong to a loop in the region,
330 return the first loop in the region. */
331 nest = nest->inner;
332 while (nest)
333 if (loop_in_sese_p (nest, region))
334 break;
335 else
336 nest = nest->next;
338 gcc_assert (nest);
339 return nest;
342 /* Builds the gimple black box for BB, collecting the data
343 references found in its statements. */
345 static gimple_bb_p
346 try_generate_gimple_bb (scop_p scop, basic_block bb)
348 vec<data_reference_p> drs;
349 drs.create (5);
350 sese region = SCOP_REGION (scop);
351 loop_p nest = outermost_loop_in_sese_1 (region, bb);
352 gimple_stmt_iterator gsi;
354 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
356 gimple stmt = gsi_stmt (gsi);
357 loop_p loop;
359 if (is_gimple_debug (stmt))
360 continue;
362 loop = loop_containing_stmt (stmt);
363 if (!loop_in_sese_p (loop, region))
364 loop = nest;
366 graphite_find_data_references_in_stmt (nest, loop, stmt, &drs);
369 return new_gimple_bb (bb, drs);
372 /* Returns true if all predecessors of BB, that are not dominated by BB, are
373 marked in MAP. The predecessors dominated by BB are loop latches and will
374 be handled after BB. */
376 static bool
377 all_non_dominated_preds_marked_p (basic_block bb, sbitmap map)
379 edge e;
380 edge_iterator ei;
382 FOR_EACH_EDGE (e, ei, bb->preds)
383 if (!bitmap_bit_p (map, e->src->index)
384 && !dominated_by_p (CDI_DOMINATORS, e->src, bb))
385 return false;
387 return true;
390 /* Compare the depth of two basic_block's P1 and P2. */
392 static int
393 compare_bb_depths (const void *p1, const void *p2)
395 const_basic_block const bb1 = *(const_basic_block const*)p1;
396 const_basic_block const bb2 = *(const_basic_block const*)p2;
397 int d1 = loop_depth (bb1->loop_father);
398 int d2 = loop_depth (bb2->loop_father);
400 if (d1 < d2)
401 return 1;
403 if (d1 > d2)
404 return -1;
406 return 0;
409 /* Sort the basic blocks from DOM such that the ones at the deepest
410 loop level come first. */
412 static void
413 graphite_sort_dominated_info (vec<basic_block> dom)
415 dom.qsort (compare_bb_depths);
418 /* Recursive helper function for build_scop_bbs. */
420 static void
421 build_scop_bbs_1 (scop_p scop, sbitmap visited, basic_block bb)
423 sese region = SCOP_REGION (scop);
424 vec<basic_block> dom;
425 poly_bb_p pbb;
427 if (bitmap_bit_p (visited, bb->index)
428 || !bb_in_sese_p (bb, region))
429 return;
431 pbb = new_poly_bb (scop, try_generate_gimple_bb (scop, bb));
432 SCOP_BBS (scop).safe_push (pbb);
433 bitmap_set_bit (visited, bb->index);
435 dom = get_dominated_by (CDI_DOMINATORS, bb);
437 if (!dom.exists ())
438 return;
440 graphite_sort_dominated_info (dom);
442 while (!dom.is_empty ())
444 int i;
445 basic_block dom_bb;
447 FOR_EACH_VEC_ELT (dom, i, dom_bb)
448 if (all_non_dominated_preds_marked_p (dom_bb, visited))
450 build_scop_bbs_1 (scop, visited, dom_bb);
451 dom.unordered_remove (i);
452 break;
456 dom.release ();
459 /* Gather the basic blocks belonging to the SCOP. */
461 static void
462 build_scop_bbs (scop_p scop)
464 sbitmap visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
465 sese region = SCOP_REGION (scop);
467 bitmap_clear (visited);
468 build_scop_bbs_1 (scop, visited, SESE_ENTRY_BB (region));
469 sbitmap_free (visited);
472 /* Return an ISL identifier for the polyhedral basic block PBB. */
474 static isl_id *
475 isl_id_for_pbb (scop_p s, poly_bb_p pbb)
477 char name[50];
478 snprintf (name, sizeof (name), "S_%d", pbb_index (pbb));
479 return isl_id_alloc (s->ctx, name, pbb);
482 /* Converts the STATIC_SCHEDULE of PBB into a scattering polyhedron.
483 We generate SCATTERING_DIMENSIONS scattering dimensions.
485 CLooG 0.15.0 and previous versions require that all
486 scattering functions of one CloogProgram have the same number of
487 scattering dimensions, therefore we allow it to be specified. This
488 requirement should be removed in future versions of CLooG.
490 The scattering polyhedron consists of these dimensions: scattering,
491 loop_iterators, parameters.
493 Example:
495 | scattering_dimensions = 5
496 | used_scattering_dimensions = 3
497 | nb_iterators = 1
498 | scop_nb_params = 2
500 | Schedule:
502 | 4 5
504 | Scattering polyhedron:
506 | scattering: {s1, s2, s3, s4, s5}
507 | loop_iterators: {i}
508 | parameters: {p1, p2}
510 | s1 s2 s3 s4 s5 i p1 p2 1
511 | 1 0 0 0 0 0 0 0 -4 = 0
512 | 0 1 0 0 0 -1 0 0 0 = 0
513 | 0 0 1 0 0 0 0 0 -5 = 0 */
515 static void
516 build_pbb_scattering_polyhedrons (isl_aff *static_sched,
517 poly_bb_p pbb, int scattering_dimensions)
519 int i;
520 int nb_iterators = pbb_dim_iter_domain (pbb);
521 int used_scattering_dimensions = nb_iterators * 2 + 1;
522 isl_val *val;
523 isl_space *dc, *dm;
525 gcc_assert (scattering_dimensions >= used_scattering_dimensions);
527 dc = isl_set_get_space (pbb->domain);
528 dm = isl_space_add_dims (isl_space_from_domain (dc),
529 isl_dim_out, scattering_dimensions);
530 pbb->schedule = isl_map_universe (dm);
532 for (i = 0; i < scattering_dimensions; i++)
534 /* Textual order inside this loop. */
535 if ((i % 2) == 0)
537 isl_constraint *c = isl_equality_alloc
538 (isl_local_space_from_space (isl_map_get_space (pbb->schedule)));
540 val = isl_aff_get_coefficient_val (static_sched, isl_dim_in, i / 2);
542 val = isl_val_neg (val);
543 c = isl_constraint_set_constant_val (c, val);
544 c = isl_constraint_set_coefficient_si (c, isl_dim_out, i, 1);
545 pbb->schedule = isl_map_add_constraint (pbb->schedule, c);
548 /* Iterations of this loop. */
549 else /* if ((i % 2) == 1) */
551 int loop = (i - 1) / 2;
552 pbb->schedule = isl_map_equate (pbb->schedule, isl_dim_in, loop,
553 isl_dim_out, i);
557 pbb->transformed = isl_map_copy (pbb->schedule);
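/* Illustrative sketch (an assumption, not part of the pass): pinning a
   single scattering dimension DIM of MAP to the constant C, i.e. adding
   the equality s_DIM - C = 0, with the same ISL calls used in
   build_pbb_scattering_polyhedrons above.  The function name is
   hypothetical.  */
#if 0
static __isl_give isl_map *
fix_scattering_dimension (__isl_take isl_map *map, int dim, int c)
{
  isl_constraint *eq = isl_equality_alloc
    (isl_local_space_from_space (isl_map_get_space (map)));
  eq = isl_constraint_set_constant_si (eq, -c);
  eq = isl_constraint_set_coefficient_si (eq, isl_dim_out, dim, 1);
  return isl_map_add_constraint (map, eq);
}
#endif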
/* Build for BB the static schedule.

   The static schedule is a Dewey numbering of the abstract syntax
   tree: http://en.wikipedia.org/wiki/Dewey_Decimal_Classification

   The following example informally defines the static schedule:

   A
   for (i: ...)
     {
       for (j: ...)
         {
           B
           C
         }

       for (k: ...)
         {
           D
           E
         }
     }
   F

   Static schedules for A to F:

     DEPTH
     0 1 2
   A 0
   B 1 0 0
   C 1 0 1
   D 1 1 0
   E 1 1 1
   F 2
*/
596 static void
597 build_scop_scattering (scop_p scop)
599 int i;
600 poly_bb_p pbb;
601 gimple_bb_p previous_gbb = NULL;
602 isl_space *dc = isl_set_get_space (scop->context);
603 isl_aff *static_sched;
605 dc = isl_space_add_dims (dc, isl_dim_set, number_of_loops (cfun));
606 static_sched = isl_aff_zero_on_domain (isl_local_space_from_space (dc));
608 /* We have to start schedules at 0 on the first component.
609 Because there is no previous loop to compare prefix loops
610 against, PREFIX will be equal to zero there, and that index
611 will be incremented before copying. */
612 static_sched = isl_aff_add_coefficient_si (static_sched, isl_dim_in, 0, -1);
614 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
616 gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
617 int prefix;
618 int nb_scat_dims = pbb_dim_iter_domain (pbb) * 2 + 1;
620 if (previous_gbb)
621 prefix = nb_common_loops (SCOP_REGION (scop), previous_gbb, gbb);
622 else
623 prefix = 0;
625 previous_gbb = gbb;
627 static_sched = isl_aff_add_coefficient_si (static_sched, isl_dim_in,
628 prefix, 1);
629 build_pbb_scattering_polyhedrons (static_sched, pbb, nb_scat_dims);
632 isl_aff_free (static_sched);
635 static isl_pw_aff *extract_affine (scop_p, tree, __isl_take isl_space *space);
637 /* Extract an affine expression from the chain of recurrence E. */
639 static isl_pw_aff *
640 extract_affine_chrec (scop_p s, tree e, __isl_take isl_space *space)
642 isl_pw_aff *lhs = extract_affine (s, CHREC_LEFT (e), isl_space_copy (space));
643 isl_pw_aff *rhs = extract_affine (s, CHREC_RIGHT (e), isl_space_copy (space));
644 isl_local_space *ls = isl_local_space_from_space (space);
645 unsigned pos = sese_loop_depth ((sese) s->region, get_chrec_loop (e)) - 1;
646 isl_aff *loop = isl_aff_set_coefficient_si
647 (isl_aff_zero_on_domain (ls), isl_dim_in, pos, 1);
648 isl_pw_aff *l = isl_pw_aff_from_aff (loop);
650 /* Before multiplying, make sure that the result is affine. */
651 gcc_assert (isl_pw_aff_is_cst (rhs)
652 || isl_pw_aff_is_cst (l));
654 return isl_pw_aff_add (lhs, isl_pw_aff_mul (rhs, l));
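/* Worked example (illustration only): the chain of recurrence
   {3, +, 4}_1, a value starting at 3 and incremented by 4 at each
   iteration of loop 1, is translated here to the piecewise affine
   expression 3 + 4 * i, where i is the input dimension of SPACE that
   corresponds to loop 1.  */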
657 /* Extract an affine expression from the mult_expr E. */
659 static isl_pw_aff *
660 extract_affine_mul (scop_p s, tree e, __isl_take isl_space *space)
662 isl_pw_aff *lhs = extract_affine (s, TREE_OPERAND (e, 0),
663 isl_space_copy (space));
664 isl_pw_aff *rhs = extract_affine (s, TREE_OPERAND (e, 1), space);
666 if (!isl_pw_aff_is_cst (lhs)
667 && !isl_pw_aff_is_cst (rhs))
669 isl_pw_aff_free (lhs);
670 isl_pw_aff_free (rhs);
671 return NULL;
674 return isl_pw_aff_mul (lhs, rhs);
677 /* Return an ISL identifier from the name of the ssa_name E. */
679 static isl_id *
680 isl_id_for_ssa_name (scop_p s, tree e)
682 const char *name = get_name (e);
683 isl_id *id;
685 if (name)
686 id = isl_id_alloc (s->ctx, name, e);
687 else
689 char name1[50];
690 snprintf (name1, sizeof (name1), "P_%d", SSA_NAME_VERSION (e));
691 id = isl_id_alloc (s->ctx, name1, e);
694 return id;
697 /* Return an ISL identifier for the data reference DR. */
699 static isl_id *
700 isl_id_for_dr (scop_p s, data_reference_p dr ATTRIBUTE_UNUSED)
702 /* Data references all get the same isl_id. They need to be comparable
703 and are distinguished through the first dimension, which contains the
704 alias set number. */
705 return isl_id_alloc (s->ctx, "", 0);
708 /* Extract an affine expression from the ssa_name E. */
710 static isl_pw_aff *
711 extract_affine_name (scop_p s, tree e, __isl_take isl_space *space)
713 isl_aff *aff;
714 isl_set *dom;
715 isl_id *id;
716 int dimension;
718 id = isl_id_for_ssa_name (s, e);
719 dimension = isl_space_find_dim_by_id (space, isl_dim_param, id);
720 isl_id_free (id);
721 dom = isl_set_universe (isl_space_copy (space));
722 aff = isl_aff_zero_on_domain (isl_local_space_from_space (space));
723 aff = isl_aff_add_coefficient_si (aff, isl_dim_param, dimension, 1);
724 return isl_pw_aff_alloc (dom, aff);
727 /* Extract an affine expression from the gmp constant G. */
729 static isl_pw_aff *
730 extract_affine_gmp (mpz_t g, __isl_take isl_space *space)
732 isl_local_space *ls = isl_local_space_from_space (isl_space_copy (space));
733 isl_aff *aff = isl_aff_zero_on_domain (ls);
734 isl_set *dom = isl_set_universe (space);
735 isl_val *v;
736 isl_ctx *ct;
738 ct = isl_aff_get_ctx (aff);
739 v = isl_val_int_from_gmp (ct, g);
740 aff = isl_aff_add_constant_val (aff, v);
742 return isl_pw_aff_alloc (dom, aff);
745 /* Extract an affine expression from the integer_cst E. */
747 static isl_pw_aff *
748 extract_affine_int (tree e, __isl_take isl_space *space)
750 isl_pw_aff *res;
751 mpz_t g;
753 mpz_init (g);
754 tree_int_to_gmp (e, g);
755 res = extract_affine_gmp (g, space);
756 mpz_clear (g);
758 return res;
761 /* Compute pwaff mod 2^width. */
763 extern isl_ctx *the_isl_ctx;
765 static isl_pw_aff *
766 wrap (isl_pw_aff *pwaff, unsigned width)
768 isl_val *mod;
770 mod = isl_val_int_from_ui(the_isl_ctx, width);
771 mod = isl_val_2exp (mod);
772 pwaff = isl_pw_aff_mod_val (pwaff, mod);
774 return pwaff;
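/* Illustrative sketch (an assumption, not part of the pass): wrapping
   the constant 260 at width 8 computes 260 mod 2^8 = 4, which models
   the overflow of an 8-bit unsigned value.  The function name is
   hypothetical.  */
#if 0
static void
wrap_example (isl_ctx *ctx)
{
  isl_local_space *ls
    = isl_local_space_from_space (isl_space_set_alloc (ctx, 0, 0));
  isl_aff *aff = isl_aff_add_constant_si (isl_aff_zero_on_domain (ls), 260);
  isl_pw_aff *pwa = wrap (isl_pw_aff_from_aff (aff), 8);
  /* PWA now represents the constant 4.  */
  isl_pw_aff_free (pwa);
}
#endif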
777 /* When parameter NAME is in REGION, returns its index in SESE_PARAMS.
778 Otherwise returns -1. */
780 static inline int
781 parameter_index_in_region_1 (tree name, sese region)
783 int i;
784 tree p;
786 gcc_assert (TREE_CODE (name) == SSA_NAME);
788 FOR_EACH_VEC_ELT (SESE_PARAMS (region), i, p)
789 if (p == name)
790 return i;
792 return -1;
795 /* When the parameter NAME is in REGION, returns its index in
796 SESE_PARAMS. Otherwise this function inserts NAME in SESE_PARAMS
797 and returns the index of NAME. */
799 static int
800 parameter_index_in_region (tree name, sese region)
802 int i;
804 gcc_assert (TREE_CODE (name) == SSA_NAME);
806 i = parameter_index_in_region_1 (name, region);
807 if (i != -1)
808 return i;
810 gcc_assert (SESE_ADD_PARAMS (region));
812 i = SESE_PARAMS (region).length ();
813 SESE_PARAMS (region).safe_push (name);
814 return i;
817 /* Extract an affine expression from the tree E in the scop S. */
819 static isl_pw_aff *
820 extract_affine (scop_p s, tree e, __isl_take isl_space *space)
822 isl_pw_aff *lhs, *rhs, *res;
823 tree type;
825 if (e == chrec_dont_know) {
826 isl_space_free (space);
827 return NULL;
830 switch (TREE_CODE (e))
832 case POLYNOMIAL_CHREC:
833 res = extract_affine_chrec (s, e, space);
834 break;
836 case MULT_EXPR:
837 res = extract_affine_mul (s, e, space);
838 break;
840 case PLUS_EXPR:
841 case POINTER_PLUS_EXPR:
842 lhs = extract_affine (s, TREE_OPERAND (e, 0), isl_space_copy (space));
843 rhs = extract_affine (s, TREE_OPERAND (e, 1), space);
844 res = isl_pw_aff_add (lhs, rhs);
845 break;
847 case MINUS_EXPR:
848 lhs = extract_affine (s, TREE_OPERAND (e, 0), isl_space_copy (space));
849 rhs = extract_affine (s, TREE_OPERAND (e, 1), space);
850 res = isl_pw_aff_sub (lhs, rhs);
851 break;
853 case NEGATE_EXPR:
854 case BIT_NOT_EXPR:
855 lhs = extract_affine (s, TREE_OPERAND (e, 0), isl_space_copy (space));
856 rhs = extract_affine (s, integer_minus_one_node, space);
857 res = isl_pw_aff_mul (lhs, rhs);
858 break;
860 case SSA_NAME:
861 gcc_assert (-1 != parameter_index_in_region_1 (e, SCOP_REGION (s)));
862 res = extract_affine_name (s, e, space);
863 break;
865 case INTEGER_CST:
866 res = extract_affine_int (e, space);
867 /* No need to wrap a single integer. */
868 return res;
870 CASE_CONVERT:
871 case NON_LVALUE_EXPR:
872 res = extract_affine (s, TREE_OPERAND (e, 0), space);
873 break;
875 default:
876 gcc_unreachable ();
877 break;
880 type = TREE_TYPE (e);
881 if (TYPE_UNSIGNED (type))
882 res = wrap (res, TYPE_PRECISION (type));
884 return res;
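/* For illustration (not part of the original source): for an unsigned
   8-bit expression such as n + 250, the PLUS_EXPR case above builds
   n + 250 and the final wrap turns it into (n + 250) mod 256,
   modelling the modulo arithmetic of the gimple type.  */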
887 /* In the context of sese S, scan the expression E and record in
888 SESE_PARAMS every SSA name that E uses as a parameter. */
892 static void
893 scan_tree_for_params (sese s, tree e)
895 if (e == chrec_dont_know)
896 return;
898 switch (TREE_CODE (e))
900 case POLYNOMIAL_CHREC:
901 scan_tree_for_params (s, CHREC_LEFT (e));
902 break;
904 case MULT_EXPR:
905 if (chrec_contains_symbols (TREE_OPERAND (e, 0)))
906 scan_tree_for_params (s, TREE_OPERAND (e, 0));
907 else
908 scan_tree_for_params (s, TREE_OPERAND (e, 1));
909 break;
911 case PLUS_EXPR:
912 case POINTER_PLUS_EXPR:
913 case MINUS_EXPR:
914 scan_tree_for_params (s, TREE_OPERAND (e, 0));
915 scan_tree_for_params (s, TREE_OPERAND (e, 1));
916 break;
918 case NEGATE_EXPR:
919 case BIT_NOT_EXPR:
920 CASE_CONVERT:
921 case NON_LVALUE_EXPR:
922 scan_tree_for_params (s, TREE_OPERAND (e, 0));
923 break;
925 case SSA_NAME:
926 parameter_index_in_region (e, s);
927 break;
929 case INTEGER_CST:
930 case ADDR_EXPR:
931 break;
933 default:
934 gcc_unreachable ();
935 break;
939 /* Find parameters with respect to REGION in BB. We are looking in memory
940 access functions, conditions and loop bounds. */
942 static void
943 find_params_in_bb (sese region, gimple_bb_p gbb)
945 int i;
946 unsigned j;
947 data_reference_p dr;
948 gimple stmt;
949 loop_p loop = GBB_BB (gbb)->loop_father;
951 /* Find parameters in the access functions of data references. */
952 FOR_EACH_VEC_ELT (GBB_DATA_REFS (gbb), i, dr)
953 for (j = 0; j < DR_NUM_DIMENSIONS (dr); j++)
954 scan_tree_for_params (region, DR_ACCESS_FN (dr, j));
956 /* Find parameters in conditional statements. */
957 FOR_EACH_VEC_ELT (GBB_CONDITIONS (gbb), i, stmt)
959 tree lhs = scalar_evolution_in_region (region, loop,
960 gimple_cond_lhs (stmt));
961 tree rhs = scalar_evolution_in_region (region, loop,
962 gimple_cond_rhs (stmt));
964 scan_tree_for_params (region, lhs);
965 scan_tree_for_params (region, rhs);
969 /* Record the parameters used in the SCOP. A variable is a parameter
970 in a scop if it does not vary during the execution of that scop. */
972 static void
973 find_scop_parameters (scop_p scop)
975 poly_bb_p pbb;
976 unsigned i;
977 sese region = SCOP_REGION (scop);
978 struct loop *loop;
979 int nbp;
981 /* Find the parameters used in the loop bounds. */
982 FOR_EACH_VEC_ELT (SESE_LOOP_NEST (region), i, loop)
984 tree nb_iters = number_of_latch_executions (loop);
986 if (!chrec_contains_symbols (nb_iters))
987 continue;
989 nb_iters = scalar_evolution_in_region (region, loop, nb_iters);
990 scan_tree_for_params (region, nb_iters);
993 /* Find the parameters used in data accesses. */
994 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
995 find_params_in_bb (region, PBB_BLACK_BOX (pbb));
997 nbp = sese_nb_params (region);
998 scop_set_nb_params (scop, nbp);
999 SESE_ADD_PARAMS (region) = false;
1002 tree e;
1003 isl_space *space = isl_space_set_alloc (scop->ctx, nbp, 0);
1005 FOR_EACH_VEC_ELT (SESE_PARAMS (region), i, e)
1006 space = isl_space_set_dim_id (space, isl_dim_param, i,
1007 isl_id_for_ssa_name (scop, e));
1009 scop->context = isl_set_universe (space);
1013 /* Builds the constraint polyhedra for LOOP in SCOP. OUTER gives
1014 the constraints of the surrounding loops; results are stored in DOMS. */
1016 static void
1017 build_loop_iteration_domains (scop_p scop, struct loop *loop,
1018 int nb,
1019 isl_set *outer, isl_set **doms)
1021 tree nb_iters = number_of_latch_executions (loop);
1022 sese region = SCOP_REGION (scop);
1024 isl_set *inner = isl_set_copy (outer);
1025 isl_space *space;
1026 isl_constraint *c;
1027 int pos = isl_set_dim (outer, isl_dim_set);
1028 isl_val *v;
1029 mpz_t g;
1031 mpz_init (g);
1033 inner = isl_set_add_dims (inner, isl_dim_set, 1);
1034 space = isl_set_get_space (inner);
1036 /* 0 <= loop_i */
1037 c = isl_inequality_alloc
1038 (isl_local_space_from_space (isl_space_copy (space)));
1039 c = isl_constraint_set_coefficient_si (c, isl_dim_set, pos, 1);
1040 inner = isl_set_add_constraint (inner, c);
1042 /* loop_i <= cst_nb_iters */
1043 if (TREE_CODE (nb_iters) == INTEGER_CST)
1045 c = isl_inequality_alloc
1046 (isl_local_space_from_space (isl_space_copy (space)));
1047 c = isl_constraint_set_coefficient_si (c, isl_dim_set, pos, -1);
1048 tree_int_to_gmp (nb_iters, g);
1049 v = isl_val_int_from_gmp (the_isl_ctx, g);
1050 c = isl_constraint_set_constant_val (c, v);
1051 inner = isl_set_add_constraint (inner, c);
1054 /* loop_i <= expr_nb_iters */
1055 else if (!chrec_contains_undetermined (nb_iters))
1057 widest_int nit;
1058 isl_pw_aff *aff;
1059 isl_set *valid;
1060 isl_local_space *ls;
1061 isl_aff *al;
1062 isl_set *le;
1064 nb_iters = scalar_evolution_in_region (region, loop, nb_iters);
1066 aff = extract_affine (scop, nb_iters, isl_set_get_space (inner));
1067 valid = isl_pw_aff_nonneg_set (isl_pw_aff_copy (aff));
1068 valid = isl_set_project_out (valid, isl_dim_set, 0,
1069 isl_set_dim (valid, isl_dim_set));
1070 scop->context = isl_set_intersect (scop->context, valid);
1072 ls = isl_local_space_from_space (isl_space_copy (space));
1073 al = isl_aff_set_coefficient_si (isl_aff_zero_on_domain (ls),
1074 isl_dim_in, pos, 1);
1075 le = isl_pw_aff_le_set (isl_pw_aff_from_aff (al),
1076 isl_pw_aff_copy (aff));
1077 inner = isl_set_intersect (inner, le);
1079 if (max_stmt_executions (loop, &nit))
1081 /* Insert in the context the constraints from the
1082 estimation of the number of iterations NIT and the
1083 symbolic number of iterations (involving parameter
1084 names) NB_ITERS. First, build the affine expression
1085 "NIT - NB_ITERS" and then say that it is positive,
1086 i.e., NIT approximates NB_ITERS: "NIT >= NB_ITERS". */
1087 isl_pw_aff *approx;
1088 mpz_t g;
1089 isl_set *x;
1090 isl_constraint *c;
1092 mpz_init (g);
1093 wi::to_mpz (nit, g, SIGNED);
1094 mpz_sub_ui (g, g, 1);
1095 approx = extract_affine_gmp (g, isl_set_get_space (inner));
1096 x = isl_pw_aff_ge_set (approx, aff);
1097 x = isl_set_project_out (x, isl_dim_set, 0,
1098 isl_set_dim (x, isl_dim_set));
1099 scop->context = isl_set_intersect (scop->context, x);
1101 c = isl_inequality_alloc
1102 (isl_local_space_from_space (isl_space_copy (space)));
1103 c = isl_constraint_set_coefficient_si (c, isl_dim_set, pos, -1);
1104 v = isl_val_int_from_gmp (the_isl_ctx, g);
1105 mpz_clear (g);
1106 c = isl_constraint_set_constant_val (c, v);
1107 inner = isl_set_add_constraint (inner, c);
1109 else
1110 isl_pw_aff_free (aff);
1112 else
1113 gcc_unreachable ();
1115 if (loop->inner && loop_in_sese_p (loop->inner, region))
1116 build_loop_iteration_domains (scop, loop->inner, nb + 1,
1117 isl_set_copy (inner), doms);
1119 if (nb != 0
1120 && loop->next
1121 && loop_in_sese_p (loop->next, region))
1122 build_loop_iteration_domains (scop, loop->next, nb,
1123 isl_set_copy (outer), doms);
1125 doms[loop->num] = inner;
1127 isl_set_free (outer);
1128 isl_space_free (space);
1129 mpz_clear (g);
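/* Illustrative sketch (an assumption, not part of the pass): for a
   single outermost loop "for (i = 0; i < n; i++)" whose latch executes
   n - 1 times, the iteration domain built above is equivalent to the
   following set in ISL notation.  The function name is hypothetical.  */
#if 0
static __isl_give isl_set *
example_iteration_domain (isl_ctx *ctx)
{
  return isl_set_read_from_str (ctx, "[n] -> { [i] : 0 <= i < n }");
}
#endif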
1132 /* Returns a linear expression for tree T evaluated in PBB. */
1134 static isl_pw_aff *
1135 create_pw_aff_from_tree (poly_bb_p pbb, tree t)
1137 scop_p scop = PBB_SCOP (pbb);
1139 t = scalar_evolution_in_region (SCOP_REGION (scop), pbb_loop (pbb), t);
1140 gcc_assert (!automatically_generated_chrec_p (t));
1142 return extract_affine (scop, t, isl_set_get_space (pbb->domain));
1145 /* Add the conditional statement STMT to PBB. CODE is used as the
1146 comparison operator; passing it separately allows us to invert the
1147 condition or to handle inequalities. */
1149 static void
1150 add_condition_to_pbb (poly_bb_p pbb, gcond *stmt, enum tree_code code)
1152 isl_pw_aff *lhs = create_pw_aff_from_tree (pbb, gimple_cond_lhs (stmt));
1153 isl_pw_aff *rhs = create_pw_aff_from_tree (pbb, gimple_cond_rhs (stmt));
1154 isl_set *cond;
1156 switch (code)
1158 case LT_EXPR:
1159 cond = isl_pw_aff_lt_set (lhs, rhs);
1160 break;
1162 case GT_EXPR:
1163 cond = isl_pw_aff_gt_set (lhs, rhs);
1164 break;
1166 case LE_EXPR:
1167 cond = isl_pw_aff_le_set (lhs, rhs);
1168 break;
1170 case GE_EXPR:
1171 cond = isl_pw_aff_ge_set (lhs, rhs);
1172 break;
1174 case EQ_EXPR:
1175 cond = isl_pw_aff_eq_set (lhs, rhs);
1176 break;
1178 case NE_EXPR:
1179 cond = isl_pw_aff_ne_set (lhs, rhs);
1180 break;
1182 default:
1183 isl_pw_aff_free (lhs);
1184 isl_pw_aff_free (rhs);
1185 return;
1188 cond = isl_set_coalesce (cond);
1189 cond = isl_set_set_tuple_id (cond, isl_set_get_tuple_id (pbb->domain));
1190 pbb->domain = isl_set_intersect (pbb->domain, cond);
1193 /* Add conditions to the domain of PBB. */
1195 static void
1196 add_conditions_to_domain (poly_bb_p pbb)
1198 unsigned int i;
1199 gimple stmt;
1200 gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
1202 if (GBB_CONDITIONS (gbb).is_empty ())
1203 return;
1205 FOR_EACH_VEC_ELT (GBB_CONDITIONS (gbb), i, stmt)
1206 switch (gimple_code (stmt))
1208 case GIMPLE_COND:
1210 gcond *cond_stmt = as_a <gcond *> (stmt);
1211 enum tree_code code = gimple_cond_code (cond_stmt);
1213 /* The conditions for ELSE-branches are inverted. */
1214 if (!GBB_CONDITION_CASES (gbb)[i])
1215 code = invert_tree_comparison (code, false);
1217 add_condition_to_pbb (pbb, cond_stmt, code);
1218 break;
1221 case GIMPLE_SWITCH:
1222 /* Switch statements are not supported right now - fall through. */
1224 default:
1225 gcc_unreachable ();
1226 break;
1230 /* Traverses all the GBBs of the SCOP and adds their constraints to the
1231 iteration domains. */
1233 static void
1234 add_conditions_to_constraints (scop_p scop)
1236 int i;
1237 poly_bb_p pbb;
1239 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
1240 add_conditions_to_domain (pbb);
1243 /* Returns a COND_EXPR statement when BB has a single predecessor, the
1244 edge between BB and its predecessor is not a loop exit edge, and
1245 the last statement of the single predecessor is a COND_EXPR. */
1247 static gcond *
1248 single_pred_cond_non_loop_exit (basic_block bb)
1250 if (single_pred_p (bb))
1252 edge e = single_pred_edge (bb);
1253 basic_block pred = e->src;
1254 gimple stmt;
1256 if (loop_depth (pred->loop_father) > loop_depth (bb->loop_father))
1257 return NULL;
1259 stmt = last_stmt (pred);
1261 if (stmt && gimple_code (stmt) == GIMPLE_COND)
1262 return as_a <gcond *> (stmt);
1265 return NULL;
1268 class sese_dom_walker : public dom_walker
1270 public:
1271 sese_dom_walker (cdi_direction, sese);
1273 virtual void before_dom_children (basic_block);
1274 virtual void after_dom_children (basic_block);
1276 private:
1277 auto_vec<gimple, 3> m_conditions, m_cases;
1278 sese m_region;
1281 sese_dom_walker::sese_dom_walker (cdi_direction direction, sese region)
1282 : dom_walker (direction), m_region (region)
1286 /* Call-back for dom_walk executed before visiting the dominated
1287 blocks. */
1289 void
1290 sese_dom_walker::before_dom_children (basic_block bb)
1292 gimple_bb_p gbb;
1293 gcond *stmt;
1295 if (!bb_in_sese_p (bb, m_region))
1296 return;
1298 stmt = single_pred_cond_non_loop_exit (bb);
1300 if (stmt)
1302 edge e = single_pred_edge (bb);
1304 m_conditions.safe_push (stmt);
1306 if (e->flags & EDGE_TRUE_VALUE)
1307 m_cases.safe_push (stmt);
1308 else
1309 m_cases.safe_push (NULL);
1312 gbb = gbb_from_bb (bb);
1314 if (gbb)
1316 GBB_CONDITIONS (gbb) = m_conditions.copy ();
1317 GBB_CONDITION_CASES (gbb) = m_cases.copy ();
1321 /* Call-back for dom_walk executed after visiting the dominated
1322 blocks. */
1324 void
1325 sese_dom_walker::after_dom_children (basic_block bb)
1327 if (!bb_in_sese_p (bb, m_region))
1328 return;
1330 if (single_pred_cond_non_loop_exit (bb))
1332 m_conditions.pop ();
1333 m_cases.pop ();
1337 /* Add constraints on the possible values of parameter P from the type
1338 of P. */
1340 static void
1341 add_param_constraints (scop_p scop, graphite_dim_t p)
1343 tree parameter = SESE_PARAMS (SCOP_REGION (scop))[p];
1344 tree type = TREE_TYPE (parameter);
1345 tree lb = NULL_TREE;
1346 tree ub = NULL_TREE;
1348 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
1349 lb = lower_bound_in_type (type, type);
1350 else
1351 lb = TYPE_MIN_VALUE (type);
1353 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
1354 ub = upper_bound_in_type (type, type);
1355 else
1356 ub = TYPE_MAX_VALUE (type);
1358 if (lb)
1360 isl_space *space = isl_set_get_space (scop->context);
1361 isl_constraint *c;
1362 mpz_t g;
1363 isl_val *v;
1365 c = isl_inequality_alloc (isl_local_space_from_space (space));
1366 mpz_init (g);
1367 tree_int_to_gmp (lb, g);
1368 v = isl_val_int_from_gmp (the_isl_ctx, g);
1369 v = isl_val_neg (v);
1370 mpz_clear (g);
1371 c = isl_constraint_set_constant_val (c, v);
1372 c = isl_constraint_set_coefficient_si (c, isl_dim_param, p, 1);
1374 scop->context = isl_set_add_constraint (scop->context, c);
1377 if (ub)
1379 isl_space *space = isl_set_get_space (scop->context);
1380 isl_constraint *c;
1381 mpz_t g;
1382 isl_val *v;
1384 c = isl_inequality_alloc (isl_local_space_from_space (space));
1386 mpz_init (g);
1387 tree_int_to_gmp (ub, g);
1388 v = isl_val_int_from_gmp (the_isl_ctx, g);
1389 mpz_clear (g);
1390 c = isl_constraint_set_constant_val (c, v);
1391 c = isl_constraint_set_coefficient_si (c, isl_dim_param, p, -1);
1393 scop->context = isl_set_add_constraint (scop->context, c);
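/* For illustration (not part of the original source): for a parameter
   P of type unsigned char, the two inequalities added above amount to
   intersecting the context with 0 <= P and P <= 255.  */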
1397 /* Build the context of the SCOP. The context usually contains extra
1398 constraints on some parameters, which are added to the iteration
1399 domains. */
1401 static void
1402 build_scop_context (scop_p scop)
1404 graphite_dim_t p, n = scop_nb_params (scop);
1406 for (p = 0; p < n; p++)
1407 add_param_constraints (scop, p);
1410 /* Build the iteration domains of the loops belonging to the current
1411 SCOP that vary during the execution of the current basic block. */
1414 static void
1415 build_scop_iteration_domain (scop_p scop)
1417 struct loop *loop;
1418 sese region = SCOP_REGION (scop);
1419 int i;
1420 poly_bb_p pbb;
1421 int nb_loops = number_of_loops (cfun);
1422 isl_set **doms = XCNEWVEC (isl_set *, nb_loops);
1424 FOR_EACH_VEC_ELT (SESE_LOOP_NEST (region), i, loop)
1425 if (!loop_in_sese_p (loop_outer (loop), region))
1426 build_loop_iteration_domains (scop, loop, 0,
1427 isl_set_copy (scop->context), doms);
1429 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
1431 loop = pbb_loop (pbb);
1433 if (doms[loop->num])
1434 pbb->domain = isl_set_copy (doms[loop->num]);
1435 else
1436 pbb->domain = isl_set_copy (scop->context);
1438 pbb->domain = isl_set_set_tuple_id (pbb->domain,
1439 isl_id_for_pbb (scop, pbb));
1442 for (i = 0; i < nb_loops; i++)
1443 if (doms[i])
1444 isl_set_free (doms[i]);
1446 free (doms);
1449 /* Add a constraint to the accesses map ACC fixing its first output
1450 dimension to the alias set number of data reference DR. */
1454 static isl_map *
1455 pdr_add_alias_set (isl_map *acc, data_reference_p dr)
1457 isl_constraint *c;
1458 int alias_set_num = 0;
1459 base_alias_pair *bap = (base_alias_pair *)(dr->aux);
1461 if (bap && bap->alias_set)
1462 alias_set_num = *(bap->alias_set);
1464 c = isl_equality_alloc
1465 (isl_local_space_from_space (isl_map_get_space (acc)));
1466 c = isl_constraint_set_constant_si (c, -alias_set_num);
1467 c = isl_constraint_set_coefficient_si (c, isl_dim_out, 0, 1);
1469 return isl_map_add_constraint (acc, c);
1472 /* Assign the affine expression INDEX to the output dimension POS of
1473 MAP and return the result. */
1475 static isl_map *
1476 set_index (isl_map *map, int pos, isl_pw_aff *index)
1478 isl_map *index_map;
1479 int len = isl_map_dim (map, isl_dim_out);
1480 isl_id *id;
1482 index_map = isl_map_from_pw_aff (index);
1483 index_map = isl_map_insert_dims (index_map, isl_dim_out, 0, pos);
1484 index_map = isl_map_add_dims (index_map, isl_dim_out, len - pos - 1);
1486 id = isl_map_get_tuple_id (map, isl_dim_out);
1487 index_map = isl_map_set_tuple_id (index_map, isl_dim_out, id);
1488 id = isl_map_get_tuple_id (map, isl_dim_in);
1489 index_map = isl_map_set_tuple_id (index_map, isl_dim_in, id);
1491 return isl_map_intersect (map, index_map);
1494 /* Add to the accesses map ACC the equalities defining the access
1495 functions to memory for data reference DR. PBB is the poly_bb_p
1496 that contains DR. */
1499 static isl_map *
1500 pdr_add_memory_accesses (isl_map *acc, data_reference_p dr, poly_bb_p pbb)
1502 int i, nb_subscripts = DR_NUM_DIMENSIONS (dr);
1503 scop_p scop = PBB_SCOP (pbb);
1505 for (i = 0; i < nb_subscripts; i++)
1507 isl_pw_aff *aff;
1508 tree afn = DR_ACCESS_FN (dr, nb_subscripts - 1 - i);
1510 aff = extract_affine (scop, afn,
1511 isl_space_domain (isl_map_get_space (acc)));
1512 acc = set_index (acc, i + 1, aff);
1515 return acc;
1518 /* Add constraints representing the size of the accessed data to the
1519 EXTENT set of data reference DR. */
1523 static isl_set *
1524 pdr_add_data_dimensions (isl_set *extent, scop_p scop, data_reference_p dr)
1526 tree ref = DR_REF (dr);
1527 int i, nb_subscripts = DR_NUM_DIMENSIONS (dr);
1529 for (i = nb_subscripts - 1; i >= 0; i--, ref = TREE_OPERAND (ref, 0))
1531 tree low, high;
1533 if (TREE_CODE (ref) != ARRAY_REF)
1534 break;
1536 low = array_ref_low_bound (ref);
1537 high = array_ref_up_bound (ref);
1539 /* XXX The PPL code dealt separately with
1540 subscript - low >= 0 and high - subscript >= 0 in case one of
1541 the two bounds isn't known. Do the same here? */
1543 if (tree_fits_shwi_p (low)
1544 && high
1545 && tree_fits_shwi_p (high)
1546 /* 1-element arrays at end of structures may extend over
1547 their declared size. */
1548 && !(array_at_struct_end_p (ref)
1549 && operand_equal_p (low, high, 0)))
1551 isl_id *id;
1552 isl_aff *aff;
1553 isl_set *univ, *lbs, *ubs;
1554 isl_pw_aff *index;
1555 isl_space *space;
1556 isl_set *valid;
1557 isl_pw_aff *lb = extract_affine_int (low, isl_set_get_space (extent));
1558 isl_pw_aff *ub = extract_affine_int (high, isl_set_get_space (extent));
1560 /* high >= 0 */
1561 valid = isl_pw_aff_nonneg_set (isl_pw_aff_copy (ub));
1562 valid = isl_set_project_out (valid, isl_dim_set, 0,
1563 isl_set_dim (valid, isl_dim_set));
1564 scop->context = isl_set_intersect (scop->context, valid);
1566 space = isl_set_get_space (extent);
1567 aff = isl_aff_zero_on_domain (isl_local_space_from_space (space));
1568 aff = isl_aff_add_coefficient_si (aff, isl_dim_in, i + 1, 1);
1569 univ = isl_set_universe (isl_space_domain (isl_aff_get_space (aff)));
1570 index = isl_pw_aff_alloc (univ, aff);
1572 id = isl_set_get_tuple_id (extent);
1573 lb = isl_pw_aff_set_tuple_id (lb, isl_dim_in, isl_id_copy (id));
1574 ub = isl_pw_aff_set_tuple_id (ub, isl_dim_in, id);
1576 /* low <= sub_i <= high */
1577 lbs = isl_pw_aff_ge_set (isl_pw_aff_copy (index), lb);
1578 ubs = isl_pw_aff_le_set (index, ub);
1579 extent = isl_set_intersect (extent, lbs);
1580 extent = isl_set_intersect (extent, ubs);
1584 return extent;
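/* For illustration (not part of the original source): for an access
   A[i][j] to an array declared as "int A[100][50]", the extent set
   built above constrains the subscript dimensions to
   0 <= s_1 <= 99 and 0 <= s_2 <= 49, with dimension 0 reserved for
   the alias set number.  */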
1587 /* Build data accesses for DR in PBB. */
1589 static void
1590 build_poly_dr (data_reference_p dr, poly_bb_p pbb)
1592 int dr_base_object_set;
1593 isl_map *acc;
1594 isl_set *extent;
1595 scop_p scop = PBB_SCOP (pbb);
1598 isl_space *dc = isl_set_get_space (pbb->domain);
1599 int nb_out = 1 + DR_NUM_DIMENSIONS (dr);
1600 isl_space *space = isl_space_add_dims (isl_space_from_domain (dc),
1601 isl_dim_out, nb_out);
1603 acc = isl_map_universe (space);
1604 acc = isl_map_set_tuple_id (acc, isl_dim_out, isl_id_for_dr (scop, dr));
1607 acc = pdr_add_alias_set (acc, dr);
1608 acc = pdr_add_memory_accesses (acc, dr, pbb);
1611 isl_id *id = isl_id_for_dr (scop, dr);
1612 int nb = 1 + DR_NUM_DIMENSIONS (dr);
1613 isl_space *space = isl_space_set_alloc (scop->ctx, 0, nb);
1614 int alias_set_num = 0;
1615 base_alias_pair *bap = (base_alias_pair *)(dr->aux);
1617 if (bap && bap->alias_set)
1618 alias_set_num = *(bap->alias_set);
1620 space = isl_space_set_tuple_id (space, isl_dim_set, id);
1621 extent = isl_set_nat_universe (space);
1622 extent = isl_set_fix_si (extent, isl_dim_set, 0, alias_set_num);
1623 extent = pdr_add_data_dimensions (extent, scop, dr);
1626 gcc_assert (dr->aux);
1627 dr_base_object_set = ((base_alias_pair *)(dr->aux))->base_obj_set;
1629 new_poly_dr (pbb, dr_base_object_set,
1630 DR_IS_READ (dr) ? PDR_READ : PDR_WRITE,
1631 dr, DR_NUM_DIMENSIONS (dr), acc, extent);
1634 /* Write to FILE the alias graph of data references in DIMACS format. */
1636 static inline bool
1637 write_alias_graph_to_ascii_dimacs (FILE *file, char *comment,
1638 vec<data_reference_p> drs)
1640 int num_vertex = drs.length ();
1641 int edge_num = 0;
1642 data_reference_p dr1, dr2;
1643 int i, j;
1645 if (num_vertex == 0)
1646 return true;
1648 FOR_EACH_VEC_ELT (drs, i, dr1)
1649 for (j = i + 1; drs.iterate (j, &dr2); j++)
1650 if (dr_may_alias_p (dr1, dr2, true))
1651 edge_num++;
1653 fprintf (file, "$\n");
1655 if (comment)
1656 fprintf (file, "c %s\n", comment);
1658 fprintf (file, "p edge %d %d\n", num_vertex, edge_num);
1660 FOR_EACH_VEC_ELT (drs, i, dr1)
1661 for (j = i + 1; drs.iterate (j, &dr2); j++)
1662 if (dr_may_alias_p (dr1, dr2, true))
1663 fprintf (file, "e %d %d\n", i + 1, j + 1);
1665 return true;
1668 /* Write to FILE the alias graph of data references in DOT format. */
1670 static inline bool
1671 write_alias_graph_to_ascii_dot (FILE *file, char *comment,
1672 vec<data_reference_p> drs)
1674 int num_vertex = drs.length ();
1675 data_reference_p dr1, dr2;
1676 int i, j;
1678 if (num_vertex == 0)
1679 return true;
1681 fprintf (file, "$\n");
1683 if (comment)
1684 fprintf (file, "c %s\n", comment);
1686 /* First print all the vertices. */
1687 FOR_EACH_VEC_ELT (drs, i, dr1)
1688 fprintf (file, "n%d;\n", i);
1690 FOR_EACH_VEC_ELT (drs, i, dr1)
1691 for (j = i + 1; drs.iterate (j, &dr2); j++)
1692 if (dr_may_alias_p (dr1, dr2, true))
1693 fprintf (file, "n%d n%d\n", i, j);
1695 return true;
1698 /* Write to FILE the alias graph of data references in ECC format. */
1700 static inline bool
1701 write_alias_graph_to_ascii_ecc (FILE *file, char *comment,
1702 vec<data_reference_p> drs)
1704 int num_vertex = drs.length ();
1705 data_reference_p dr1, dr2;
1706 int i, j;
1708 if (num_vertex == 0)
1709 return true;
1711 fprintf (file, "$\n");
1713 if (comment)
1714 fprintf (file, "c %s\n", comment);
1716 FOR_EACH_VEC_ELT (drs, i, dr1)
1717 for (j = i + 1; drs.iterate (j, &dr2); j++)
1718 if (dr_may_alias_p (dr1, dr2, true))
1719 fprintf (file, "%d %d\n", i, j);
1721 return true;
1724 /* Check if DR1 and DR2 are in the same object set. */
1726 static bool
1727 dr_same_base_object_p (const struct data_reference *dr1,
1728 const struct data_reference *dr2)
1730 return operand_equal_p (DR_BASE_OBJECT (dr1), DR_BASE_OBJECT (dr2), 0);
1733 /* Uses DFS component number as representative of alias-sets. Also tests for
1734 optimality by verifying if every connected component is a clique. Returns
1735 true (1) if the above test is true, and false (0) otherwise. */
1737 static int
1738 build_alias_set_optimal_p (vec<data_reference_p> drs)
1740 int num_vertices = drs.length ();
1741 struct graph *g = new_graph (num_vertices);
1742 data_reference_p dr1, dr2;
1743 int i, j;
1744 int num_connected_components;
1745 int v_indx1, v_indx2, num_vertices_in_component;
1746 int *all_vertices;
1747 int *vertices;
1748 struct graph_edge *e;
1749 int this_component_is_clique;
1750 int all_components_are_cliques = 1;
1752 FOR_EACH_VEC_ELT (drs, i, dr1)
1753 for (j = i+1; drs.iterate (j, &dr2); j++)
1754 if (dr_may_alias_p (dr1, dr2, true))
1756 add_edge (g, i, j);
1757 add_edge (g, j, i);
1760 all_vertices = XNEWVEC (int, num_vertices);
1761 vertices = XNEWVEC (int, num_vertices);
1762 for (i = 0; i < num_vertices; i++)
1763 all_vertices[i] = i;
1765 num_connected_components = graphds_dfs (g, all_vertices, num_vertices,
1766 NULL, true, NULL);
1767 for (i = 0; i < g->n_vertices; i++)
1769 data_reference_p dr = drs[i];
1770 base_alias_pair *bap;
1772 gcc_assert (dr->aux);
1773 bap = (base_alias_pair *)(dr->aux);
1775 bap->alias_set = XNEW (int);
1776 *(bap->alias_set) = g->vertices[i].component + 1;
1779 /* Verify if the DFS numbering results in optimal solution. */
1780 for (i = 0; i < num_connected_components; i++)
1782 num_vertices_in_component = 0;
1783 /* Get all vertices whose DFS component number is the same as i. */
1784 for (j = 0; j < num_vertices; j++)
1785 if (g->vertices[j].component == i)
1786 vertices[num_vertices_in_component++] = j;
1788 /* Now test if the vertices in 'vertices' form a clique, by testing
1789 for edges among each pair. */
1790 this_component_is_clique = 1;
1791 for (v_indx1 = 0; v_indx1 < num_vertices_in_component; v_indx1++)
1793 for (v_indx2 = v_indx1+1; v_indx2 < num_vertices_in_component; v_indx2++)
1795 /* Check if the two vertices are connected by iterating
1796 through all the edges that have one of them as source. */
1797 e = g->vertices[vertices[v_indx2]].pred;
1798 while (e)
1800 if (e->src == vertices[v_indx1])
1801 break;
1802 e = e->pred_next;
1804 if (!e)
1806 this_component_is_clique = 0;
1807 break;
1810 if (!this_component_is_clique)
1811 all_components_are_cliques = 0;
1815 free (all_vertices);
1816 free (vertices);
1817 free_graph (g);
1818 return all_components_are_cliques;
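/* For illustration (not part of the original source): if DR1 may alias
   DR2 and DR2 may alias DR3, but DR1 and DR3 are provably disjoint,
   the three references end up in a single DFS component that is not a
   clique, so the function returns 0.  */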
1821 /* Group each data reference in DRS with its base object set num. */
1823 static void
1824 build_base_obj_set_for_drs (vec<data_reference_p> drs)
1826 int num_vertex = drs.length ();
1827 struct graph *g = new_graph (num_vertex);
1828 data_reference_p dr1, dr2;
1829 int i, j;
1830 int *queue;
1832 FOR_EACH_VEC_ELT (drs, i, dr1)
1833 for (j = i + 1; drs.iterate (j, &dr2); j++)
1834 if (dr_same_base_object_p (dr1, dr2))
1836 add_edge (g, i, j);
1837 add_edge (g, j, i);
1840 queue = XNEWVEC (int, num_vertex);
1841 for (i = 0; i < num_vertex; i++)
1842 queue[i] = i;
1844 graphds_dfs (g, queue, num_vertex, NULL, true, NULL);
1846 for (i = 0; i < g->n_vertices; i++)
1848 data_reference_p dr = drs[i];
1849 base_alias_pair *bap;
1851 gcc_assert (dr->aux);
1852 bap = (base_alias_pair *)(dr->aux);
1854 bap->base_obj_set = g->vertices[i].component + 1;
1857 free (queue);
1858 free_graph (g);
1861 /* Build the data references for PBB. */
1863 static void
1864 build_pbb_drs (poly_bb_p pbb)
1866 int j;
1867 data_reference_p dr;
1868 vec<data_reference_p> gbb_drs = GBB_DATA_REFS (PBB_BLACK_BOX (pbb));
1870 FOR_EACH_VEC_ELT (gbb_drs, j, dr)
1871 build_poly_dr (dr, pbb);
1874 /* Dump to file the alias graphs for the data references in DRS. */
1876 static void
1877 dump_alias_graphs (vec<data_reference_p> drs)
1879 char comment[100];
1880 FILE *file_dimacs, *file_ecc, *file_dot;
1882 file_dimacs = fopen ("/tmp/dr_alias_graph_dimacs", "ab");
1883 if (file_dimacs)
1885 snprintf (comment, sizeof (comment), "%s %s", main_input_filename,
1886 current_function_name ());
1887 write_alias_graph_to_ascii_dimacs (file_dimacs, comment, drs);
1888 fclose (file_dimacs);
1891 file_ecc = fopen ("/tmp/dr_alias_graph_ecc", "ab");
1892 if (file_ecc)
1894 snprintf (comment, sizeof (comment), "%s %s", main_input_filename,
1895 current_function_name ());
1896 write_alias_graph_to_ascii_ecc (file_ecc, comment, drs);
1897 fclose (file_ecc);
1900 file_dot = fopen ("/tmp/dr_alias_graph_dot", "ab");
1901 if (file_dot)
1903 snprintf (comment, sizeof (comment), "%s %s", main_input_filename,
1904 current_function_name ());
1905 write_alias_graph_to_ascii_dot (file_dot, comment, drs);
1906 fclose (file_dot);
1910 /* Build data references in SCOP. */
1912 static void
1913 build_scop_drs (scop_p scop)
1915 int i, j;
1916 poly_bb_p pbb;
1917 data_reference_p dr;
1918 auto_vec<data_reference_p, 3> drs;
1920 /* Remove all the PBBs that do not have data references: these basic
1921 blocks are not handled in the polyhedral representation. */
1922 for (i = 0; SCOP_BBS (scop).iterate (i, &pbb); i++)
1923 if (GBB_DATA_REFS (PBB_BLACK_BOX (pbb)).is_empty ())
1925 free_gimple_bb (PBB_BLACK_BOX (pbb));
1926 free_poly_bb (pbb);
1927 SCOP_BBS (scop).ordered_remove (i);
1928 i--;
1931 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
1932 for (j = 0; GBB_DATA_REFS (PBB_BLACK_BOX (pbb)).iterate (j, &dr); j++)
1933 drs.safe_push (dr);
1935 FOR_EACH_VEC_ELT (drs, i, dr)
1936 dr->aux = XNEW (base_alias_pair);
1938 if (!build_alias_set_optimal_p (drs))
1940 /* TODO: Add support when building alias set is not optimal. */
1944 build_base_obj_set_for_drs (drs);
1946 /* When debugging, enable the following code. This cannot be used
1947 in production compilers. */
1948 if (0)
1949 dump_alias_graphs (drs);
1951 drs.release ();
1953 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
1954 build_pbb_drs (pbb);
1957 /* Return a gsi at the position of the phi node STMT. */
1959 static gphi_iterator
1960 gsi_for_phi_node (gphi *stmt)
1962 gphi_iterator psi;
1963 basic_block bb = gimple_bb (stmt);
1965 for (psi = gsi_start_phis (bb); !gsi_end_p (psi); gsi_next (&psi))
1966 if (stmt == psi.phi ())
1967 return psi;
1969 gcc_unreachable ();
1970 return psi;
1973 /* Analyze all the data references of STMTS and add them to the
1974 GBB_DATA_REFS vector of BB. */
1976 static void
1977 analyze_drs_in_stmts (scop_p scop, basic_block bb, vec<gimple> stmts)
1979 loop_p nest;
1980 gimple_bb_p gbb;
1981 gimple stmt;
1982 int i;
1983 sese region = SCOP_REGION (scop);
1985 if (!bb_in_sese_p (bb, region))
1986 return;
1988 nest = outermost_loop_in_sese_1 (region, bb);
1989 gbb = gbb_from_bb (bb);
1991 FOR_EACH_VEC_ELT (stmts, i, stmt)
1993 loop_p loop;
1995 if (is_gimple_debug (stmt))
1996 continue;
1998 loop = loop_containing_stmt (stmt);
1999 if (!loop_in_sese_p (loop, region))
2000 loop = nest;
2002 graphite_find_data_references_in_stmt (nest, loop, stmt,
2003 &GBB_DATA_REFS (gbb));
2007 /* Append STMT to the STMTS sequence, insert the sequence before
2008 INSERT_GSI, and call analyze_drs_in_stmts on the inserted
2009 statements. */
2011 static void
2012 insert_stmts (scop_p scop, gimple stmt, gimple_seq stmts,
2013 gimple_stmt_iterator insert_gsi)
2015 gimple_stmt_iterator gsi;
2016 auto_vec<gimple, 3> x;
2018 gimple_seq_add_stmt (&stmts, stmt);
2019 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
2020 x.safe_push (gsi_stmt (gsi));
2022 gsi_insert_seq_before (&insert_gsi, stmts, GSI_SAME_STMT);
2023 analyze_drs_in_stmts (scop, gsi_bb (insert_gsi), x);
2026 /* Insert the assignment "RES := EXPR" just after AFTER_STMT. */
2028 static void
2029 insert_out_of_ssa_copy (scop_p scop, tree res, tree expr, gimple after_stmt)
2031 gimple_seq stmts;
2032 gimple_stmt_iterator gsi;
2033 tree var = force_gimple_operand (expr, &stmts, true, NULL_TREE);
2034 gassign *stmt = gimple_build_assign (unshare_expr (res), var);
2035 auto_vec<gimple, 3> x;
2037 gimple_seq_add_stmt (&stmts, stmt);
2038 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
2039 x.safe_push (gsi_stmt (gsi));
2041 if (gimple_code (after_stmt) == GIMPLE_PHI)
2043 gsi = gsi_after_labels (gimple_bb (after_stmt));
2044 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
2046 else
2048 gsi = gsi_for_stmt (after_stmt);
2049 gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
2052 analyze_drs_in_stmts (scop, gimple_bb (after_stmt), x);
2055 /* Creates a poly_bb_p for basic_block BB from the existing PBB. */
2057 static void
2058 new_pbb_from_pbb (scop_p scop, poly_bb_p pbb, basic_block bb)
2060 vec<data_reference_p> drs;
2061 drs.create (3);
2062 gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
2063 gimple_bb_p gbb1 = new_gimple_bb (bb, drs);
2064 poly_bb_p pbb1 = new_poly_bb (scop, gbb1);
2065 int index, n = SCOP_BBS (scop).length ();
2067 /* The INDEX of PBB in SCOP_BBS. */
2068 for (index = 0; index < n; index++)
2069 if (SCOP_BBS (scop)[index] == pbb)
2070 break;
2072 pbb1->domain = isl_set_copy (pbb->domain);
2073 pbb1->domain = isl_set_set_tuple_id (pbb1->domain,
2074 isl_id_for_pbb (scop, pbb1));
2076 GBB_PBB (gbb1) = pbb1;
2077 GBB_CONDITIONS (gbb1) = GBB_CONDITIONS (gbb).copy ();
2078 GBB_CONDITION_CASES (gbb1) = GBB_CONDITION_CASES (gbb).copy ();
2079 SCOP_BBS (scop).safe_insert (index + 1, pbb1);
2082 /* Insert on edge E the assignment "RES := EXPR". */
2084 static void
2085 insert_out_of_ssa_copy_on_edge (scop_p scop, edge e, tree res, tree expr)
2087 gimple_stmt_iterator gsi;
2088 gimple_seq stmts = NULL;
2089 tree var = force_gimple_operand (expr, &stmts, true, NULL_TREE);
2090 gimple stmt = gimple_build_assign (unshare_expr (res), var);
2091 basic_block bb;
2092 auto_vec<gimple, 3> x;
2094 gimple_seq_add_stmt (&stmts, stmt);
2095 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
2096 x.safe_push (gsi_stmt (gsi));
2098 gsi_insert_seq_on_edge (e, stmts);
2099 gsi_commit_edge_inserts ();
2100 bb = gimple_bb (stmt);
2102 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
2103 return;
2105 if (!gbb_from_bb (bb))
2106 new_pbb_from_pbb (scop, pbb_from_bb (e->src), bb);
2108 analyze_drs_in_stmts (scop, bb, x);
2111 /* Creates a zero dimension array of the same type as VAR. */
2113 static tree
2114 create_zero_dim_array (tree var, const char *base_name)
2116 tree index_type = build_index_type (integer_zero_node);
2117 tree elt_type = TREE_TYPE (var);
2118 tree array_type = build_array_type (elt_type, index_type);
2119 tree base = create_tmp_var (array_type, base_name);
2121 return build4 (ARRAY_REF, elt_type, base, integer_zero_node, NULL_TREE,
2122 NULL_TREE);
2125 /* Returns true when PHI is a loop close phi node. */
2127 static bool
2128 scalar_close_phi_node_p (gimple phi)
2130 if (gimple_code (phi) != GIMPLE_PHI
2131 || virtual_operand_p (gimple_phi_result (phi)))
2132 return false;
2134 /* Note that loop close phi nodes should have a single argument
2135 because we translated the representation into a canonical form
2136 before Graphite: see canonicalize_loop_closed_ssa_form. */
2137 return (gimple_phi_num_args (phi) == 1);
2140 /* For a definition DEF in REGION, propagates the expression EXPR to
2141 all the uses of DEF outside REGION. */
2143 static void
2144 propagate_expr_outside_region (tree def, tree expr, sese region)
2146 imm_use_iterator imm_iter;
2147 gimple use_stmt;
2148 gimple_seq stmts;
2149 bool replaced_once = false;
2151 gcc_assert (TREE_CODE (def) == SSA_NAME);
2153 expr = force_gimple_operand (unshare_expr (expr), &stmts, true,
2154 NULL_TREE);
2156 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
2157 if (!is_gimple_debug (use_stmt)
2158 && !bb_in_sese_p (gimple_bb (use_stmt), region))
2160 ssa_op_iter iter;
2161 use_operand_p use_p;
2163 FOR_EACH_PHI_OR_STMT_USE (use_p, use_stmt, iter, SSA_OP_ALL_USES)
2164 if (operand_equal_p (def, USE_FROM_PTR (use_p), 0)
2165 && (replaced_once = true))
2166 replace_exp (use_p, expr);
2168 update_stmt (use_stmt);
2171 if (replaced_once)
2173 gsi_insert_seq_on_edge (SESE_ENTRY (region), stmts);
2174 gsi_commit_edge_inserts ();
2178 /* Rewrite out of SSA the loop close phi node at PSI by creating a zero
2179 dimension array for it. */
2181 static void
2182 rewrite_close_phi_out_of_ssa (scop_p scop, gimple_stmt_iterator *psi)
2184 sese region = SCOP_REGION (scop);
2185 gimple phi = gsi_stmt (*psi);
2186 tree res = gimple_phi_result (phi);
2187 basic_block bb = gimple_bb (phi);
2188 gimple_stmt_iterator gsi = gsi_after_labels (bb);
2189 tree arg = gimple_phi_arg_def (phi, 0);
2190 gimple stmt;
2192 /* Note that loop close phi nodes should have a single argument
2193 because we translated the representation into a canonical form
2194 before Graphite: see canonicalize_loop_closed_ssa_form. */
2195 gcc_assert (gimple_phi_num_args (phi) == 1);
2197 /* The phi node can be a non close phi node when its argument is
2198 invariant or a default definition. */
2199 if (is_gimple_min_invariant (arg)
2200 || SSA_NAME_IS_DEFAULT_DEF (arg))
2202 propagate_expr_outside_region (res, arg, region);
2203 gsi_next (psi);
2204 return;
2207 else if (gimple_bb (SSA_NAME_DEF_STMT (arg))->loop_father == bb->loop_father)
2209 propagate_expr_outside_region (res, arg, region);
2210 stmt = gimple_build_assign (res, arg);
2211 remove_phi_node (psi, false);
2212 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
2213 return;
2216 /* If res is scev analyzable and is not a scalar value, it is safe
2217 to ignore the close phi node: it will be code generated in the
2218 out of Graphite pass. */
2219 else if (scev_analyzable_p (res, region))
2221 loop_p loop = loop_containing_stmt (SSA_NAME_DEF_STMT (res));
2222 tree scev;
2224 if (!loop_in_sese_p (loop, region))
2226 loop = loop_containing_stmt (SSA_NAME_DEF_STMT (arg));
2227 scev = scalar_evolution_in_region (region, loop, arg);
2228 scev = compute_overall_effect_of_inner_loop (loop, scev);
2230 else
2231 scev = scalar_evolution_in_region (region, loop, res);
2233 if (tree_does_not_contain_chrecs (scev))
2234 propagate_expr_outside_region (res, scev, region);
2236 gsi_next (psi);
2237 return;
2239 else
2241 tree zero_dim_array = create_zero_dim_array (res, "Close_Phi");
2243 stmt = gimple_build_assign (res, unshare_expr (zero_dim_array));
2245 if (TREE_CODE (arg) == SSA_NAME)
2246 insert_out_of_ssa_copy (scop, zero_dim_array, arg,
2247 SSA_NAME_DEF_STMT (arg));
2248 else
2249 insert_out_of_ssa_copy_on_edge (scop, single_pred_edge (bb),
2250 zero_dim_array, arg);
2253 remove_phi_node (psi, false);
2254 SSA_NAME_DEF_STMT (res) = stmt;
2256 insert_stmts (scop, stmt, NULL, gsi_after_labels (bb));
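/* A sketch of the general case handled just above, with hypothetical SSA
   names: the close phi

     res_1 = PHI <arg_2(exit-edge)>

   is replaced by accesses to the "Close_Phi" array:

     Close_Phi[0] = arg_2;    // after the definition of arg_2, or on the
                              // single predecessor edge for a non-SSA arg
     res_1 = Close_Phi[0];    // at the start of the phi's block  */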
2259 /* Rewrite out of SSA the reduction phi node at PSI by creating a zero
2260 dimension array for it. */
2262 static void
2263 rewrite_phi_out_of_ssa (scop_p scop, gphi_iterator *psi)
2265 size_t i;
2266 gphi *phi = psi->phi ();
2267 basic_block bb = gimple_bb (phi);
2268 tree res = gimple_phi_result (phi);
2269 tree zero_dim_array = create_zero_dim_array (res, "phi_out_of_ssa");
2270 gimple stmt;
2272 for (i = 0; i < gimple_phi_num_args (phi); i++)
2274 tree arg = gimple_phi_arg_def (phi, i);
2275 edge e = gimple_phi_arg_edge (phi, i);
2277 /* Avoid the insertion of code in the loop latch to please the
2278 pattern matching of the vectorizer. */
2279 if (TREE_CODE (arg) == SSA_NAME
2280 && !SSA_NAME_IS_DEFAULT_DEF (arg)
2281 && e->src == bb->loop_father->latch)
2282 insert_out_of_ssa_copy (scop, zero_dim_array, arg,
2283 SSA_NAME_DEF_STMT (arg));
2284 else
2285 insert_out_of_ssa_copy_on_edge (scop, e, zero_dim_array, arg);
2288 stmt = gimple_build_assign (res, unshare_expr (zero_dim_array));
2289 remove_phi_node (psi, false);
2290 insert_stmts (scop, stmt, NULL, gsi_after_labels (bb));
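/* A sketch of the rewrite above, with hypothetical SSA names: the phi

     res_1 = PHI <a_2(preheader), b_3(latch)>

   becomes a set of accesses to the "phi_out_of_ssa" array:

     phi_out_of_ssa[0] = a_2;    // on the preheader edge
     phi_out_of_ssa[0] = b_3;    // after the definition of b_3, to avoid
                                 // inserting code on the latch edge
     res_1 = phi_out_of_ssa[0];  // at the start of the phi's block  */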
2293 /* Rewrite the degenerate phi node at position PSI from the degenerate
2294 form "x = phi (y, y, ..., y)" to "x = y". */
2296 static void
2297 rewrite_degenerate_phi (gphi_iterator *psi)
2299 tree rhs;
2300 gimple stmt;
2301 gimple_stmt_iterator gsi;
2302 gphi *phi = psi->phi ();
2303 tree res = gimple_phi_result (phi);
2304 basic_block bb;
2306 bb = gimple_bb (phi);
2307 rhs = degenerate_phi_result (phi);
2308 gcc_assert (rhs);
2310 stmt = gimple_build_assign (res, rhs);
2311 remove_phi_node (psi, false);
2313 gsi = gsi_after_labels (bb);
2314 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
2317 /* Rewrite out of SSA all the reduction phi nodes of SCOP. */
2319 static void
2320 rewrite_reductions_out_of_ssa (scop_p scop)
2322 basic_block bb;
2323 gphi_iterator psi;
2324 sese region = SCOP_REGION (scop);
2326 FOR_EACH_BB_FN (bb, cfun)
2327 if (bb_in_sese_p (bb, region))
2328 for (psi = gsi_start_phis (bb); !gsi_end_p (psi);)
2330 gphi *phi = psi.phi ();
2332 if (virtual_operand_p (gimple_phi_result (phi)))
2334 gsi_next (&psi);
2335 continue;
2338 if (gimple_phi_num_args (phi) > 1
2339 && degenerate_phi_result (phi))
2340 rewrite_degenerate_phi (&psi);
2342 else if (scalar_close_phi_node_p (phi))
2343 rewrite_close_phi_out_of_ssa (scop, &psi);
2345 else if (reduction_phi_p (region, &psi))
2346 rewrite_phi_out_of_ssa (scop, &psi);
2349 update_ssa (TODO_update_ssa);
2350 #ifdef ENABLE_CHECKING
2351 verify_loop_closed_ssa (true);
2352 #endif
2355 /* Rewrite the scalar dependence of DEF used in USE_STMT with a memory
2356 read from ZERO_DIM_ARRAY. */
2358 static void
2359 rewrite_cross_bb_scalar_dependence (scop_p scop, tree zero_dim_array,
2360 tree def, gimple use_stmt)
2362 gimple name_stmt;
2363 tree name;
2364 ssa_op_iter iter;
2365 use_operand_p use_p;
2367 gcc_assert (gimple_code (use_stmt) != GIMPLE_PHI);
2369 name = copy_ssa_name (def);
2370 name_stmt = gimple_build_assign (name, zero_dim_array);
2372 gimple_assign_set_lhs (name_stmt, name);
2373 insert_stmts (scop, name_stmt, NULL, gsi_for_stmt (use_stmt));
2375 FOR_EACH_SSA_USE_OPERAND (use_p, use_stmt, iter, SSA_OP_ALL_USES)
2376 if (operand_equal_p (def, USE_FROM_PTR (use_p), 0))
2377 replace_exp (use_p, name);
2379 update_stmt (use_stmt);
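/* A sketch of the dependence rewrite above, with hypothetical names: a use
   of def_1 in USE_STMT located in another basic block becomes

     name_2 = Cross_BB_scalar_dependence[0];
     ... name_2 ...    // def_1 replaced by name_2 in USE_STMT

   where the zero dimension array is the one created by the caller.  */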
2382 /* For every definition DEF in the SCOP that is used outside the scop,
2383 insert a closing-scop definition in the basic block just after this
2384 SCOP. */
2386 static void
2387 handle_scalar_deps_crossing_scop_limits (scop_p scop, tree def, gimple stmt)
2389 tree var = create_tmp_reg (TREE_TYPE (def));
2390 tree new_name = make_ssa_name (var, stmt);
2391 bool needs_copy = false;
2392 use_operand_p use_p;
2393 imm_use_iterator imm_iter;
2394 gimple use_stmt;
2395 sese region = SCOP_REGION (scop);
2397 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
2399 if (!bb_in_sese_p (gimple_bb (use_stmt), region))
2401 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
2403 SET_USE (use_p, new_name);
2405 update_stmt (use_stmt);
2406 needs_copy = true;
2410 /* Insert in the empty BB just after the scop a use of DEF such
2411 that the rewrite of cross_bb_scalar_dependences won't insert
2412 arrays everywhere else. */
2413 if (needs_copy)
2415 gimple assign = gimple_build_assign (new_name, def);
2416 gimple_stmt_iterator psi = gsi_after_labels (SESE_EXIT (region)->dest);
2418 update_stmt (assign);
2419 gsi_insert_before (&psi, assign, GSI_SAME_STMT);
2423 /* Rewrite the scalar dependences crossing the boundary of the BB
2424 containing STMT with an array. Return true when something has been
2425 changed. */
2427 static bool
2428 rewrite_cross_bb_scalar_deps (scop_p scop, gimple_stmt_iterator *gsi)
2430 sese region = SCOP_REGION (scop);
2431 gimple stmt = gsi_stmt (*gsi);
2432 imm_use_iterator imm_iter;
2433 tree def;
2434 basic_block def_bb;
2435 tree zero_dim_array = NULL_TREE;
2436 gimple use_stmt;
2437 bool res = false;
2439 switch (gimple_code (stmt))
2441 case GIMPLE_ASSIGN:
2442 def = gimple_assign_lhs (stmt);
2443 break;
2445 case GIMPLE_CALL:
2446 def = gimple_call_lhs (stmt);
2447 break;
2449 default:
2450 return false;
2453 if (!def
2454 || !is_gimple_reg (def))
2455 return false;
2457 if (scev_analyzable_p (def, region))
2459 loop_p loop = loop_containing_stmt (SSA_NAME_DEF_STMT (def));
2460 tree scev = scalar_evolution_in_region (region, loop, def);
2462 if (tree_contains_chrecs (scev, NULL))
2463 return false;
2465 propagate_expr_outside_region (def, scev, region);
2466 return true;
2469 def_bb = gimple_bb (stmt);
2471 handle_scalar_deps_crossing_scop_limits (scop, def, stmt);
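/* Note that the "&& (res = true)" assignments in the conditions of the two
   loops below record, as a side effect, that at least one scalar dependence
   has been rewritten.  */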
2473 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
2474 if (gimple_code (use_stmt) == GIMPLE_PHI
2475 && (res = true))
2477 gphi_iterator psi = gsi_start_phis (gimple_bb (use_stmt));
2479 if (scalar_close_phi_node_p (gsi_stmt (psi)))
2480 rewrite_close_phi_out_of_ssa (scop, &psi);
2481 else
2482 rewrite_phi_out_of_ssa (scop, &psi);
2485 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
2486 if (gimple_code (use_stmt) != GIMPLE_PHI
2487 && def_bb != gimple_bb (use_stmt)
2488 && !is_gimple_debug (use_stmt)
2489 && (res = true))
2491 if (!zero_dim_array)
2493 zero_dim_array = create_zero_dim_array
2494 (def, "Cross_BB_scalar_dependence");
2495 insert_out_of_ssa_copy (scop, zero_dim_array, def,
2496 SSA_NAME_DEF_STMT (def));
2497 gsi_next (gsi);
2500 rewrite_cross_bb_scalar_dependence (scop, unshare_expr (zero_dim_array),
2501 def, use_stmt);
2504 return res;
2507 /* Rewrite out of SSA the cross basic block scalar dependences of SCOP. */
2509 static void
2510 rewrite_cross_bb_scalar_deps_out_of_ssa (scop_p scop)
2512 basic_block bb;
2513 gimple_stmt_iterator psi;
2514 sese region = SCOP_REGION (scop);
2515 bool changed = false;
2517 /* Create an extra empty BB after the scop. */
2518 split_edge (SESE_EXIT (region));
2520 FOR_EACH_BB_FN (bb, cfun)
2521 if (bb_in_sese_p (bb, region))
2522 for (psi = gsi_start_bb (bb); !gsi_end_p (psi); gsi_next (&psi))
2523 changed |= rewrite_cross_bb_scalar_deps (scop, &psi);
2525 if (changed)
2527 scev_reset_htab ();
2528 update_ssa (TODO_update_ssa);
2529 #ifdef ENABLE_CHECKING
2530 verify_loop_closed_ssa (true);
2531 #endif
2535 /* Returns the number of pbbs that are in loops contained in SCOP. */
2537 static int
2538 nb_pbbs_in_loops (scop_p scop)
2540 int i;
2541 poly_bb_p pbb;
2542 int res = 0;
2544 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
2545 if (loop_in_sese_p (gbb_loop (PBB_BLACK_BOX (pbb)), SCOP_REGION (scop)))
2546 res++;
2548 return res;
2551 /* Return the number of data references in BB that write in
2552 memory. */
2554 static int
2555 nb_data_writes_in_bb (basic_block bb)
2557 int res = 0;
2558 gimple_stmt_iterator gsi;
2560 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2561 if (gimple_vdef (gsi_stmt (gsi)))
2562 res++;
2564 return res;
2567 /* Splits at STMT the basic block BB represented as PBB in the
2568 polyhedral form. */
2570 static edge
2571 split_pbb (scop_p scop, poly_bb_p pbb, basic_block bb, gimple stmt)
2573 edge e1 = split_block (bb, stmt);
2574 new_pbb_from_pbb (scop, pbb, e1->dest);
2575 return e1;
2578 /* Splits STMT out of its current BB. This is done for reduction
2579 statements for which we want to ignore data dependences. */
2581 static basic_block
2582 split_reduction_stmt (scop_p scop, gimple stmt)
2584 basic_block bb = gimple_bb (stmt);
2585 poly_bb_p pbb = pbb_from_bb (bb);
2586 gimple_bb_p gbb = gbb_from_bb (bb);
2587 edge e1;
2588 int i;
2589 data_reference_p dr;
2591 /* Do not split basic blocks with no writes to memory: the reduction
2592 will be the only write to memory. */
2593 if (nb_data_writes_in_bb (bb) == 0
2594 /* Or if we have already marked BB as a reduction. */
2595 || PBB_IS_REDUCTION (pbb_from_bb (bb)))
2596 return bb;
2598 e1 = split_pbb (scop, pbb, bb, stmt);
2600 /* Split once more only when the reduction stmt is not the only one
2601 left in the original BB. */
2602 if (!gsi_one_before_end_p (gsi_start_nondebug_bb (bb)))
2604 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2605 gsi_prev (&gsi);
2606 e1 = split_pbb (scop, pbb, bb, gsi_stmt (gsi));
2609 /* A part of the data references will end in a different basic block
2610 after the split: move the DRs from the original GBB to the newly
2611 created GBB1. */
2612 FOR_EACH_VEC_ELT (GBB_DATA_REFS (gbb), i, dr)
2614 basic_block bb1 = gimple_bb (DR_STMT (dr));
2616 if (bb1 != bb)
2618 gimple_bb_p gbb1 = gbb_from_bb (bb1);
2619 GBB_DATA_REFS (gbb1).safe_push (dr);
2620 GBB_DATA_REFS (gbb).ordered_remove (i);
2621 i--;
2625 return e1->dest;
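/* The effect of the splits above, sketched on a hypothetical block:

     a[i_1] = ...;           // writes before the reduction stay in BB
     sum_3 = sum_1 + x_5;    // the reduction statement ends up in its own BB
     b[i_1] = ...;           // statements after it go to a third block

   isolating the reduction statement so that its data dependences can be
   ignored once the containing poly_bb is marked as a reduction.  */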
2628 /* Return true when STMT is a reduction operation. */
2630 static inline bool
2631 is_reduction_operation_p (gimple stmt)
2633 enum tree_code code;
2635 gcc_assert (is_gimple_assign (stmt));
2636 code = gimple_assign_rhs_code (stmt);
2638 if (!commutative_tree_code (code)
2639 || !associative_tree_code (code))
2640 return false;
2642 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
2644 if (FLOAT_TYPE_P (type))
2645 return flag_associative_math;
2647 return (INTEGRAL_TYPE_P (type)
2648 && TYPE_OVERFLOW_WRAPS (type));
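/* For example, "s_2 = s_1 + x_3" qualifies when the type of s_2 is an
   unsigned (wrapping) integer, and, for floating point types, only when
   -fassociative-math allows reassociation; a subtraction or a division does
   not qualify because it is not both commutative and associative.  */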
2651 /* Returns true when PHI contains an argument ARG. */
2653 static bool
2654 phi_contains_arg (gphi *phi, tree arg)
2656 size_t i;
2658 for (i = 0; i < gimple_phi_num_args (phi); i++)
2659 if (operand_equal_p (arg, gimple_phi_arg_def (phi, i), 0))
2660 return true;
2662 return false;
2665 /* Return a loop phi node that corresponds to a reduction containing LHS. */
2667 static gphi *
2668 follow_ssa_with_commutative_ops (tree arg, tree lhs)
2670 gimple stmt;
2672 if (TREE_CODE (arg) != SSA_NAME)
2673 return NULL;
2675 stmt = SSA_NAME_DEF_STMT (arg);
2677 if (gimple_code (stmt) == GIMPLE_NOP
2678 || gimple_code (stmt) == GIMPLE_CALL)
2679 return NULL;
2681 if (gphi *phi = dyn_cast <gphi *> (stmt))
2683 if (phi_contains_arg (phi, lhs))
2684 return phi;
2685 return NULL;
2688 if (!is_gimple_assign (stmt))
2689 return NULL;
2691 if (gimple_num_ops (stmt) == 2)
2692 return follow_ssa_with_commutative_ops (gimple_assign_rhs1 (stmt), lhs);
2694 if (is_reduction_operation_p (stmt))
2696 gphi *res
2697 = follow_ssa_with_commutative_ops (gimple_assign_rhs1 (stmt), lhs);
2699 return res ? res :
2700 follow_ssa_with_commutative_ops (gimple_assign_rhs2 (stmt), lhs);
2703 return NULL;
2706 /* Detect commutative and associative scalar reductions starting at
2707 the STMT. Return the phi node of the reduction cycle, or NULL. */
2709 static gphi *
2710 detect_commutative_reduction_arg (tree lhs, gimple stmt, tree arg,
2711 vec<gimple> *in,
2712 vec<gimple> *out)
2714 gphi *phi = follow_ssa_with_commutative_ops (arg, lhs);
2716 if (!phi)
2717 return NULL;
2719 in->safe_push (stmt);
2720 out->safe_push (stmt);
2721 return phi;
2724 /* Detect commutative and associative scalar reductions starting at
2725 STMT. Return the phi node of the reduction cycle, or NULL. */
2727 static gphi *
2728 detect_commutative_reduction_assign (gimple stmt, vec<gimple> *in,
2729 vec<gimple> *out)
2731 tree lhs = gimple_assign_lhs (stmt);
2733 if (gimple_num_ops (stmt) == 2)
2734 return detect_commutative_reduction_arg (lhs, stmt,
2735 gimple_assign_rhs1 (stmt),
2736 in, out);
2738 if (is_reduction_operation_p (stmt))
2740 gphi *res = detect_commutative_reduction_arg (lhs, stmt,
2741 gimple_assign_rhs1 (stmt),
2742 in, out);
2743 return res ? res
2744 : detect_commutative_reduction_arg (lhs, stmt,
2745 gimple_assign_rhs2 (stmt),
2746 in, out);
2749 return NULL;
2752 /* Return a loop phi node that corresponds to a reduction containing LHS. */
2754 static gphi *
2755 follow_initial_value_to_phi (tree arg, tree lhs)
2757 gimple stmt;
2759 if (!arg || TREE_CODE (arg) != SSA_NAME)
2760 return NULL;
2762 stmt = SSA_NAME_DEF_STMT (arg);
2764 if (gphi *phi = dyn_cast <gphi *> (stmt))
2765 if (phi_contains_arg (phi, lhs))
2766 return phi;
2768 return NULL;
2772 /* Return the edge through which the initial value of the loop PHI comes
2773 from outside the loop. */
2775 static edge
2776 edge_initial_value_for_loop_phi (gphi *phi)
2778 size_t i;
2780 for (i = 0; i < gimple_phi_num_args (phi); i++)
2782 edge e = gimple_phi_arg_edge (phi, i);
2784 if (loop_depth (e->src->loop_father)
2785 < loop_depth (e->dest->loop_father))
2786 return e;
2789 return NULL;
2792 /* Return the argument of the loop PHI that is the initial value coming
2793 from outside the loop. */
2795 static tree
2796 initial_value_for_loop_phi (gphi *phi)
2798 size_t i;
2800 for (i = 0; i < gimple_phi_num_args (phi); i++)
2802 edge e = gimple_phi_arg_edge (phi, i);
2804 if (loop_depth (e->src->loop_father)
2805 < loop_depth (e->dest->loop_father))
2806 return gimple_phi_arg_def (phi, i);
2809 return NULL_TREE;
2812 /* Returns true when DEF is used outside the reduction cycle of
2813 LOOP_PHI. */
2815 static bool
2816 used_outside_reduction (tree def, gimple loop_phi)
2818 use_operand_p use_p;
2819 imm_use_iterator imm_iter;
2820 loop_p loop = loop_containing_stmt (loop_phi);
2822 /* In LOOP, DEF should be used only in LOOP_PHI. */
2823 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
2825 gimple stmt = USE_STMT (use_p);
2827 if (stmt != loop_phi
2828 && !is_gimple_debug (stmt)
2829 && flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
2830 return true;
2833 return false;
2836 /* Detect commutative and associative scalar reductions belonging to
2837 the SCOP starting at the loop closed phi node STMT. Return the phi
2838 node of the reduction cycle, or NULL. */
2840 static gphi *
2841 detect_commutative_reduction (scop_p scop, gimple stmt, vec<gimple> *in,
2842 vec<gimple> *out)
2844 if (scalar_close_phi_node_p (stmt))
2846 gimple def;
2847 gphi *loop_phi, *phi, *close_phi = as_a <gphi *> (stmt);
2848 tree init, lhs, arg = gimple_phi_arg_def (close_phi, 0);
2850 if (TREE_CODE (arg) != SSA_NAME)
2851 return NULL;
2853 /* Note that loop close phi nodes should have a single argument
2854 because we translated the representation into a canonical form
2855 before Graphite: see canonicalize_loop_closed_ssa_form. */
2856 gcc_assert (gimple_phi_num_args (close_phi) == 1);
2858 def = SSA_NAME_DEF_STMT (arg);
2859 if (!stmt_in_sese_p (def, SCOP_REGION (scop))
2860 || !(loop_phi = detect_commutative_reduction (scop, def, in, out)))
2861 return NULL;
2863 lhs = gimple_phi_result (close_phi);
2864 init = initial_value_for_loop_phi (loop_phi);
2865 phi = follow_initial_value_to_phi (init, lhs);
2867 if (phi && (used_outside_reduction (lhs, phi)
2868 || !has_single_use (gimple_phi_result (phi))))
2869 return NULL;
2871 in->safe_push (loop_phi);
2872 out->safe_push (close_phi);
2873 return phi;
2876 if (gimple_code (stmt) == GIMPLE_ASSIGN)
2877 return detect_commutative_reduction_assign (stmt, in, out);
2879 return NULL;
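/* The shape of a reduction detected above, with hypothetical SSA names:

     # sum_1 = PHI <init_4(preheader), sum_3(latch)>   <- loop phi
     ...
     sum_3 = sum_1 + x_5;                              <- reduction statement
     ...
     # sum_6 = PHI <sum_3(exit)>                       <- loop close phi

   IN collects the reduction statement followed by the loop phis, OUT the
   reduction statement followed by the close phis (see the comment before
   translate_scalar_reduction_to_array).  */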
2882 /* Translate the scalar reduction statement STMT to an array RED
2883 knowing that its recursive phi node is LOOP_PHI. */
2885 static void
2886 translate_scalar_reduction_to_array_for_stmt (scop_p scop, tree red,
2887 gimple stmt, gphi *loop_phi)
2889 tree res = gimple_phi_result (loop_phi);
2890 gassign *assign = gimple_build_assign (res, unshare_expr (red));
2891 gimple_stmt_iterator gsi;
2893 insert_stmts (scop, assign, NULL, gsi_after_labels (gimple_bb (loop_phi)));
2895 assign = gimple_build_assign (unshare_expr (red), gimple_assign_lhs (stmt));
2896 gsi = gsi_for_stmt (stmt);
2897 gsi_next (&gsi);
2898 insert_stmts (scop, assign, NULL, gsi);
2901 /* Removes the PHI node and resets all the debug stmts that are using
2902 the PHI_RESULT. */
2904 static void
2905 remove_phi (gphi *phi)
2907 imm_use_iterator imm_iter;
2908 tree def;
2909 use_operand_p use_p;
2910 gimple_stmt_iterator gsi;
2911 auto_vec<gimple, 3> update;
2912 unsigned int i;
2913 gimple stmt;
2915 def = PHI_RESULT (phi);
2916 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
2918 stmt = USE_STMT (use_p);
2920 if (is_gimple_debug (stmt))
2922 gimple_debug_bind_reset_value (stmt);
2923 update.safe_push (stmt);
2927 FOR_EACH_VEC_ELT (update, i, stmt)
2928 update_stmt (stmt);
2930 gsi = gsi_for_phi_node (phi);
2931 remove_phi_node (&gsi, false);
2934 /* Helper function for for_each_index. For each INDEX of the data
2935 reference REF, returns true when its indices are valid in the loop
2936 nest LOOP passed in as DATA. */
2938 static bool
2939 dr_indices_valid_in_loop (tree ref ATTRIBUTE_UNUSED, tree *index, void *data)
2941 loop_p loop;
2942 basic_block header, def_bb;
2943 gimple stmt;
2945 if (TREE_CODE (*index) != SSA_NAME)
2946 return true;
2948 loop = *((loop_p *) data);
2949 header = loop->header;
2950 stmt = SSA_NAME_DEF_STMT (*index);
2952 if (!stmt)
2953 return true;
2955 def_bb = gimple_bb (stmt);
2957 if (!def_bb)
2958 return true;
2960 return dominated_by_p (CDI_DOMINATORS, header, def_bb);
2963 /* When the result of a CLOSE_PHI is written to a memory location,
2964 return a pointer to that memory reference, otherwise return
2965 NULL_TREE. */
2967 static tree
2968 close_phi_written_to_memory (gphi *close_phi)
2970 imm_use_iterator imm_iter;
2971 use_operand_p use_p;
2972 gimple stmt;
2973 tree res, def = gimple_phi_result (close_phi);
2975 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
2976 if ((stmt = USE_STMT (use_p))
2977 && gimple_code (stmt) == GIMPLE_ASSIGN
2978 && (res = gimple_assign_lhs (stmt)))
2980 switch (TREE_CODE (res))
2982 case VAR_DECL:
2983 case PARM_DECL:
2984 case RESULT_DECL:
2985 return res;
2987 case ARRAY_REF:
2988 case MEM_REF:
2990 tree arg = gimple_phi_arg_def (close_phi, 0);
2991 loop_p nest = loop_containing_stmt (SSA_NAME_DEF_STMT (arg));
2993 /* FIXME: this restriction is for id-{24,25}.f and
2994 could be handled by duplicating the computation of
2995 array indices before the loop of the close_phi. */
2996 if (for_each_index (&res, dr_indices_valid_in_loop, &nest))
2997 return res;
2999 /* Fallthru. */
3001 default:
3002 continue;
3005 return NULL_TREE;
3008 /* Rewrite out of SSA the reduction described by the loop phi nodes
3009 IN, and the close phi nodes OUT. IN and OUT are structured by loop
3010 levels like this:
3012 IN: stmt, loop_n, ..., loop_0
3013 OUT: stmt, close_n, ..., close_0
3015 the first element is the reduction statement, and the next elements
3016 are the loop and close phi nodes of each of the outer loops. */
3018 static void
3019 translate_scalar_reduction_to_array (scop_p scop,
3020 vec<gimple> in,
3021 vec<gimple> out)
3023 gimple loop_stmt;
3024 unsigned int i = out.length () - 1;
3025 tree red = close_phi_written_to_memory (as_a <gphi *> (out[i]));
3027 FOR_EACH_VEC_ELT (in, i, loop_stmt)
3029 gimple close_stmt = out[i];
3031 if (i == 0)
3033 basic_block bb = split_reduction_stmt (scop, loop_stmt);
3034 poly_bb_p pbb = pbb_from_bb (bb);
3035 PBB_IS_REDUCTION (pbb) = true;
3036 gcc_assert (close_stmt == loop_stmt);
3038 if (!red)
3039 red = create_zero_dim_array
3040 (gimple_assign_lhs (loop_stmt), "Commutative_Associative_Reduction");
3042 translate_scalar_reduction_to_array_for_stmt (scop, red, loop_stmt,
3043 as_a <gphi *> (in[1]));
3044 continue;
3047 gphi *loop_phi = as_a <gphi *> (loop_stmt);
3048 gphi *close_phi = as_a <gphi *> (close_stmt);
3050 if (i == in.length () - 1)
3052 insert_out_of_ssa_copy (scop, gimple_phi_result (close_phi),
3053 unshare_expr (red), close_phi);
3054 insert_out_of_ssa_copy_on_edge
3055 (scop, edge_initial_value_for_loop_phi (loop_phi),
3056 unshare_expr (red), initial_value_for_loop_phi (loop_phi));
3059 remove_phi (loop_phi);
3060 remove_phi (close_phi);
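/* Continuing the sketch from detect_commutative_reduction, a single-loop
   reduction is finally expressed through a hypothetical array "Red":

     Red[0] = init_4;      // on the edge carrying the initial value
     loop:
       sum_1 = Red[0];     // replaces the loop phi
       sum_3 = sum_1 + x_5;
       Red[0] = sum_3;     // inserted right after the reduction statement
     after the loop:
       sum_6 = Red[0];     // replaces the loop close phi

   Both phi nodes are removed, and RED is either the memory location already
   written to by the close phi result or a new zero dimension array.  */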
3064 /* Rewrites out of SSA a commutative reduction at CLOSE_PHI. Returns
3065 true when something has been changed. */
3067 static bool
3068 rewrite_commutative_reductions_out_of_ssa_close_phi (scop_p scop,
3069 gphi *close_phi)
3071 bool res;
3072 auto_vec<gimple, 10> in;
3073 auto_vec<gimple, 10> out;
3075 detect_commutative_reduction (scop, close_phi, &in, &out);
3076 res = in.length () > 1;
3077 if (res)
3078 translate_scalar_reduction_to_array (scop, in, out);
3080 return res;
3083 /* Rewrites all the commutative reductions from LOOP out of SSA.
3084 Returns true when something has been changed. */
3086 static bool
3087 rewrite_commutative_reductions_out_of_ssa_loop (scop_p scop,
3088 loop_p loop)
3090 gphi_iterator gsi;
3091 edge exit = single_exit (loop);
3092 tree res;
3093 bool changed = false;
3095 if (!exit)
3096 return false;
3098 for (gsi = gsi_start_phis (exit->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3099 if ((res = gimple_phi_result (gsi.phi ()))
3100 && !virtual_operand_p (res)
3101 && !scev_analyzable_p (res, SCOP_REGION (scop)))
3102 changed |= rewrite_commutative_reductions_out_of_ssa_close_phi
3103 (scop, gsi.phi ());
3105 return changed;
3108 /* Rewrites all the commutative reductions from SCOP out of SSA. */
3110 static void
3111 rewrite_commutative_reductions_out_of_ssa (scop_p scop)
3113 loop_p loop;
3114 bool changed = false;
3115 sese region = SCOP_REGION (scop);
3117 FOR_EACH_LOOP (loop, 0)
3118 if (loop_in_sese_p (loop, region))
3119 changed |= rewrite_commutative_reductions_out_of_ssa_loop (scop, loop);
3121 if (changed)
3123 scev_reset_htab ();
3124 gsi_commit_edge_inserts ();
3125 update_ssa (TODO_update_ssa);
3126 #ifdef ENABLE_CHECKING
3127 verify_loop_closed_ssa (true);
3128 #endif
3132 /* Can all ivs be represented by a signed integer?
3133 As CLooG might generate negative values in its expressions, signed loop ivs
3134 are required in the backend. */
3136 static bool
3137 scop_ivs_can_be_represented (scop_p scop)
3139 loop_p loop;
3140 gphi_iterator psi;
3141 bool result = true;
3143 FOR_EACH_LOOP (loop, 0)
3145 if (!loop_in_sese_p (loop, SCOP_REGION (scop)))
3146 continue;
3148 for (psi = gsi_start_phis (loop->header);
3149 !gsi_end_p (psi); gsi_next (&psi))
3151 gphi *phi = psi.phi ();
3152 tree res = PHI_RESULT (phi);
3153 tree type = TREE_TYPE (res);
3155 if (TYPE_UNSIGNED (type)
3156 && TYPE_PRECISION (type) >= TYPE_PRECISION (long_long_integer_type_node))
3158 result = false;
3159 break;
3162 if (!result)
3163 break;
3166 return result;
3169 /* Builds the polyhedral representation for a SESE region. */
3171 void
3172 build_poly_scop (scop_p scop)
3174 sese region = SCOP_REGION (scop);
3175 graphite_dim_t max_dim;
3177 build_scop_bbs (scop);
3179 /* FIXME: This restriction is needed to avoid a problem in CLooG.
3180 Once CLooG is fixed, remove this guard. Anyway, it makes no
3181 sense to optimize a scop containing only PBBs that do not belong
3182 to any loops. */
3183 if (nb_pbbs_in_loops (scop) == 0)
3184 return;
3186 if (!scop_ivs_can_be_represented (scop))
3187 return;
3189 if (flag_associative_math)
3190 rewrite_commutative_reductions_out_of_ssa (scop);
3192 build_sese_loop_nests (region);
3193 /* Record all conditions in REGION. */
3194 sese_dom_walker (CDI_DOMINATORS, region).walk (cfun->cfg->x_entry_block_ptr);
3195 find_scop_parameters (scop);
3197 max_dim = PARAM_VALUE (PARAM_GRAPHITE_MAX_NB_SCOP_PARAMS);
3198 if (scop_nb_params (scop) > max_dim)
3199 return;
3201 build_scop_iteration_domain (scop);
3202 build_scop_context (scop);
3203 add_conditions_to_constraints (scop);
3205 /* Rewrite out of SSA only after having translated the
3206 representation to the polyhedral representation to avoid scev
3207 analysis failures. That means that these functions will insert
3208 new data references that they create in the right place. */
3209 rewrite_reductions_out_of_ssa (scop);
3210 rewrite_cross_bb_scalar_deps_out_of_ssa (scop);
3212 build_scop_drs (scop);
3213 scop_to_lst (scop);
3214 build_scop_scattering (scop);
3216 /* This SCoP has been translated to the polyhedral
3217 representation. */
3218 POLY_SCOP_P (scop) = true;
3220 #endif