* lib/target-supports.exp
[official-gcc.git] / gcc / lambda-code.c
blob60e78a25072057008f4320335866e02fc108d918
1 /* Loop transformation code generation
2 Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
3 Contributed by Daniel Berlin <dberlin@dberlin.org>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "ggc.h"
27 #include "tree.h"
28 #include "target.h"
29 #include "rtl.h"
30 #include "basic-block.h"
31 #include "diagnostic.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "timevar.h"
35 #include "cfgloop.h"
36 #include "expr.h"
37 #include "optabs.h"
38 #include "tree-chrec.h"
39 #include "tree-data-ref.h"
40 #include "tree-pass.h"
41 #include "tree-scalar-evolution.h"
42 #include "vec.h"
43 #include "lambda.h"
44 #include "vecprim.h"
46 /* This loop nest code generation is based on non-singular matrix
47 math.
49 A little terminology and a general sketch of the algorithm. See "A singular
50 loop transformation framework based on non-singular matrices" by Wei Li and
51 Keshav Pingali for formal proofs that the various statements below are
52 correct.
54 A loop iteration space represents the points traversed by the loop. A point in the
55 iteration space can be represented by a vector of size <loop depth>. You can
56    therefore represent the iteration space as an integral combination of a set
57 of basis vectors.
59 A loop iteration space is dense if every integer point between the loop
60 bounds is a point in the iteration space. Every loop with a step of 1
61 therefore has a dense iteration space.
63 for i = 1 to 3, step 1 is a dense iteration space.
65 A loop iteration space is sparse if it is not dense. That is, the iteration
66 space skips integer points that are within the loop bounds.
68 for i = 1 to 3, step 2 is a sparse iteration space, because the integer point
69 2 is skipped.
71 Dense source spaces are easy to transform, because they don't skip any
72 points to begin with. Thus we can compute the exact bounds of the target
73 space using min/max and floor/ceil.
75 For a dense source space, we take the transformation matrix, decompose it
76 into a lower triangular part (H) and a unimodular part (U).
77 We then compute the auxiliary space from the unimodular part (source loop
78 nest . U = auxiliary space) , which has two important properties:
79 1. It traverses the iterations in the same lexicographic order as the source
80 space.
81 2. It is a dense space when the source is a dense space (even if the target
82 space is going to be sparse).
84 Given the auxiliary space, we use the lower triangular part to compute the
85 bounds in the target space by simple matrix multiplication.
86 The gaps in the target space (IE the new loop step sizes) will be the
87 diagonals of the H matrix.
89 Sparse source spaces require another step, because you can't directly compute
90 the exact bounds of the auxiliary and target space from the sparse space.
91 Rather than try to come up with a separate algorithm to handle sparse source
92 spaces directly, we just find a legal transformation matrix that gives you
93 the sparse source space, from a dense space, and then transform the dense
94 space.
96 For a regular sparse space, you can represent the source space as an integer
97 lattice, and the base space of that lattice will always be dense. Thus, we
98 effectively use the lattice to figure out the transformation from the lattice
99 base space, to the sparse iteration space (IE what transform was applied to
100 the dense space to make it sparse). We then compose this transform with the
101 transformation matrix specified by the user (since our matrix transformations
102 are closed under composition, this is okay). We can then use the base space
103 (which is dense) plus the composed transformation matrix, to compute the rest
104 of the transform using the dense space algorithm above.
106 In other words, our sparse source space (B) is decomposed into a dense base
107 space (A), and a matrix (L) that transforms A into B, such that A.L = B.
108 We then compute the composition of L and the user transformation matrix (T),
109 so that T is now a transform from A to the result, instead of from B to the
110 result.
111 IE A.(LT) = result instead of B.T = result
112 Since A is now a dense source space, we can use the dense source space
113 algorithm above to compute the result of applying transform (LT) to A.
115 Fourier-Motzkin elimination is used to compute the bounds of the base space
116 of the lattice. */
static bool perfect_nestify (struct loop *, VEC(tree,heap) *,
                             VEC(tree,heap) *, VEC(int,heap) *,
                             VEC(tree,heap) *);
/* Lattice stuff that is internal to the code generation algorithm.  */

/* An integer lattice describing a sparse iteration space as a
   non-singular transform of a dense base space (see the file header
   comment): x = BASE * y + ORIGIN + ORIGIN_INVARIANTS * inv, where y
   ranges over the dense base space and inv over the loop invariants.  */
typedef struct
{
  /* Lattice base matrix (DIMENSION x DIMENSION, lower triangular).  */
  lambda_matrix base;
  /* Lattice dimension (the loop nest depth).  */
  int dimension;
  /* Origin vector for the coefficients.  */
  lambda_vector origin;
  /* Origin matrix for the invariants (DIMENSION x INVARIANTS).  */
  lambda_matrix origin_invariants;
  /* Number of invariants.  */
  int invariants;
} *lambda_lattice;

/* Accessors for the fields of a lambda_lattice.  */
#define LATTICE_BASE(T) ((T)->base)
#define LATTICE_DIMENSION(T) ((T)->dimension)
#define LATTICE_ORIGIN(T) ((T)->origin)
#define LATTICE_ORIGIN_INVARIANTS(T) ((T)->origin_invariants)
#define LATTICE_INVARIANTS(T) ((T)->invariants)

/* Forward declarations for the static helpers defined below.  */
static bool lle_equal (lambda_linear_expression, lambda_linear_expression,
                       int, int);
static lambda_lattice lambda_lattice_new (int, int);
static lambda_lattice lambda_lattice_compute_base (lambda_loopnest);

static tree find_induction_var_from_exit_cond (struct loop *);
static bool can_convert_to_perfect_nest (struct loop *);
151 /* Create a new lambda body vector. */
153 lambda_body_vector
154 lambda_body_vector_new (int size)
156 lambda_body_vector ret;
158 ret = ggc_alloc (sizeof (*ret));
159 LBV_COEFFICIENTS (ret) = lambda_vector_new (size);
160 LBV_SIZE (ret) = size;
161 LBV_DENOMINATOR (ret) = 1;
162 return ret;
165 /* Compute the new coefficients for the vector based on the
166 *inverse* of the transformation matrix. */
168 lambda_body_vector
169 lambda_body_vector_compute_new (lambda_trans_matrix transform,
170 lambda_body_vector vect)
172 lambda_body_vector temp;
173 int depth;
175 /* Make sure the matrix is square. */
176 gcc_assert (LTM_ROWSIZE (transform) == LTM_COLSIZE (transform));
178 depth = LTM_ROWSIZE (transform);
180 temp = lambda_body_vector_new (depth);
181 LBV_DENOMINATOR (temp) =
182 LBV_DENOMINATOR (vect) * LTM_DENOMINATOR (transform);
183 lambda_vector_matrix_mult (LBV_COEFFICIENTS (vect), depth,
184 LTM_MATRIX (transform), depth,
185 LBV_COEFFICIENTS (temp));
186 LBV_SIZE (temp) = LBV_SIZE (vect);
187 return temp;
190 /* Print out a lambda body vector. */
192 void
193 print_lambda_body_vector (FILE * outfile, lambda_body_vector body)
195 print_lambda_vector (outfile, LBV_COEFFICIENTS (body), LBV_SIZE (body));
198 /* Return TRUE if two linear expressions are equal. */
200 static bool
201 lle_equal (lambda_linear_expression lle1, lambda_linear_expression lle2,
202 int depth, int invariants)
204 int i;
206 if (lle1 == NULL || lle2 == NULL)
207 return false;
208 if (LLE_CONSTANT (lle1) != LLE_CONSTANT (lle2))
209 return false;
210 if (LLE_DENOMINATOR (lle1) != LLE_DENOMINATOR (lle2))
211 return false;
212 for (i = 0; i < depth; i++)
213 if (LLE_COEFFICIENTS (lle1)[i] != LLE_COEFFICIENTS (lle2)[i])
214 return false;
215 for (i = 0; i < invariants; i++)
216 if (LLE_INVARIANT_COEFFICIENTS (lle1)[i] !=
217 LLE_INVARIANT_COEFFICIENTS (lle2)[i])
218 return false;
219 return true;
222 /* Create a new linear expression with dimension DIM, and total number
223 of invariants INVARIANTS. */
225 lambda_linear_expression
226 lambda_linear_expression_new (int dim, int invariants)
228 lambda_linear_expression ret;
230 ret = ggc_alloc_cleared (sizeof (*ret));
232 LLE_COEFFICIENTS (ret) = lambda_vector_new (dim);
233 LLE_CONSTANT (ret) = 0;
234 LLE_INVARIANT_COEFFICIENTS (ret) = lambda_vector_new (invariants);
235 LLE_DENOMINATOR (ret) = 1;
236 LLE_NEXT (ret) = NULL;
238 return ret;
241 /* Print out a linear expression EXPR, with SIZE coefficients, to OUTFILE.
242 The starting letter used for variable names is START. */
244 static void
245 print_linear_expression (FILE * outfile, lambda_vector expr, int size,
246 char start)
248 int i;
249 bool first = true;
250 for (i = 0; i < size; i++)
252 if (expr[i] != 0)
254 if (first)
256 if (expr[i] < 0)
257 fprintf (outfile, "-");
258 first = false;
260 else if (expr[i] > 0)
261 fprintf (outfile, " + ");
262 else
263 fprintf (outfile, " - ");
264 if (abs (expr[i]) == 1)
265 fprintf (outfile, "%c", start + i);
266 else
267 fprintf (outfile, "%d%c", abs (expr[i]), start + i);
272 /* Print out a lambda linear expression structure, EXPR, to OUTFILE. The
273 depth/number of coefficients is given by DEPTH, the number of invariants is
274 given by INVARIANTS, and the character to start variable names with is given
275 by START. */
277 void
278 print_lambda_linear_expression (FILE * outfile,
279 lambda_linear_expression expr,
280 int depth, int invariants, char start)
282 fprintf (outfile, "\tLinear expression: ");
283 print_linear_expression (outfile, LLE_COEFFICIENTS (expr), depth, start);
284 fprintf (outfile, " constant: %d ", LLE_CONSTANT (expr));
285 fprintf (outfile, " invariants: ");
286 print_linear_expression (outfile, LLE_INVARIANT_COEFFICIENTS (expr),
287 invariants, 'A');
288 fprintf (outfile, " denominator: %d\n", LLE_DENOMINATOR (expr));
291 /* Print a lambda loop structure LOOP to OUTFILE. The depth/number of
292 coefficients is given by DEPTH, the number of invariants is
293 given by INVARIANTS, and the character to start variable names with is given
294 by START. */
296 void
297 print_lambda_loop (FILE * outfile, lambda_loop loop, int depth,
298 int invariants, char start)
300 int step;
301 lambda_linear_expression expr;
303 gcc_assert (loop);
305 expr = LL_LINEAR_OFFSET (loop);
306 step = LL_STEP (loop);
307 fprintf (outfile, " step size = %d \n", step);
309 if (expr)
311 fprintf (outfile, " linear offset: \n");
312 print_lambda_linear_expression (outfile, expr, depth, invariants,
313 start);
316 fprintf (outfile, " lower bound: \n");
317 for (expr = LL_LOWER_BOUND (loop); expr != NULL; expr = LLE_NEXT (expr))
318 print_lambda_linear_expression (outfile, expr, depth, invariants, start);
319 fprintf (outfile, " upper bound: \n");
320 for (expr = LL_UPPER_BOUND (loop); expr != NULL; expr = LLE_NEXT (expr))
321 print_lambda_linear_expression (outfile, expr, depth, invariants, start);
324 /* Create a new loop nest structure with DEPTH loops, and INVARIANTS as the
325 number of invariants. */
327 lambda_loopnest
328 lambda_loopnest_new (int depth, int invariants)
330 lambda_loopnest ret;
331 ret = ggc_alloc (sizeof (*ret));
333 LN_LOOPS (ret) = ggc_alloc_cleared (depth * sizeof (lambda_loop));
334 LN_DEPTH (ret) = depth;
335 LN_INVARIANTS (ret) = invariants;
337 return ret;
340 /* Print a lambda loopnest structure, NEST, to OUTFILE. The starting
341 character to use for loop names is given by START. */
343 void
344 print_lambda_loopnest (FILE * outfile, lambda_loopnest nest, char start)
346 int i;
347 for (i = 0; i < LN_DEPTH (nest); i++)
349 fprintf (outfile, "Loop %c\n", start + i);
350 print_lambda_loop (outfile, LN_LOOPS (nest)[i], LN_DEPTH (nest),
351 LN_INVARIANTS (nest), 'i');
352 fprintf (outfile, "\n");
356 /* Allocate a new lattice structure of DEPTH x DEPTH, with INVARIANTS number
357 of invariants. */
359 static lambda_lattice
360 lambda_lattice_new (int depth, int invariants)
362 lambda_lattice ret;
363 ret = ggc_alloc (sizeof (*ret));
364 LATTICE_BASE (ret) = lambda_matrix_new (depth, depth);
365 LATTICE_ORIGIN (ret) = lambda_vector_new (depth);
366 LATTICE_ORIGIN_INVARIANTS (ret) = lambda_matrix_new (depth, invariants);
367 LATTICE_DIMENSION (ret) = depth;
368 LATTICE_INVARIANTS (ret) = invariants;
369 return ret;
/* Compute the lattice base for NEST.  The lattice base is essentially a
   non-singular transform from a dense base space to a sparse iteration space.
   We use it so that we don't have to specially handle the case of a sparse
   iteration space in other parts of the algorithm.  As a result, this routine
   only does something interesting (IE produce a matrix that isn't the
   identity matrix) if NEST is a sparse space.

   The result is lower triangular with the loop steps on the diagonal;
   the origin vector/matrix record the constant and invariant parts of
   each non-unit-step loop's lower bound.  */

static lambda_lattice
lambda_lattice_compute_base (lambda_loopnest nest)
{
  lambda_lattice ret;
  int depth, invariants;
  lambda_matrix base;

  int i, j, step;
  lambda_loop loop;
  lambda_linear_expression expression;

  depth = LN_DEPTH (nest);
  invariants = LN_INVARIANTS (nest);

  ret = lambda_lattice_new (depth, invariants);
  base = LATTICE_BASE (ret);
  for (i = 0; i < depth; i++)
    {
      loop = LN_LOOPS (nest)[i];
      gcc_assert (loop);
      step = LL_STEP (loop);
      /* If we have a step of 1, then the base is one, and the
         origin and invariant coefficients are 0.  */
      if (step == 1)
        {
          /* Identity row: this loop is already dense.  */
          for (j = 0; j < depth; j++)
            base[i][j] = 0;
          base[i][i] = 1;
          LATTICE_ORIGIN (ret)[i] = 0;
          for (j = 0; j < invariants; j++)
            LATTICE_ORIGIN_INVARIANTS (ret)[i][j] = 0;
        }
      else
        {
          /* Otherwise, we need the lower bound expression (which must
             be an affine function) to determine the base.  A single
             bound with denominator 1 is required here.  */
          expression = LL_LOWER_BOUND (loop);
          gcc_assert (expression && !LLE_NEXT (expression)
                      && LLE_DENOMINATOR (expression) == 1);

          /* The lower triangular portion of the base is going to be the
             coefficient times the step.  */
          for (j = 0; j < i; j++)
            base[i][j] = LLE_COEFFICIENTS (expression)[j]
              * LL_STEP (LN_LOOPS (nest)[j]);
          base[i][i] = step;
          for (j = i + 1; j < depth; j++)
            base[i][j] = 0;

          /* Origin for this loop is the constant of the lower bound
             expression.  */
          LATTICE_ORIGIN (ret)[i] = LLE_CONSTANT (expression);

          /* Coefficient for the invariants are equal to the invariant
             coefficients in the expression.  */
          for (j = 0; j < invariants; j++)
            LATTICE_ORIGIN_INVARIANTS (ret)[i][j] =
              LLE_INVARIANT_COEFFICIENTS (expression)[j];
        }
    }
  return ret;
}
/* Compute the least common multiple of two numbers A and B.  The
   result is nonnegative (assuming gcd returns a positive value for
   nonzero inputs — defined elsewhere in this file's support code;
   TODO confirm).  Behavior with A or B equal to zero is unchanged
   from the original (division by gcd (a, b)).  */

static int
least_common_multiple (int a, int b)
{
  /* Divide before multiplying: gcd (a, b) divides a, so
     abs (a) / gcd (a, b) is exact, and the smaller intermediate
     product reduces the risk of signed overflow compared to
     abs (a) * abs (b) / gcd (a, b).  */
  return abs (a) / gcd (a, b) * abs (b);
}
/* Perform Fourier-Motzkin elimination to calculate the bounds of the
   auxiliary nest.
   Fourier-Motzkin is a way of reducing systems of linear inequalities so that
   it is easy to calculate the answer and bounds.
   A sketch of how it works:
   Given a system of linear inequalities, ai * xj >= bk, you can always
   rewrite the constraints so they are all of the form
   a <= x, or x <= b, or x >= constant for some x in x1 ... xj (and some b
   in b1 ... bk, and some a in a1...ai)
   You can then eliminate this x from the non-constant inequalities by
   rewriting these as a <= b, x >= constant, and delete the x variable.
   You can then repeat this for any remaining x variables, and then we have
   an easy to use variable <= constant (or no variables at all) form that we
   can construct our bounds from.

   In our case, each time we eliminate, we construct part of the bound from
   the ith variable, then delete the ith variable.

   Remember the constant are in our vector a, our coefficient matrix is A,
   and our invariant coefficient matrix is B.

   SIZE is the size of the matrices being passed.
   DEPTH is the loop nest depth.
   INVARIANTS is the number of loop invariants.
   A, B, and a are the coefficient matrix, invariant coefficient, and a
   vector of constants, respectively.

   The scratch matrices A1/B1/a1 are fixed at 128 rows, matching the
   capacity assumed by the caller (see the gcc_assert (size <= 127)
   checks in lambda_compute_auxillary_space).  */

static lambda_loopnest
compute_nest_using_fourier_motzkin (int size,
                                    int depth,
                                    int invariants,
                                    lambda_matrix A,
                                    lambda_matrix B,
                                    lambda_vector a)
{

  int multiple, f1, f2;
  int i, j, k;
  lambda_linear_expression expression;
  lambda_loop loop;
  lambda_loopnest auxillary_nest;
  lambda_matrix swapmatrix, A1, B1;
  lambda_vector swapvector, a1;
  int newsize;

  A1 = lambda_matrix_new (128, depth);
  B1 = lambda_matrix_new (128, invariants);
  a1 = lambda_vector_new (128);

  auxillary_nest = lambda_loopnest_new (depth, invariants);

  /* Eliminate variables from innermost to outermost; each pass builds
     the bounds for loop I, then removes variable I from the system.  */
  for (i = depth - 1; i >= 0; i--)
    {
      loop = lambda_loop_new ();
      LN_LOOPS (auxillary_nest)[i] = loop;
      LL_STEP (loop) = 1;

      for (j = 0; j < size; j++)
        {
          if (A[j][i] < 0)
            {
              /* Any linear expression in the matrix with a coefficient less
                 than 0 becomes part of the new lower bound.  */
              expression = lambda_linear_expression_new (depth, invariants);

              for (k = 0; k < i; k++)
                LLE_COEFFICIENTS (expression)[k] = A[j][k];

              for (k = 0; k < invariants; k++)
                LLE_INVARIANT_COEFFICIENTS (expression)[k] = -1 * B[j][k];

              LLE_DENOMINATOR (expression) = -1 * A[j][i];
              LLE_CONSTANT (expression) = -1 * a[j];

              /* Ignore if identical to the existing lower bound.  */
              if (!lle_equal (LL_LOWER_BOUND (loop),
                              expression, depth, invariants))
                {
                  LLE_NEXT (expression) = LL_LOWER_BOUND (loop);
                  LL_LOWER_BOUND (loop) = expression;
                }
            }
          else if (A[j][i] > 0)
            {
              /* Any linear expression with a coefficient greater than 0
                 becomes part of the new upper bound.  */
              expression = lambda_linear_expression_new (depth, invariants);
              for (k = 0; k < i; k++)
                LLE_COEFFICIENTS (expression)[k] = -1 * A[j][k];

              for (k = 0; k < invariants; k++)
                LLE_INVARIANT_COEFFICIENTS (expression)[k] = B[j][k];

              LLE_DENOMINATOR (expression) = A[j][i];
              LLE_CONSTANT (expression) = a[j];

              /* Ignore if identical to the existing upper bound.  */
              if (!lle_equal (LL_UPPER_BOUND (loop),
                              expression, depth, invariants))
                {
                  LLE_NEXT (expression) = LL_UPPER_BOUND (loop);
                  LL_UPPER_BOUND (loop) = expression;
                }
            }
        }

      /* This portion creates a new system of linear inequalities by deleting
         the i'th variable, reducing the system by one variable.  */
      newsize = 0;
      for (j = 0; j < size; j++)
        {
          /* If the coefficient for the i'th variable is 0, then we can just
             eliminate the variable straightaway.  Otherwise, we have to
             multiply through by the coefficients we are eliminating.  */
          if (A[j][i] == 0)
            {
              lambda_vector_copy (A[j], A1[newsize], depth);
              lambda_vector_copy (B[j], B1[newsize], invariants);
              a1[newsize] = a[j];
              newsize++;
            }
          else if (A[j][i] > 0)
            {
              /* Pair every positive-coefficient row with every
                 negative-coefficient row; scaling by the LCM cancels
                 variable I from the combined inequality.  NOTE: each
                 pairing adds a row, so newsize can grow quadratically —
                 the 128-row scratch space is assumed sufficient.  */
              for (k = 0; k < size; k++)
                {
                  if (A[k][i] < 0)
                    {
                      multiple = least_common_multiple (A[j][i], A[k][i]);
                      f1 = multiple / A[j][i];
                      f2 = -1 * multiple / A[k][i];

                      lambda_vector_add_mc (A[j], f1, A[k], f2,
                                            A1[newsize], depth);
                      lambda_vector_add_mc (B[j], f1, B[k], f2,
                                            B1[newsize], invariants);
                      a1[newsize] = f1 * a[j] + f2 * a[k];
                      newsize++;
                    }
                }
            }
        }

      /* Swap the original and scratch systems so the reduced system
         becomes the input for the next elimination round.  */
      swapmatrix = A;
      A = A1;
      A1 = swapmatrix;

      swapmatrix = B;
      B = B1;
      B1 = swapmatrix;

      swapvector = a;
      a = a1;
      a1 = swapvector;

      size = newsize;
    }

  return auxillary_nest;
}
/* Compute the loop bounds for the auxiliary space NEST.
   Input system used is Ax <= b.  TRANS is the unimodular transformation.
   Given the original nest, this function will
   1. Convert the nest into matrix form, which consists of a matrix for the
   coefficients, a matrix for the
   invariant coefficients, and a vector for the constants.
   2. Use the matrix form to calculate the lattice base for the nest (which is
   a dense space)
   3. Compose the dense space transform with the user specified transform, to
   get a transform we can easily calculate transformed bounds for.
   4. Multiply the composed transformation matrix times the matrix form of the
   loop.
   5. Transform the newly created matrix (from step 4) back into a loop nest
   using Fourier-Motzkin elimination to figure out the bounds.  */

static lambda_loopnest
lambda_compute_auxillary_space (lambda_loopnest nest,
                                lambda_trans_matrix trans)
{
  lambda_matrix A, B, A1, B1;
  lambda_vector a, a1;
  lambda_matrix invertedtrans;
  int depth, invariants, size;
  int i, j;
  lambda_loop loop;
  lambda_linear_expression expression;
  lambda_lattice lattice;

  depth = LN_DEPTH (nest);
  invariants = LN_INVARIANTS (nest);

  /* Unfortunately, we can't know the number of constraints we'll have
     ahead of time, but this should be enough even in ridiculous loop nest
     cases.  We must not go over this limit.  */
  A = lambda_matrix_new (128, depth);
  B = lambda_matrix_new (128, invariants);
  a = lambda_vector_new (128);

  A1 = lambda_matrix_new (128, depth);
  B1 = lambda_matrix_new (128, invariants);
  a1 = lambda_vector_new (128);

  /* Store the bounds in the equation matrix A, constant vector a, and
     invariant matrix B, so that we have Ax <= a + B.
     This requires a little equation rearranging so that everything is on the
     correct side of the inequality.  */
  size = 0;
  for (i = 0; i < depth; i++)
    {
      loop = LN_LOOPS (nest)[i];

      /* First we do the lower bound.  For a negative step the roles of
         the bounds are reversed, so start from the upper bound
         instead.  */
      if (LL_STEP (loop) > 0)
        expression = LL_LOWER_BOUND (loop);
      else
        expression = LL_UPPER_BOUND (loop);

      /* One inequality row per expression on the bound chain.  */
      for (; expression != NULL; expression = LLE_NEXT (expression))
        {
          /* Fill in the coefficient.  */
          for (j = 0; j < i; j++)
            A[size][j] = LLE_COEFFICIENTS (expression)[j];

          /* And the invariant coefficient.  */
          for (j = 0; j < invariants; j++)
            B[size][j] = LLE_INVARIANT_COEFFICIENTS (expression)[j];

          /* And the constant.  */
          a[size] = LLE_CONSTANT (expression);

          /* Convert (2x+3y+2+b)/4 <= z to 2x+3y-4z <= -2-b.  IE put all
             constants and single variables on the right-hand side.  */
          A[size][i] = -1 * LLE_DENOMINATOR (expression);
          a[size] *= -1;
          for (j = 0; j < invariants; j++)
            B[size][j] *= -1;

          size++;
          /* Need to increase matrix sizes above.  */
          gcc_assert (size <= 127);
        }

      /* Then do the exact same thing for the upper bounds.  */
      if (LL_STEP (loop) > 0)
        expression = LL_UPPER_BOUND (loop);
      else
        expression = LL_LOWER_BOUND (loop);

      for (; expression != NULL; expression = LLE_NEXT (expression))
        {
          /* Fill in the coefficient.  */
          for (j = 0; j < i; j++)
            A[size][j] = LLE_COEFFICIENTS (expression)[j];

          /* And the invariant coefficient.  */
          for (j = 0; j < invariants; j++)
            B[size][j] = LLE_INVARIANT_COEFFICIENTS (expression)[j];

          /* And the constant.  */
          a[size] = LLE_CONSTANT (expression);

          /* Convert z <= (2x+3y+2+b)/4 to -2x-3y+4z <= 2+b.  */
          for (j = 0; j < i; j++)
            A[size][j] *= -1;
          A[size][i] = LLE_DENOMINATOR (expression);
          size++;
          /* Need to increase matrix sizes above.  */
          gcc_assert (size <= 127);
        }
    }

  /* Compute the lattice base x = base * y + origin, where y is the
     base space.  */
  lattice = lambda_lattice_compute_base (nest);

  /* Ax <= a + B then becomes ALy <= a+B - A*origin.  L is the lattice base.  */

  /* A1 = A * L */
  lambda_matrix_mult (A, LATTICE_BASE (lattice), A1, size, depth, depth);

  /* a1 = a - A * origin constant.  */
  lambda_matrix_vector_mult (A, size, depth, LATTICE_ORIGIN (lattice), a1);
  lambda_vector_add_mc (a, 1, a1, -1, a1, size);

  /* B1 = B - A * origin invariant.  */
  lambda_matrix_mult (A, LATTICE_ORIGIN_INVARIANTS (lattice), B1, size, depth,
                      invariants);
  lambda_matrix_add_mc (B, 1, B1, -1, B1, size, invariants);

  /* Now compute the auxiliary space bounds by first inverting U, multiplying
     it by A1, then performing Fourier-Motzkin.  */

  invertedtrans = lambda_matrix_new (depth, depth);

  /* Compute the inverse of U.  */
  lambda_matrix_inverse (LTM_MATRIX (trans),
                         invertedtrans, depth);

  /* A = A1 inv(U).  */
  lambda_matrix_mult (A1, invertedtrans, A, size, depth, depth);

  return compute_nest_using_fourier_motzkin (size, depth, invariants,
                                             A, B1, a1);
}
/* Compute the loop bounds for the target space, using the bounds of
   the auxiliary nest AUXILLARY_NEST, and the triangular matrix H.
   The target space loop bounds are computed by multiplying the triangular
   matrix H by the auxiliary nest, to get the new loop bounds.  The sign of
   the loop steps (positive or negative) is then used to swap the bounds if
   the loop counts downwards (per STEPSIGNS).
   Return the target loopnest.  */

static lambda_loopnest
lambda_compute_target_space (lambda_loopnest auxillary_nest,
                             lambda_trans_matrix H, lambda_vector stepsigns)
{
  lambda_matrix inverse, H1;
  int determinant, i, j;
  int gcd1, gcd2;
  int factor;

  lambda_loopnest target_nest;
  int depth, invariants;
  lambda_matrix target;

  lambda_loop auxillary_loop, target_loop;
  lambda_linear_expression expression, auxillary_expr, target_expr, tmp_expr;

  depth = LN_DEPTH (auxillary_nest);
  invariants = LN_INVARIANTS (auxillary_nest);

  inverse = lambda_matrix_new (depth, depth);
  determinant = lambda_matrix_inverse (LTM_MATRIX (H), inverse, depth);

  /* H1 is H excluding its diagonal.  */
  H1 = lambda_matrix_new (depth, depth);
  lambda_matrix_copy (LTM_MATRIX (H), H1, depth, depth);

  for (i = 0; i < depth; i++)
    H1[i][i] = 0;

  /* Computes the linear offsets of the loop bounds.  */
  target = lambda_matrix_new (depth, depth);
  lambda_matrix_mult (H1, inverse, target, depth, depth, depth);

  target_nest = lambda_loopnest_new (depth, invariants);

  for (i = 0; i < depth; i++)
    {
      /* Get a new loop structure.  */
      target_loop = lambda_loop_new ();
      LN_LOOPS (target_nest)[i] = target_loop;

      /* Computes the gcd of the coefficients of the linear part.  */
      gcd1 = lambda_vector_gcd (target[i], i);

      /* Include the denominator in the GCD.  */
      gcd1 = gcd (gcd1, determinant);

      /* Now divide through by the gcd.  */
      for (j = 0; j < i; j++)
        target[i][j] = target[i][j] / gcd1;

      /* Record row I, reduced to lowest terms, as this loop's linear
         offset expression.  */
      expression = lambda_linear_expression_new (depth, invariants);
      lambda_vector_copy (target[i], LLE_COEFFICIENTS (expression), depth);
      LLE_DENOMINATOR (expression) = determinant / gcd1;
      LLE_CONSTANT (expression) = 0;
      lambda_vector_clear (LLE_INVARIANT_COEFFICIENTS (expression),
                           invariants);
      LL_LINEAR_OFFSET (target_loop) = expression;
    }

  /* For each loop, compute the new bounds from H.  */
  for (i = 0; i < depth; i++)
    {
      auxillary_loop = LN_LOOPS (auxillary_nest)[i];
      target_loop = LN_LOOPS (target_nest)[i];
      /* The new step size is the diagonal entry of H.  */
      LL_STEP (target_loop) = LTM_MATRIX (H)[i][i];
      factor = LTM_MATRIX (H)[i][i];

      /* First we do the lower bound.  */
      auxillary_expr = LL_LOWER_BOUND (auxillary_loop);

      for (; auxillary_expr != NULL;
           auxillary_expr = LLE_NEXT (auxillary_expr))
        {
          /* target = (aux coefficients * inv (H)) * factor.  */
          target_expr = lambda_linear_expression_new (depth, invariants);
          lambda_vector_matrix_mult (LLE_COEFFICIENTS (auxillary_expr),
                                     depth, inverse, depth,
                                     LLE_COEFFICIENTS (target_expr));
          lambda_vector_mult_const (LLE_COEFFICIENTS (target_expr),
                                    LLE_COEFFICIENTS (target_expr), depth,
                                    factor);

          LLE_CONSTANT (target_expr) = LLE_CONSTANT (auxillary_expr) * factor;
          lambda_vector_copy (LLE_INVARIANT_COEFFICIENTS (auxillary_expr),
                              LLE_INVARIANT_COEFFICIENTS (target_expr),
                              invariants);
          lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS (target_expr),
                                    LLE_INVARIANT_COEFFICIENTS (target_expr),
                                    invariants, factor);
          LLE_DENOMINATOR (target_expr) = LLE_DENOMINATOR (auxillary_expr);

          /* If the expression still depends on loop variables, scale
             everything by the determinant so the inverse-matrix
             fractions cancel (the GCD reduction below undoes any
             overscaling).  */
          if (!lambda_vector_zerop (LLE_COEFFICIENTS (target_expr), depth))
            {
              LLE_CONSTANT (target_expr) = LLE_CONSTANT (target_expr)
                * determinant;
              lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS
                                        (target_expr),
                                        LLE_INVARIANT_COEFFICIENTS
                                        (target_expr), invariants,
                                        determinant);
              LLE_DENOMINATOR (target_expr) =
                LLE_DENOMINATOR (target_expr) * determinant;
            }
          /* Find the gcd and divide by it here, rather than doing it
             at the tree level.  */
          gcd1 = lambda_vector_gcd (LLE_COEFFICIENTS (target_expr), depth);
          gcd2 = lambda_vector_gcd (LLE_INVARIANT_COEFFICIENTS (target_expr),
                                    invariants);
          gcd1 = gcd (gcd1, gcd2);
          gcd1 = gcd (gcd1, LLE_CONSTANT (target_expr));
          gcd1 = gcd (gcd1, LLE_DENOMINATOR (target_expr));
          for (j = 0; j < depth; j++)
            LLE_COEFFICIENTS (target_expr)[j] /= gcd1;
          for (j = 0; j < invariants; j++)
            LLE_INVARIANT_COEFFICIENTS (target_expr)[j] /= gcd1;
          LLE_CONSTANT (target_expr) /= gcd1;
          LLE_DENOMINATOR (target_expr) /= gcd1;
          /* Ignore if identical to existing bound.  */
          if (!lle_equal (LL_LOWER_BOUND (target_loop), target_expr, depth,
                          invariants))
            {
              LLE_NEXT (target_expr) = LL_LOWER_BOUND (target_loop);
              LL_LOWER_BOUND (target_loop) = target_expr;
            }
        }

      /* Now do the upper bound.  This mirrors the lower-bound
         computation above exactly.  */
      auxillary_expr = LL_UPPER_BOUND (auxillary_loop);

      for (; auxillary_expr != NULL;
           auxillary_expr = LLE_NEXT (auxillary_expr))
        {
          target_expr = lambda_linear_expression_new (depth, invariants);
          lambda_vector_matrix_mult (LLE_COEFFICIENTS (auxillary_expr),
                                     depth, inverse, depth,
                                     LLE_COEFFICIENTS (target_expr));
          lambda_vector_mult_const (LLE_COEFFICIENTS (target_expr),
                                    LLE_COEFFICIENTS (target_expr), depth,
                                    factor);
          LLE_CONSTANT (target_expr) = LLE_CONSTANT (auxillary_expr) * factor;
          lambda_vector_copy (LLE_INVARIANT_COEFFICIENTS (auxillary_expr),
                              LLE_INVARIANT_COEFFICIENTS (target_expr),
                              invariants);
          lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS (target_expr),
                                    LLE_INVARIANT_COEFFICIENTS (target_expr),
                                    invariants, factor);
          LLE_DENOMINATOR (target_expr) = LLE_DENOMINATOR (auxillary_expr);

          if (!lambda_vector_zerop (LLE_COEFFICIENTS (target_expr), depth))
            {
              LLE_CONSTANT (target_expr) = LLE_CONSTANT (target_expr)
                * determinant;
              lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS
                                        (target_expr),
                                        LLE_INVARIANT_COEFFICIENTS
                                        (target_expr), invariants,
                                        determinant);
              LLE_DENOMINATOR (target_expr) =
                LLE_DENOMINATOR (target_expr) * determinant;
            }
          /* Find the gcd and divide by it here, instead of at the
             tree level.  */
          gcd1 = lambda_vector_gcd (LLE_COEFFICIENTS (target_expr), depth);
          gcd2 = lambda_vector_gcd (LLE_INVARIANT_COEFFICIENTS (target_expr),
                                    invariants);
          gcd1 = gcd (gcd1, gcd2);
          gcd1 = gcd (gcd1, LLE_CONSTANT (target_expr));
          gcd1 = gcd (gcd1, LLE_DENOMINATOR (target_expr));
          for (j = 0; j < depth; j++)
            LLE_COEFFICIENTS (target_expr)[j] /= gcd1;
          for (j = 0; j < invariants; j++)
            LLE_INVARIANT_COEFFICIENTS (target_expr)[j] /= gcd1;
          LLE_CONSTANT (target_expr) /= gcd1;
          LLE_DENOMINATOR (target_expr) /= gcd1;
          /* Ignore if equal to existing bound.  */
          if (!lle_equal (LL_UPPER_BOUND (target_loop), target_expr, depth,
                          invariants))
            {
              LLE_NEXT (target_expr) = LL_UPPER_BOUND (target_loop);
              LL_UPPER_BOUND (target_loop) = target_expr;
            }
        }
    }

  for (i = 0; i < depth; i++)
    {
      target_loop = LN_LOOPS (target_nest)[i];
      /* If necessary, exchange the upper and lower bounds and negate
         the step size.  */
      if (stepsigns[i] < 0)
        {
          LL_STEP (target_loop) *= -1;
          tmp_expr = LL_LOWER_BOUND (target_loop);
          LL_LOWER_BOUND (target_loop) = LL_UPPER_BOUND (target_loop);
          LL_UPPER_BOUND (target_loop) = tmp_expr;
        }
    }

  return target_nest;
}
/* Compute the step signs of TRANS, using TRANS and stepsigns.  Return the new
   result.

   A working copy H of TRANS is reduced by column operations (negation,
   exchange, and addition); the STEPSIGNS entries are permuted in step
   with the column exchanges so that each sign stays attached to its
   loop.  H itself is scratch and is discarded.  */

static lambda_vector
lambda_compute_step_signs (lambda_trans_matrix trans, lambda_vector stepsigns)
{
  lambda_matrix matrix, H;
  int size;
  lambda_vector newsteps;
  int i, j, factor, minimum_column;
  int temp;

  matrix = LTM_MATRIX (trans);
  size = LTM_ROWSIZE (trans);
  H = lambda_matrix_new (size, size);

  /* Work on a copy of STEPSIGNS; the input vector is not modified.  */
  newsteps = lambda_vector_new (size);
  lambda_vector_copy (stepsigns, newsteps, size);

  /* Work on a copy of the matrix too, since the column operations
     below are destructive.  */
  lambda_matrix_copy (matrix, H, size, size);

  for (j = 0; j < size; j++)
    {
      lambda_vector row;
      row = H[j];

      /* Make every entry of row J from column J onward nonnegative.  */
      for (i = j; i < size; i++)
        if (row[i] < 0)
          lambda_matrix_col_negate (H, size, i);

      /* Repeatedly clear the entries to the right of column J by
         subtracting multiples of the minimal nonzero column.  */
      while (lambda_vector_first_nz (row, size, j + 1) < size)
        {
          minimum_column = lambda_vector_min_nz (row, size, j);
          lambda_matrix_col_exchange (H, size, j, minimum_column);

          /* Keep the sign vector in sync with the column exchange.  */
          temp = newsteps[j];
          newsteps[j] = newsteps[minimum_column];
          newsteps[minimum_column] = temp;

          for (i = j + 1; i < size; i++)
            {
              factor = row[i] / row[j];
              lambda_matrix_col_add (H, size, j, i, -1 * factor);
            }
        }
    }
  return newsteps;
}
1013 /* Transform NEST according to TRANS, and return the new loopnest.
1014 This involves
1015 1. Computing a lattice base for the transformation
1016 2. Composing the dense base with the specified transformation (TRANS)
1017 3. Decomposing the combined transformation into a lower triangular portion,
1018 and a unimodular portion.
1019 4. Computing the auxiliary nest using the unimodular portion.
1020 5. Computing the target nest using the auxiliary nest and the lower
1021 triangular portion. */
1023 lambda_loopnest
1024 lambda_loopnest_transform (lambda_loopnest nest, lambda_trans_matrix trans)
1026 lambda_loopnest auxillary_nest, target_nest;
1028 int depth, invariants;
1029 int i, j;
1030 lambda_lattice lattice;
1031 lambda_trans_matrix trans1, H, U;
1032 lambda_loop loop;
1033 lambda_linear_expression expression;
1034 lambda_vector origin;
1035 lambda_matrix origin_invariants;
1036 lambda_vector stepsigns;
/* F scales the lattice-origin correction by the linear offset's
   denominator (see the loop at the bottom).  */
1037 int f;
1039 depth = LN_DEPTH (nest);
1040 invariants = LN_INVARIANTS (nest);
1042 /* Keep track of the signs of the loop steps. */
1043 stepsigns = lambda_vector_new (depth);
1044 for (i = 0; i < depth; i++)
1046 if (LL_STEP (LN_LOOPS (nest)[i]) > 0)
1047 stepsigns[i] = 1;
1048 else
1049 stepsigns[i] = -1;
1052 /* Compute the lattice base. */
1053 lattice = lambda_lattice_compute_base (nest);
1054 trans1 = lambda_trans_matrix_new (depth, depth);
1056 /* Multiply the transformation matrix by the lattice base. */
1058 lambda_matrix_mult (LTM_MATRIX (trans), LATTICE_BASE (lattice),
1059 LTM_MATRIX (trans1), depth, depth, depth);
1061 /* Compute the Hermite normal form for the new transformation matrix. */
1062 H = lambda_trans_matrix_new (depth, depth);
1063 U = lambda_trans_matrix_new (depth, depth);
1064 lambda_matrix_hermite (LTM_MATRIX (trans1), depth, LTM_MATRIX (H),
1065 LTM_MATRIX (U));
1067 /* Compute the auxiliary loop nest's space from the unimodular
1068 portion. */
1069 auxillary_nest = lambda_compute_auxillary_space (nest, U);
1071 /* Compute the loop step signs from the old step signs and the
1072 transformation matrix. */
1073 stepsigns = lambda_compute_step_signs (trans1, stepsigns);
1075 /* Compute the target loop nest space from the auxiliary nest and
1076 the lower triangular matrix H. */
1077 target_nest = lambda_compute_target_space (auxillary_nest, H, stepsigns);
/* Map the lattice origin (and its invariant part) through TRANS so it can
   be folded into the target nest's linear offsets below.  */
1078 origin = lambda_vector_new (depth);
1079 origin_invariants = lambda_matrix_new (depth, invariants);
1080 lambda_matrix_vector_mult (LTM_MATRIX (trans), depth, depth,
1081 LATTICE_ORIGIN (lattice), origin);
1082 lambda_matrix_mult (LTM_MATRIX (trans), LATTICE_ORIGIN_INVARIANTS (lattice),
1083 origin_invariants, depth, depth, invariants);
/* Fold the transformed origin into each target loop's linear offset,
   scaled by the offset's denominator F (F is 1 when the offset has no
   induction-variable terms).  */
1085 for (i = 0; i < depth; i++)
1087 loop = LN_LOOPS (target_nest)[i];
1088 expression = LL_LINEAR_OFFSET (loop);
1089 if (lambda_vector_zerop (LLE_COEFFICIENTS (expression), depth))
1090 f = 1;
1091 else
1092 f = LLE_DENOMINATOR (expression);
1094 LLE_CONSTANT (expression) += f * origin[i];
1096 for (j = 0; j < invariants; j++)
1097 LLE_INVARIANT_COEFFICIENTS (expression)[j] +=
1098 f * origin_invariants[i][j];
1101 return target_nest;
1105 /* Convert a gcc tree expression EXPR to a lambda linear expression, and
1106 return the new expression. DEPTH is the depth of the loopnest.
1107 OUTERINDUCTIONVARS is an array of the induction variables for outer loops
1108 in this nest. INVARIANTS is the array of invariants for the loop. EXTRA
1109 is the amount we have to add/subtract from the expression because of the
1110 type of comparison it is used in. */
1112 static lambda_linear_expression
1113 gcc_tree_to_linear_expression (int depth, tree expr,
1114 VEC(tree,heap) *outerinductionvars,
1115 VEC(tree,heap) *invariants, int extra)
1117 lambda_linear_expression lle = NULL;
1118 switch (TREE_CODE (expr))
1120 case INTEGER_CST:
1122 lle = lambda_linear_expression_new (depth, 2 * depth);
1123 LLE_CONSTANT (lle) = TREE_INT_CST_LOW (expr);
1124 if (extra != 0)
1125 LLE_CONSTANT (lle) += extra;
1127 LLE_DENOMINATOR (lle) = 1;
1129 break;
1130 case SSA_NAME:
1132 tree iv, invar;
1133 size_t i;
1134 for (i = 0; VEC_iterate (tree, outerinductionvars, i, iv); i++)
1135 if (iv != NULL)
1137 if (SSA_NAME_VAR (iv) == SSA_NAME_VAR (expr))
1139 lle = lambda_linear_expression_new (depth, 2 * depth);
1140 LLE_COEFFICIENTS (lle)[i] = 1;
1141 if (extra != 0)
1142 LLE_CONSTANT (lle) = extra;
1144 LLE_DENOMINATOR (lle) = 1;
1147 for (i = 0; VEC_iterate (tree, invariants, i, invar); i++)
1148 if (invar != NULL)
1150 if (SSA_NAME_VAR (invar) == SSA_NAME_VAR (expr))
1152 lle = lambda_linear_expression_new (depth, 2 * depth);
1153 LLE_INVARIANT_COEFFICIENTS (lle)[i] = 1;
1154 if (extra != 0)
1155 LLE_CONSTANT (lle) = extra;
1156 LLE_DENOMINATOR (lle) = 1;
1160 break;
1161 default:
1162 return NULL;
1165 return lle;
1168 /* Return the depth of the loopnest NEST */
1170 static int
1171 depth_of_nest (struct loop *nest)
1173 size_t depth = 0;
1174 while (nest)
1176 depth++;
1177 nest = nest->inner;
1179 return depth;
1183 /* Return true if OP is invariant in LOOP and all outer loops. */
1185 static bool
1186 invariant_in_loop_and_outer_loops (struct loop *loop, tree op)
1188 if (is_gimple_min_invariant (op))
1189 return true;
1190 if (loop->depth == 0)
1191 return true;
1192 if (!expr_invariant_in_loop_p (loop, op))
1193 return false;
1194 if (loop->outer
1195 && !invariant_in_loop_and_outer_loops (loop->outer, op))
1196 return false;
1197 return true;
1200 /* Generate a lambda loop from a gcc loop LOOP. Return the new lambda loop,
1201 or NULL if it could not be converted.
1202 DEPTH is the depth of the loop.
1203 INVARIANTS is a pointer to the array of loop invariants.
1204 The induction variable for this loop should be stored in the parameter
1205 OURINDUCTIONVAR.
1206 OUTERINDUCTIONVARS is an array of induction variables for outer loops. */
1208 static lambda_loop
1209 gcc_loop_to_lambda_loop (struct loop *loop, int depth,
1210 VEC(tree,heap) ** invariants,
1211 tree * ourinductionvar,
1212 VEC(tree,heap) * outerinductionvars,
1213 VEC(tree,heap) ** lboundvars,
1214 VEC(tree,heap) ** uboundvars,
1215 VEC(int,heap) ** steps)
1217 tree phi;
1218 tree exit_cond;
1219 tree access_fn, inductionvar;
1220 tree step;
1221 lambda_loop lloop = NULL;
1222 lambda_linear_expression lbound, ubound;
1223 tree test;
1224 int stepint;
1225 int extra = 0;
1226 tree lboundvar, uboundvar, uboundresult;
1228 /* Find out induction var and exit condition. */
1229 inductionvar = find_induction_var_from_exit_cond (loop);
1230 exit_cond = get_loop_exit_condition (loop);
1232 if (inductionvar == NULL || exit_cond == NULL)
1234 if (dump_file && (dump_flags & TDF_DETAILS))
1235 fprintf (dump_file,
1236 "Unable to convert loop: Cannot determine exit condition or induction variable for loop.\n");
1237 return NULL;
1240 test = TREE_OPERAND (exit_cond, 0);
1242 if (SSA_NAME_DEF_STMT (inductionvar) == NULL_TREE)
1245 if (dump_file && (dump_flags & TDF_DETAILS))
1246 fprintf (dump_file,
1247 "Unable to convert loop: Cannot find PHI node for induction variable\n");
1249 return NULL;
/* The iv tested in the exit condition may be a copy of the phi result;
   chase the single use operand back one step to reach the phi.  */
1252 phi = SSA_NAME_DEF_STMT (inductionvar);
1253 if (TREE_CODE (phi) != PHI_NODE)
1255 phi = SINGLE_SSA_TREE_OPERAND (phi, SSA_OP_USE);
1256 if (!phi)
1259 if (dump_file && (dump_flags & TDF_DETAILS))
1260 fprintf (dump_file,
1261 "Unable to convert loop: Cannot find PHI node for induction variable\n");
1263 return NULL;
1266 phi = SSA_NAME_DEF_STMT (phi);
1267 if (TREE_CODE (phi) != PHI_NODE)
1270 if (dump_file && (dump_flags & TDF_DETAILS))
1271 fprintf (dump_file,
1272 "Unable to convert loop: Cannot find PHI node for induction variable\n");
1273 return NULL;
1278 /* The induction variable name/version we want to put in the array is the
1279 result of the induction variable phi node. */
1280 *ourinductionvar = PHI_RESULT (phi);
1281 access_fn = instantiate_parameters
1282 (loop, analyze_scalar_evolution (loop, PHI_RESULT (phi)));
1283 if (access_fn == chrec_dont_know)
1285 if (dump_file && (dump_flags & TDF_DETAILS))
1286 fprintf (dump_file,
1287 "Unable to convert loop: Access function for induction variable phi is unknown\n");
1289 return NULL;
1292 step = evolution_part_in_loop_num (access_fn, loop->num);
1293 if (!step || step == chrec_dont_know)
1295 if (dump_file && (dump_flags & TDF_DETAILS))
1296 fprintf (dump_file,
1297 "Unable to convert loop: Cannot determine step of loop.\n");
1299 return NULL;
1301 if (TREE_CODE (step) != INTEGER_CST)
1304 if (dump_file && (dump_flags & TDF_DETAILS))
1305 fprintf (dump_file,
1306 "Unable to convert loop: Step of loop is not integer.\n");
1307 return NULL;
/* NOTE(review): TREE_INT_CST_LOW keeps only the low HOST_WIDE_INT of the
   step and the assignment narrows it to int — huge steps would be
   misrepresented; presumably such loops are rejected earlier.  Verify.  */
1310 stepint = TREE_INT_CST_LOW (step);
1312 /* Only want phis for induction vars, which will have two
1313 arguments. */
1314 if (PHI_NUM_ARGS (phi) != 2)
1316 if (dump_file && (dump_flags & TDF_DETAILS))
1317 fprintf (dump_file,
1318 "Unable to convert loop: PHI node for induction variable has >2 arguments\n");
1319 return NULL;
1322 /* Another induction variable check. One argument's source should be
1323 in the loop, one outside the loop. */
1324 if (flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, 0)->src)
1325 && flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, 1)->src))
1328 if (dump_file && (dump_flags & TDF_DETAILS))
1329 fprintf (dump_file,
1330 "Unable to convert loop: PHI edges both inside loop, or both outside loop.\n");
1332 return NULL;
/* The phi argument coming from outside the loop is the initial (lower
   bound) value of the induction variable.  */
1335 if (flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, 0)->src))
1337 lboundvar = PHI_ARG_DEF (phi, 1);
1338 lbound = gcc_tree_to_linear_expression (depth, lboundvar,
1339 outerinductionvars, *invariants,
1342 else
1344 lboundvar = PHI_ARG_DEF (phi, 0);
1345 lbound = gcc_tree_to_linear_expression (depth, lboundvar,
1346 outerinductionvars, *invariants,
1350 if (!lbound)
1353 if (dump_file && (dump_flags & TDF_DETAILS))
1354 fprintf (dump_file,
1355 "Unable to convert loop: Cannot convert lower bound to linear expression\n");
1357 return NULL;
1359 /* One part of the test may be a loop invariant tree. */
1360 VEC_reserve (tree, heap, *invariants, 1);
1361 if (TREE_CODE (TREE_OPERAND (test, 1)) == SSA_NAME
1362 && invariant_in_loop_and_outer_loops (loop, TREE_OPERAND (test, 1)))
1363 VEC_quick_push (tree, *invariants, TREE_OPERAND (test, 1));
1364 else if (TREE_CODE (TREE_OPERAND (test, 0)) == SSA_NAME
1365 && invariant_in_loop_and_outer_loops (loop, TREE_OPERAND (test, 0)))
1366 VEC_quick_push (tree, *invariants, TREE_OPERAND (test, 0));
1368 /* The non-induction variable part of the test is the upper bound variable.
1370 if (TREE_OPERAND (test, 0) == inductionvar)
1371 uboundvar = TREE_OPERAND (test, 1);
1372 else
1373 uboundvar = TREE_OPERAND (test, 0);
1376 /* We only size the vectors assuming we have, at max, 2 times as many
1377 invariants as we do loops (one for each bound).
1378 This is just an arbitrary number, but it has to be matched against the
1379 code below. */
1380 gcc_assert (VEC_length (tree, *invariants) <= (unsigned int) (2 * depth));
1383 /* We might have some leftover. */
/* EXTRA adjusts the bound by one step depending on the comparison code of
   the exit test (one step back for LT/NE/GT, one step forward for EQ) —
   presumably to turn the test into the inclusive bound form lambda loops
   use; confirm against lle_to_gcc_expression.  LE/GE get no adjustment.  */
1384 if (TREE_CODE (test) == LT_EXPR)
1385 extra = -1 * stepint;
1386 else if (TREE_CODE (test) == NE_EXPR)
1387 extra = -1 * stepint;
1388 else if (TREE_CODE (test) == GT_EXPR)
1389 extra = -1 * stepint;
1390 else if (TREE_CODE (test) == EQ_EXPR)
1391 extra = 1 * stepint;
1393 ubound = gcc_tree_to_linear_expression (depth, uboundvar,
1394 outerinductionvars,
1395 *invariants, extra);
1396 uboundresult = build2 (PLUS_EXPR, TREE_TYPE (uboundvar), uboundvar,
1397 build_int_cst (TREE_TYPE (uboundvar), extra));
1398 VEC_safe_push (tree, heap, *uboundvars, uboundresult);
1399 VEC_safe_push (tree, heap, *lboundvars, lboundvar);
1400 VEC_safe_push (int, heap, *steps, stepint);
1401 if (!ubound)
1403 if (dump_file && (dump_flags & TDF_DETAILS))
1404 fprintf (dump_file,
1405 "Unable to convert loop: Cannot convert upper bound to linear expression\n");
1406 return NULL;
1409 lloop = lambda_loop_new ();
1410 LL_STEP (lloop) = stepint;
1411 LL_LOWER_BOUND (lloop) = lbound;
1412 LL_UPPER_BOUND (lloop) = ubound;
1413 return lloop;
1416 /* Given a LOOP, find the induction variable it is testing against in the exit
1417 condition. Return the induction variable if found, NULL otherwise. */
1419 static tree
1420 find_induction_var_from_exit_cond (struct loop *loop)
1422 tree expr = get_loop_exit_condition (loop);
1423 tree ivarop;
1424 tree test;
1425 if (expr == NULL_TREE)
1426 return NULL_TREE;
1427 if (TREE_CODE (expr) != COND_EXPR)
1428 return NULL_TREE;
1429 test = TREE_OPERAND (expr, 0);
1430 if (!COMPARISON_CLASS_P (test))
1431 return NULL_TREE;
1433 /* Find the side that is invariant in this loop. The ivar must be the other
1434 side. */
1436 if (expr_invariant_in_loop_p (loop, TREE_OPERAND (test, 0)))
1437 ivarop = TREE_OPERAND (test, 1);
1438 else if (expr_invariant_in_loop_p (loop, TREE_OPERAND (test, 1)))
1439 ivarop = TREE_OPERAND (test, 0);
1440 else
1441 return NULL_TREE;
1443 if (TREE_CODE (ivarop) != SSA_NAME)
1444 return NULL_TREE;
1445 return ivarop;
1448 DEF_VEC_P(lambda_loop);
1449 DEF_VEC_ALLOC_P(lambda_loop,heap);
1451 /* Generate a lambda loopnest from a gcc loopnest LOOP_NEST.
1452 Return the new loop nest.
1453 INDUCTIONVARS is a pointer to an array of induction variables for the
1454 loopnest that will be filled in during this process.
1455 INVARIANTS is a pointer to an array of invariants that will be filled in
1456 during this process. */
1458 lambda_loopnest
1459 gcc_loopnest_to_lambda_loopnest (struct loop *loop_nest,
1460 VEC(tree,heap) **inductionvars,
1461 VEC(tree,heap) **invariants)
1463 lambda_loopnest ret = NULL;
1464 struct loop *temp = loop_nest;
1465 int depth = depth_of_nest (loop_nest);
1466 size_t i;
1467 VEC(lambda_loop,heap) *loops = NULL;
1468 VEC(tree,heap) *uboundvars = NULL;
1469 VEC(tree,heap) *lboundvars = NULL;
1470 VEC(int,heap) *steps = NULL;
1471 lambda_loop newloop;
1472 tree inductionvar = NULL;
1473 bool perfect_nest = perfect_nest_p (loop_nest);
1475 if (!perfect_nest && !can_convert_to_perfect_nest (loop_nest))
1476 goto fail;
/* Convert each gcc loop, outermost first, into a lambda loop;
   any failure aborts the whole conversion.  */
1478 while (temp)
1480 newloop = gcc_loop_to_lambda_loop (temp, depth, invariants,
1481 &inductionvar, *inductionvars,
1482 &lboundvars, &uboundvars,
1483 &steps);
1484 if (!newloop)
1485 goto fail;
1487 VEC_safe_push (tree, heap, *inductionvars, inductionvar);
1488 VEC_safe_push (lambda_loop, heap, loops, newloop);
1489 temp = temp->inner;
1492 if (!perfect_nest)
1494 if (!perfect_nestify (loop_nest, lboundvars, uboundvars, steps,
1495 *inductionvars))
1497 if (dump_file)
1498 fprintf (dump_file,
1499 "Not a perfect loop nest and couldn't convert to one.\n");
1500 goto fail;
1502 else if (dump_file)
1503 fprintf (dump_file,
1504 "Successfully converted loop nest to perfect loop nest.\n");
1507 ret = lambda_loopnest_new (depth, 2 * depth);
1509 for (i = 0; VEC_iterate (lambda_loop, loops, i, newloop); i++)
1510 LN_LOOPS (ret)[i] = newloop;
/* FAIL is reached on the success path too: it only frees the temporary
   vectors, and RET is NULL exactly when conversion failed.  */
1512 fail:
1513 VEC_free (lambda_loop, heap, loops);
1514 VEC_free (tree, heap, uboundvars);
1515 VEC_free (tree, heap, lboundvars);
1516 VEC_free (int, heap, steps);
1518 return ret;
1521 /* Convert a lambda body vector LBV to a gcc tree, and return the new tree.
1522 STMTS_TO_INSERT is a pointer to a tree where the statements we need to be
1523 inserted for us are stored. INDUCTION_VARS is the array of induction
1524 variables for the loop this LBV is from. TYPE is the tree type to use for
1525 the variables and trees involved. */
1527 static tree
1528 lbv_to_gcc_expression (lambda_body_vector lbv,
1529 tree type, VEC(tree,heap) *induction_vars,
1530 tree *stmts_to_insert)
1532 tree stmts, stmt, resvar, name;
1533 tree iv;
1534 size_t i;
1535 tree_stmt_iterator tsi;
1537 /* Create a statement list and a linear expression temporary. */
1538 stmts = alloc_stmt_list ();
1539 resvar = create_tmp_var (type, "lbvtmp");
1540 add_referenced_var (resvar);
1542 /* Start at 0. */
1543 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar,
1544 integer_zero_node);
1545 name = make_ssa_name (resvar, stmt);
1546 GIMPLE_STMT_OPERAND (stmt, 0) = name;
1547 tsi = tsi_last (stmts);
1548 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
/* Accumulate coefficient * iv for each nonzero coefficient; every partial
   sum gets a fresh SSA name of the "lbvtmp" temporary.  */
1550 for (i = 0; VEC_iterate (tree, induction_vars, i, iv); i++)
1552 if (LBV_COEFFICIENTS (lbv)[i] != 0)
1554 tree newname;
1555 tree coeffmult;
1557 /* newname = coefficient * induction_variable */
1558 coeffmult = build_int_cst (type, LBV_COEFFICIENTS (lbv)[i]);
1559 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar,
1560 fold_build2 (MULT_EXPR, type, iv, coeffmult));
1562 newname = make_ssa_name (resvar, stmt);
1563 GIMPLE_STMT_OPERAND (stmt, 0) = newname;
1564 fold_stmt (&stmt);
1565 tsi = tsi_last (stmts);
1566 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1568 /* name = name + newname */
1569 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar,
1570 build2 (PLUS_EXPR, type, name, newname));
1571 name = make_ssa_name (resvar, stmt);
1572 GIMPLE_STMT_OPERAND (stmt, 0) = name;
1573 fold_stmt (&stmt);
1574 tsi = tsi_last (stmts);
1575 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1580 /* Handle any denominator that occurs. */
1581 if (LBV_DENOMINATOR (lbv) != 1)
1583 tree denominator = build_int_cst (type, LBV_DENOMINATOR (lbv));
1584 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar,
1585 build2 (CEIL_DIV_EXPR, type, name, denominator));
1586 name = make_ssa_name (resvar, stmt);
1587 GIMPLE_STMT_OPERAND (stmt, 0) = name;
1588 fold_stmt (&stmt);
1589 tsi = tsi_last (stmts);
1590 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
/* The caller is responsible for inserting STMTS; NAME is the SSA name
   holding the final value.  */
1592 *stmts_to_insert = stmts;
1593 return name;
1596 /* Convert a linear expression from coefficient and constant form to a
1597 gcc tree.
1598 Return the tree that represents the final value of the expression.
1599 LLE is the linear expression to convert.
1600 OFFSET is the linear offset to apply to the expression.
1601 TYPE is the tree type to use for the variables and math.
1602 INDUCTION_VARS is a vector of induction variables for the loops.
1603 INVARIANTS is a vector of the loop nest invariants.
1604 WRAP specifies what tree code to wrap the results in, if there is more than
1605 one (it is either MAX_EXPR, or MIN_EXPR).
1606 STMTS_TO_INSERT Is a pointer to the statement list we fill in with
1607 statements that need to be inserted for the linear expression. */
1609 static tree
1610 lle_to_gcc_expression (lambda_linear_expression lle,
1611 lambda_linear_expression offset,
1612 tree type,
1613 VEC(tree,heap) *induction_vars,
1614 VEC(tree,heap) *invariants,
1615 enum tree_code wrap, tree *stmts_to_insert)
1617 tree stmts, stmt, resvar, name;
1618 size_t i;
1619 tree_stmt_iterator tsi;
1620 tree iv, invar;
1621 VEC(tree,heap) *results = NULL;
1623 gcc_assert (wrap == MAX_EXPR || wrap == MIN_EXPR);
1624 name = NULL_TREE;
1625 /* Create a statement list and a linear expression temporary. */
1626 stmts = alloc_stmt_list ();
1627 resvar = create_tmp_var (type, "lletmp");
1628 add_referenced_var (resvar);
1630 /* Build up the linear expressions, and put the variable representing the
1631 result in the results array. */
/* Each LLE in the chain yields one SSA value; multiple values are combined
   at the bottom with WRAP (MAX_EXPR or MIN_EXPR).  */
1632 for (; lle != NULL; lle = LLE_NEXT (lle))
1634 /* Start at name = 0. */
1635 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar,
1636 integer_zero_node);
1637 name = make_ssa_name (resvar, stmt);
1638 GIMPLE_STMT_OPERAND (stmt, 0) = name;
1639 fold_stmt (&stmt);
1640 tsi = tsi_last (stmts);
1641 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1643 /* First do the induction variables.
1644 at the end, name = name + all the induction variables added
1645 together. */
1646 for (i = 0; VEC_iterate (tree, induction_vars, i, iv); i++)
1648 if (LLE_COEFFICIENTS (lle)[i] != 0)
1650 tree newname;
1651 tree mult;
1652 tree coeff;
1654 /* mult = induction variable * coefficient. */
1655 if (LLE_COEFFICIENTS (lle)[i] == 1)
1657 mult = VEC_index (tree, induction_vars, i);
1659 else
1661 coeff = build_int_cst (type,
1662 LLE_COEFFICIENTS (lle)[i]);
1663 mult = fold_build2 (MULT_EXPR, type, iv, coeff);
1666 /* newname = mult */
1667 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar, mult);
1668 newname = make_ssa_name (resvar, stmt);
1669 GIMPLE_STMT_OPERAND (stmt, 0) = newname;
1670 fold_stmt (&stmt);
1671 tsi = tsi_last (stmts);
1672 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1674 /* name = name + newname */
1675 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar,
1676 build2 (PLUS_EXPR, type, name, newname));
1677 name = make_ssa_name (resvar, stmt);
1678 GIMPLE_STMT_OPERAND (stmt, 0) = name;
1679 fold_stmt (&stmt);
1680 tsi = tsi_last (stmts);
1681 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1685 /* Handle our invariants.
1686 At the end, we have name = name + result of adding all multiplied
1687 invariants. */
1688 for (i = 0; VEC_iterate (tree, invariants, i, invar); i++)
1690 if (LLE_INVARIANT_COEFFICIENTS (lle)[i] != 0)
1692 tree newname;
1693 tree mult;
1694 tree coeff;
1695 int invcoeff = LLE_INVARIANT_COEFFICIENTS (lle)[i];
1696 /* mult = invariant * coefficient */
1697 if (invcoeff == 1)
1699 mult = invar;
1701 else
1703 coeff = build_int_cst (type, invcoeff);
1704 mult = fold_build2 (MULT_EXPR, type, invar, coeff);
1707 /* newname = mult */
1708 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar, mult);
1709 newname = make_ssa_name (resvar, stmt);
1710 GIMPLE_STMT_OPERAND (stmt, 0) = newname;
1711 fold_stmt (&stmt);
1712 tsi = tsi_last (stmts);
1713 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1715 /* name = name + newname */
1716 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar,
1717 build2 (PLUS_EXPR, type, name, newname));
1718 name = make_ssa_name (resvar, stmt);
1719 GIMPLE_STMT_OPERAND (stmt, 0) = name;
1720 fold_stmt (&stmt);
1721 tsi = tsi_last (stmts);
1722 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1726 /* Now handle the constant.
1727 name = name + constant. */
1728 if (LLE_CONSTANT (lle) != 0)
1730 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar,
1731 build2 (PLUS_EXPR, type, name,
1732 build_int_cst (type, LLE_CONSTANT (lle))));
1733 name = make_ssa_name (resvar, stmt);
1734 GIMPLE_STMT_OPERAND (stmt, 0) = name;
1735 fold_stmt (&stmt);
1736 tsi = tsi_last (stmts);
1737 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1740 /* Now handle the offset.
1741 name = name + linear offset. */
1742 if (LLE_CONSTANT (offset) != 0)
1744 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar,
1745 build2 (PLUS_EXPR, type, name,
1746 build_int_cst (type, LLE_CONSTANT (offset))));
1747 name = make_ssa_name (resvar, stmt);
1748 GIMPLE_STMT_OPERAND (stmt, 0) = name;
1749 fold_stmt (&stmt);
1750 tsi = tsi_last (stmts);
1751 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1754 /* Handle any denominator that occurs. */
/* For a lower bound (WRAP == MAX_EXPR) round the division up, for an
   upper bound round it down, so the generated bound stays inside the
   iteration space.  */
1755 if (LLE_DENOMINATOR (lle) != 1)
1757 stmt = build_int_cst (type, LLE_DENOMINATOR (lle));
1758 stmt = build2 (wrap == MAX_EXPR ? CEIL_DIV_EXPR : FLOOR_DIV_EXPR,
1759 type, name, stmt);
1760 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar, stmt);
1762 /* name = {ceil, floor}(name/denominator) */
1763 name = make_ssa_name (resvar, stmt);
1764 GIMPLE_STMT_OPERAND (stmt, 0) = name;
1765 tsi = tsi_last (stmts);
1766 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1768 VEC_safe_push (tree, heap, results, name);
1771 /* Again, out of laziness, we don't handle this case yet. It's not
1772 hard, it just hasn't occurred. */
1773 gcc_assert (VEC_length (tree, results) <= 2);
1775 /* We may need to wrap the results in a MAX_EXPR or MIN_EXPR. */
1776 if (VEC_length (tree, results) > 1)
1778 tree op1 = VEC_index (tree, results, 0);
1779 tree op2 = VEC_index (tree, results, 1);
1780 stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, resvar,
1781 build2 (wrap, type, op1, op2));
1782 name = make_ssa_name (resvar, stmt);
1783 GIMPLE_STMT_OPERAND (stmt, 0) = name;
1784 tsi = tsi_last (stmts);
1785 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1788 VEC_free (tree, heap, results);
/* The caller inserts STMTS; NAME holds the final value.  */
1790 *stmts_to_insert = stmts;
1791 return name;
1794 /* Transform a lambda loopnest NEW_LOOPNEST, which had TRANSFORM applied to
1795 it, back into gcc code. This changes the
1796 loops, their induction variables, and their bodies, so that they
1797 match the transformed loopnest.
1798 OLD_LOOPNEST is the loopnest before we've replaced it with the new
1799 loopnest.
1800 OLD_IVS is a vector of induction variables from the old loopnest.
1801 INVARIANTS is a vector of loop invariants from the old loopnest.
1802 NEW_LOOPNEST is the new lambda loopnest to replace OLD_LOOPNEST with.
1803 TRANSFORM is the matrix transform that was applied to OLD_LOOPNEST to get
1804 NEW_LOOPNEST. */
1806 void
1807 lambda_loopnest_to_gcc_loopnest (struct loop *old_loopnest,
1808 VEC(tree,heap) *old_ivs,
1809 VEC(tree,heap) *invariants,
1810 lambda_loopnest new_loopnest,
1811 lambda_trans_matrix transform)
1813 struct loop *temp;
1814 size_t i = 0;
1815 size_t depth = 0;
1816 VEC(tree,heap) *new_ivs = NULL;
1817 tree oldiv;
1819 block_stmt_iterator bsi;
/* NOTE(review): per the surrounding structure, the inversion of TRANSFORM
   appears to happen only inside this dump_file guard, yet TRANSFORM is
   used below (lambda_body_vector_compute_new) to rewrite the old ivs —
   which would make behavior depend on whether dumping is enabled.  Verify
   whether the inversion should be unconditional.  */
1821 if (dump_file)
1823 transform = lambda_trans_matrix_inverse (transform);
1824 fprintf (dump_file, "Inverse of transformation matrix:\n");
1825 print_lambda_trans_matrix (dump_file, transform);
1827 depth = depth_of_nest (old_loopnest);
1828 temp = old_loopnest;
/* Walk the nest outermost-in, rebuilding each loop's bounds, iv, and exit
   test from the corresponding lambda loop.  */
1830 while (temp)
1832 lambda_loop newloop;
1833 basic_block bb;
1834 edge exit;
1835 tree ivvar, ivvarinced, exitcond, stmts;
1836 enum tree_code testtype;
1837 tree newupperbound, newlowerbound;
1838 lambda_linear_expression offset;
1839 tree type;
1840 bool insert_after;
1841 tree inc_stmt;
1843 oldiv = VEC_index (tree, old_ivs, i);
1844 type = TREE_TYPE (oldiv);
1846 /* First, build the new induction variable temporary */
1848 ivvar = create_tmp_var (type, "lnivtmp");
1849 add_referenced_var (ivvar);
1851 VEC_safe_push (tree, heap, new_ivs, ivvar);
1853 newloop = LN_LOOPS (new_loopnest)[i];
1855 /* Linear offset is a bit tricky to handle. Punt on the unhandled
1856 cases for now. */
1857 offset = LL_LINEAR_OFFSET (newloop);
1859 gcc_assert (LLE_DENOMINATOR (offset) == 1 &&
1860 lambda_vector_zerop (LLE_COEFFICIENTS (offset), depth));
1862 /* Now build the new lower bounds, and insert the statements
1863 necessary to generate it on the loop preheader. */
1864 newlowerbound = lle_to_gcc_expression (LL_LOWER_BOUND (newloop),
1865 LL_LINEAR_OFFSET (newloop),
1866 type,
1867 new_ivs,
1868 invariants, MAX_EXPR, &stmts);
1869 bsi_insert_on_edge (loop_preheader_edge (temp), stmts);
1870 bsi_commit_edge_inserts ();
1871 /* Build the new upper bound and insert its statements in the
1872 basic block of the exit condition */
1873 newupperbound = lle_to_gcc_expression (LL_UPPER_BOUND (newloop),
1874 LL_LINEAR_OFFSET (newloop),
1875 type,
1876 new_ivs,
1877 invariants, MIN_EXPR, &stmts);
1878 exit = single_exit (temp);
1879 exitcond = get_loop_exit_condition (temp);
1880 bb = bb_for_stmt (exitcond);
1881 bsi = bsi_start (bb);
1882 bsi_insert_after (&bsi, stmts, BSI_NEW_STMT);
1884 /* Create the new iv. */
1886 standard_iv_increment_position (temp, &bsi, &insert_after);
1887 create_iv (newlowerbound,
1888 build_int_cst (type, LL_STEP (newloop)),
1889 ivvar, temp, &bsi, insert_after, &ivvar,
1890 NULL);
1892 /* Unfortunately, the incremented ivvar that create_iv inserted may not
1893 dominate the block containing the exit condition.
1894 So we simply create our own incremented iv to use in the new exit
1895 test, and let redundancy elimination sort it out. */
1896 inc_stmt = build2 (PLUS_EXPR, type,
1897 ivvar, build_int_cst (type, LL_STEP (newloop)));
1898 inc_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node,
1899 SSA_NAME_VAR (ivvar), inc_stmt);
1900 ivvarinced = make_ssa_name (SSA_NAME_VAR (ivvar), inc_stmt);
1901 GIMPLE_STMT_OPERAND (inc_stmt, 0) = ivvarinced;
1902 bsi = bsi_for_stmt (exitcond);
1903 bsi_insert_before (&bsi, inc_stmt, BSI_SAME_STMT);
1905 /* Replace the exit condition with the new upper bound
1906 comparison. */
1908 testtype = LL_STEP (newloop) >= 0 ? LE_EXPR : GE_EXPR;
1910 /* We want to build a conditional where true means exit the loop, and
1911 false means continue the loop.
1912 So swap the testtype if this isn't the way things are.*/
1914 if (exit->flags & EDGE_FALSE_VALUE)
1915 testtype = swap_tree_comparison (testtype);
1917 COND_EXPR_COND (exitcond) = build2 (testtype,
1918 boolean_type_node,
1919 newupperbound, ivvarinced);
1920 update_stmt (exitcond);
1921 VEC_replace (tree, new_ivs, i, ivvar);
1923 i++;
1924 temp = temp->inner;
1927 /* Rewrite uses of the old ivs so that they are now specified in terms of
1928 the new ivs. */
1930 for (i = 0; VEC_iterate (tree, old_ivs, i, oldiv); i++)
1932 imm_use_iterator imm_iter;
1933 use_operand_p use_p;
1934 tree oldiv_def;
1935 tree oldiv_stmt = SSA_NAME_DEF_STMT (oldiv);
1936 tree stmt;
1938 if (TREE_CODE (oldiv_stmt) == PHI_NODE)
1939 oldiv_def = PHI_RESULT (oldiv_stmt);
1940 else
1941 oldiv_def = SINGLE_SSA_TREE_OPERAND (oldiv_stmt, SSA_OP_DEF);
1942 gcc_assert (oldiv_def != NULL_TREE);
1944 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, oldiv_def)
1946 tree newiv, stmts;
1947 lambda_body_vector lbv, newlbv;
1949 gcc_assert (TREE_CODE (stmt) != PHI_NODE);
1951 /* Compute the new expression for the induction
1952 variable. */
1953 depth = VEC_length (tree, new_ivs);
1954 lbv = lambda_body_vector_new (depth);
1955 LBV_COEFFICIENTS (lbv)[i] = 1;
1957 newlbv = lambda_body_vector_compute_new (transform, lbv);
1959 newiv = lbv_to_gcc_expression (newlbv, TREE_TYPE (oldiv),
1960 new_ivs, &stmts);
1961 bsi = bsi_for_stmt (stmt);
1962 /* Insert the statements to build that
1963 expression. */
1964 bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
/* Replace every use of the old iv in this statement with the freshly
   computed equivalent in terms of the new ivs.  */
1966 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
1967 propagate_value (use_p, newiv);
1968 update_stmt (stmt);
1971 VEC_free (tree, heap, new_ivs);
1974 /* Return TRUE if this is not interesting statement from the perspective of
1975 determining if we have a perfect loop nest. */
1977 static bool
1978 not_interesting_stmt (tree stmt)
1980 /* Note that COND_EXPR's aren't interesting because if they were exiting the
1981 loop, we would have already failed the number of exits tests. */
1982 if (TREE_CODE (stmt) == LABEL_EXPR
1983 || TREE_CODE (stmt) == GOTO_EXPR
1984 || TREE_CODE (stmt) == COND_EXPR)
1985 return true;
1986 return false;
1989 /* Return TRUE if PHI uses DEF for it's in-the-loop edge for LOOP. */
1991 static bool
1992 phi_loop_edge_uses_def (struct loop *loop, tree phi, tree def)
1994 int i;
1995 for (i = 0; i < PHI_NUM_ARGS (phi); i++)
1996 if (flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, i)->src))
1997 if (PHI_ARG_DEF (phi, i) == def)
1998 return true;
1999 return false;
2002 /* Return TRUE if STMT is a use of PHI_RESULT. */
2004 static bool
2005 stmt_uses_phi_result (tree stmt, tree phi_result)
2007 tree use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2009 /* This is conservatively true, because we only want SIMPLE bumpers
2010 of the form x +- constant for our pass. */
2011 return (use == phi_result);
2014 /* STMT is a bumper stmt for LOOP if the version it defines is used in the
2015 in-loop-edge in a phi node, and the operand it uses is the result of that
2016 phi node.
2017 I.E. i_29 = i_3 + 1
2018 i_3 = PHI (0, i_29); */
2020 static bool
2021 stmt_is_bumper_for_loop (struct loop *loop, tree stmt)
2023 tree use;
2024 tree def;
2025 imm_use_iterator iter;
2026 use_operand_p use_p;
2028 def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF);
2029 if (!def)
2030 return false;
2032 FOR_EACH_IMM_USE_FAST (use_p, iter, def)
2034 use = USE_STMT (use_p);
2035 if (TREE_CODE (use) == PHI_NODE)
2037 if (phi_loop_edge_uses_def (loop, use, def))
2038 if (stmt_uses_phi_result (stmt, PHI_RESULT (use)))
2039 return true;
2042 return false;
/* Return true if LOOP is a perfect loop nest.
   Perfect loop nests are those loop nests where all code occurs in the
   innermost loop body.
   If S is a program statement, then

   i.e.
   DO I = 1, 20
       S1
       DO J = 1, 20
       ...
       END DO
   END DO
   is not a perfect loop nest because of S1.

   DO I = 1, 20
      DO J = 1, 20
        S1
	...
      END DO
   END DO
   is a perfect loop nest.

   Since we don't have high level loops anymore, we basically have to walk our
   statements and ignore those that are there because the loop needs them (IE
   the induction variable increment, and jump back to the top of the loop).  */

bool
perfect_nest_p (struct loop *loop)
{
  basic_block *bbs;
  size_t i;
  tree exit_cond;

  /* An innermost loop is trivially a perfect nest of depth 1.  */
  if (!loop->inner)
    return true;

  bbs = get_loop_body (loop);
  exit_cond = get_loop_exit_condition (loop);

  /* Walk only the blocks belonging directly to LOOP (not to the inner
     loops); any statement there other than loop plumbing (the exit
     test, labels/gotos, or the IV bump) makes the nest imperfect.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (bbs[i]->loop_father == loop)
	{
	  block_stmt_iterator bsi;

	  for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi); bsi_next (&bsi))
	    {
	      tree stmt = bsi_stmt (bsi);

	      if (stmt == exit_cond
		  || not_interesting_stmt (stmt)
		  || stmt_is_bumper_for_loop (loop, stmt))
		continue;

	      /* Real work outside the innermost body: not perfect.
		 Release the block array before bailing out.  */
	      free (bbs);
	      return false;
	    }
	}
    }
  free (bbs);

  /* See if the inner loops are perfectly nested as well.  (The guard is
     redundant given the early return above, but harmless.)  */
  if (loop->inner)
    return perfect_nest_p (loop->inner);
  return true;
}
/* Replace the USES of X in STMT, or uses with the same step as X, with Y.
   XSTEP is X's per-iteration step in LOOP.  YINIT is the initial value
   of Y.  REPLACEMENTS is a hash table used to avoid creating duplicate
   temporaries, and FIRSTBSI is a statement iterator where new
   temporaries should be inserted at the beginning of the body basic
   block.  */

static void
replace_uses_equiv_to_x_with_y (struct loop *loop, tree stmt, tree x,
				int xstep, tree y, tree yinit,
				htab_t replacements,
				block_stmt_iterator *firstbsi)
{
  ssa_op_iter iter;
  use_operand_p use_p;

  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
    {
      tree use = USE_FROM_PTR (use_p);
      tree step = NULL_TREE;
      tree scev, init, val, var, setstmt;
      struct tree_map *h, in;
      void **loc;

      /* Replace uses of X with Y right away.  */
      if (use == x)
	{
	  SET_USE (use_p, y);
	  continue;
	}

      /* Otherwise, only IVs whose evolution in LOOP is known are
	 candidates for replacement.  */
      scev = instantiate_parameters (loop,
				     analyze_scalar_evolution (loop, use));

      if (scev == NULL || scev == chrec_dont_know)
	continue;

      /* The use must step by the same constant amount as X.  */
      step = evolution_part_in_loop_num (scev, loop->num);
      if (step == NULL
	  || step == chrec_dont_know
	  || TREE_CODE (step) != INTEGER_CST
	  || int_cst_value (step) != xstep)
	continue;

      /* Use REPLACEMENTS hash table to cache already created
	 temporaries.  */
      in.hash = htab_hash_pointer (use);
      in.from = use;
      h = htab_find_with_hash (replacements, &in, in.hash);
      if (h != NULL)
	{
	  SET_USE (use_p, h->to);
	  continue;
	}

      /* USE which has the same step as X should be replaced
	 with a temporary set to Y + YINIT - INIT.  */
      init = initial_condition_in_loop_num (scev, loop->num);
      gcc_assert (init != NULL && init != chrec_dont_know);
      if (TREE_TYPE (use) == TREE_TYPE (y))
	{
	  val = fold_build2 (MINUS_EXPR, TREE_TYPE (y), init, yinit);
	  val = fold_build2 (PLUS_EXPR, TREE_TYPE (y), y, val);
	  if (val == y)
	    {
	      /* If X has the same type as USE, the same step
		 and same initial value, it can be replaced by Y.  */
	      SET_USE (use_p, y);
	      continue;
	    }
	}
      else
	{
	  /* Different types: compute Y - YINIT in Y's type, convert to
	     USE's type, then add INIT there.  */
	  val = fold_build2 (MINUS_EXPR, TREE_TYPE (y), y, yinit);
	  val = fold_convert (TREE_TYPE (use), val);
	  val = fold_build2 (PLUS_EXPR, TREE_TYPE (use), val, init);
	}

      /* Create a temporary variable and insert it at the beginning
	 of the loop body basic block, right after the PHI node
	 which sets Y.  */
      var = create_tmp_var (TREE_TYPE (use), "perfecttmp");
      add_referenced_var (var);
      val = force_gimple_operand_bsi (firstbsi, val, false, NULL);
      setstmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, var, val);
      var = make_ssa_name (var, setstmt);
      GIMPLE_STMT_OPERAND (setstmt, 0) = var;
      bsi_insert_before (firstbsi, setstmt, BSI_SAME_STMT);
      update_stmt (setstmt);
      SET_USE (use_p, var);

      /* Remember USE -> VAR so later identical uses reuse the same
	 temporary.  The entry is GC-allocated because REPLACEMENTS is a
	 GC hash table (see htab_create_ggc in perfect_nestify).  */
      h = ggc_alloc (sizeof (struct tree_map));
      h->hash = in.hash;
      h->from = use;
      h->to = var;
      loc = htab_find_slot_with_hash (replacements, h, in.hash, INSERT);
      gcc_assert ((*(struct tree_map **)loc) == NULL);
      *(struct tree_map **) loc = h;
    }
}
2206 /* Return true if STMT is an exit PHI for LOOP */
2208 static bool
2209 exit_phi_for_loop_p (struct loop *loop, tree stmt)
2212 if (TREE_CODE (stmt) != PHI_NODE
2213 || PHI_NUM_ARGS (stmt) != 1
2214 || bb_for_stmt (stmt) != single_exit (loop)->dest)
2215 return false;
2217 return true;
/* Return true if STMT can be put back into the loop INNER, by
   copying it to the beginning of that loop and changing the uses.  */

static bool
can_put_in_inner_loop (struct loop *inner, tree stmt)
{
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  gcc_assert (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT);

  /* Statements with virtual operands (memory effects) cannot be freely
     replicated, and the RHS must be computable anywhere in INNER.  */
  if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS)
      || !expr_invariant_in_loop_p (inner, GIMPLE_STMT_OPERAND (stmt, 1)))
    return false;

  /* Every use of the defined value must either be an exit PHI of INNER
     (which will be rewired) or live inside INNER itself.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, GIMPLE_STMT_OPERAND (stmt, 0))
    {
      if (!exit_phi_for_loop_p (inner, USE_STMT (use_p)))
	{
	  basic_block immbb = bb_for_stmt (USE_STMT (use_p));

	  if (!flow_bb_inside_loop_p (inner, immbb))
	    return false;
	}
    }
  return true;
}
/* Return true if STMT can be put *after* the inner loop of LOOP.

   NOTE(review): unlike can_put_in_inner_loop, this does not assert
   that STMT is a GIMPLE_MODIFY_STMT before taking its operand 0 —
   presumably all callers guarantee it; confirm against call sites.  */
static bool
can_put_after_inner_loop (struct loop *loop, tree stmt)
{
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  /* Statements with memory side effects (virtual operands) cannot be
     moved.  */
  if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
    return false;

  /* Each use must be an exit PHI of LOOP, be dominated by the inner
     loop's header (i.e. come after the inner loop), or the statement
     must be sinkable into the inner loop instead.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, GIMPLE_STMT_OPERAND (stmt, 0))
    {
      if (!exit_phi_for_loop_p (loop, USE_STMT (use_p)))
	{
	  basic_block immbb = bb_for_stmt (USE_STMT (use_p));

	  if (!dominated_by_p (CDI_DOMINATORS,
			       immbb,
			       loop->inner->header)
	      && !can_put_in_inner_loop (loop->inner, stmt))
	    return false;
	}
    }
  return true;
}
/* Return TRUE if LOOP is an imperfect nest that we can convert to a
   perfect one.  At the moment, we only handle imperfect nests of
   depth 2, where all of the statements occur after the inner loop.  */

static bool
can_convert_to_perfect_nest (struct loop *loop)
{
  basic_block *bbs;
  tree exit_condition, phi;
  size_t i;
  block_stmt_iterator bsi;
  basic_block exitdest;

  /* Can't handle triply nested+ loops yet.  */
  if (!loop->inner || loop->inner->inner)
    return false;

  bbs = get_loop_body (loop);
  exit_condition = get_loop_exit_condition (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (bbs[i]->loop_father == loop)
	{
	  for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi); bsi_next (&bsi))
	    {
	      tree stmt = bsi_stmt (bsi);

	      /* Loop plumbing never blocks conversion.  */
	      if (stmt == exit_condition
		  || not_interesting_stmt (stmt)
		  || stmt_is_bumper_for_loop (loop, stmt))
		continue;

	      /* If this is a scalar operation that can be put back
	         into the inner loop, or after the inner loop, through
		 copying, then do so.  This works on the theory that
		 any amount of scalar code we have to reduplicate
		 into or after the loops is less expensive that the
		 win we get from rearranging the memory walk
		 the loop is doing so that it has better
		 cache behavior.  */
	      if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
		{
		  use_operand_p use_a, use_b;
		  imm_use_iterator imm_iter;
		  ssa_op_iter op_iter, op_iter1;
		  tree op0 = GIMPLE_STMT_OPERAND (stmt, 0);
		  tree scev = instantiate_parameters
		    (loop, analyze_scalar_evolution (loop, op0));

		  /* If the IV is simple, it can be duplicated.  */
		  if (!automatically_generated_chrec_p (scev))
		    {
		      tree step = evolution_part_in_loop_num (scev, loop->num);
		      if (step && step != chrec_dont_know
			  && TREE_CODE (step) == INTEGER_CST)
			continue;
		    }

		  /* The statement should not define a variable used
		     in the inner loop.  */
		  if (TREE_CODE (op0) == SSA_NAME)
		    FOR_EACH_IMM_USE_FAST (use_a, imm_iter, op0)
		      if (bb_for_stmt (USE_STMT (use_a))->loop_father
			  == loop->inner)
			goto fail;

		  FOR_EACH_SSA_USE_OPERAND (use_a, stmt, op_iter, SSA_OP_USE)
		    {
		      tree node, op = USE_FROM_PTR (use_a);

		      /* The variables should not be used in both loops.  */
		      FOR_EACH_IMM_USE_FAST (use_b, imm_iter, op)
			if (bb_for_stmt (USE_STMT (use_b))->loop_father
			    == loop->inner)
			  goto fail;

		      /* The statement should not use the value of a
			 scalar that was modified in the loop.  */
		      node = SSA_NAME_DEF_STMT (op);
		      if (TREE_CODE (node) == PHI_NODE)
			FOR_EACH_PHI_ARG (use_b, node, op_iter1, SSA_OP_USE)
			  {
			    tree arg = USE_FROM_PTR (use_b);

			    if (TREE_CODE (arg) == SSA_NAME)
			      {
				tree arg_stmt = SSA_NAME_DEF_STMT (arg);

				if (bb_for_stmt (arg_stmt)->loop_father
				    == loop->inner)
				  goto fail;
			      }
			  }
		    }

		  if (can_put_in_inner_loop (loop->inner, stmt)
		      || can_put_after_inner_loop (loop, stmt))
		    continue;
		}

	      /* Otherwise, if the bb of a statement we care about isn't
		 dominated by the header of the inner loop, then we can't
		 handle this case right now.  This test ensures that the
		 statement comes completely *after* the inner loop.  */
	      if (!dominated_by_p (CDI_DOMINATORS,
				   bb_for_stmt (stmt),
				   loop->inner->header))
		goto fail;
	    }
	}
    }

  /* We also need to make sure the loop exit only has simple copy phis in it,
     otherwise we don't know how to transform it into a perfect nest right
     now.  */
  exitdest = single_exit (loop)->dest;

  for (phi = phi_nodes (exitdest); phi; phi = PHI_CHAIN (phi))
    if (PHI_NUM_ARGS (phi) != 1)
      goto fail;

  free (bbs);
  return true;

 fail:
  free (bbs);
  return false;
}
/* Transform the loop nest into a perfect nest, if possible.
   LOOP is the loop nest to transform into a perfect nest
   LBOUNDS are the lower bounds for the loops to transform
   UBOUNDS are the upper bounds for the loops to transform
   STEPS is the STEPS for the loops to transform.
   LOOPIVS is the induction variables for the loops to transform.

   Basically, for the case of

   FOR (i = 0; i < 50; i++)
    {
     FOR (j = 0; j < 50; j++)
     {
        <whatever>
     }
     <some code>
    }

   This function will transform it into a perfect loop nest by splitting the
   outer loop into two loops, like so:

   FOR (i = 0; i < 50; i++)
   {
     FOR (j = 0; j < 50; j++)
     {
         <whatever>
     }
   }

   FOR (i = 0; i < 50; i ++)
   {
    <some code>
   }

   Return FALSE if we can't make this loop into a perfect nest.  */

static bool
perfect_nestify (struct loop *loop,
		 VEC(tree,heap) *lbounds,
		 VEC(tree,heap) *ubounds,
		 VEC(int,heap) *steps,
		 VEC(tree,heap) *loopivs)
{
  basic_block *bbs;
  tree exit_condition;
  tree then_label, else_label, cond_stmt;
  basic_block preheaderbb, headerbb, bodybb, latchbb, olddest;
  int i;
  block_stmt_iterator bsi, firstbsi;
  bool insert_after;
  edge e;
  struct loop *newloop;
  tree phi;
  tree uboundvar;
  tree stmt;
  tree oldivvar, ivvar, ivvarinced;
  VEC(tree,heap) *phis = NULL;
  htab_t replacements = NULL;

  /* Create the new loop: split the old exit edge to get a preheader,
     and create a fresh header block for the split-off copy.  */
  olddest = single_exit (loop)->dest;
  preheaderbb = split_edge (single_exit (loop));
  headerbb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);

  /* Push the exit phi nodes that we are moving.  Each PHI contributes
     two stack entries: its result and its (single) argument.  */
  for (phi = phi_nodes (olddest); phi; phi = PHI_CHAIN (phi))
    {
      VEC_reserve (tree, heap, phis, 2);
      VEC_quick_push (tree, phis, PHI_RESULT (phi));
      VEC_quick_push (tree, phis, PHI_ARG_DEF (phi, 0));
    }
  e = redirect_edge_and_branch (single_succ_edge (preheaderbb), headerbb);

  /* Remove the exit phis from the old basic block.  */
  while (phi_nodes (olddest) != NULL)
    remove_phi_node (phi_nodes (olddest), NULL, false);

  /* and add them back to the new basic block.  Entries are popped in
     (arg, result) order, mirroring the pushes above.  */
  while (VEC_length (tree, phis) != 0)
    {
      tree def;
      tree phiname;
      def = VEC_pop (tree, phis);
      phiname = VEC_pop (tree, phis);
      phi = create_phi_node (phiname, preheaderbb);
      add_phi_arg (phi, def, single_pred_edge (preheaderbb));
    }
  flush_pending_stmts (e);
  VEC_free (tree, heap, phis);

  /* Build the skeleton of the new loop: header -> body -> latch ->
     header, with the body conditionally exiting to OLDDEST.  The
     always-true (1 != 0) condition is a placeholder rewritten below.  */
  bodybb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
  latchbb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
  make_edge (headerbb, bodybb, EDGE_FALLTHRU);
  then_label = build1 (GOTO_EXPR, void_type_node, tree_block_label (latchbb));
  else_label = build1 (GOTO_EXPR, void_type_node, tree_block_label (olddest));
  cond_stmt = build3 (COND_EXPR, void_type_node,
		      build2 (NE_EXPR, boolean_type_node,
			      integer_one_node,
			      integer_zero_node),
		      then_label, else_label);
  bsi = bsi_start (bodybb);
  bsi_insert_after (&bsi, cond_stmt, BSI_NEW_STMT);
  e = make_edge (bodybb, olddest, EDGE_FALSE_VALUE);
  make_edge (bodybb, latchbb, EDGE_TRUE_VALUE);
  make_edge (latchbb, headerbb, EDGE_FALLTHRU);

  /* Update the loop structures.  */
  newloop = duplicate_loop (loop, olddest->loop_father);
  newloop->header = headerbb;
  newloop->latch = latchbb;
  add_bb_to_loop (latchbb, newloop);
  add_bb_to_loop (bodybb, newloop);
  add_bb_to_loop (headerbb, newloop);
  set_immediate_dominator (CDI_DOMINATORS, bodybb, headerbb);
  set_immediate_dominator (CDI_DOMINATORS, headerbb, preheaderbb);
  set_immediate_dominator (CDI_DOMINATORS, preheaderbb,
			   single_exit (loop)->src);
  set_immediate_dominator (CDI_DOMINATORS, latchbb, bodybb);
  set_immediate_dominator (CDI_DOMINATORS, olddest, bodybb);

  /* Create the new iv, mirroring the outermost old IV's type, lower
     bound and step.  */
  oldivvar = VEC_index (tree, loopivs, 0);
  ivvar = create_tmp_var (TREE_TYPE (oldivvar), "perfectiv");
  add_referenced_var (ivvar);
  standard_iv_increment_position (newloop, &bsi, &insert_after);
  create_iv (VEC_index (tree, lbounds, 0),
	     build_int_cst (TREE_TYPE (oldivvar), VEC_index (int, steps, 0)),
	     ivvar, newloop, &bsi, insert_after, &ivvar, &ivvarinced);

  /* Create the new upper bound.  This may be not just a variable, so we copy
     it to one just in case.  */

  exit_condition = get_loop_exit_condition (newloop);
  uboundvar = create_tmp_var (integer_type_node, "uboundvar");
  add_referenced_var (uboundvar);
  stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, uboundvar,
		 VEC_index (tree, ubounds, 0));
  uboundvar = make_ssa_name (uboundvar, stmt);
  GIMPLE_STMT_OPERAND (stmt, 0) = uboundvar;

  if (insert_after)
    bsi_insert_after (&bsi, stmt, BSI_SAME_STMT);
  else
    bsi_insert_before (&bsi, stmt, BSI_SAME_STMT);
  update_stmt (stmt);

  /* Rewrite the placeholder exit condition to "uboundvar >= iv + step".  */
  COND_EXPR_COND (exit_condition) = build2 (GE_EXPR,
					    boolean_type_node,
					    uboundvar,
					    ivvarinced);
  update_stmt (exit_condition);
  replacements = htab_create_ggc (20, tree_map_hash,
				  tree_map_eq, NULL);
  bbs = get_loop_body_in_dom_order (loop);

  /* Now move the statements, and replace the induction variable in the moved
     statements with the correct loop induction variable.  Blocks are
     visited in reverse dominance order.  */
  oldivvar = VEC_index (tree, loopivs, 0);
  firstbsi = bsi_start (bodybb);
  for (i = loop->num_nodes - 1; i >= 0 ; i--)
    {
      block_stmt_iterator tobsi = bsi_last (bodybb);
      if (bbs[i]->loop_father == loop)
	{
	  /* If this is true, we are *before* the inner loop.
	     If this isn't true, we are *after* it.

	     The only time can_convert_to_perfect_nest returns true when we
	     have statements before the inner loop is if they can be moved
	     into the inner loop.

	     The only time can_convert_to_perfect_nest returns true when we
	     have statements after the inner loop is if they can be moved into
	     the new split loop.  */

	  if (dominated_by_p (CDI_DOMINATORS, loop->inner->header, bbs[i]))
	    {
	      block_stmt_iterator header_bsi
		= bsi_after_labels (loop->inner->header);

	      for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi);)
		{
		  tree stmt = bsi_stmt (bsi);

		  if (stmt == exit_condition
		      || not_interesting_stmt (stmt)
		      || stmt_is_bumper_for_loop (loop, stmt))
		    {
		      bsi_next (&bsi);
		      continue;
		    }

		  /* Sink the statement to the start of the inner loop.  */
		  bsi_move_before (&bsi, &header_bsi);
		}
	    }
	  else
	    {
	      /* Note that the bsi only needs to be explicitly incremented
		 when we don't move something, since it is automatically
		 incremented when we do.  */
	      for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi);)
		{
		  ssa_op_iter i;
		  tree n, stmt = bsi_stmt (bsi);

		  if (stmt == exit_condition
		      || not_interesting_stmt (stmt)
		      || stmt_is_bumper_for_loop (loop, stmt))
		    {
		      bsi_next (&bsi);
		      continue;
		    }

		  /* Rewrite uses of the old outer IV (and equivalent
		     IVs) in terms of the new loop's IV.  */
		  replace_uses_equiv_to_x_with_y
		    (loop, stmt, oldivvar, VEC_index (int, steps, 0), ivvar,
		     VEC_index (tree, lbounds, 0), replacements, &firstbsi);

		  bsi_move_before (&bsi, &tobsi);

		  /* If the statement has any virtual operands, they may
		     need to be rewired because the original loop may
		     still reference them.  */
		  FOR_EACH_SSA_TREE_OPERAND (n, stmt, i, SSA_OP_ALL_VIRTUALS)
		    mark_sym_for_renaming (SSA_NAME_VAR (n));
		}
	    }
	}
    }

  free (bbs);
  htab_delete (replacements);
  /* Verify the result; the transformation succeeded iff the nest is
     now perfect.  */
  return perfect_nest_p (loop);
}
/* Return true if TRANS is a legal transformation matrix that respects
   the dependence vectors in DISTS and DIRS.  The conservative answer
   is false.

   "Wolfe proves that a unimodular transformation represented by the
   matrix T is legal when applied to a loop nest with a set of
   lexicographically non-negative distance vectors RDG if and only if
   for each vector d in RDG, (T.d >= 0) is lexicographically positive.
   i.e.: if and only if it transforms the lexicographically positive
   distance vectors to lexicographically positive vectors.  Note that
   a unimodular matrix must transform the zero vector (and only it) to
   the zero vector." S.Muchnick.  */

bool
lambda_transform_legal_p (lambda_trans_matrix trans,
			  int nb_loops,
			  VEC (ddr_p, heap) *dependence_relations)
{
  unsigned int i, j;
  lambda_vector distres;
  struct data_dependence_relation *ddr;

  /* TRANS must be a square NB_LOOPS x NB_LOOPS matrix.  */
  gcc_assert (LTM_COLSIZE (trans) == nb_loops
	      && LTM_ROWSIZE (trans) == nb_loops);

  /* When there is an unknown relation in the dependence_relations, we
     know that it is no worth looking at this loop nest: give up.  */
  ddr = VEC_index (ddr_p, dependence_relations, 0);
  if (ddr == NULL)
    return true;
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    return false;

  distres = lambda_vector_new (nb_loops);

  /* For each distance vector in the dependence graph.  */
  for (i = 0; VEC_iterate (ddr_p, dependence_relations, i, ddr); i++)
    {
      /* Don't care about relations for which we know that there is no
	 dependence, nor about read-read (aka. output-dependences):
	 these data accesses can happen in any order.  */
      if (DDR_ARE_DEPENDENT (ddr) == chrec_known
	  || (DR_IS_READ (DDR_A (ddr)) && DR_IS_READ (DDR_B (ddr))))
	continue;

      /* Conservatively answer: "this transformation is not valid".  */
      if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
	return false;

      /* If the dependence could not be captured by a distance vector,
	 conservatively answer that the transform is not valid.  */
      if (DDR_NUM_DIST_VECTS (ddr) == 0)
	return false;

      /* Compute trans.dist_vect; the transform is legal iff every
	 transformed distance vector stays lexicographically positive.  */
      for (j = 0; j < DDR_NUM_DIST_VECTS (ddr); j++)
	{
	  lambda_matrix_vector_mult (LTM_MATRIX (trans), nb_loops, nb_loops,
				     DDR_DIST_VECT (ddr, j), distres);

	  if (!lambda_vector_lexico_pos (distres, nb_loops))
	    return false;
	}
    }
  return true;
}