1 /* Loop transformation code generation
2 Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
3 Contributed by Daniel Berlin <dberlin@dberlin.org>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "ggc.h"
27 #include "tree.h"
28 #include "target.h"
29 #include "rtl.h"
30 #include "basic-block.h"
31 #include "diagnostic.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "timevar.h"
35 #include "cfgloop.h"
36 #include "expr.h"
37 #include "optabs.h"
38 #include "tree-chrec.h"
39 #include "tree-data-ref.h"
40 #include "tree-pass.h"
41 #include "tree-scalar-evolution.h"
42 #include "vec.h"
43 #include "lambda.h"
45 /* This loop nest code generation is based on non-singular matrix
46 math.
48 A little terminology and a general sketch of the algorithm. See "A singular
49 loop transformation framework based on non-singular matrices" by Wei Li and
50 Keshav Pingali for formal proofs that the various statements below are
51 correct.
53 A loop iteration space represents the points traversed by the loop. A point in the
54 iteration space can be represented by a vector of size <loop depth>. You can
 55 therefore represent the iteration space as an integral combination of a set
56 of basis vectors.
58 A loop iteration space is dense if every integer point between the loop
59 bounds is a point in the iteration space. Every loop with a step of 1
60 therefore has a dense iteration space.
62 for i = 1 to 3, step 1 is a dense iteration space.
64 A loop iteration space is sparse if it is not dense. That is, the iteration
65 space skips integer points that are within the loop bounds.
67 for i = 1 to 3, step 2 is a sparse iteration space, because the integer point
68 2 is skipped.
70 Dense source spaces are easy to transform, because they don't skip any
71 points to begin with. Thus we can compute the exact bounds of the target
72 space using min/max and floor/ceil.
74 For a dense source space, we take the transformation matrix, decompose it
75 into a lower triangular part (H) and a unimodular part (U).
76 We then compute the auxiliary space from the unimodular part (source loop
 77 nest . U = auxiliary space), which has two important properties:
78 1. It traverses the iterations in the same lexicographic order as the source
79 space.
80 2. It is a dense space when the source is a dense space (even if the target
81 space is going to be sparse).
83 Given the auxiliary space, we use the lower triangular part to compute the
84 bounds in the target space by simple matrix multiplication.
85 The gaps in the target space (IE the new loop step sizes) will be the
86 diagonals of the H matrix.
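   A purely illustrative example (hypothetical, not taken from any test case):
   if the transformation matrix is already lower triangular, say

     T = | 2 0 |
         | 0 1 |

   then one valid decomposition is H = T with U the identity, the auxiliary
   space is simply the source space, and the diagonal of H says the outer
   loop of the target nest gets step 2 while the inner loop keeps step 1.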
88 Sparse source spaces require another step, because you can't directly compute
89 the exact bounds of the auxiliary and target space from the sparse space.
90 Rather than try to come up with a separate algorithm to handle sparse source
91 spaces directly, we just find a legal transformation matrix that gives you
92 the sparse source space, from a dense space, and then transform the dense
93 space.
95 For a regular sparse space, you can represent the source space as an integer
96 lattice, and the base space of that lattice will always be dense. Thus, we
97 effectively use the lattice to figure out the transformation from the lattice
 98 base space to the sparse iteration space (IE what transform was applied to
99 the dense space to make it sparse). We then compose this transform with the
100 transformation matrix specified by the user (since our matrix transformations
101 are closed under composition, this is okay). We can then use the base space
102 (which is dense) plus the composed transformation matrix, to compute the rest
103 of the transform using the dense space algorithm above.
105 In other words, our sparse source space (B) is decomposed into a dense base
106 space (A), and a matrix (L) that transforms A into B, such that A.L = B.
107 We then compute the composition of L and the user transformation matrix (T),
108 so that T is now a transform from A to the result, instead of from B to the
109 result.
110 IE A.(LT) = result instead of B.T = result
111 Since A is now a dense source space, we can use the dense source space
112 algorithm above to compute the result of applying transform (LT) to A.
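   A small illustration (hypothetical): the sparse space

     for i = 0 to 6, step 2

   visits {0, 2, 4, 6}.  Its dense base space A is for i' = 0 to 3, step 1,
   with L = (2) and origin 0, since i = 2 * i' and thus A.L = B.  Given a
   user transform T, we apply the composed transform L.T to the dense nest
   over i' rather than applying T to the sparse nest directly.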
114 Fourier-Motzkin elimination is used to compute the bounds of the base space
115 of the lattice. */
117 DEF_VEC_I(int);
118 DEF_VEC_ALLOC_I(int,heap);
120 static bool perfect_nestify (struct loops *,
121 struct loop *, VEC(tree,heap) *,
122 VEC(tree,heap) *, VEC(int,heap) *,
123 VEC(tree,heap) *);
124 /* Lattice stuff that is internal to the code generation algorithm. */
126 typedef struct
128 /* Lattice base matrix. */
129 lambda_matrix base;
130 /* Lattice dimension. */
131 int dimension;
132 /* Origin vector for the coefficients. */
133 lambda_vector origin;
134 /* Origin matrix for the invariants. */
135 lambda_matrix origin_invariants;
136 /* Number of invariants. */
137 int invariants;
138 } *lambda_lattice;
140 #define LATTICE_BASE(T) ((T)->base)
141 #define LATTICE_DIMENSION(T) ((T)->dimension)
142 #define LATTICE_ORIGIN(T) ((T)->origin)
143 #define LATTICE_ORIGIN_INVARIANTS(T) ((T)->origin_invariants)
144 #define LATTICE_INVARIANTS(T) ((T)->invariants)
146 static bool lle_equal (lambda_linear_expression, lambda_linear_expression,
147 int, int);
148 static lambda_lattice lambda_lattice_new (int, int);
149 static lambda_lattice lambda_lattice_compute_base (lambda_loopnest);
151 static tree find_induction_var_from_exit_cond (struct loop *);
153 /* Create a new lambda body vector. */
155 lambda_body_vector
156 lambda_body_vector_new (int size)
158 lambda_body_vector ret;
160 ret = ggc_alloc (sizeof (*ret));
161 LBV_COEFFICIENTS (ret) = lambda_vector_new (size);
162 LBV_SIZE (ret) = size;
163 LBV_DENOMINATOR (ret) = 1;
164 return ret;
167 /* Compute the new coefficients for the vector based on the
168 *inverse* of the transformation matrix. */
170 lambda_body_vector
171 lambda_body_vector_compute_new (lambda_trans_matrix transform,
172 lambda_body_vector vect)
174 lambda_body_vector temp;
175 int depth;
177 /* Make sure the matrix is square. */
178 gcc_assert (LTM_ROWSIZE (transform) == LTM_COLSIZE (transform));
180 depth = LTM_ROWSIZE (transform);
182 temp = lambda_body_vector_new (depth);
183 LBV_DENOMINATOR (temp) =
184 LBV_DENOMINATOR (vect) * LTM_DENOMINATOR (transform);
185 lambda_vector_matrix_mult (LBV_COEFFICIENTS (vect), depth,
186 LTM_MATRIX (transform), depth,
187 LBV_COEFFICIENTS (temp));
188 LBV_SIZE (temp) = LBV_SIZE (vect);
189 return temp;
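/* Illustration (hypothetical): for a plain interchange of a 2-deep nest the
   transform is its own inverse, so a body vector (1, 0) -- a use of the old
   outer induction variable -- is rewritten to (0, 1), i.e. the new inner
   induction variable.  */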
192 /* Print out a lambda body vector. */
194 void
195 print_lambda_body_vector (FILE * outfile, lambda_body_vector body)
197 print_lambda_vector (outfile, LBV_COEFFICIENTS (body), LBV_SIZE (body));
200 /* Return TRUE if two linear expressions are equal. */
202 static bool
203 lle_equal (lambda_linear_expression lle1, lambda_linear_expression lle2,
204 int depth, int invariants)
206 int i;
208 if (lle1 == NULL || lle2 == NULL)
209 return false;
210 if (LLE_CONSTANT (lle1) != LLE_CONSTANT (lle2))
211 return false;
212 if (LLE_DENOMINATOR (lle1) != LLE_DENOMINATOR (lle2))
213 return false;
214 for (i = 0; i < depth; i++)
215 if (LLE_COEFFICIENTS (lle1)[i] != LLE_COEFFICIENTS (lle2)[i])
216 return false;
217 for (i = 0; i < invariants; i++)
218 if (LLE_INVARIANT_COEFFICIENTS (lle1)[i] !=
219 LLE_INVARIANT_COEFFICIENTS (lle2)[i])
220 return false;
221 return true;
224 /* Create a new linear expression with dimension DIM, and total number
225 of invariants INVARIANTS. */
227 lambda_linear_expression
228 lambda_linear_expression_new (int dim, int invariants)
230 lambda_linear_expression ret;
232 ret = ggc_alloc_cleared (sizeof (*ret));
234 LLE_COEFFICIENTS (ret) = lambda_vector_new (dim);
235 LLE_CONSTANT (ret) = 0;
236 LLE_INVARIANT_COEFFICIENTS (ret) = lambda_vector_new (invariants);
237 LLE_DENOMINATOR (ret) = 1;
238 LLE_NEXT (ret) = NULL;
240 return ret;
243 /* Print out a linear expression EXPR, with SIZE coefficients, to OUTFILE.
244 The starting letter used for variable names is START. */
246 static void
247 print_linear_expression (FILE * outfile, lambda_vector expr, int size,
248 char start)
250 int i;
251 bool first = true;
252 for (i = 0; i < size; i++)
254 if (expr[i] != 0)
256 if (first)
258 if (expr[i] < 0)
259 fprintf (outfile, "-");
260 first = false;
262 else if (expr[i] > 0)
263 fprintf (outfile, " + ");
264 else
265 fprintf (outfile, " - ");
266 if (abs (expr[i]) == 1)
267 fprintf (outfile, "%c", start + i);
268 else
269 fprintf (outfile, "%d%c", abs (expr[i]), start + i);
274 /* Print out a lambda linear expression structure, EXPR, to OUTFILE. The
275 depth/number of coefficients is given by DEPTH, the number of invariants is
276 given by INVARIANTS, and the character to start variable names with is given
277 by START. */
279 void
280 print_lambda_linear_expression (FILE * outfile,
281 lambda_linear_expression expr,
282 int depth, int invariants, char start)
284 fprintf (outfile, "\tLinear expression: ");
285 print_linear_expression (outfile, LLE_COEFFICIENTS (expr), depth, start);
286 fprintf (outfile, " constant: %d ", LLE_CONSTANT (expr));
287 fprintf (outfile, " invariants: ");
288 print_linear_expression (outfile, LLE_INVARIANT_COEFFICIENTS (expr),
289 invariants, 'A');
290 fprintf (outfile, " denominator: %d\n", LLE_DENOMINATOR (expr));
293 /* Print a lambda loop structure LOOP to OUTFILE. The depth/number of
294 coefficients is given by DEPTH, the number of invariants is
295 given by INVARIANTS, and the character to start variable names with is given
296 by START. */
298 void
299 print_lambda_loop (FILE * outfile, lambda_loop loop, int depth,
300 int invariants, char start)
302 int step;
303 lambda_linear_expression expr;
305 gcc_assert (loop);
307 expr = LL_LINEAR_OFFSET (loop);
308 step = LL_STEP (loop);
309 fprintf (outfile, " step size = %d \n", step);
311 if (expr)
313 fprintf (outfile, " linear offset: \n");
314 print_lambda_linear_expression (outfile, expr, depth, invariants,
315 start);
318 fprintf (outfile, " lower bound: \n");
319 for (expr = LL_LOWER_BOUND (loop); expr != NULL; expr = LLE_NEXT (expr))
320 print_lambda_linear_expression (outfile, expr, depth, invariants, start);
321 fprintf (outfile, " upper bound: \n");
322 for (expr = LL_UPPER_BOUND (loop); expr != NULL; expr = LLE_NEXT (expr))
323 print_lambda_linear_expression (outfile, expr, depth, invariants, start);
326 /* Create a new loop nest structure with DEPTH loops, and INVARIANTS as the
327 number of invariants. */
329 lambda_loopnest
330 lambda_loopnest_new (int depth, int invariants)
332 lambda_loopnest ret;
333 ret = ggc_alloc (sizeof (*ret));
335 LN_LOOPS (ret) = ggc_alloc_cleared (depth * sizeof (lambda_loop));
336 LN_DEPTH (ret) = depth;
337 LN_INVARIANTS (ret) = invariants;
339 return ret;
342 /* Print a lambda loopnest structure, NEST, to OUTFILE. The starting
343 character to use for loop names is given by START. */
345 void
346 print_lambda_loopnest (FILE * outfile, lambda_loopnest nest, char start)
348 int i;
349 for (i = 0; i < LN_DEPTH (nest); i++)
351 fprintf (outfile, "Loop %c\n", start + i);
352 print_lambda_loop (outfile, LN_LOOPS (nest)[i], LN_DEPTH (nest),
353 LN_INVARIANTS (nest), 'i');
354 fprintf (outfile, "\n");
358 /* Allocate a new lattice structure of DEPTH x DEPTH, with INVARIANTS number
359 of invariants. */
361 static lambda_lattice
362 lambda_lattice_new (int depth, int invariants)
364 lambda_lattice ret;
365 ret = ggc_alloc (sizeof (*ret));
366 LATTICE_BASE (ret) = lambda_matrix_new (depth, depth);
367 LATTICE_ORIGIN (ret) = lambda_vector_new (depth);
368 LATTICE_ORIGIN_INVARIANTS (ret) = lambda_matrix_new (depth, invariants);
369 LATTICE_DIMENSION (ret) = depth;
370 LATTICE_INVARIANTS (ret) = invariants;
371 return ret;
374 /* Compute the lattice base for NEST. The lattice base is essentially a
375 non-singular transform from a dense base space to a sparse iteration space.
376 We use it so that we don't have to specially handle the case of a sparse
377 iteration space in other parts of the algorithm. As a result, this routine
378 only does something interesting (IE produce a matrix that isn't the
379 identity matrix) if NEST is a sparse space. */
381 static lambda_lattice
382 lambda_lattice_compute_base (lambda_loopnest nest)
384 lambda_lattice ret;
385 int depth, invariants;
386 lambda_matrix base;
388 int i, j, step;
389 lambda_loop loop;
390 lambda_linear_expression expression;
392 depth = LN_DEPTH (nest);
393 invariants = LN_INVARIANTS (nest);
395 ret = lambda_lattice_new (depth, invariants);
396 base = LATTICE_BASE (ret);
397 for (i = 0; i < depth; i++)
399 loop = LN_LOOPS (nest)[i];
400 gcc_assert (loop);
401 step = LL_STEP (loop);
 402       /* If we have a step of 1, then the base row is the unit vector, and the
 403          origin and invariant coefficients are 0.  */
404 if (step == 1)
406 for (j = 0; j < depth; j++)
407 base[i][j] = 0;
408 base[i][i] = 1;
409 LATTICE_ORIGIN (ret)[i] = 0;
410 for (j = 0; j < invariants; j++)
411 LATTICE_ORIGIN_INVARIANTS (ret)[i][j] = 0;
413 else
415 /* Otherwise, we need the lower bound expression (which must
416 be an affine function) to determine the base. */
417 expression = LL_LOWER_BOUND (loop);
418 gcc_assert (expression && !LLE_NEXT (expression)
419 && LLE_DENOMINATOR (expression) == 1);
421 /* The lower triangular portion of the base is going to be the
 422              coefficient times the step.  */
423 for (j = 0; j < i; j++)
424 base[i][j] = LLE_COEFFICIENTS (expression)[j]
425 * LL_STEP (LN_LOOPS (nest)[j]);
426 base[i][i] = step;
427 for (j = i + 1; j < depth; j++)
428 base[i][j] = 0;
430 /* Origin for this loop is the constant of the lower bound
431 expression. */
432 LATTICE_ORIGIN (ret)[i] = LLE_CONSTANT (expression);
 434           /* Coefficients for the invariants are equal to the invariant
435 coefficients in the expression. */
436 for (j = 0; j < invariants; j++)
437 LATTICE_ORIGIN_INVARIANTS (ret)[i][j] =
438 LLE_INVARIANT_COEFFICIENTS (expression)[j];
441 return ret;
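/* Illustration (hypothetical): for the nest

     for i = 2 to N, step 2
       for j = 0 to M, step 3

   the routine above produces

     base = | 2 0 |     origin = (2, 0)
            | 0 3 |

   so that x = base * y + origin maps each point y of the dense base space
   onto a sparse point x: i = 2*i' + 2, j = 3*j'.  */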
 444 /* Compute the least common multiple of two numbers A and B.  */
446 static int
447 lcm (int a, int b)
449 return (abs (a) * abs (b) / gcd (a, b));
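/* For example, lcm (4, 6) = (4 * 6) / gcd (4, 6) = 24 / 2 = 12.  */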
452 /* Perform Fourier-Motzkin elimination to calculate the bounds of the
453 auxiliary nest.
454 Fourier-Motzkin is a way of reducing systems of linear inequalities so that
455 it is easy to calculate the answer and bounds.
456 A sketch of how it works:
457 Given a system of linear inequalities, ai * xj >= bk, you can always
458 rewrite the constraints so they are all of the form
459 a <= x, or x <= b, or x >= constant for some x in x1 ... xj (and some b
 460    in b1 ... bk, and some a in a1...ai).
461 You can then eliminate this x from the non-constant inequalities by
462 rewriting these as a <= b, x >= constant, and delete the x variable.
463 You can then repeat this for any remaining x variables, and then we have
464 an easy to use variable <= constant (or no variables at all) form that we
465 can construct our bounds from.
467 In our case, each time we eliminate, we construct part of the bound from
468 the ith variable, then delete the ith variable.
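   A tiny worked illustration (hypothetical): take the system

     -x     <= -1   (x >= 1)
      x - y <=  0   (x <= y)
          y <= 10

   Eliminating x pairs the single negative-coefficient row with the single
   positive-coefficient row, producing -y <= -1, and records 1 <= x and
   x <= y as the bounds of x.  The remaining system { y >= 1, y <= 10 }
   gives the outer bounds, so the reconstructed nest is
   for y = 1 to 10 { for x = 1 to y }.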
 470    Remember the constants are in our vector a, our coefficient matrix is A,
471 and our invariant coefficient matrix is B.
473 SIZE is the size of the matrices being passed.
474 DEPTH is the loop nest depth.
475 INVARIANTS is the number of loop invariants.
476 A, B, and a are the coefficient matrix, invariant coefficient, and a
477 vector of constants, respectively. */
479 static lambda_loopnest
480 compute_nest_using_fourier_motzkin (int size,
481 int depth,
482 int invariants,
483 lambda_matrix A,
484 lambda_matrix B,
485 lambda_vector a)
488 int multiple, f1, f2;
489 int i, j, k;
490 lambda_linear_expression expression;
491 lambda_loop loop;
492 lambda_loopnest auxillary_nest;
493 lambda_matrix swapmatrix, A1, B1;
494 lambda_vector swapvector, a1;
495 int newsize;
497 A1 = lambda_matrix_new (128, depth);
498 B1 = lambda_matrix_new (128, invariants);
499 a1 = lambda_vector_new (128);
501 auxillary_nest = lambda_loopnest_new (depth, invariants);
503 for (i = depth - 1; i >= 0; i--)
505 loop = lambda_loop_new ();
506 LN_LOOPS (auxillary_nest)[i] = loop;
507 LL_STEP (loop) = 1;
509 for (j = 0; j < size; j++)
511 if (A[j][i] < 0)
513 /* Any linear expression in the matrix with a coefficient less
514 than 0 becomes part of the new lower bound. */
515 expression = lambda_linear_expression_new (depth, invariants);
517 for (k = 0; k < i; k++)
518 LLE_COEFFICIENTS (expression)[k] = A[j][k];
520 for (k = 0; k < invariants; k++)
521 LLE_INVARIANT_COEFFICIENTS (expression)[k] = -1 * B[j][k];
523 LLE_DENOMINATOR (expression) = -1 * A[j][i];
524 LLE_CONSTANT (expression) = -1 * a[j];
526 /* Ignore if identical to the existing lower bound. */
527 if (!lle_equal (LL_LOWER_BOUND (loop),
528 expression, depth, invariants))
530 LLE_NEXT (expression) = LL_LOWER_BOUND (loop);
531 LL_LOWER_BOUND (loop) = expression;
535 else if (A[j][i] > 0)
537 /* Any linear expression with a coefficient greater than 0
538 becomes part of the new upper bound. */
539 expression = lambda_linear_expression_new (depth, invariants);
540 for (k = 0; k < i; k++)
541 LLE_COEFFICIENTS (expression)[k] = -1 * A[j][k];
543 for (k = 0; k < invariants; k++)
544 LLE_INVARIANT_COEFFICIENTS (expression)[k] = B[j][k];
546 LLE_DENOMINATOR (expression) = A[j][i];
547 LLE_CONSTANT (expression) = a[j];
549 /* Ignore if identical to the existing upper bound. */
550 if (!lle_equal (LL_UPPER_BOUND (loop),
551 expression, depth, invariants))
553 LLE_NEXT (expression) = LL_UPPER_BOUND (loop);
554 LL_UPPER_BOUND (loop) = expression;
560 /* This portion creates a new system of linear inequalities by deleting
561 the i'th variable, reducing the system by one variable. */
562 newsize = 0;
563 for (j = 0; j < size; j++)
565 /* If the coefficient for the i'th variable is 0, then we can just
566 eliminate the variable straightaway. Otherwise, we have to
567 multiply through by the coefficients we are eliminating. */
568 if (A[j][i] == 0)
570 lambda_vector_copy (A[j], A1[newsize], depth);
571 lambda_vector_copy (B[j], B1[newsize], invariants);
572 a1[newsize] = a[j];
573 newsize++;
575 else if (A[j][i] > 0)
577 for (k = 0; k < size; k++)
579 if (A[k][i] < 0)
581 multiple = lcm (A[j][i], A[k][i]);
582 f1 = multiple / A[j][i];
583 f2 = -1 * multiple / A[k][i];
585 lambda_vector_add_mc (A[j], f1, A[k], f2,
586 A1[newsize], depth);
587 lambda_vector_add_mc (B[j], f1, B[k], f2,
588 B1[newsize], invariants);
589 a1[newsize] = f1 * a[j] + f2 * a[k];
590 newsize++;
596 swapmatrix = A;
597 A = A1;
598 A1 = swapmatrix;
600 swapmatrix = B;
601 B = B1;
602 B1 = swapmatrix;
604 swapvector = a;
605 a = a1;
606 a1 = swapvector;
608 size = newsize;
611 return auxillary_nest;
614 /* Compute the loop bounds for the auxiliary space NEST.
615 Input system used is Ax <= b. TRANS is the unimodular transformation.
616 Given the original nest, this function will
617 1. Convert the nest into matrix form, which consists of a matrix for the
618 coefficients, a matrix for the
619 invariant coefficients, and a vector for the constants.
620 2. Use the matrix form to calculate the lattice base for the nest (which is
621 a dense space)
622 3. Compose the dense space transform with the user specified transform, to
623 get a transform we can easily calculate transformed bounds for.
624 4. Multiply the composed transformation matrix times the matrix form of the
625 loop.
626 5. Transform the newly created matrix (from step 4) back into a loop nest
 627    using Fourier-Motzkin elimination to figure out the bounds.  */
629 static lambda_loopnest
630 lambda_compute_auxillary_space (lambda_loopnest nest,
631 lambda_trans_matrix trans)
633 lambda_matrix A, B, A1, B1;
634 lambda_vector a, a1;
635 lambda_matrix invertedtrans;
636 int depth, invariants, size;
637 int i, j;
638 lambda_loop loop;
639 lambda_linear_expression expression;
640 lambda_lattice lattice;
642 depth = LN_DEPTH (nest);
643 invariants = LN_INVARIANTS (nest);
645 /* Unfortunately, we can't know the number of constraints we'll have
646 ahead of time, but this should be enough even in ridiculous loop nest
647 cases. We must not go over this limit. */
648 A = lambda_matrix_new (128, depth);
649 B = lambda_matrix_new (128, invariants);
650 a = lambda_vector_new (128);
652 A1 = lambda_matrix_new (128, depth);
653 B1 = lambda_matrix_new (128, invariants);
654 a1 = lambda_vector_new (128);
656 /* Store the bounds in the equation matrix A, constant vector a, and
657 invariant matrix B, so that we have Ax <= a + B.
658 This requires a little equation rearranging so that everything is on the
659 correct side of the inequality. */
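  /* Illustration (hypothetical): the single loop

       for i = 0 to N, step 1

     contributes the two rows

       -1 * i <= 0            (from the lower bound 0 <= i)
        1 * i <= 0 + 1 * N    (from the upper bound i <= N)

     i.e. A = (-1, 1)^T, a = (0, 0), and B carries a 1 for the invariant N
     in the second row.  */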
660 size = 0;
661 for (i = 0; i < depth; i++)
663 loop = LN_LOOPS (nest)[i];
665 /* First we do the lower bound. */
666 if (LL_STEP (loop) > 0)
667 expression = LL_LOWER_BOUND (loop);
668 else
669 expression = LL_UPPER_BOUND (loop);
671 for (; expression != NULL; expression = LLE_NEXT (expression))
673 /* Fill in the coefficient. */
674 for (j = 0; j < i; j++)
675 A[size][j] = LLE_COEFFICIENTS (expression)[j];
677 /* And the invariant coefficient. */
678 for (j = 0; j < invariants; j++)
679 B[size][j] = LLE_INVARIANT_COEFFICIENTS (expression)[j];
681 /* And the constant. */
682 a[size] = LLE_CONSTANT (expression);
684 /* Convert (2x+3y+2+b)/4 <= z to 2x+3y-4z <= -2-b. IE put all
 685              constants and invariants on the right-hand side, and the variable terms on the left.  */
686 A[size][i] = -1 * LLE_DENOMINATOR (expression);
687 a[size] *= -1;
688 for (j = 0; j < invariants; j++)
689 B[size][j] *= -1;
691 size++;
692 /* Need to increase matrix sizes above. */
693 gcc_assert (size <= 127);
697 /* Then do the exact same thing for the upper bounds. */
698 if (LL_STEP (loop) > 0)
699 expression = LL_UPPER_BOUND (loop);
700 else
701 expression = LL_LOWER_BOUND (loop);
703 for (; expression != NULL; expression = LLE_NEXT (expression))
705 /* Fill in the coefficient. */
706 for (j = 0; j < i; j++)
707 A[size][j] = LLE_COEFFICIENTS (expression)[j];
709 /* And the invariant coefficient. */
710 for (j = 0; j < invariants; j++)
711 B[size][j] = LLE_INVARIANT_COEFFICIENTS (expression)[j];
713 /* And the constant. */
714 a[size] = LLE_CONSTANT (expression);
716 /* Convert z <= (2x+3y+2+b)/4 to -2x-3y+4z <= 2+b. */
717 for (j = 0; j < i; j++)
718 A[size][j] *= -1;
719 A[size][i] = LLE_DENOMINATOR (expression);
720 size++;
721 /* Need to increase matrix sizes above. */
722 gcc_assert (size <= 127);
727 /* Compute the lattice base x = base * y + origin, where y is the
728 base space. */
729 lattice = lambda_lattice_compute_base (nest);
 731   /* Ax <= a + B then becomes ALy <= a+B - A*origin.  L is the lattice base.  */
733 /* A1 = A * L */
734 lambda_matrix_mult (A, LATTICE_BASE (lattice), A1, size, depth, depth);
736 /* a1 = a - A * origin constant. */
737 lambda_matrix_vector_mult (A, size, depth, LATTICE_ORIGIN (lattice), a1);
738 lambda_vector_add_mc (a, 1, a1, -1, a1, size);
740 /* B1 = B - A * origin invariant. */
741 lambda_matrix_mult (A, LATTICE_ORIGIN_INVARIANTS (lattice), B1, size, depth,
742 invariants);
743 lambda_matrix_add_mc (B, 1, B1, -1, B1, size, invariants);
745 /* Now compute the auxiliary space bounds by first inverting U, multiplying
 746      it by A1, then performing Fourier-Motzkin elimination.  */
748 invertedtrans = lambda_matrix_new (depth, depth);
750 /* Compute the inverse of U. */
751 lambda_matrix_inverse (LTM_MATRIX (trans),
752 invertedtrans, depth);
754 /* A = A1 inv(U). */
755 lambda_matrix_mult (A1, invertedtrans, A, size, depth, depth);
757 return compute_nest_using_fourier_motzkin (size, depth, invariants,
758 A, B1, a1);
761 /* Compute the loop bounds for the target space, using the bounds of
762 the auxiliary nest AUXILLARY_NEST, and the triangular matrix H.
763 The target space loop bounds are computed by multiplying the triangular
764 matrix H by the auxiliary nest, to get the new loop bounds. The sign of
765 the loop steps (positive or negative) is then used to swap the bounds if
766 the loop counts downwards.
767 Return the target loopnest. */
769 static lambda_loopnest
770 lambda_compute_target_space (lambda_loopnest auxillary_nest,
771 lambda_trans_matrix H, lambda_vector stepsigns)
773 lambda_matrix inverse, H1;
774 int determinant, i, j;
775 int gcd1, gcd2;
776 int factor;
778 lambda_loopnest target_nest;
779 int depth, invariants;
780 lambda_matrix target;
782 lambda_loop auxillary_loop, target_loop;
783 lambda_linear_expression expression, auxillary_expr, target_expr, tmp_expr;
785 depth = LN_DEPTH (auxillary_nest);
786 invariants = LN_INVARIANTS (auxillary_nest);
788 inverse = lambda_matrix_new (depth, depth);
789 determinant = lambda_matrix_inverse (LTM_MATRIX (H), inverse, depth);
791 /* H1 is H excluding its diagonal. */
792 H1 = lambda_matrix_new (depth, depth);
793 lambda_matrix_copy (LTM_MATRIX (H), H1, depth, depth);
795 for (i = 0; i < depth; i++)
796 H1[i][i] = 0;
798 /* Computes the linear offsets of the loop bounds. */
799 target = lambda_matrix_new (depth, depth);
800 lambda_matrix_mult (H1, inverse, target, depth, depth, depth);
802 target_nest = lambda_loopnest_new (depth, invariants);
804 for (i = 0; i < depth; i++)
807 /* Get a new loop structure. */
808 target_loop = lambda_loop_new ();
809 LN_LOOPS (target_nest)[i] = target_loop;
811 /* Computes the gcd of the coefficients of the linear part. */
812 gcd1 = lambda_vector_gcd (target[i], i);
814 /* Include the denominator in the GCD. */
815 gcd1 = gcd (gcd1, determinant);
817 /* Now divide through by the gcd. */
818 for (j = 0; j < i; j++)
819 target[i][j] = target[i][j] / gcd1;
821 expression = lambda_linear_expression_new (depth, invariants);
822 lambda_vector_copy (target[i], LLE_COEFFICIENTS (expression), depth);
823 LLE_DENOMINATOR (expression) = determinant / gcd1;
824 LLE_CONSTANT (expression) = 0;
825 lambda_vector_clear (LLE_INVARIANT_COEFFICIENTS (expression),
826 invariants);
827 LL_LINEAR_OFFSET (target_loop) = expression;
830 /* For each loop, compute the new bounds from H. */
831 for (i = 0; i < depth; i++)
833 auxillary_loop = LN_LOOPS (auxillary_nest)[i];
834 target_loop = LN_LOOPS (target_nest)[i];
835 LL_STEP (target_loop) = LTM_MATRIX (H)[i][i];
836 factor = LTM_MATRIX (H)[i][i];
838 /* First we do the lower bound. */
839 auxillary_expr = LL_LOWER_BOUND (auxillary_loop);
841 for (; auxillary_expr != NULL;
842 auxillary_expr = LLE_NEXT (auxillary_expr))
844 target_expr = lambda_linear_expression_new (depth, invariants);
845 lambda_vector_matrix_mult (LLE_COEFFICIENTS (auxillary_expr),
846 depth, inverse, depth,
847 LLE_COEFFICIENTS (target_expr));
848 lambda_vector_mult_const (LLE_COEFFICIENTS (target_expr),
849 LLE_COEFFICIENTS (target_expr), depth,
850 factor);
852 LLE_CONSTANT (target_expr) = LLE_CONSTANT (auxillary_expr) * factor;
853 lambda_vector_copy (LLE_INVARIANT_COEFFICIENTS (auxillary_expr),
854 LLE_INVARIANT_COEFFICIENTS (target_expr),
855 invariants);
856 lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS (target_expr),
857 LLE_INVARIANT_COEFFICIENTS (target_expr),
858 invariants, factor);
859 LLE_DENOMINATOR (target_expr) = LLE_DENOMINATOR (auxillary_expr);
861 if (!lambda_vector_zerop (LLE_COEFFICIENTS (target_expr), depth))
863 LLE_CONSTANT (target_expr) = LLE_CONSTANT (target_expr)
864 * determinant;
865 lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS
866 (target_expr),
867 LLE_INVARIANT_COEFFICIENTS
868 (target_expr), invariants,
869 determinant);
870 LLE_DENOMINATOR (target_expr) =
871 LLE_DENOMINATOR (target_expr) * determinant;
873 /* Find the gcd and divide by it here, rather than doing it
874 at the tree level. */
875 gcd1 = lambda_vector_gcd (LLE_COEFFICIENTS (target_expr), depth);
876 gcd2 = lambda_vector_gcd (LLE_INVARIANT_COEFFICIENTS (target_expr),
877 invariants);
878 gcd1 = gcd (gcd1, gcd2);
879 gcd1 = gcd (gcd1, LLE_CONSTANT (target_expr));
880 gcd1 = gcd (gcd1, LLE_DENOMINATOR (target_expr));
881 for (j = 0; j < depth; j++)
882 LLE_COEFFICIENTS (target_expr)[j] /= gcd1;
883 for (j = 0; j < invariants; j++)
884 LLE_INVARIANT_COEFFICIENTS (target_expr)[j] /= gcd1;
885 LLE_CONSTANT (target_expr) /= gcd1;
886 LLE_DENOMINATOR (target_expr) /= gcd1;
887 /* Ignore if identical to existing bound. */
888 if (!lle_equal (LL_LOWER_BOUND (target_loop), target_expr, depth,
889 invariants))
891 LLE_NEXT (target_expr) = LL_LOWER_BOUND (target_loop);
892 LL_LOWER_BOUND (target_loop) = target_expr;
895 /* Now do the upper bound. */
896 auxillary_expr = LL_UPPER_BOUND (auxillary_loop);
898 for (; auxillary_expr != NULL;
899 auxillary_expr = LLE_NEXT (auxillary_expr))
901 target_expr = lambda_linear_expression_new (depth, invariants);
902 lambda_vector_matrix_mult (LLE_COEFFICIENTS (auxillary_expr),
903 depth, inverse, depth,
904 LLE_COEFFICIENTS (target_expr));
905 lambda_vector_mult_const (LLE_COEFFICIENTS (target_expr),
906 LLE_COEFFICIENTS (target_expr), depth,
907 factor);
908 LLE_CONSTANT (target_expr) = LLE_CONSTANT (auxillary_expr) * factor;
909 lambda_vector_copy (LLE_INVARIANT_COEFFICIENTS (auxillary_expr),
910 LLE_INVARIANT_COEFFICIENTS (target_expr),
911 invariants);
912 lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS (target_expr),
913 LLE_INVARIANT_COEFFICIENTS (target_expr),
914 invariants, factor);
915 LLE_DENOMINATOR (target_expr) = LLE_DENOMINATOR (auxillary_expr);
917 if (!lambda_vector_zerop (LLE_COEFFICIENTS (target_expr), depth))
919 LLE_CONSTANT (target_expr) = LLE_CONSTANT (target_expr)
920 * determinant;
921 lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS
922 (target_expr),
923 LLE_INVARIANT_COEFFICIENTS
924 (target_expr), invariants,
925 determinant);
926 LLE_DENOMINATOR (target_expr) =
927 LLE_DENOMINATOR (target_expr) * determinant;
929 /* Find the gcd and divide by it here, instead of at the
930 tree level. */
931 gcd1 = lambda_vector_gcd (LLE_COEFFICIENTS (target_expr), depth);
932 gcd2 = lambda_vector_gcd (LLE_INVARIANT_COEFFICIENTS (target_expr),
933 invariants);
934 gcd1 = gcd (gcd1, gcd2);
935 gcd1 = gcd (gcd1, LLE_CONSTANT (target_expr));
936 gcd1 = gcd (gcd1, LLE_DENOMINATOR (target_expr));
937 for (j = 0; j < depth; j++)
938 LLE_COEFFICIENTS (target_expr)[j] /= gcd1;
939 for (j = 0; j < invariants; j++)
940 LLE_INVARIANT_COEFFICIENTS (target_expr)[j] /= gcd1;
941 LLE_CONSTANT (target_expr) /= gcd1;
942 LLE_DENOMINATOR (target_expr) /= gcd1;
943 /* Ignore if equal to existing bound. */
944 if (!lle_equal (LL_UPPER_BOUND (target_loop), target_expr, depth,
945 invariants))
947 LLE_NEXT (target_expr) = LL_UPPER_BOUND (target_loop);
948 LL_UPPER_BOUND (target_loop) = target_expr;
952 for (i = 0; i < depth; i++)
954 target_loop = LN_LOOPS (target_nest)[i];
955 /* If necessary, exchange the upper and lower bounds and negate
956 the step size. */
957 if (stepsigns[i] < 0)
959 LL_STEP (target_loop) *= -1;
960 tmp_expr = LL_LOWER_BOUND (target_loop);
961 LL_LOWER_BOUND (target_loop) = LL_UPPER_BOUND (target_loop);
962 LL_UPPER_BOUND (target_loop) = tmp_expr;
965 return target_nest;
 968 /* Compute the new loop step signs resulting from applying TRANS to a nest
 969    whose current step signs are STEPSIGNS.  Return the new step sign vector.  */
971 static lambda_vector
972 lambda_compute_step_signs (lambda_trans_matrix trans, lambda_vector stepsigns)
974 lambda_matrix matrix, H;
975 int size;
976 lambda_vector newsteps;
977 int i, j, factor, minimum_column;
978 int temp;
980 matrix = LTM_MATRIX (trans);
981 size = LTM_ROWSIZE (trans);
982 H = lambda_matrix_new (size, size);
984 newsteps = lambda_vector_new (size);
985 lambda_vector_copy (stepsigns, newsteps, size);
987 lambda_matrix_copy (matrix, H, size, size);
989 for (j = 0; j < size; j++)
991 lambda_vector row;
992 row = H[j];
993 for (i = j; i < size; i++)
994 if (row[i] < 0)
995 lambda_matrix_col_negate (H, size, i);
996 while (lambda_vector_first_nz (row, size, j + 1) < size)
998 minimum_column = lambda_vector_min_nz (row, size, j);
999 lambda_matrix_col_exchange (H, size, j, minimum_column);
1001 temp = newsteps[j];
1002 newsteps[j] = newsteps[minimum_column];
1003 newsteps[minimum_column] = temp;
1005 for (i = j + 1; i < size; i++)
1007 factor = row[i] / row[j];
1008 lambda_matrix_col_add (H, size, j, i, -1 * factor);
1012 return newsteps;
1015 /* Transform NEST according to TRANS, and return the new loopnest.
1016 This involves
1017 1. Computing a lattice base for the transformation
1018 2. Composing the dense base with the specified transformation (TRANS)
1019 3. Decomposing the combined transformation into a lower triangular portion,
1020 and a unimodular portion.
1021 4. Computing the auxiliary nest using the unimodular portion.
1022 5. Computing the target nest using the auxiliary nest and the lower
1023 triangular portion. */
1025 lambda_loopnest
1026 lambda_loopnest_transform (lambda_loopnest nest, lambda_trans_matrix trans)
1028 lambda_loopnest auxillary_nest, target_nest;
1030 int depth, invariants;
1031 int i, j;
1032 lambda_lattice lattice;
1033 lambda_trans_matrix trans1, H, U;
1034 lambda_loop loop;
1035 lambda_linear_expression expression;
1036 lambda_vector origin;
1037 lambda_matrix origin_invariants;
1038 lambda_vector stepsigns;
1039 int f;
1041 depth = LN_DEPTH (nest);
1042 invariants = LN_INVARIANTS (nest);
1044 /* Keep track of the signs of the loop steps. */
1045 stepsigns = lambda_vector_new (depth);
1046 for (i = 0; i < depth; i++)
1048 if (LL_STEP (LN_LOOPS (nest)[i]) > 0)
1049 stepsigns[i] = 1;
1050 else
1051 stepsigns[i] = -1;
1054 /* Compute the lattice base. */
1055 lattice = lambda_lattice_compute_base (nest);
1056 trans1 = lambda_trans_matrix_new (depth, depth);
1058 /* Multiply the transformation matrix by the lattice base. */
1060 lambda_matrix_mult (LTM_MATRIX (trans), LATTICE_BASE (lattice),
1061 LTM_MATRIX (trans1), depth, depth, depth);
1063 /* Compute the Hermite normal form for the new transformation matrix. */
1064 H = lambda_trans_matrix_new (depth, depth);
1065 U = lambda_trans_matrix_new (depth, depth);
1066 lambda_matrix_hermite (LTM_MATRIX (trans1), depth, LTM_MATRIX (H),
1067 LTM_MATRIX (U));
1069 /* Compute the auxiliary loop nest's space from the unimodular
1070 portion. */
1071 auxillary_nest = lambda_compute_auxillary_space (nest, U);
1073 /* Compute the loop step signs from the old step signs and the
1074 transformation matrix. */
1075 stepsigns = lambda_compute_step_signs (trans1, stepsigns);
1077 /* Compute the target loop nest space from the auxiliary nest and
1078 the lower triangular matrix H. */
1079 target_nest = lambda_compute_target_space (auxillary_nest, H, stepsigns);
1080 origin = lambda_vector_new (depth);
1081 origin_invariants = lambda_matrix_new (depth, invariants);
1082 lambda_matrix_vector_mult (LTM_MATRIX (trans), depth, depth,
1083 LATTICE_ORIGIN (lattice), origin);
1084 lambda_matrix_mult (LTM_MATRIX (trans), LATTICE_ORIGIN_INVARIANTS (lattice),
1085 origin_invariants, depth, depth, invariants);
1087 for (i = 0; i < depth; i++)
1089 loop = LN_LOOPS (target_nest)[i];
1090 expression = LL_LINEAR_OFFSET (loop);
1091 if (lambda_vector_zerop (LLE_COEFFICIENTS (expression), depth))
1092 f = 1;
1093 else
1094 f = LLE_DENOMINATOR (expression);
1096 LLE_CONSTANT (expression) += f * origin[i];
1098 for (j = 0; j < invariants; j++)
1099 LLE_INVARIANT_COEFFICIENTS (expression)[j] +=
1100 f * origin_invariants[i][j];
1103 return target_nest;
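/* A sketch of how this entry point is typically driven, modeled loosely on
   the linear loop transform pass; the variables used here are hypothetical:

     VEC(tree,heap) *oldivs = NULL, *invariants = NULL;
     lambda_loopnest before, after;
     lambda_trans_matrix trans;

     before = gcc_loopnest_to_lambda_loopnest (loops, loop_nest, &oldivs,
                                               &invariants, false);
     trans = lambda_trans_matrix_new (depth, depth);
     ... fill in LTM_MATRIX (trans), e.g. with an interchange, and verify
         the transform is legal for the dependences of the nest ...
     after = lambda_loopnest_transform (before, trans);
     lambda_loopnest_to_gcc_loopnest (loop_nest, oldivs, invariants,
                                      after, trans);  */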
1107 /* Convert a gcc tree expression EXPR to a lambda linear expression, and
1108 return the new expression. DEPTH is the depth of the loopnest.
1109 OUTERINDUCTIONVARS is an array of the induction variables for outer loops
1110 in this nest. INVARIANTS is the array of invariants for the loop. EXTRA
1111 is the amount we have to add/subtract from the expression because of the
1112 type of comparison it is used in. */
1114 static lambda_linear_expression
1115 gcc_tree_to_linear_expression (int depth, tree expr,
1116 VEC(tree,heap) *outerinductionvars,
1117 VEC(tree,heap) *invariants, int extra)
1119 lambda_linear_expression lle = NULL;
1120 switch (TREE_CODE (expr))
1122 case INTEGER_CST:
1124 lle = lambda_linear_expression_new (depth, 2 * depth);
1125 LLE_CONSTANT (lle) = TREE_INT_CST_LOW (expr);
1126 if (extra != 0)
1127 LLE_CONSTANT (lle) += extra;
1129 LLE_DENOMINATOR (lle) = 1;
1131 break;
1132 case SSA_NAME:
1134 tree iv, invar;
1135 size_t i;
1136 for (i = 0; VEC_iterate (tree, outerinductionvars, i, iv); i++)
1137 if (iv != NULL)
1139 if (SSA_NAME_VAR (iv) == SSA_NAME_VAR (expr))
1141 lle = lambda_linear_expression_new (depth, 2 * depth);
1142 LLE_COEFFICIENTS (lle)[i] = 1;
1143 if (extra != 0)
1144 LLE_CONSTANT (lle) = extra;
1146 LLE_DENOMINATOR (lle) = 1;
1149 for (i = 0; VEC_iterate (tree, invariants, i, invar); i++)
1150 if (invar != NULL)
1152 if (SSA_NAME_VAR (invar) == SSA_NAME_VAR (expr))
1154 lle = lambda_linear_expression_new (depth, 2 * depth);
1155 LLE_INVARIANT_COEFFICIENTS (lle)[i] = 1;
1156 if (extra != 0)
1157 LLE_CONSTANT (lle) = extra;
1158 LLE_DENOMINATOR (lle) = 1;
1162 break;
1163 default:
1164 return NULL;
1167 return lle;
 1170 /* Return the depth of the loopnest NEST.  */
1172 static int
1173 depth_of_nest (struct loop *nest)
1175 size_t depth = 0;
1176 while (nest)
1178 depth++;
1179 nest = nest->inner;
1181 return depth;
1185 /* Return true if OP is invariant in LOOP and all outer loops. */
1187 static bool
1188 invariant_in_loop_and_outer_loops (struct loop *loop, tree op)
1190 if (is_gimple_min_invariant (op))
1191 return true;
1192 if (loop->depth == 0)
1193 return true;
1194 if (!expr_invariant_in_loop_p (loop, op))
1195 return false;
1196 if (loop->outer
1197 && !invariant_in_loop_and_outer_loops (loop->outer, op))
1198 return false;
1199 return true;
1202 /* Generate a lambda loop from a gcc loop LOOP. Return the new lambda loop,
1203 or NULL if it could not be converted.
1204 DEPTH is the depth of the loop.
1205 INVARIANTS is a pointer to the array of loop invariants.
1206 The induction variable for this loop should be stored in the parameter
1207 OURINDUCTIONVAR.
1208 OUTERINDUCTIONVARS is an array of induction variables for outer loops. */
1210 static lambda_loop
1211 gcc_loop_to_lambda_loop (struct loop *loop, int depth,
1212 VEC(tree,heap) ** invariants,
1213 tree * ourinductionvar,
1214 VEC(tree,heap) * outerinductionvars,
1215 VEC(tree,heap) ** lboundvars,
1216 VEC(tree,heap) ** uboundvars,
1217 VEC(int,heap) ** steps)
1219 tree phi;
1220 tree exit_cond;
1221 tree access_fn, inductionvar;
1222 tree step;
1223 lambda_loop lloop = NULL;
1224 lambda_linear_expression lbound, ubound;
1225 tree test;
1226 int stepint;
1227 int extra = 0;
1228 tree lboundvar, uboundvar, uboundresult;
1230 /* Find out induction var and exit condition. */
1231 inductionvar = find_induction_var_from_exit_cond (loop);
1232 exit_cond = get_loop_exit_condition (loop);
1234 if (inductionvar == NULL || exit_cond == NULL)
1236 if (dump_file && (dump_flags & TDF_DETAILS))
1237 fprintf (dump_file,
1238 "Unable to convert loop: Cannot determine exit condition or induction variable for loop.\n");
1239 return NULL;
1242 test = TREE_OPERAND (exit_cond, 0);
1244 if (SSA_NAME_DEF_STMT (inductionvar) == NULL_TREE)
1247 if (dump_file && (dump_flags & TDF_DETAILS))
1248 fprintf (dump_file,
1249 "Unable to convert loop: Cannot find PHI node for induction variable\n");
1251 return NULL;
1254 phi = SSA_NAME_DEF_STMT (inductionvar);
1255 if (TREE_CODE (phi) != PHI_NODE)
1257 phi = SINGLE_SSA_TREE_OPERAND (phi, SSA_OP_USE);
1258 if (!phi)
1261 if (dump_file && (dump_flags & TDF_DETAILS))
1262 fprintf (dump_file,
1263 "Unable to convert loop: Cannot find PHI node for induction variable\n");
1265 return NULL;
1268 phi = SSA_NAME_DEF_STMT (phi);
1269 if (TREE_CODE (phi) != PHI_NODE)
1272 if (dump_file && (dump_flags & TDF_DETAILS))
1273 fprintf (dump_file,
1274 "Unable to convert loop: Cannot find PHI node for induction variable\n");
1275 return NULL;
1280 /* The induction variable name/version we want to put in the array is the
1281 result of the induction variable phi node. */
1282 *ourinductionvar = PHI_RESULT (phi);
1283 access_fn = instantiate_parameters
1284 (loop, analyze_scalar_evolution (loop, PHI_RESULT (phi)));
1285 if (access_fn == chrec_dont_know)
1287 if (dump_file && (dump_flags & TDF_DETAILS))
1288 fprintf (dump_file,
1289 "Unable to convert loop: Access function for induction variable phi is unknown\n");
1291 return NULL;
1294 step = evolution_part_in_loop_num (access_fn, loop->num);
1295 if (!step || step == chrec_dont_know)
1297 if (dump_file && (dump_flags & TDF_DETAILS))
1298 fprintf (dump_file,
1299 "Unable to convert loop: Cannot determine step of loop.\n");
1301 return NULL;
1303 if (TREE_CODE (step) != INTEGER_CST)
1306 if (dump_file && (dump_flags & TDF_DETAILS))
1307 fprintf (dump_file,
1308 "Unable to convert loop: Step of loop is not integer.\n");
1309 return NULL;
1312 stepint = TREE_INT_CST_LOW (step);
1314 /* Only want phis for induction vars, which will have two
1315 arguments. */
1316 if (PHI_NUM_ARGS (phi) != 2)
1318 if (dump_file && (dump_flags & TDF_DETAILS))
1319 fprintf (dump_file,
1320 "Unable to convert loop: PHI node for induction variable has >2 arguments\n");
1321 return NULL;
1324 /* Another induction variable check. One argument's source should be
1325 in the loop, one outside the loop. */
1326 if (flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, 0)->src)
1327 && flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, 1)->src))
1330 if (dump_file && (dump_flags & TDF_DETAILS))
1331 fprintf (dump_file,
1332 "Unable to convert loop: PHI edges both inside loop, or both outside loop.\n");
1334 return NULL;
1337 if (flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, 0)->src))
1339 lboundvar = PHI_ARG_DEF (phi, 1);
1340 lbound = gcc_tree_to_linear_expression (depth, lboundvar,
1341 outerinductionvars, *invariants,
1344 else
1346 lboundvar = PHI_ARG_DEF (phi, 0);
1347 lbound = gcc_tree_to_linear_expression (depth, lboundvar,
1348 outerinductionvars, *invariants,
1352 if (!lbound)
1355 if (dump_file && (dump_flags & TDF_DETAILS))
1356 fprintf (dump_file,
1357 "Unable to convert loop: Cannot convert lower bound to linear expression\n");
1359 return NULL;
1361 /* One part of the test may be a loop invariant tree. */
1362 VEC_reserve (tree, heap, *invariants, 1);
1363 if (TREE_CODE (TREE_OPERAND (test, 1)) == SSA_NAME
1364 && invariant_in_loop_and_outer_loops (loop, TREE_OPERAND (test, 1)))
1365 VEC_quick_push (tree, *invariants, TREE_OPERAND (test, 1));
1366 else if (TREE_CODE (TREE_OPERAND (test, 0)) == SSA_NAME
1367 && invariant_in_loop_and_outer_loops (loop, TREE_OPERAND (test, 0)))
1368 VEC_quick_push (tree, *invariants, TREE_OPERAND (test, 0));
 1370   /* The non-induction variable part of the test is the upper bound variable.  */
1372 if (TREE_OPERAND (test, 0) == inductionvar)
1373 uboundvar = TREE_OPERAND (test, 1);
1374 else
1375 uboundvar = TREE_OPERAND (test, 0);
1378 /* We only size the vectors assuming we have, at max, 2 times as many
1379 invariants as we do loops (one for each bound).
1380 This is just an arbitrary number, but it has to be matched against the
1381 code below. */
1382 gcc_assert (VEC_length (tree, *invariants) <= (unsigned int) (2 * depth));
 1385   /* The exit test's comparison code determines whether the tested bound is actually reached; adjust by one step where needed.  */
1386 if (TREE_CODE (test) == LT_EXPR)
1387 extra = -1 * stepint;
1388 else if (TREE_CODE (test) == NE_EXPR)
1389 extra = -1 * stepint;
1390 else if (TREE_CODE (test) == GT_EXPR)
1391 extra = -1 * stepint;
1392 else if (TREE_CODE (test) == EQ_EXPR)
1393 extra = 1 * stepint;
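  /* E.g. for a loop exiting on i < N with step 1, extra is -1, so the upper
     bound handed to gcc_tree_to_linear_expression below is N - 1, the last
     value the induction variable actually takes.  */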
1395 ubound = gcc_tree_to_linear_expression (depth, uboundvar,
1396 outerinductionvars,
1397 *invariants, extra);
1398 uboundresult = build2 (PLUS_EXPR, TREE_TYPE (uboundvar), uboundvar,
1399 build_int_cst (TREE_TYPE (uboundvar), extra));
1400 VEC_safe_push (tree, heap, *uboundvars, uboundresult);
1401 VEC_safe_push (tree, heap, *lboundvars, lboundvar);
1402 VEC_safe_push (int, heap, *steps, stepint);
1403 if (!ubound)
1405 if (dump_file && (dump_flags & TDF_DETAILS))
1406 fprintf (dump_file,
1407 "Unable to convert loop: Cannot convert upper bound to linear expression\n");
1408 return NULL;
1411 lloop = lambda_loop_new ();
1412 LL_STEP (lloop) = stepint;
1413 LL_LOWER_BOUND (lloop) = lbound;
1414 LL_UPPER_BOUND (lloop) = ubound;
1415 return lloop;
1418 /* Given a LOOP, find the induction variable it is testing against in the exit
1419 condition. Return the induction variable if found, NULL otherwise. */
1421 static tree
1422 find_induction_var_from_exit_cond (struct loop *loop)
1424 tree expr = get_loop_exit_condition (loop);
1425 tree ivarop;
1426 tree test;
1427 if (expr == NULL_TREE)
1428 return NULL_TREE;
1429 if (TREE_CODE (expr) != COND_EXPR)
1430 return NULL_TREE;
1431 test = TREE_OPERAND (expr, 0);
1432 if (!COMPARISON_CLASS_P (test))
1433 return NULL_TREE;
1435 /* Find the side that is invariant in this loop. The ivar must be the other
1436 side. */
1438 if (expr_invariant_in_loop_p (loop, TREE_OPERAND (test, 0)))
1439 ivarop = TREE_OPERAND (test, 1);
1440 else if (expr_invariant_in_loop_p (loop, TREE_OPERAND (test, 1)))
1441 ivarop = TREE_OPERAND (test, 0);
1442 else
1443 return NULL_TREE;
1445 if (TREE_CODE (ivarop) != SSA_NAME)
1446 return NULL_TREE;
1447 return ivarop;
1450 DEF_VEC_P(lambda_loop);
1451 DEF_VEC_ALLOC_P(lambda_loop,heap);
1453 /* Generate a lambda loopnest from a gcc loopnest LOOP_NEST.
1454 Return the new loop nest.
1455 INDUCTIONVARS is a pointer to an array of induction variables for the
1456 loopnest that will be filled in during this process.
1457 INVARIANTS is a pointer to an array of invariants that will be filled in
1458 during this process. */
1460 lambda_loopnest
1461 gcc_loopnest_to_lambda_loopnest (struct loops *currloops,
1462 struct loop * loop_nest,
1463 VEC(tree,heap) **inductionvars,
1464 VEC(tree,heap) **invariants,
1465 bool need_perfect_nest)
1467 lambda_loopnest ret = NULL;
1468 struct loop *temp;
1469 int depth = 0;
1470 size_t i;
1471 VEC(lambda_loop,heap) *loops = NULL;
1472 VEC(tree,heap) *uboundvars = NULL;
1473 VEC(tree,heap) *lboundvars = NULL;
1474 VEC(int,heap) *steps = NULL;
1475 lambda_loop newloop;
1476 tree inductionvar = NULL;
1478 depth = depth_of_nest (loop_nest);
1479 temp = loop_nest;
1480 while (temp)
1482 newloop = gcc_loop_to_lambda_loop (temp, depth, invariants,
1483 &inductionvar, *inductionvars,
1484 &lboundvars, &uboundvars,
1485 &steps);
1486 if (!newloop)
1487 return NULL;
1488 VEC_safe_push (tree, heap, *inductionvars, inductionvar);
1489 VEC_safe_push (lambda_loop, heap, loops, newloop);
1490 temp = temp->inner;
1492 if (need_perfect_nest)
1494 if (!perfect_nestify (currloops, loop_nest,
1495 lboundvars, uboundvars, steps, *inductionvars))
1497 if (dump_file)
1498 fprintf (dump_file,
1499 "Not a perfect loop nest and couldn't convert to one.\n");
1500 goto fail;
1502 else if (dump_file)
1503 fprintf (dump_file,
1504 "Successfully converted loop nest to perfect loop nest.\n");
1506 ret = lambda_loopnest_new (depth, 2 * depth);
1507 for (i = 0; VEC_iterate (lambda_loop, loops, i, newloop); i++)
1508 LN_LOOPS (ret)[i] = newloop;
1509 fail:
1510 VEC_free (lambda_loop, heap, loops);
1511 VEC_free (tree, heap, uboundvars);
1512 VEC_free (tree, heap, lboundvars);
1513 VEC_free (int, heap, steps);
1515 return ret;
1518 /* Convert a lambda body vector LBV to a gcc tree, and return the new tree.
1519 STMTS_TO_INSERT is a pointer to a tree where the statements we need to be
1520 inserted for us are stored. INDUCTION_VARS is the array of induction
1521 variables for the loop this LBV is from. TYPE is the tree type to use for
1522 the variables and trees involved. */
1524 static tree
1525 lbv_to_gcc_expression (lambda_body_vector lbv,
1526 tree type, VEC(tree,heap) *induction_vars,
1527 tree *stmts_to_insert)
1529 tree stmts, stmt, resvar, name;
1530 tree iv;
1531 size_t i;
1532 tree_stmt_iterator tsi;
1534 /* Create a statement list and a linear expression temporary. */
1535 stmts = alloc_stmt_list ();
1536 resvar = create_tmp_var (type, "lbvtmp");
1537 add_referenced_tmp_var (resvar);
1539 /* Start at 0. */
1540 stmt = build2 (MODIFY_EXPR, void_type_node, resvar, integer_zero_node);
1541 name = make_ssa_name (resvar, stmt);
1542 TREE_OPERAND (stmt, 0) = name;
1543 tsi = tsi_last (stmts);
1544 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1546 for (i = 0; VEC_iterate (tree, induction_vars, i, iv); i++)
1548 if (LBV_COEFFICIENTS (lbv)[i] != 0)
1550 tree newname;
1551 tree coeffmult;
1553 /* newname = coefficient * induction_variable */
1554 coeffmult = build_int_cst (type, LBV_COEFFICIENTS (lbv)[i]);
1555 stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
1556 fold_build2 (MULT_EXPR, type, iv, coeffmult));
1558 newname = make_ssa_name (resvar, stmt);
1559 TREE_OPERAND (stmt, 0) = newname;
1560 fold_stmt (&stmt);
1561 tsi = tsi_last (stmts);
1562 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1564 /* name = name + newname */
1565 stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
1566 build2 (PLUS_EXPR, type, name, newname));
1567 name = make_ssa_name (resvar, stmt);
1568 TREE_OPERAND (stmt, 0) = name;
1569 fold_stmt (&stmt);
1570 tsi = tsi_last (stmts);
1571 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1576 /* Handle any denominator that occurs. */
1577 if (LBV_DENOMINATOR (lbv) != 1)
1579 tree denominator = build_int_cst (type, LBV_DENOMINATOR (lbv));
1580 stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
1581 build2 (CEIL_DIV_EXPR, type, name, denominator));
1582 name = make_ssa_name (resvar, stmt);
1583 TREE_OPERAND (stmt, 0) = name;
1584 fold_stmt (&stmt);
1585 tsi = tsi_last (stmts);
1586 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1588 *stmts_to_insert = stmts;
1589 return name;
1592 /* Convert a linear expression from coefficient and constant form to a
1593 gcc tree.
1594 Return the tree that represents the final value of the expression.
1595 LLE is the linear expression to convert.
1596 OFFSET is the linear offset to apply to the expression.
1597 TYPE is the tree type to use for the variables and math.
1598 INDUCTION_VARS is a vector of induction variables for the loops.
1599 INVARIANTS is a vector of the loop nest invariants.
1600 WRAP specifies what tree code to wrap the results in, if there is more than
1601 one (it is either MAX_EXPR, or MIN_EXPR).
1602 STMTS_TO_INSERT Is a pointer to the statement list we fill in with
1603 statements that need to be inserted for the linear expression. */
1605 static tree
1606 lle_to_gcc_expression (lambda_linear_expression lle,
1607 lambda_linear_expression offset,
1608 tree type,
1609 VEC(tree,heap) *induction_vars,
1610 VEC(tree,heap) *invariants,
1611 enum tree_code wrap, tree *stmts_to_insert)
1613 tree stmts, stmt, resvar, name;
1614 size_t i;
1615 tree_stmt_iterator tsi;
1616 tree iv, invar;
1617 VEC(tree,heap) *results = NULL;
1619 gcc_assert (wrap == MAX_EXPR || wrap == MIN_EXPR);
1620 name = NULL_TREE;
1621 /* Create a statement list and a linear expression temporary. */
1622 stmts = alloc_stmt_list ();
1623 resvar = create_tmp_var (type, "lletmp");
1624 add_referenced_tmp_var (resvar);
1626 /* Build up the linear expressions, and put the variable representing the
1627 result in the results array. */
1628 for (; lle != NULL; lle = LLE_NEXT (lle))
1630 /* Start at name = 0. */
1631 stmt = build2 (MODIFY_EXPR, void_type_node, resvar, integer_zero_node);
1632 name = make_ssa_name (resvar, stmt);
1633 TREE_OPERAND (stmt, 0) = name;
1634 fold_stmt (&stmt);
1635 tsi = tsi_last (stmts);
1636 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1638 /* First do the induction variables.
1639 at the end, name = name + all the induction variables added
1640 together. */
1641 for (i = 0; VEC_iterate (tree, induction_vars, i, iv); i++)
1643 if (LLE_COEFFICIENTS (lle)[i] != 0)
1645 tree newname;
1646 tree mult;
1647 tree coeff;
1649 /* mult = induction variable * coefficient. */
1650 if (LLE_COEFFICIENTS (lle)[i] == 1)
1652 mult = VEC_index (tree, induction_vars, i);
1654 else
1656 coeff = build_int_cst (type,
1657 LLE_COEFFICIENTS (lle)[i]);
1658 mult = fold_build2 (MULT_EXPR, type, iv, coeff);
1661 /* newname = mult */
1662 stmt = build2 (MODIFY_EXPR, void_type_node, resvar, mult);
1663 newname = make_ssa_name (resvar, stmt);
1664 TREE_OPERAND (stmt, 0) = newname;
1665 fold_stmt (&stmt);
1666 tsi = tsi_last (stmts);
1667 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1669 /* name = name + newname */
1670 stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
1671 build2 (PLUS_EXPR, type, name, newname));
1672 name = make_ssa_name (resvar, stmt);
1673 TREE_OPERAND (stmt, 0) = name;
1674 fold_stmt (&stmt);
1675 tsi = tsi_last (stmts);
1676 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1680 /* Handle our invariants.
1681 At the end, we have name = name + result of adding all multiplied
1682 invariants. */
1683 for (i = 0; VEC_iterate (tree, invariants, i, invar); i++)
1685 if (LLE_INVARIANT_COEFFICIENTS (lle)[i] != 0)
1687 tree newname;
1688 tree mult;
1689 tree coeff;
1690 int invcoeff = LLE_INVARIANT_COEFFICIENTS (lle)[i];
1691 /* mult = invariant * coefficient */
1692 if (invcoeff == 1)
1694 mult = invar;
1696 else
1698 coeff = build_int_cst (type, invcoeff);
1699 mult = fold_build2 (MULT_EXPR, type, invar, coeff);
1702 /* newname = mult */
1703 stmt = build2 (MODIFY_EXPR, void_type_node, resvar, mult);
1704 newname = make_ssa_name (resvar, stmt);
1705 TREE_OPERAND (stmt, 0) = newname;
1706 fold_stmt (&stmt);
1707 tsi = tsi_last (stmts);
1708 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1710 /* name = name + newname */
1711 stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
1712 build2 (PLUS_EXPR, type, name, newname));
1713 name = make_ssa_name (resvar, stmt);
1714 TREE_OPERAND (stmt, 0) = name;
1715 fold_stmt (&stmt);
1716 tsi = tsi_last (stmts);
1717 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1721 /* Now handle the constant.
1722 name = name + constant. */
1723 if (LLE_CONSTANT (lle) != 0)
1725 stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
1726 build2 (PLUS_EXPR, type, name,
1727 build_int_cst (type, LLE_CONSTANT (lle))));
1728 name = make_ssa_name (resvar, stmt);
1729 TREE_OPERAND (stmt, 0) = name;
1730 fold_stmt (&stmt);
1731 tsi = tsi_last (stmts);
1732 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1735 /* Now handle the offset.
1736 name = name + linear offset. */
1737 if (LLE_CONSTANT (offset) != 0)
1739 stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
1740 build2 (PLUS_EXPR, type, name,
1741 build_int_cst (type, LLE_CONSTANT (offset))));
1742 name = make_ssa_name (resvar, stmt);
1743 TREE_OPERAND (stmt, 0) = name;
1744 fold_stmt (&stmt);
1745 tsi = tsi_last (stmts);
1746 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1749 /* Handle any denominator that occurs. */
1750 if (LLE_DENOMINATOR (lle) != 1)
1752 stmt = build_int_cst (type, LLE_DENOMINATOR (lle));
1753 stmt = build2 (wrap == MAX_EXPR ? CEIL_DIV_EXPR : FLOOR_DIV_EXPR,
1754 type, name, stmt);
1755 stmt = build2 (MODIFY_EXPR, void_type_node, resvar, stmt);
1757 /* name = {ceil, floor}(name/denominator) */
1758 name = make_ssa_name (resvar, stmt);
1759 TREE_OPERAND (stmt, 0) = name;
1760 tsi = tsi_last (stmts);
1761 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1763 VEC_safe_push (tree, heap, results, name);
1766 /* Again, out of laziness, we don't handle this case yet. It's not
1767 hard, it just hasn't occurred. */
1768 gcc_assert (VEC_length (tree, results) <= 2);
1770 /* We may need to wrap the results in a MAX_EXPR or MIN_EXPR. */
1771 if (VEC_length (tree, results) > 1)
1773 tree op1 = VEC_index (tree, results, 0);
1774 tree op2 = VEC_index (tree, results, 1);
1775 stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
1776 build2 (wrap, type, op1, op2));
1777 name = make_ssa_name (resvar, stmt);
1778 TREE_OPERAND (stmt, 0) = name;
1779 tsi = tsi_last (stmts);
1780 tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
1783 VEC_free (tree, heap, results);
1785 *stmts_to_insert = stmts;
1786 return name;
1789 /* Transform a lambda loopnest NEW_LOOPNEST, which had TRANSFORM applied to
1790 it, back into gcc code. This changes the
1791 loops, their induction variables, and their bodies, so that they
1792 match the transformed loopnest.
1793 OLD_LOOPNEST is the loopnest before we've replaced it with the new
1794 loopnest.
1795 OLD_IVS is a vector of induction variables from the old loopnest.
1796 INVARIANTS is a vector of loop invariants from the old loopnest.
1797 NEW_LOOPNEST is the new lambda loopnest to replace OLD_LOOPNEST with.
1798 TRANSFORM is the matrix transform that was applied to OLD_LOOPNEST to get
1799 NEW_LOOPNEST. */
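/* For illustration only (not part of the original source): for a 2-deep
   nest, interchanging the two loops corresponds to the unimodular
   permutation matrix

     TRANSFORM = | 0 1 |
                 | 1 0 |

   NEW_LOOPNEST is then OLD_LOOPNEST with TRANSFORM applied, and the code
   below rebuilds the GCC loops (bounds, ivs, exit conditions) and rewrites
   the loop bodies so that uses of the old ivs are expressed in terms of the
   new ones.  */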
1801 void
1802 lambda_loopnest_to_gcc_loopnest (struct loop *old_loopnest,
1803 VEC(tree,heap) *old_ivs,
1804 VEC(tree,heap) *invariants,
1805 lambda_loopnest new_loopnest,
1806 lambda_trans_matrix transform)
1808 struct loop *temp;
1809 size_t i = 0;
1810 size_t depth = 0;
1811 VEC(tree,heap) *new_ivs = NULL;
1812 tree oldiv;
1814 block_stmt_iterator bsi;
1816 transform = lambda_trans_matrix_inverse (transform);
1818 if (dump_file)
1819 {
1820 fprintf (dump_file, "Inverse of transformation matrix:\n");
1821 print_lambda_trans_matrix (dump_file, transform);
1822 }
1822 depth = depth_of_nest (old_loopnest);
1823 temp = old_loopnest;
1825 while (temp)
1827 lambda_loop newloop;
1828 basic_block bb;
1829 edge exit;
1830 tree ivvar, ivvarinced, exitcond, stmts;
1831 enum tree_code testtype;
1832 tree newupperbound, newlowerbound;
1833 lambda_linear_expression offset;
1834 tree type;
1835 bool insert_after;
1836 tree inc_stmt;
1838 oldiv = VEC_index (tree, old_ivs, i);
1839 type = TREE_TYPE (oldiv);
1841 /* First, build the new induction variable temporary */
1843 ivvar = create_tmp_var (type, "lnivtmp");
1844 add_referenced_tmp_var (ivvar);
1846 VEC_safe_push (tree, heap, new_ivs, ivvar);
1848 newloop = LN_LOOPS (new_loopnest)[i];
1850 /* Linear offset is a bit tricky to handle. Punt on the unhandled
1851 cases for now. */
1852 offset = LL_LINEAR_OFFSET (newloop);
1854 gcc_assert (LLE_DENOMINATOR (offset) == 1 &&
1855 lambda_vector_zerop (LLE_COEFFICIENTS (offset), depth));
1857 /* Now build the new lower bounds, and insert the statements
1858 necessary to generate it on the loop preheader. */
1859 newlowerbound = lle_to_gcc_expression (LL_LOWER_BOUND (newloop),
1860 LL_LINEAR_OFFSET (newloop),
1861 type,
1862 new_ivs,
1863 invariants, MAX_EXPR, &stmts);
1864 bsi_insert_on_edge (loop_preheader_edge (temp), stmts);
1865 bsi_commit_edge_inserts ();
1866 /* Build the new upper bound and insert its statements in the
1867 basic block of the exit condition */
1868 newupperbound = lle_to_gcc_expression (LL_UPPER_BOUND (newloop),
1869 LL_LINEAR_OFFSET (newloop),
1870 type,
1871 new_ivs,
1872 invariants, MIN_EXPR, &stmts);
1873 exit = temp->single_exit;
1874 exitcond = get_loop_exit_condition (temp);
1875 bb = bb_for_stmt (exitcond);
1876 bsi = bsi_start (bb);
1877 bsi_insert_after (&bsi, stmts, BSI_NEW_STMT);
1879 /* Create the new iv. */
1881 standard_iv_increment_position (temp, &bsi, &insert_after);
1882 create_iv (newlowerbound,
1883 build_int_cst (type, LL_STEP (newloop)),
1884 ivvar, temp, &bsi, insert_after, &ivvar,
1885 NULL);
1887 /* Unfortunately, the incremented ivvar that create_iv inserted may not
1888 dominate the block containing the exit condition.
1889 So we simply create our own incremented iv to use in the new exit
1890 test, and let redundancy elimination sort it out. */
1891 inc_stmt = build2 (PLUS_EXPR, type,
1892 ivvar, build_int_cst (type, LL_STEP (newloop)));
1893 inc_stmt = build2 (MODIFY_EXPR, void_type_node, SSA_NAME_VAR (ivvar),
1894 inc_stmt);
1895 ivvarinced = make_ssa_name (SSA_NAME_VAR (ivvar), inc_stmt);
1896 TREE_OPERAND (inc_stmt, 0) = ivvarinced;
1897 bsi = bsi_for_stmt (exitcond);
1898 bsi_insert_before (&bsi, inc_stmt, BSI_SAME_STMT);
1900 /* Replace the exit condition with the new upper bound
1901 comparison. */
1903 testtype = LL_STEP (newloop) >= 0 ? LE_EXPR : GE_EXPR;
1905 /* We want to build a conditional where true means exit the loop, and
1906 false means continue the loop.
1907 So swap the testtype if this isn't the way things are. */
1909 if (exit->flags & EDGE_FALSE_VALUE)
1910 testtype = swap_tree_comparison (testtype);
1912 COND_EXPR_COND (exitcond) = build2 (testtype,
1913 boolean_type_node,
1914 newupperbound, ivvarinced);
1915 update_stmt (exitcond);
1916 VEC_replace (tree, new_ivs, i, ivvar);
1918 i++;
1919 temp = temp->inner;
1922 /* Rewrite uses of the old ivs so that they are now specified in terms of
1923 the new ivs. */
1925 for (i = 0; VEC_iterate (tree, old_ivs, i, oldiv); i++)
1927 imm_use_iterator imm_iter;
1928 use_operand_p imm_use;
1929 tree oldiv_def;
1930 tree oldiv_stmt = SSA_NAME_DEF_STMT (oldiv);
1932 if (TREE_CODE (oldiv_stmt) == PHI_NODE)
1933 oldiv_def = PHI_RESULT (oldiv_stmt);
1934 else
1935 oldiv_def = SINGLE_SSA_TREE_OPERAND (oldiv_stmt, SSA_OP_DEF);
1936 gcc_assert (oldiv_def != NULL_TREE);
1938 FOR_EACH_IMM_USE_SAFE (imm_use, imm_iter, oldiv_def)
1940 tree stmt = USE_STMT (imm_use);
1941 use_operand_p use_p;
1942 ssa_op_iter iter;
1943 gcc_assert (TREE_CODE (stmt) != PHI_NODE);
1944 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
1946 if (USE_FROM_PTR (use_p) == oldiv)
1948 tree newiv, stmts;
1949 lambda_body_vector lbv, newlbv;
1950 /* Compute the new expression for the induction
1951 variable. */
1952 depth = VEC_length (tree, new_ivs);
1953 lbv = lambda_body_vector_new (depth);
1954 LBV_COEFFICIENTS (lbv)[i] = 1;
1956 newlbv = lambda_body_vector_compute_new (transform, lbv);
1958 newiv = lbv_to_gcc_expression (newlbv, TREE_TYPE (oldiv),
1959 new_ivs, &stmts);
1960 bsi = bsi_for_stmt (stmt);
1961 /* Insert the statements to build that
1962 expression. */
1963 bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
1964 propagate_value (use_p, newiv);
1965 update_stmt (stmt);
1971 VEC_free (tree, heap, new_ivs);
1974 /* Return TRUE if STMT is not an interesting statement from the perspective
1975 of determining whether we have a perfect loop nest. */
1977 static bool
1978 not_interesting_stmt (tree stmt)
1980 /* Note that COND_EXPRs aren't interesting, because if they were exiting the
1981 loop we would have already failed the number-of-exits test. */
1982 if (TREE_CODE (stmt) == LABEL_EXPR
1983 || TREE_CODE (stmt) == GOTO_EXPR
1984 || TREE_CODE (stmt) == COND_EXPR)
1985 return true;
1986 return false;
1989 /* Return TRUE if PHI uses DEF for its in-the-loop edge for LOOP. */
1991 static bool
1992 phi_loop_edge_uses_def (struct loop *loop, tree phi, tree def)
1994 int i;
1995 for (i = 0; i < PHI_NUM_ARGS (phi); i++)
1996 if (flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, i)->src))
1997 if (PHI_ARG_DEF (phi, i) == def)
1998 return true;
1999 return false;
2002 /* Return TRUE if STMT is a use of PHI_RESULT. */
2004 static bool
2005 stmt_uses_phi_result (tree stmt, tree phi_result)
2007 tree use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2009 /* This is conservatively true, because we only want SIMPLE bumpers
2010 of the form x +- constant for our pass. */
2011 return (use == phi_result);
2014 /* STMT is a bumper stmt for LOOP if the version it defines is used in the
2015 in-loop-edge in a phi node, and the operand it uses is the result of that
2016 phi node.
2017 I.E. i_29 = i_3 + 1
2018 i_3 = PHI (0, i_29); */
2020 static bool
2021 stmt_is_bumper_for_loop (struct loop *loop, tree stmt)
2023 tree use;
2024 tree def;
2025 imm_use_iterator iter;
2026 use_operand_p use_p;
2028 def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF);
2029 if (!def)
2030 return false;
2032 FOR_EACH_IMM_USE_FAST (use_p, iter, def)
2034 use = USE_STMT (use_p);
2035 if (TREE_CODE (use) == PHI_NODE)
2037 if (phi_loop_edge_uses_def (loop, use, def))
2038 if (stmt_uses_phi_result (stmt, PHI_RESULT (use)))
2039 return true;
2042 return false;
2046 /* Return true if LOOP is a perfect loop nest.
2047 Perfect loop nests are those loop nests where all code occurs in the
2048 innermost loop body.
2049 If S1 is a program statement that is not inside the innermost loop body,
2051 e.g.
2052 DO I = 1, 20
2053   S1
2054   DO J = 1, 20
2055     ...
2056   END DO
2057 END DO
2058 is not a perfect loop nest because of S1.
2060 DO I = 1, 20
2061   DO J = 1, 20
2062     S1
2063     ...
2064   END DO
2065 END DO
2066 is a perfect loop nest.
2068 Since we don't have high level loops anymore, we basically have to walk our
2069 statements and ignore those that are there because the loop needs them (i.e.
2070 the induction variable increment and the jump back to the top of the loop). */
2072 bool
2073 perfect_nest_p (struct loop *loop)
2075 basic_block *bbs;
2076 size_t i;
2077 tree exit_cond;
2079 if (!loop->inner)
2080 return true;
2081 bbs = get_loop_body (loop);
2082 exit_cond = get_loop_exit_condition (loop);
2083 for (i = 0; i < loop->num_nodes; i++)
2085 if (bbs[i]->loop_father == loop)
2087 block_stmt_iterator bsi;
2088 for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi); bsi_next (&bsi))
2090 tree stmt = bsi_stmt (bsi);
2091 if (stmt == exit_cond
2092 || not_interesting_stmt (stmt)
2093 || stmt_is_bumper_for_loop (loop, stmt))
2094 continue;
2095 free (bbs);
2096 return false;
2100 free (bbs);
2101 /* See if the inner loops are perfectly nested as well. */
2102 if (loop->inner)
2103 return perfect_nest_p (loop->inner);
2104 return true;
2107 /* Replace the uses of X in STMT, or uses with the same step as X, with Y. */
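/* Example, for illustration only: with X == i_3, XSTEP == 1 and Y == lniv_7
   (names invented here), a direct use of i_3 in STMT is replaced by lniv_7,
   and so is a use of any other SSA name whose scalar evolution in LOOP has
   an integer step equal to 1.  */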
2109 static void
2110 replace_uses_equiv_to_x_with_y (struct loop *loop, tree stmt, tree x,
2111 int xstep, tree y)
2113 ssa_op_iter iter;
2114 use_operand_p use_p;
2116 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
2118 tree use = USE_FROM_PTR (use_p);
2119 tree step = NULL_TREE;
2120 tree access_fn = NULL_TREE;
2123 access_fn = instantiate_parameters
2124 (loop, analyze_scalar_evolution (loop, use));
2125 if (access_fn != NULL_TREE && access_fn != chrec_dont_know)
2126 step = evolution_part_in_loop_num (access_fn, loop->num);
2127 if ((step && step != chrec_dont_know
2128 && TREE_CODE (step) == INTEGER_CST
2129 && int_cst_value (step) == xstep)
2130 || USE_FROM_PTR (use_p) == x)
2131 SET_USE (use_p, y);
2135 /* Return TRUE if STMT uses tree OP in its operands. */
2137 static bool
2138 stmt_uses_op (tree stmt, tree op)
2140 ssa_op_iter iter;
2141 tree use;
2143 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2145 if (use == op)
2146 return true;
2148 return false;
2151 /* Return true if STMT is an exit PHI for LOOP. */
2153 static bool
2154 exit_phi_for_loop_p (struct loop *loop, tree stmt)
2157 if (TREE_CODE (stmt) != PHI_NODE
2158 || PHI_NUM_ARGS (stmt) != 1
2159 || bb_for_stmt (stmt) != loop->single_exit->dest)
2160 return false;
2162 return true;
2165 /* Return true if STMT can be put back into the loop INNER by moving it to
2166 the beginning of that loop. */
2168 static bool
2169 can_put_in_inner_loop (struct loop *inner, tree stmt)
2171 imm_use_iterator imm_iter;
2172 use_operand_p use_p;
2173 basic_block use_bb = NULL;
2175 gcc_assert (TREE_CODE (stmt) == MODIFY_EXPR);
2176 if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS)
2177 || !expr_invariant_in_loop_p (inner, TREE_OPERAND (stmt, 1)))
2178 return false;
2180 /* We require that the basic block of all uses be the same, or the use be an
2181 exit phi. */
2182 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, TREE_OPERAND (stmt, 0))
2184 if (!exit_phi_for_loop_p (inner, USE_STMT (use_p)))
2186 basic_block immbb = bb_for_stmt (USE_STMT (use_p));
2188 if (!flow_bb_inside_loop_p (inner, immbb))
2189 return false;
2190 if (use_bb == NULL)
2191 use_bb = immbb;
2192 else if (immbb != use_bb)
2193 return false;
2196 return true;
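/* Example, for illustration only: a cast such as t_5 = (unsigned int) n_2
   whose right-hand side is invariant in INNER, which has no virtual
   operands, and whose uses all sit in a single basic block inside INNER
   (or in exit phis of INNER) can be moved to the start of INNER's body;
   anything else is rejected above.  */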
2201 /* Return TRUE if LOOP is an imperfect nest that we can convert to a perfect
2202 one. LOOPIVS is a vector of induction variables, one per loop.
2203 ATM, we only handle imperfect nests of depth 2, where all of the statements
2204 occur after the inner loop. */
2206 static bool
2207 can_convert_to_perfect_nest (struct loop *loop,
2208 VEC(tree,heap) *loopivs)
2210 basic_block *bbs;
2211 tree exit_condition, phi;
2212 size_t i;
2213 block_stmt_iterator bsi;
2214 basic_block exitdest;
2216 /* Can't handle triply nested+ loops yet. */
2217 if (!loop->inner || loop->inner->inner)
2218 return false;
2220 bbs = get_loop_body (loop);
2221 exit_condition = get_loop_exit_condition (loop);
2222 for (i = 0; i < loop->num_nodes; i++)
2224 if (bbs[i]->loop_father == loop)
2226 for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi); bsi_next (&bsi))
2228 size_t j;
2229 tree stmt = bsi_stmt (bsi);
2230 tree iv;
2232 if (stmt == exit_condition
2233 || not_interesting_stmt (stmt)
2234 || stmt_is_bumper_for_loop (loop, stmt))
2235 continue;
2236 /* If the statement uses inner loop ivs, we cannot handle it. */
2237 for (j = 1; VEC_iterate (tree, loopivs, j, iv); j++)
2238 if (stmt_uses_op (stmt, iv))
2239 goto fail;
2241 /* If this is a simple operation like a cast that is invariant
2242 in the inner loop, only used there, and we can place it
2243 there, then it's not going to hurt us.
2244 This means that we will propagate casts and other cheap
2245 invariant operations *back*
2246 into the inner loop if we can interchange the loop, on the
2247 theory that we are going to gain a lot more by interchanging
2248 the loop than we are by leaving some invariant code there for
2249 some other pass to clean up. */
2250 if (TREE_CODE (stmt) == MODIFY_EXPR
2251 && is_gimple_cast (TREE_OPERAND (stmt, 1))
2252 && can_put_in_inner_loop (loop->inner, stmt))
2253 continue;
2255 /* Otherwise, if the bb of a statement we care about isn't
2256 dominated by the header of the inner loop, then we can't
2257 handle this case right now. This test ensures that the
2258 statement comes completely *after* the inner loop. */
2259 if (!dominated_by_p (CDI_DOMINATORS,
2260 bb_for_stmt (stmt),
2261 loop->inner->header))
2262 goto fail;
2267 /* We also need to make sure the loop exit only has simple copy phis in it,
2268 otherwise we don't know how to transform it into a perfect nest right
2269 now. */
2270 exitdest = loop->single_exit->dest;
2272 for (phi = phi_nodes (exitdest); phi; phi = PHI_CHAIN (phi))
2273 if (PHI_NUM_ARGS (phi) != 1)
2274 goto fail;
2276 free (bbs);
2277 return true;
2279 fail:
2280 free (bbs);
2281 return false;
2284 /* Transform the loop nest into a perfect nest, if possible.
2285 LOOPS is the current struct loops *
2286 LOOP is the loop nest to transform into a perfect nest
2287 LBOUNDS are the lower bounds for the loops to transform
2288 UBOUNDS are the upper bounds for the loops to transform
2289 STEPS is the STEPS for the loops to transform.
2290 LOOPIVS is the induction variables for the loops to transform.
2292 Basically, for the case of
2294 FOR (i = 0; i < 50; i++)
2295  {
2296   FOR (j = 0; j < 50; j++)
2297    {
2298     <whatever>
2299    }
2300   <some code>
2301  }
2303 This function will transform it into a perfect loop nest by splitting the
2304 outer loop into two loops, like so:
2306 FOR (i = 0; i < 50; i++)
2307  {
2308   FOR (j = 0; j < 50; j++)
2309    {
2310     <whatever>
2311    }
2312  }
2314 FOR (i = 0; i < 50; i++)
2315  {
2316   <some code>
2317  }
2319 Return FALSE if we can't make this loop into a perfect nest. */
2321 static bool
2322 perfect_nestify (struct loops *loops,
2323 struct loop *loop,
2324 VEC(tree,heap) *lbounds,
2325 VEC(tree,heap) *ubounds,
2326 VEC(int,heap) *steps,
2327 VEC(tree,heap) *loopivs)
2329 basic_block *bbs;
2330 tree exit_condition;
2331 tree then_label, else_label, cond_stmt;
2332 basic_block preheaderbb, headerbb, bodybb, latchbb, olddest;
2333 int i;
2334 block_stmt_iterator bsi;
2335 bool insert_after;
2336 edge e;
2337 struct loop *newloop;
2338 tree phi;
2339 tree uboundvar;
2340 tree stmt;
2341 tree oldivvar, ivvar, ivvarinced;
2342 VEC(tree,heap) *phis = NULL;
2344 if (!can_convert_to_perfect_nest (loop, loopivs))
2345 return false;
2347 /* Create the new loop */
2349 olddest = loop->single_exit->dest;
2350 preheaderbb = loop_split_edge_with (loop->single_exit, NULL);
2351 headerbb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
2353 /* Push the exit phi nodes that we are moving. */
2354 for (phi = phi_nodes (olddest); phi; phi = PHI_CHAIN (phi))
2356 VEC_reserve (tree, heap, phis, 2);
2357 VEC_quick_push (tree, phis, PHI_RESULT (phi));
2358 VEC_quick_push (tree, phis, PHI_ARG_DEF (phi, 0));
2360 e = redirect_edge_and_branch (single_succ_edge (preheaderbb), headerbb);
2362 /* Remove the exit phis from the old basic block. Make sure to set
2363 PHI_RESULT to null so it doesn't get released. */
2364 while (phi_nodes (olddest) != NULL)
2366 SET_PHI_RESULT (phi_nodes (olddest), NULL);
2367 remove_phi_node (phi_nodes (olddest), NULL);
2370 /* and add them back to the new basic block. */
2371 while (VEC_length (tree, phis) != 0)
2373 tree def;
2374 tree phiname;
2375 def = VEC_pop (tree, phis);
2376 phiname = VEC_pop (tree, phis);
2377 phi = create_phi_node (phiname, preheaderbb);
2378 add_phi_arg (phi, def, single_pred_edge (preheaderbb));
2380 flush_pending_stmts (e);
2381 VEC_free (tree, heap, phis);
2383 bodybb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
2384 latchbb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
2385 make_edge (headerbb, bodybb, EDGE_FALLTHRU);
2386 then_label = build1 (GOTO_EXPR, void_type_node, tree_block_label (latchbb));
2387 else_label = build1 (GOTO_EXPR, void_type_node, tree_block_label (olddest));
2388 cond_stmt = build3 (COND_EXPR, void_type_node,
2389 build2 (NE_EXPR, boolean_type_node,
2390 integer_one_node,
2391 integer_zero_node),
2392 then_label, else_label);
2393 bsi = bsi_start (bodybb);
2394 bsi_insert_after (&bsi, cond_stmt, BSI_NEW_STMT);
2395 e = make_edge (bodybb, olddest, EDGE_FALSE_VALUE);
2396 make_edge (bodybb, latchbb, EDGE_TRUE_VALUE);
2397 make_edge (latchbb, headerbb, EDGE_FALLTHRU);
2399 /* Update the loop structures. */
2400 newloop = duplicate_loop (loops, loop, olddest->loop_father);
2401 newloop->header = headerbb;
2402 newloop->latch = latchbb;
2403 newloop->single_exit = e;
2404 add_bb_to_loop (latchbb, newloop);
2405 add_bb_to_loop (bodybb, newloop);
2406 add_bb_to_loop (headerbb, newloop);
2407 set_immediate_dominator (CDI_DOMINATORS, bodybb, headerbb);
2408 set_immediate_dominator (CDI_DOMINATORS, headerbb, preheaderbb);
2409 set_immediate_dominator (CDI_DOMINATORS, preheaderbb,
2410 loop->single_exit->src);
2411 set_immediate_dominator (CDI_DOMINATORS, latchbb, bodybb);
2412 set_immediate_dominator (CDI_DOMINATORS, olddest, bodybb);
2413 /* Create the new iv. */
2414 oldivvar = VEC_index (tree, loopivs, 0);
2415 ivvar = create_tmp_var (TREE_TYPE (oldivvar), "perfectiv");
2416 add_referenced_tmp_var (ivvar);
2417 standard_iv_increment_position (newloop, &bsi, &insert_after);
2418 create_iv (VEC_index (tree, lbounds, 0),
2419 build_int_cst (TREE_TYPE (oldivvar), VEC_index (int, steps, 0)),
2420 ivvar, newloop, &bsi, insert_after, &ivvar, &ivvarinced);
2422 /* Create the new upper bound. This may not be just a variable, so we copy
2423 it to one just in case. */
2425 exit_condition = get_loop_exit_condition (newloop);
2426 uboundvar = create_tmp_var (integer_type_node, "uboundvar");
2427 add_referenced_tmp_var (uboundvar);
2428 stmt = build2 (MODIFY_EXPR, void_type_node, uboundvar,
2429 VEC_index (tree, ubounds, 0));
2430 uboundvar = make_ssa_name (uboundvar, stmt);
2431 TREE_OPERAND (stmt, 0) = uboundvar;
2433 if (insert_after)
2434 bsi_insert_after (&bsi, stmt, BSI_SAME_STMT);
2435 else
2436 bsi_insert_before (&bsi, stmt, BSI_SAME_STMT);
2437 update_stmt (stmt);
2438 COND_EXPR_COND (exit_condition) = build2 (GE_EXPR,
2439 boolean_type_node,
2440 uboundvar,
2441 ivvarinced);
2442 update_stmt (exit_condition);
2443 bbs = get_loop_body_in_dom_order (loop);
2444 /* Now move the statements, and replace the induction variable in the moved
2445 statements with the correct loop induction variable. */
2446 oldivvar = VEC_index (tree, loopivs, 0);
2447 for (i = loop->num_nodes - 1; i >= 0 ; i--)
2449 block_stmt_iterator tobsi = bsi_last (bodybb);
2450 if (bbs[i]->loop_father == loop)
2452 /* If this is true, we are *before* the inner loop.
2453 If this isn't true, we are *after* it.
2455 The only time can_convert_to_perfect_nest returns true when we
2456 have statements before the inner loop is if they can be moved
2457 into the inner loop.
2459 The only time can_convert_to_perfect_nest returns true when we
2460 have statements after the inner loop is if they can be moved into
2461 the new split loop. */
2463 if (dominated_by_p (CDI_DOMINATORS, loop->inner->header, bbs[i]))
2465 for (bsi = bsi_last (bbs[i]); !bsi_end_p (bsi);)
2467 use_operand_p use_p;
2468 imm_use_iterator imm_iter;
2469 tree stmt = bsi_stmt (bsi);
2471 if (stmt == exit_condition
2472 || not_interesting_stmt (stmt)
2473 || stmt_is_bumper_for_loop (loop, stmt))
2475 if (!bsi_end_p (bsi))
2476 bsi_prev (&bsi);
2477 continue;
2479 /* Move this statement back into the inner loop.
2480 This looks a bit confusing, but we are really just
2481 finding the first non-exit phi use and moving the
2482 statement to the beginning of that use's basic
2483 block. */
2484 FOR_EACH_IMM_USE_SAFE (use_p, imm_iter,
2485 TREE_OPERAND (stmt, 0))
2487 tree imm_stmt = USE_STMT (use_p);
2488 if (!exit_phi_for_loop_p (loop->inner, imm_stmt))
2490 block_stmt_iterator tobsi = bsi_after_labels (bb_for_stmt (imm_stmt));
2491 bsi_move_after (&bsi, &tobsi);
2492 update_stmt (stmt);
2493 BREAK_FROM_SAFE_IMM_USE (imm_iter);
2498 else
2500 /* Note that the bsi only needs to be explicitly incremented
2501 when we don't move something, since it is automatically
2502 incremented when we do. */
2503 for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi);)
2505 ssa_op_iter i;
2506 tree n, stmt = bsi_stmt (bsi);
2508 if (stmt == exit_condition
2509 || not_interesting_stmt (stmt)
2510 || stmt_is_bumper_for_loop (loop, stmt))
2512 bsi_next (&bsi);
2513 continue;
2516 replace_uses_equiv_to_x_with_y (loop, stmt,
2517 oldivvar,
2518 VEC_index (int, steps, 0),
2519 ivvar);
2520 bsi_move_before (&bsi, &tobsi);
2522 /* If the statement has any virtual operands, they may
2523 need to be rewired because the original loop may
2524 still reference them. */
2525 FOR_EACH_SSA_TREE_OPERAND (n, stmt, i, SSA_OP_ALL_VIRTUALS)
2526 mark_sym_for_renaming (SSA_NAME_VAR (n));
2533 free (bbs);
2534 return perfect_nest_p (loop);
2537 /* Return true if TRANS is a legal transformation matrix that respects
2538 the dependence vectors in DEPENDENCE_RELATIONS. The conservative answer
2539 is false.
2541 "Wolfe proves that a unimodular transformation represented by the
2542 matrix T is legal when applied to a loop nest with a set of
2543 lexicographically non-negative distance vectors RDG if and only if
2544 for each vector d in RDG, (T.d >= 0) is lexicographically positive.
2545 i.e.: if and only if it transforms the lexicographically positive
2546 distance vectors to lexicographically positive vectors. Note that
2547 a unimodular matrix must transform the zero vector (and only it) to
2548 the zero vector." S.Muchnick. */
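/* Worked example, for illustration only (not part of the original source):
   with the loop interchange matrix

     T = | 0 1 |
         | 1 0 |

   and the distance vector d = (1, -1), T.d = (-1, 1), which is
   lexicographically negative, so the interchange is rejected for that
   dependence; for d = (1, 1), T.d = (1, 1) remains lexicographically
   positive and the dependence does not prevent the transform.  */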
2550 bool
2551 lambda_transform_legal_p (lambda_trans_matrix trans,
2552 int nb_loops,
2553 varray_type dependence_relations)
2555 unsigned int i, j;
2556 lambda_vector distres;
2557 struct data_dependence_relation *ddr;
2559 gcc_assert (LTM_COLSIZE (trans) == nb_loops
2560 && LTM_ROWSIZE (trans) == nb_loops);
2562 /* When there is an unknown relation in the dependence_relations, we
2563 know that it is not worth looking at this loop nest: give up. */
2564 ddr = (struct data_dependence_relation *)
2565 VARRAY_GENERIC_PTR (dependence_relations, 0);
2566 if (ddr == NULL)
2567 return true;
2568 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
2569 return false;
2571 distres = lambda_vector_new (nb_loops);
2573 /* For each distance vector in the dependence graph. */
2574 for (i = 0; i < VARRAY_ACTIVE_SIZE (dependence_relations); i++)
2576 ddr = (struct data_dependence_relation *)
2577 VARRAY_GENERIC_PTR (dependence_relations, i);
2579 /* Don't care about relations for which we know that there is no
2580 dependence, nor about read-read (aka. input dependences):
2581 these data accesses can happen in any order. */
2582 if (DDR_ARE_DEPENDENT (ddr) == chrec_known
2583 || (DR_IS_READ (DDR_A (ddr)) && DR_IS_READ (DDR_B (ddr))))
2584 continue;
2586 /* Conservatively answer: "this transformation is not valid". */
2587 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
2588 return false;
2590 /* If the dependence could not be captured by a distance vector,
2591 conservatively answer that the transform is not valid. */
2592 if (DDR_NUM_DIST_VECTS (ddr) == 0)
2593 return false;
2595 /* Compute trans.dist_vect */
2596 for (j = 0; j < DDR_NUM_DIST_VECTS (ddr); j++)
2598 lambda_matrix_vector_mult (LTM_MATRIX (trans), nb_loops, nb_loops,
2599 DDR_DIST_VECT (ddr, j), distres);
2601 if (!lambda_vector_lexico_pos (distres, nb_loops))
2602 return false;
2605 return true;
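/* A minimal standalone sketch (not GCC code; the names below are invented
   for the illustration) of the per-vector test performed above: multiply
   the transform matrix by a distance vector and check that the first
   non-zero entry of the result, if any, is positive (the zero vector is
   accepted), mirroring what lambda_matrix_vector_mult followed by
   lambda_vector_lexico_pos does.  */

static int
sketch_lexico_pos_p (const int *v, int n)
{
  int i;
  for (i = 0; i < n; i++)
    {
      if (v[i] < 0)
        return 0;
      if (v[i] > 0)
        return 1;
    }
  return 1;                     /* The zero vector is acceptable.  */
}

static int
sketch_transform_respects_dist_p (int n, const int *t /* n x n, row-major */,
                                  const int *d)
{
  int i, j, res[8];             /* The sketch assumes n <= 8.  */

  for (i = 0; i < n; i++)
    {
      res[i] = 0;
      for (j = 0; j < n; j++)
        res[i] += t[i * n + j] * d[j];
    }
  return sketch_lexico_pos_p (res, n);
}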