1 /* Loop transformation code generation
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Daniel Berlin <dberlin@dberlin.org>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
30 #include "basic-block.h"
31 #include "diagnostic-core.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
39 #include "tree-chrec.h"
40 #include "tree-data-ref.h"
41 #include "tree-pass.h"
42 #include "tree-scalar-evolution.h"
46 #include "pointer-set.h"
48 /* This loop nest code generation is based on non-singular matrix
51 A little terminology and a general sketch of the algorithm. See "A singular
52 loop transformation framework based on non-singular matrices" by Wei Li and
53 Keshav Pingali for formal proofs that the various statements below are
56 A loop iteration space represents the points traversed by the loop. A point in the
57 iteration space can be represented by a vector of size <loop depth>. You can
58 therefore represent the iteration space as an integral combinations of a set
61 A loop iteration space is dense if every integer point between the loop
62 bounds is a point in the iteration space. Every loop with a step of 1
63 therefore has a dense iteration space.
65 for i = 1 to 3, step 1 is a dense iteration space.
67 A loop iteration space is sparse if it is not dense. That is, the iteration
68 space skips integer points that are within the loop bounds.
70 for i = 1 to 3, step 2 is a sparse iteration space, because the integer point
73 Dense source spaces are easy to transform, because they don't skip any
74 points to begin with. Thus we can compute the exact bounds of the target
75 space using min/max and floor/ceil.
77 For a dense source space, we take the transformation matrix, decompose it
78 into a lower triangular part (H) and a unimodular part (U).
79 We then compute the auxiliary space from the unimodular part (source loop
80 nest . U = auxiliary space) , which has two important properties:
81 1. It traverses the iterations in the same lexicographic order as the source
83 2. It is a dense space when the source is a dense space (even if the target
84 space is going to be sparse).
86 Given the auxiliary space, we use the lower triangular part to compute the
87 bounds in the target space by simple matrix multiplication.
88 The gaps in the target space (IE the new loop step sizes) will be the
89 diagonals of the H matrix.
91 Sparse source spaces require another step, because you can't directly compute
92 the exact bounds of the auxiliary and target space from the sparse space.
93 Rather than try to come up with a separate algorithm to handle sparse source
94 spaces directly, we just find a legal transformation matrix that gives you
95 the sparse source space, from a dense space, and then transform the dense
98 For a regular sparse space, you can represent the source space as an integer
99 lattice, and the base space of that lattice will always be dense. Thus, we
100 effectively use the lattice to figure out the transformation from the lattice
101 base space, to the sparse iteration space (IE what transform was applied to
102 the dense space to make it sparse). We then compose this transform with the
103 transformation matrix specified by the user (since our matrix transformations
104 are closed under composition, this is okay). We can then use the base space
105 (which is dense) plus the composed transformation matrix, to compute the rest
106 of the transform using the dense space algorithm above.
108 In other words, our sparse source space (B) is decomposed into a dense base
109 space (A), and a matrix (L) that transforms A into B, such that A.L = B.
110 We then compute the composition of L and the user transformation matrix (T),
111 so that T is now a transform from A to the result, instead of from B to the
113 IE A.(LT) = result instead of B.T = result
114 Since A is now a dense source space, we can use the dense source space
115 algorithm above to compute the result of applying transform (LT) to A.
117 Fourier-Motzkin elimination is used to compute the bounds of the base space
120 static bool perfect_nestify (struct loop
*, VEC(tree
,heap
) *,
121 VEC(tree
,heap
) *, VEC(int,heap
) *,
123 /* Lattice stuff that is internal to the code generation algorithm. */
125 typedef struct lambda_lattice_s
127 /* Lattice base matrix. */
129 /* Lattice dimension. */
131 /* Origin vector for the coefficients. */
132 lambda_vector origin
;
133 /* Origin matrix for the invariants. */
134 lambda_matrix origin_invariants
;
135 /* Number of invariants. */
139 #define LATTICE_BASE(T) ((T)->base)
140 #define LATTICE_DIMENSION(T) ((T)->dimension)
141 #define LATTICE_ORIGIN(T) ((T)->origin)
142 #define LATTICE_ORIGIN_INVARIANTS(T) ((T)->origin_invariants)
143 #define LATTICE_INVARIANTS(T) ((T)->invariants)
145 static bool lle_equal (lambda_linear_expression
, lambda_linear_expression
,
147 static lambda_lattice
lambda_lattice_new (int, int, struct obstack
*);
148 static lambda_lattice
lambda_lattice_compute_base (lambda_loopnest
,
151 static bool can_convert_to_perfect_nest (struct loop
*);
153 /* Create a new lambda loop in LAMBDA_OBSTACK. */
156 lambda_loop_new (struct obstack
* lambda_obstack
)
158 lambda_loop result
= (lambda_loop
)
159 obstack_alloc (lambda_obstack
, sizeof (struct lambda_loop_s
));
160 memset (result
, 0, sizeof (struct lambda_loop_s
));
164 /* Create a new lambda body vector. */
167 lambda_body_vector_new (int size
, struct obstack
* lambda_obstack
)
169 lambda_body_vector ret
;
171 ret
= (lambda_body_vector
) obstack_alloc (lambda_obstack
,
173 LBV_COEFFICIENTS (ret
) = lambda_vector_new (size
);
174 LBV_SIZE (ret
) = size
;
175 LBV_DENOMINATOR (ret
) = 1;
179 /* Compute the new coefficients for the vector based on the
180 *inverse* of the transformation matrix. */
183 lambda_body_vector_compute_new (lambda_trans_matrix transform
,
184 lambda_body_vector vect
,
185 struct obstack
* lambda_obstack
)
187 lambda_body_vector temp
;
190 /* Make sure the matrix is square. */
191 gcc_assert (LTM_ROWSIZE (transform
) == LTM_COLSIZE (transform
));
193 depth
= LTM_ROWSIZE (transform
);
195 temp
= lambda_body_vector_new (depth
, lambda_obstack
);
196 LBV_DENOMINATOR (temp
) =
197 LBV_DENOMINATOR (vect
) * LTM_DENOMINATOR (transform
);
198 lambda_vector_matrix_mult (LBV_COEFFICIENTS (vect
), depth
,
199 LTM_MATRIX (transform
), depth
,
200 LBV_COEFFICIENTS (temp
));
201 LBV_SIZE (temp
) = LBV_SIZE (vect
);
205 /* Print out a lambda body vector. */
208 print_lambda_body_vector (FILE * outfile
, lambda_body_vector body
)
210 print_lambda_vector (outfile
, LBV_COEFFICIENTS (body
), LBV_SIZE (body
));
213 /* Return TRUE if two linear expressions are equal. */
216 lle_equal (lambda_linear_expression lle1
, lambda_linear_expression lle2
,
217 int depth
, int invariants
)
221 if (lle1
== NULL
|| lle2
== NULL
)
223 if (LLE_CONSTANT (lle1
) != LLE_CONSTANT (lle2
))
225 if (LLE_DENOMINATOR (lle1
) != LLE_DENOMINATOR (lle2
))
227 for (i
= 0; i
< depth
; i
++)
228 if (LLE_COEFFICIENTS (lle1
)[i
] != LLE_COEFFICIENTS (lle2
)[i
])
230 for (i
= 0; i
< invariants
; i
++)
231 if (LLE_INVARIANT_COEFFICIENTS (lle1
)[i
] !=
232 LLE_INVARIANT_COEFFICIENTS (lle2
)[i
])
237 /* Create a new linear expression with dimension DIM, and total number
238 of invariants INVARIANTS. */
240 lambda_linear_expression
241 lambda_linear_expression_new (int dim
, int invariants
,
242 struct obstack
* lambda_obstack
)
244 lambda_linear_expression ret
;
246 ret
= (lambda_linear_expression
)obstack_alloc (lambda_obstack
,
248 LLE_COEFFICIENTS (ret
) = lambda_vector_new (dim
);
249 LLE_CONSTANT (ret
) = 0;
250 LLE_INVARIANT_COEFFICIENTS (ret
) = lambda_vector_new (invariants
);
251 LLE_DENOMINATOR (ret
) = 1;
252 LLE_NEXT (ret
) = NULL
;
257 /* Print out a linear expression EXPR, with SIZE coefficients, to OUTFILE.
258 The starting letter used for variable names is START. */
261 print_linear_expression (FILE * outfile
, lambda_vector expr
, int size
,
266 for (i
= 0; i
< size
; i
++)
273 fprintf (outfile
, "-");
276 else if (expr
[i
] > 0)
277 fprintf (outfile
, " + ");
279 fprintf (outfile
, " - ");
280 if (abs (expr
[i
]) == 1)
281 fprintf (outfile
, "%c", start
+ i
);
283 fprintf (outfile
, "%d%c", abs (expr
[i
]), start
+ i
);
288 /* Print out a lambda linear expression structure, EXPR, to OUTFILE. The
289 depth/number of coefficients is given by DEPTH, the number of invariants is
290 given by INVARIANTS, and the character to start variable names with is given
294 print_lambda_linear_expression (FILE * outfile
,
295 lambda_linear_expression expr
,
296 int depth
, int invariants
, char start
)
298 fprintf (outfile
, "\tLinear expression: ");
299 print_linear_expression (outfile
, LLE_COEFFICIENTS (expr
), depth
, start
);
300 fprintf (outfile
, " constant: %d ", LLE_CONSTANT (expr
));
301 fprintf (outfile
, " invariants: ");
302 print_linear_expression (outfile
, LLE_INVARIANT_COEFFICIENTS (expr
),
304 fprintf (outfile
, " denominator: %d\n", LLE_DENOMINATOR (expr
));
307 /* Print a lambda loop structure LOOP to OUTFILE. The depth/number of
308 coefficients is given by DEPTH, the number of invariants is
309 given by INVARIANTS, and the character to start variable names with is given
313 print_lambda_loop (FILE * outfile
, lambda_loop loop
, int depth
,
314 int invariants
, char start
)
317 lambda_linear_expression expr
;
321 expr
= LL_LINEAR_OFFSET (loop
);
322 step
= LL_STEP (loop
);
323 fprintf (outfile
, " step size = %d \n", step
);
327 fprintf (outfile
, " linear offset: \n");
328 print_lambda_linear_expression (outfile
, expr
, depth
, invariants
,
332 fprintf (outfile
, " lower bound: \n");
333 for (expr
= LL_LOWER_BOUND (loop
); expr
!= NULL
; expr
= LLE_NEXT (expr
))
334 print_lambda_linear_expression (outfile
, expr
, depth
, invariants
, start
);
335 fprintf (outfile
, " upper bound: \n");
336 for (expr
= LL_UPPER_BOUND (loop
); expr
!= NULL
; expr
= LLE_NEXT (expr
))
337 print_lambda_linear_expression (outfile
, expr
, depth
, invariants
, start
);
340 /* Create a new loop nest structure with DEPTH loops, and INVARIANTS as the
341 number of invariants. */
344 lambda_loopnest_new (int depth
, int invariants
,
345 struct obstack
* lambda_obstack
)
348 ret
= (lambda_loopnest
)obstack_alloc (lambda_obstack
, sizeof (*ret
));
350 LN_LOOPS (ret
) = (lambda_loop
*)
351 obstack_alloc (lambda_obstack
, depth
* sizeof(LN_LOOPS(ret
)));
352 LN_DEPTH (ret
) = depth
;
353 LN_INVARIANTS (ret
) = invariants
;
358 /* Print a lambda loopnest structure, NEST, to OUTFILE. The starting
359 character to use for loop names is given by START. */
362 print_lambda_loopnest (FILE * outfile
, lambda_loopnest nest
, char start
)
365 for (i
= 0; i
< LN_DEPTH (nest
); i
++)
367 fprintf (outfile
, "Loop %c\n", start
+ i
);
368 print_lambda_loop (outfile
, LN_LOOPS (nest
)[i
], LN_DEPTH (nest
),
369 LN_INVARIANTS (nest
), 'i');
370 fprintf (outfile
, "\n");
374 /* Allocate a new lattice structure of DEPTH x DEPTH, with INVARIANTS number
377 static lambda_lattice
378 lambda_lattice_new (int depth
, int invariants
, struct obstack
* lambda_obstack
)
381 = (lambda_lattice
)obstack_alloc (lambda_obstack
, sizeof (*ret
));
382 LATTICE_BASE (ret
) = lambda_matrix_new (depth
, depth
, lambda_obstack
);
383 LATTICE_ORIGIN (ret
) = lambda_vector_new (depth
);
384 LATTICE_ORIGIN_INVARIANTS (ret
) = lambda_matrix_new (depth
, invariants
,
386 LATTICE_DIMENSION (ret
) = depth
;
387 LATTICE_INVARIANTS (ret
) = invariants
;
391 /* Compute the lattice base for NEST. The lattice base is essentially a
392 non-singular transform from a dense base space to a sparse iteration space.
393 We use it so that we don't have to specially handle the case of a sparse
394 iteration space in other parts of the algorithm. As a result, this routine
395 only does something interesting (IE produce a matrix that isn't the
396 identity matrix) if NEST is a sparse space. */
398 static lambda_lattice
399 lambda_lattice_compute_base (lambda_loopnest nest
,
400 struct obstack
* lambda_obstack
)
403 int depth
, invariants
;
408 lambda_linear_expression expression
;
410 depth
= LN_DEPTH (nest
);
411 invariants
= LN_INVARIANTS (nest
);
413 ret
= lambda_lattice_new (depth
, invariants
, lambda_obstack
);
414 base
= LATTICE_BASE (ret
);
415 for (i
= 0; i
< depth
; i
++)
417 loop
= LN_LOOPS (nest
)[i
];
419 step
= LL_STEP (loop
);
420 /* If we have a step of 1, then the base is one, and the
421 origin and invariant coefficients are 0. */
424 for (j
= 0; j
< depth
; j
++)
427 LATTICE_ORIGIN (ret
)[i
] = 0;
428 for (j
= 0; j
< invariants
; j
++)
429 LATTICE_ORIGIN_INVARIANTS (ret
)[i
][j
] = 0;
433 /* Otherwise, we need the lower bound expression (which must
434 be an affine function) to determine the base. */
435 expression
= LL_LOWER_BOUND (loop
);
436 gcc_assert (expression
&& !LLE_NEXT (expression
)
437 && LLE_DENOMINATOR (expression
) == 1);
439 /* The lower triangular portion of the base is going to be the
440 coefficient times the step */
441 for (j
= 0; j
< i
; j
++)
442 base
[i
][j
] = LLE_COEFFICIENTS (expression
)[j
]
443 * LL_STEP (LN_LOOPS (nest
)[j
]);
445 for (j
= i
+ 1; j
< depth
; j
++)
448 /* Origin for this loop is the constant of the lower bound
450 LATTICE_ORIGIN (ret
)[i
] = LLE_CONSTANT (expression
);
452 /* Coefficient for the invariants are equal to the invariant
453 coefficients in the expression. */
454 for (j
= 0; j
< invariants
; j
++)
455 LATTICE_ORIGIN_INVARIANTS (ret
)[i
][j
] =
456 LLE_INVARIANT_COEFFICIENTS (expression
)[j
];
/* Compute the least common multiple of two numbers A and B.
   NOTE(review): abs(a) * abs(b) can overflow int for large magnitudes
   before the division — acceptable here only because coefficients stay
   small; confirm if reused elsewhere.  */

static int
least_common_multiple (int a, int b)
{
  return (abs (a) * abs (b) / gcd (a, b));
}
470 /* Perform Fourier-Motzkin elimination to calculate the bounds of the
472 Fourier-Motzkin is a way of reducing systems of linear inequalities so that
473 it is easy to calculate the answer and bounds.
474 A sketch of how it works:
475 Given a system of linear inequalities, ai * xj >= bk, you can always
476 rewrite the constraints so they are all of the form
477 a <= x, or x <= b, or x >= constant for some x in x1 ... xj (and some b
478 in b1 ... bk, and some a in a1...ai)
479 You can then eliminate this x from the non-constant inequalities by
480 rewriting these as a <= b, x >= constant, and delete the x variable.
481 You can then repeat this for any remaining x variables, and then we have
482 an easy to use variable <= constant (or no variables at all) form that we
483 can construct our bounds from.
485 In our case, each time we eliminate, we construct part of the bound from
486 the ith variable, then delete the ith variable.
488 Remember the constant are in our vector a, our coefficient matrix is A,
489 and our invariant coefficient matrix is B.
491 SIZE is the size of the matrices being passed.
492 DEPTH is the loop nest depth.
493 INVARIANTS is the number of loop invariants.
494 A, B, and a are the coefficient matrix, invariant coefficient, and a
495 vector of constants, respectively. */
497 static lambda_loopnest
498 compute_nest_using_fourier_motzkin (int size
,
504 struct obstack
* lambda_obstack
)
507 int multiple
, f1
, f2
;
509 lambda_linear_expression expression
;
511 lambda_loopnest auxillary_nest
;
512 lambda_matrix swapmatrix
, A1
, B1
;
513 lambda_vector swapvector
, a1
;
516 A1
= lambda_matrix_new (128, depth
, lambda_obstack
);
517 B1
= lambda_matrix_new (128, invariants
, lambda_obstack
);
518 a1
= lambda_vector_new (128);
520 auxillary_nest
= lambda_loopnest_new (depth
, invariants
, lambda_obstack
);
522 for (i
= depth
- 1; i
>= 0; i
--)
524 loop
= lambda_loop_new (lambda_obstack
);
525 LN_LOOPS (auxillary_nest
)[i
] = loop
;
528 for (j
= 0; j
< size
; j
++)
532 /* Any linear expression in the matrix with a coefficient less
533 than 0 becomes part of the new lower bound. */
534 expression
= lambda_linear_expression_new (depth
, invariants
,
537 for (k
= 0; k
< i
; k
++)
538 LLE_COEFFICIENTS (expression
)[k
] = A
[j
][k
];
540 for (k
= 0; k
< invariants
; k
++)
541 LLE_INVARIANT_COEFFICIENTS (expression
)[k
] = -1 * B
[j
][k
];
543 LLE_DENOMINATOR (expression
) = -1 * A
[j
][i
];
544 LLE_CONSTANT (expression
) = -1 * a
[j
];
546 /* Ignore if identical to the existing lower bound. */
547 if (!lle_equal (LL_LOWER_BOUND (loop
),
548 expression
, depth
, invariants
))
550 LLE_NEXT (expression
) = LL_LOWER_BOUND (loop
);
551 LL_LOWER_BOUND (loop
) = expression
;
555 else if (A
[j
][i
] > 0)
557 /* Any linear expression with a coefficient greater than 0
558 becomes part of the new upper bound. */
559 expression
= lambda_linear_expression_new (depth
, invariants
,
561 for (k
= 0; k
< i
; k
++)
562 LLE_COEFFICIENTS (expression
)[k
] = -1 * A
[j
][k
];
564 for (k
= 0; k
< invariants
; k
++)
565 LLE_INVARIANT_COEFFICIENTS (expression
)[k
] = B
[j
][k
];
567 LLE_DENOMINATOR (expression
) = A
[j
][i
];
568 LLE_CONSTANT (expression
) = a
[j
];
570 /* Ignore if identical to the existing upper bound. */
571 if (!lle_equal (LL_UPPER_BOUND (loop
),
572 expression
, depth
, invariants
))
574 LLE_NEXT (expression
) = LL_UPPER_BOUND (loop
);
575 LL_UPPER_BOUND (loop
) = expression
;
581 /* This portion creates a new system of linear inequalities by deleting
582 the i'th variable, reducing the system by one variable. */
584 for (j
= 0; j
< size
; j
++)
586 /* If the coefficient for the i'th variable is 0, then we can just
587 eliminate the variable straightaway. Otherwise, we have to
588 multiply through by the coefficients we are eliminating. */
591 lambda_vector_copy (A
[j
], A1
[newsize
], depth
);
592 lambda_vector_copy (B
[j
], B1
[newsize
], invariants
);
596 else if (A
[j
][i
] > 0)
598 for (k
= 0; k
< size
; k
++)
602 multiple
= least_common_multiple (A
[j
][i
], A
[k
][i
]);
603 f1
= multiple
/ A
[j
][i
];
604 f2
= -1 * multiple
/ A
[k
][i
];
606 lambda_vector_add_mc (A
[j
], f1
, A
[k
], f2
,
608 lambda_vector_add_mc (B
[j
], f1
, B
[k
], f2
,
609 B1
[newsize
], invariants
);
610 a1
[newsize
] = f1
* a
[j
] + f2
* a
[k
];
632 return auxillary_nest
;
635 /* Compute the loop bounds for the auxiliary space NEST.
636 Input system used is Ax <= b. TRANS is the unimodular transformation.
637 Given the original nest, this function will
638 1. Convert the nest into matrix form, which consists of a matrix for the
639 coefficients, a matrix for the
640 invariant coefficients, and a vector for the constants.
641 2. Use the matrix form to calculate the lattice base for the nest (which is
643 3. Compose the dense space transform with the user specified transform, to
644 get a transform we can easily calculate transformed bounds for.
645 4. Multiply the composed transformation matrix times the matrix form of the
647 5. Transform the newly created matrix (from step 4) back into a loop nest
648 using Fourier-Motzkin elimination to figure out the bounds. */
650 static lambda_loopnest
651 lambda_compute_auxillary_space (lambda_loopnest nest
,
652 lambda_trans_matrix trans
,
653 struct obstack
* lambda_obstack
)
655 lambda_matrix A
, B
, A1
, B1
;
657 lambda_matrix invertedtrans
;
658 int depth
, invariants
, size
;
661 lambda_linear_expression expression
;
662 lambda_lattice lattice
;
664 depth
= LN_DEPTH (nest
);
665 invariants
= LN_INVARIANTS (nest
);
667 /* Unfortunately, we can't know the number of constraints we'll have
668 ahead of time, but this should be enough even in ridiculous loop nest
669 cases. We must not go over this limit. */
670 A
= lambda_matrix_new (128, depth
, lambda_obstack
);
671 B
= lambda_matrix_new (128, invariants
, lambda_obstack
);
672 a
= lambda_vector_new (128);
674 A1
= lambda_matrix_new (128, depth
, lambda_obstack
);
675 B1
= lambda_matrix_new (128, invariants
, lambda_obstack
);
676 a1
= lambda_vector_new (128);
678 /* Store the bounds in the equation matrix A, constant vector a, and
679 invariant matrix B, so that we have Ax <= a + B.
680 This requires a little equation rearranging so that everything is on the
681 correct side of the inequality. */
683 for (i
= 0; i
< depth
; i
++)
685 loop
= LN_LOOPS (nest
)[i
];
687 /* First we do the lower bound. */
688 if (LL_STEP (loop
) > 0)
689 expression
= LL_LOWER_BOUND (loop
);
691 expression
= LL_UPPER_BOUND (loop
);
693 for (; expression
!= NULL
; expression
= LLE_NEXT (expression
))
695 /* Fill in the coefficient. */
696 for (j
= 0; j
< i
; j
++)
697 A
[size
][j
] = LLE_COEFFICIENTS (expression
)[j
];
699 /* And the invariant coefficient. */
700 for (j
= 0; j
< invariants
; j
++)
701 B
[size
][j
] = LLE_INVARIANT_COEFFICIENTS (expression
)[j
];
703 /* And the constant. */
704 a
[size
] = LLE_CONSTANT (expression
);
706 /* Convert (2x+3y+2+b)/4 <= z to 2x+3y-4z <= -2-b. IE put all
707 constants and single variables on */
708 A
[size
][i
] = -1 * LLE_DENOMINATOR (expression
);
710 for (j
= 0; j
< invariants
; j
++)
714 /* Need to increase matrix sizes above. */
715 gcc_assert (size
<= 127);
719 /* Then do the exact same thing for the upper bounds. */
720 if (LL_STEP (loop
) > 0)
721 expression
= LL_UPPER_BOUND (loop
);
723 expression
= LL_LOWER_BOUND (loop
);
725 for (; expression
!= NULL
; expression
= LLE_NEXT (expression
))
727 /* Fill in the coefficient. */
728 for (j
= 0; j
< i
; j
++)
729 A
[size
][j
] = LLE_COEFFICIENTS (expression
)[j
];
731 /* And the invariant coefficient. */
732 for (j
= 0; j
< invariants
; j
++)
733 B
[size
][j
] = LLE_INVARIANT_COEFFICIENTS (expression
)[j
];
735 /* And the constant. */
736 a
[size
] = LLE_CONSTANT (expression
);
738 /* Convert z <= (2x+3y+2+b)/4 to -2x-3y+4z <= 2+b. */
739 for (j
= 0; j
< i
; j
++)
741 A
[size
][i
] = LLE_DENOMINATOR (expression
);
743 /* Need to increase matrix sizes above. */
744 gcc_assert (size
<= 127);
749 /* Compute the lattice base x = base * y + origin, where y is the
751 lattice
= lambda_lattice_compute_base (nest
, lambda_obstack
);
753 /* Ax <= a + B then becomes ALy <= a+B - A*origin. L is the lattice base */
756 lambda_matrix_mult (A
, LATTICE_BASE (lattice
), A1
, size
, depth
, depth
);
758 /* a1 = a - A * origin constant. */
759 lambda_matrix_vector_mult (A
, size
, depth
, LATTICE_ORIGIN (lattice
), a1
);
760 lambda_vector_add_mc (a
, 1, a1
, -1, a1
, size
);
762 /* B1 = B - A * origin invariant. */
763 lambda_matrix_mult (A
, LATTICE_ORIGIN_INVARIANTS (lattice
), B1
, size
, depth
,
765 lambda_matrix_add_mc (B
, 1, B1
, -1, B1
, size
, invariants
);
767 /* Now compute the auxiliary space bounds by first inverting U, multiplying
768 it by A1, then performing Fourier-Motzkin. */
770 invertedtrans
= lambda_matrix_new (depth
, depth
, lambda_obstack
);
772 /* Compute the inverse of U. */
773 lambda_matrix_inverse (LTM_MATRIX (trans
),
774 invertedtrans
, depth
, lambda_obstack
);
777 lambda_matrix_mult (A1
, invertedtrans
, A
, size
, depth
, depth
);
779 return compute_nest_using_fourier_motzkin (size
, depth
, invariants
,
780 A
, B1
, a1
, lambda_obstack
);
783 /* Compute the loop bounds for the target space, using the bounds of
784 the auxiliary nest AUXILLARY_NEST, and the triangular matrix H.
785 The target space loop bounds are computed by multiplying the triangular
786 matrix H by the auxiliary nest, to get the new loop bounds. The sign of
787 the loop steps (positive or negative) is then used to swap the bounds if
788 the loop counts downwards.
789 Return the target loopnest. */
791 static lambda_loopnest
792 lambda_compute_target_space (lambda_loopnest auxillary_nest
,
793 lambda_trans_matrix H
, lambda_vector stepsigns
,
794 struct obstack
* lambda_obstack
)
796 lambda_matrix inverse
, H1
;
797 int determinant
, i
, j
;
801 lambda_loopnest target_nest
;
802 int depth
, invariants
;
803 lambda_matrix target
;
805 lambda_loop auxillary_loop
, target_loop
;
806 lambda_linear_expression expression
, auxillary_expr
, target_expr
, tmp_expr
;
808 depth
= LN_DEPTH (auxillary_nest
);
809 invariants
= LN_INVARIANTS (auxillary_nest
);
811 inverse
= lambda_matrix_new (depth
, depth
, lambda_obstack
);
812 determinant
= lambda_matrix_inverse (LTM_MATRIX (H
), inverse
, depth
,
815 /* H1 is H excluding its diagonal. */
816 H1
= lambda_matrix_new (depth
, depth
, lambda_obstack
);
817 lambda_matrix_copy (LTM_MATRIX (H
), H1
, depth
, depth
);
819 for (i
= 0; i
< depth
; i
++)
822 /* Computes the linear offsets of the loop bounds. */
823 target
= lambda_matrix_new (depth
, depth
, lambda_obstack
);
824 lambda_matrix_mult (H1
, inverse
, target
, depth
, depth
, depth
);
826 target_nest
= lambda_loopnest_new (depth
, invariants
, lambda_obstack
);
828 for (i
= 0; i
< depth
; i
++)
831 /* Get a new loop structure. */
832 target_loop
= lambda_loop_new (lambda_obstack
);
833 LN_LOOPS (target_nest
)[i
] = target_loop
;
835 /* Computes the gcd of the coefficients of the linear part. */
836 gcd1
= lambda_vector_gcd (target
[i
], i
);
838 /* Include the denominator in the GCD. */
839 gcd1
= gcd (gcd1
, determinant
);
841 /* Now divide through by the gcd. */
842 for (j
= 0; j
< i
; j
++)
843 target
[i
][j
] = target
[i
][j
] / gcd1
;
845 expression
= lambda_linear_expression_new (depth
, invariants
,
847 lambda_vector_copy (target
[i
], LLE_COEFFICIENTS (expression
), depth
);
848 LLE_DENOMINATOR (expression
) = determinant
/ gcd1
;
849 LLE_CONSTANT (expression
) = 0;
850 lambda_vector_clear (LLE_INVARIANT_COEFFICIENTS (expression
),
852 LL_LINEAR_OFFSET (target_loop
) = expression
;
855 /* For each loop, compute the new bounds from H. */
856 for (i
= 0; i
< depth
; i
++)
858 auxillary_loop
= LN_LOOPS (auxillary_nest
)[i
];
859 target_loop
= LN_LOOPS (target_nest
)[i
];
860 LL_STEP (target_loop
) = LTM_MATRIX (H
)[i
][i
];
861 factor
= LTM_MATRIX (H
)[i
][i
];
863 /* First we do the lower bound. */
864 auxillary_expr
= LL_LOWER_BOUND (auxillary_loop
);
866 for (; auxillary_expr
!= NULL
;
867 auxillary_expr
= LLE_NEXT (auxillary_expr
))
869 target_expr
= lambda_linear_expression_new (depth
, invariants
,
871 lambda_vector_matrix_mult (LLE_COEFFICIENTS (auxillary_expr
),
872 depth
, inverse
, depth
,
873 LLE_COEFFICIENTS (target_expr
));
874 lambda_vector_mult_const (LLE_COEFFICIENTS (target_expr
),
875 LLE_COEFFICIENTS (target_expr
), depth
,
878 LLE_CONSTANT (target_expr
) = LLE_CONSTANT (auxillary_expr
) * factor
;
879 lambda_vector_copy (LLE_INVARIANT_COEFFICIENTS (auxillary_expr
),
880 LLE_INVARIANT_COEFFICIENTS (target_expr
),
882 lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS (target_expr
),
883 LLE_INVARIANT_COEFFICIENTS (target_expr
),
885 LLE_DENOMINATOR (target_expr
) = LLE_DENOMINATOR (auxillary_expr
);
887 if (!lambda_vector_zerop (LLE_COEFFICIENTS (target_expr
), depth
))
889 LLE_CONSTANT (target_expr
) = LLE_CONSTANT (target_expr
)
891 lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS
893 LLE_INVARIANT_COEFFICIENTS
894 (target_expr
), invariants
,
896 LLE_DENOMINATOR (target_expr
) =
897 LLE_DENOMINATOR (target_expr
) * determinant
;
899 /* Find the gcd and divide by it here, rather than doing it
900 at the tree level. */
901 gcd1
= lambda_vector_gcd (LLE_COEFFICIENTS (target_expr
), depth
);
902 gcd2
= lambda_vector_gcd (LLE_INVARIANT_COEFFICIENTS (target_expr
),
904 gcd1
= gcd (gcd1
, gcd2
);
905 gcd1
= gcd (gcd1
, LLE_CONSTANT (target_expr
));
906 gcd1
= gcd (gcd1
, LLE_DENOMINATOR (target_expr
));
907 for (j
= 0; j
< depth
; j
++)
908 LLE_COEFFICIENTS (target_expr
)[j
] /= gcd1
;
909 for (j
= 0; j
< invariants
; j
++)
910 LLE_INVARIANT_COEFFICIENTS (target_expr
)[j
] /= gcd1
;
911 LLE_CONSTANT (target_expr
) /= gcd1
;
912 LLE_DENOMINATOR (target_expr
) /= gcd1
;
913 /* Ignore if identical to existing bound. */
914 if (!lle_equal (LL_LOWER_BOUND (target_loop
), target_expr
, depth
,
917 LLE_NEXT (target_expr
) = LL_LOWER_BOUND (target_loop
);
918 LL_LOWER_BOUND (target_loop
) = target_expr
;
921 /* Now do the upper bound. */
922 auxillary_expr
= LL_UPPER_BOUND (auxillary_loop
);
924 for (; auxillary_expr
!= NULL
;
925 auxillary_expr
= LLE_NEXT (auxillary_expr
))
927 target_expr
= lambda_linear_expression_new (depth
, invariants
,
929 lambda_vector_matrix_mult (LLE_COEFFICIENTS (auxillary_expr
),
930 depth
, inverse
, depth
,
931 LLE_COEFFICIENTS (target_expr
));
932 lambda_vector_mult_const (LLE_COEFFICIENTS (target_expr
),
933 LLE_COEFFICIENTS (target_expr
), depth
,
935 LLE_CONSTANT (target_expr
) = LLE_CONSTANT (auxillary_expr
) * factor
;
936 lambda_vector_copy (LLE_INVARIANT_COEFFICIENTS (auxillary_expr
),
937 LLE_INVARIANT_COEFFICIENTS (target_expr
),
939 lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS (target_expr
),
940 LLE_INVARIANT_COEFFICIENTS (target_expr
),
942 LLE_DENOMINATOR (target_expr
) = LLE_DENOMINATOR (auxillary_expr
);
944 if (!lambda_vector_zerop (LLE_COEFFICIENTS (target_expr
), depth
))
946 LLE_CONSTANT (target_expr
) = LLE_CONSTANT (target_expr
)
948 lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS
950 LLE_INVARIANT_COEFFICIENTS
951 (target_expr
), invariants
,
953 LLE_DENOMINATOR (target_expr
) =
954 LLE_DENOMINATOR (target_expr
) * determinant
;
956 /* Find the gcd and divide by it here, instead of at the
958 gcd1
= lambda_vector_gcd (LLE_COEFFICIENTS (target_expr
), depth
);
959 gcd2
= lambda_vector_gcd (LLE_INVARIANT_COEFFICIENTS (target_expr
),
961 gcd1
= gcd (gcd1
, gcd2
);
962 gcd1
= gcd (gcd1
, LLE_CONSTANT (target_expr
));
963 gcd1
= gcd (gcd1
, LLE_DENOMINATOR (target_expr
));
964 for (j
= 0; j
< depth
; j
++)
965 LLE_COEFFICIENTS (target_expr
)[j
] /= gcd1
;
966 for (j
= 0; j
< invariants
; j
++)
967 LLE_INVARIANT_COEFFICIENTS (target_expr
)[j
] /= gcd1
;
968 LLE_CONSTANT (target_expr
) /= gcd1
;
969 LLE_DENOMINATOR (target_expr
) /= gcd1
;
970 /* Ignore if equal to existing bound. */
971 if (!lle_equal (LL_UPPER_BOUND (target_loop
), target_expr
, depth
,
974 LLE_NEXT (target_expr
) = LL_UPPER_BOUND (target_loop
);
975 LL_UPPER_BOUND (target_loop
) = target_expr
;
979 for (i
= 0; i
< depth
; i
++)
981 target_loop
= LN_LOOPS (target_nest
)[i
];
982 /* If necessary, exchange the upper and lower bounds and negate
984 if (stepsigns
[i
] < 0)
986 LL_STEP (target_loop
) *= -1;
987 tmp_expr
= LL_LOWER_BOUND (target_loop
);
988 LL_LOWER_BOUND (target_loop
) = LL_UPPER_BOUND (target_loop
);
989 LL_UPPER_BOUND (target_loop
) = tmp_expr
;
995 /* Compute the step signs of TRANS, using TRANS and stepsigns. Return the new
999 lambda_compute_step_signs (lambda_trans_matrix trans
,
1000 lambda_vector stepsigns
,
1001 struct obstack
* lambda_obstack
)
1003 lambda_matrix matrix
, H
;
1005 lambda_vector newsteps
;
1006 int i
, j
, factor
, minimum_column
;
1009 matrix
= LTM_MATRIX (trans
);
1010 size
= LTM_ROWSIZE (trans
);
1011 H
= lambda_matrix_new (size
, size
, lambda_obstack
);
1013 newsteps
= lambda_vector_new (size
);
1014 lambda_vector_copy (stepsigns
, newsteps
, size
);
1016 lambda_matrix_copy (matrix
, H
, size
, size
);
1018 for (j
= 0; j
< size
; j
++)
1022 for (i
= j
; i
< size
; i
++)
1024 lambda_matrix_col_negate (H
, size
, i
);
1025 while (lambda_vector_first_nz (row
, size
, j
+ 1) < size
)
1027 minimum_column
= lambda_vector_min_nz (row
, size
, j
);
1028 lambda_matrix_col_exchange (H
, size
, j
, minimum_column
);
1031 newsteps
[j
] = newsteps
[minimum_column
];
1032 newsteps
[minimum_column
] = temp
;
1034 for (i
= j
+ 1; i
< size
; i
++)
1036 factor
= row
[i
] / row
[j
];
1037 lambda_matrix_col_add (H
, size
, j
, i
, -1 * factor
);
1044 /* Transform NEST according to TRANS, and return the new loopnest.
1046 1. Computing a lattice base for the transformation
1047 2. Composing the dense base with the specified transformation (TRANS)
1048 3. Decomposing the combined transformation into a lower triangular portion,
1049 and a unimodular portion.
1050 4. Computing the auxiliary nest using the unimodular portion.
1051 5. Computing the target nest using the auxiliary nest and the lower
1052 triangular portion. */
1055 lambda_loopnest_transform (lambda_loopnest nest
, lambda_trans_matrix trans
,
1056 struct obstack
* lambda_obstack
)
1058 lambda_loopnest auxillary_nest
, target_nest
;
1060 int depth
, invariants
;
1062 lambda_lattice lattice
;
1063 lambda_trans_matrix trans1
, H
, U
;
1065 lambda_linear_expression expression
;
1066 lambda_vector origin
;
1067 lambda_matrix origin_invariants
;
1068 lambda_vector stepsigns
;
1071 depth
= LN_DEPTH (nest
);
1072 invariants
= LN_INVARIANTS (nest
);
1074 /* Keep track of the signs of the loop steps. */
1075 stepsigns
= lambda_vector_new (depth
);
1076 for (i
= 0; i
< depth
; i
++)
1078 if (LL_STEP (LN_LOOPS (nest
)[i
]) > 0)
1084 /* Compute the lattice base. */
1085 lattice
= lambda_lattice_compute_base (nest
, lambda_obstack
);
1086 trans1
= lambda_trans_matrix_new (depth
, depth
, lambda_obstack
);
1088 /* Multiply the transformation matrix by the lattice base. */
1090 lambda_matrix_mult (LTM_MATRIX (trans
), LATTICE_BASE (lattice
),
1091 LTM_MATRIX (trans1
), depth
, depth
, depth
);
1093 /* Compute the Hermite normal form for the new transformation matrix. */
1094 H
= lambda_trans_matrix_new (depth
, depth
, lambda_obstack
);
1095 U
= lambda_trans_matrix_new (depth
, depth
, lambda_obstack
);
1096 lambda_matrix_hermite (LTM_MATRIX (trans1
), depth
, LTM_MATRIX (H
),
1099 /* Compute the auxiliary loop nest's space from the unimodular
1101 auxillary_nest
= lambda_compute_auxillary_space (nest
, U
,
1104 /* Compute the loop step signs from the old step signs and the
1105 transformation matrix. */
1106 stepsigns
= lambda_compute_step_signs (trans1
, stepsigns
,
1109 /* Compute the target loop nest space from the auxiliary nest and
1110 the lower triangular matrix H. */
1111 target_nest
= lambda_compute_target_space (auxillary_nest
, H
, stepsigns
,
1113 origin
= lambda_vector_new (depth
);
1114 origin_invariants
= lambda_matrix_new (depth
, invariants
, lambda_obstack
);
1115 lambda_matrix_vector_mult (LTM_MATRIX (trans
), depth
, depth
,
1116 LATTICE_ORIGIN (lattice
), origin
);
1117 lambda_matrix_mult (LTM_MATRIX (trans
), LATTICE_ORIGIN_INVARIANTS (lattice
),
1118 origin_invariants
, depth
, depth
, invariants
);
1120 for (i
= 0; i
< depth
; i
++)
1122 loop
= LN_LOOPS (target_nest
)[i
];
1123 expression
= LL_LINEAR_OFFSET (loop
);
1124 if (lambda_vector_zerop (LLE_COEFFICIENTS (expression
), depth
))
1127 f
= LLE_DENOMINATOR (expression
);
1129 LLE_CONSTANT (expression
) += f
* origin
[i
];
1131 for (j
= 0; j
< invariants
; j
++)
1132 LLE_INVARIANT_COEFFICIENTS (expression
)[j
] +=
1133 f
* origin_invariants
[i
][j
];
1140 /* Convert a gcc tree expression EXPR to a lambda linear expression, and
1141 return the new expression. DEPTH is the depth of the loopnest.
1142 OUTERINDUCTIONVARS is an array of the induction variables for outer loops
1143 in this nest. INVARIANTS is the array of invariants for the loop. EXTRA
1144 is the amount we have to add/subtract from the expression because of the
1145 type of comparison it is used in. */
1147 static lambda_linear_expression
1148 gcc_tree_to_linear_expression (int depth
, tree expr
,
1149 VEC(tree
,heap
) *outerinductionvars
,
1150 VEC(tree
,heap
) *invariants
, int extra
,
1151 struct obstack
* lambda_obstack
)
1153 lambda_linear_expression lle
= NULL
;
1154 switch (TREE_CODE (expr
))
1158 lle
= lambda_linear_expression_new (depth
, 2 * depth
, lambda_obstack
);
1159 LLE_CONSTANT (lle
) = TREE_INT_CST_LOW (expr
);
1161 LLE_CONSTANT (lle
) += extra
;
1163 LLE_DENOMINATOR (lle
) = 1;
1170 FOR_EACH_VEC_ELT (tree
, outerinductionvars
, i
, iv
)
1173 if (SSA_NAME_VAR (iv
) == SSA_NAME_VAR (expr
))
1175 lle
= lambda_linear_expression_new (depth
, 2 * depth
,
1177 LLE_COEFFICIENTS (lle
)[i
] = 1;
1179 LLE_CONSTANT (lle
) = extra
;
1181 LLE_DENOMINATOR (lle
) = 1;
1184 FOR_EACH_VEC_ELT (tree
, invariants
, i
, invar
)
1187 if (SSA_NAME_VAR (invar
) == SSA_NAME_VAR (expr
))
1189 lle
= lambda_linear_expression_new (depth
, 2 * depth
,
1191 LLE_INVARIANT_COEFFICIENTS (lle
)[i
] = 1;
1193 LLE_CONSTANT (lle
) = extra
;
1194 LLE_DENOMINATOR (lle
) = 1;
1206 /* Return the depth of the loopnest NEST */
1209 depth_of_nest (struct loop
*nest
)
1221 /* Return true if OP is invariant in LOOP and all outer loops. */
1224 invariant_in_loop_and_outer_loops (struct loop
*loop
, tree op
)
1226 if (is_gimple_min_invariant (op
))
1228 if (loop_depth (loop
) == 0)
1230 if (!expr_invariant_in_loop_p (loop
, op
))
1232 if (!invariant_in_loop_and_outer_loops (loop_outer (loop
), op
))
1237 /* Generate a lambda loop from a gcc loop LOOP. Return the new lambda loop,
1238 or NULL if it could not be converted.
1239 DEPTH is the depth of the loop.
1240 INVARIANTS is a pointer to the array of loop invariants.
1241 The induction variable for this loop should be stored in the parameter
1243 OUTERINDUCTIONVARS is an array of induction variables for outer loops. */
1246 gcc_loop_to_lambda_loop (struct loop
*loop
, int depth
,
1247 VEC(tree
,heap
) ** invariants
,
1248 tree
* ourinductionvar
,
1249 VEC(tree
,heap
) * outerinductionvars
,
1250 VEC(tree
,heap
) ** lboundvars
,
1251 VEC(tree
,heap
) ** uboundvars
,
1252 VEC(int,heap
) ** steps
,
1253 struct obstack
* lambda_obstack
)
1257 tree access_fn
, inductionvar
;
1259 lambda_loop lloop
= NULL
;
1260 lambda_linear_expression lbound
, ubound
;
1261 tree test_lhs
, test_rhs
;
1264 tree lboundvar
, uboundvar
, uboundresult
;
1266 /* Find out induction var and exit condition. */
1267 inductionvar
= find_induction_var_from_exit_cond (loop
);
1268 exit_cond
= get_loop_exit_condition (loop
);
1270 if (inductionvar
== NULL
|| exit_cond
== NULL
)
1272 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1274 "Unable to convert loop: Cannot determine exit condition or induction variable for loop.\n");
1278 if (SSA_NAME_DEF_STMT (inductionvar
) == NULL
)
1281 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1283 "Unable to convert loop: Cannot find PHI node for induction variable\n");
1288 phi
= SSA_NAME_DEF_STMT (inductionvar
);
1289 if (gimple_code (phi
) != GIMPLE_PHI
)
1291 tree op
= SINGLE_SSA_TREE_OPERAND (phi
, SSA_OP_USE
);
1295 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1297 "Unable to convert loop: Cannot find PHI node for induction variable\n");
1302 phi
= SSA_NAME_DEF_STMT (op
);
1303 if (gimple_code (phi
) != GIMPLE_PHI
)
1305 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1307 "Unable to convert loop: Cannot find PHI node for induction variable\n");
1312 /* The induction variable name/version we want to put in the array is the
1313 result of the induction variable phi node. */
1314 *ourinductionvar
= PHI_RESULT (phi
);
1315 access_fn
= instantiate_parameters
1316 (loop
, analyze_scalar_evolution (loop
, PHI_RESULT (phi
)));
1317 if (access_fn
== chrec_dont_know
)
1319 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1321 "Unable to convert loop: Access function for induction variable phi is unknown\n");
1326 step
= evolution_part_in_loop_num (access_fn
, loop
->num
);
1327 if (!step
|| step
== chrec_dont_know
)
1329 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1331 "Unable to convert loop: Cannot determine step of loop.\n");
1335 if (TREE_CODE (step
) != INTEGER_CST
)
1338 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1340 "Unable to convert loop: Step of loop is not integer.\n");
1344 stepint
= TREE_INT_CST_LOW (step
);
1346 /* Only want phis for induction vars, which will have two
1348 if (gimple_phi_num_args (phi
) != 2)
1350 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1352 "Unable to convert loop: PHI node for induction variable has >2 arguments\n");
1356 /* Another induction variable check. One argument's source should be
1357 in the loop, one outside the loop. */
1358 if (flow_bb_inside_loop_p (loop
, gimple_phi_arg_edge (phi
, 0)->src
)
1359 && flow_bb_inside_loop_p (loop
, gimple_phi_arg_edge (phi
, 1)->src
))
1362 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1364 "Unable to convert loop: PHI edges both inside loop, or both outside loop.\n");
1369 if (flow_bb_inside_loop_p (loop
, gimple_phi_arg_edge (phi
, 0)->src
))
1371 lboundvar
= PHI_ARG_DEF (phi
, 1);
1372 lbound
= gcc_tree_to_linear_expression (depth
, lboundvar
,
1373 outerinductionvars
, *invariants
,
1378 lboundvar
= PHI_ARG_DEF (phi
, 0);
1379 lbound
= gcc_tree_to_linear_expression (depth
, lboundvar
,
1380 outerinductionvars
, *invariants
,
1387 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1389 "Unable to convert loop: Cannot convert lower bound to linear expression\n");
1393 /* One part of the test may be a loop invariant tree. */
1394 VEC_reserve (tree
, heap
, *invariants
, 1);
1395 test_lhs
= gimple_cond_lhs (exit_cond
);
1396 test_rhs
= gimple_cond_rhs (exit_cond
);
1398 if (TREE_CODE (test_rhs
) == SSA_NAME
1399 && invariant_in_loop_and_outer_loops (loop
, test_rhs
))
1400 VEC_quick_push (tree
, *invariants
, test_rhs
);
1401 else if (TREE_CODE (test_lhs
) == SSA_NAME
1402 && invariant_in_loop_and_outer_loops (loop
, test_lhs
))
1403 VEC_quick_push (tree
, *invariants
, test_lhs
);
1405 /* The non-induction variable part of the test is the upper bound variable.
1407 if (test_lhs
== inductionvar
)
1408 uboundvar
= test_rhs
;
1410 uboundvar
= test_lhs
;
1412 /* We only size the vectors assuming we have, at max, 2 times as many
1413 invariants as we do loops (one for each bound).
1414 This is just an arbitrary number, but it has to be matched against the
1416 gcc_assert (VEC_length (tree
, *invariants
) <= (unsigned int) (2 * depth
));
1419 /* We might have some leftover. */
1420 if (gimple_cond_code (exit_cond
) == LT_EXPR
)
1421 extra
= -1 * stepint
;
1422 else if (gimple_cond_code (exit_cond
) == NE_EXPR
)
1423 extra
= -1 * stepint
;
1424 else if (gimple_cond_code (exit_cond
) == GT_EXPR
)
1425 extra
= -1 * stepint
;
1426 else if (gimple_cond_code (exit_cond
) == EQ_EXPR
)
1427 extra
= 1 * stepint
;
1429 ubound
= gcc_tree_to_linear_expression (depth
, uboundvar
,
1431 *invariants
, extra
, lambda_obstack
);
1432 uboundresult
= build2 (PLUS_EXPR
, TREE_TYPE (uboundvar
), uboundvar
,
1433 build_int_cst (TREE_TYPE (uboundvar
), extra
));
1434 VEC_safe_push (tree
, heap
, *uboundvars
, uboundresult
);
1435 VEC_safe_push (tree
, heap
, *lboundvars
, lboundvar
);
1436 VEC_safe_push (int, heap
, *steps
, stepint
);
1439 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1441 "Unable to convert loop: Cannot convert upper bound to linear expression\n");
1445 lloop
= lambda_loop_new (lambda_obstack
);
1446 LL_STEP (lloop
) = stepint
;
1447 LL_LOWER_BOUND (lloop
) = lbound
;
1448 LL_UPPER_BOUND (lloop
) = ubound
;
1452 /* Given a LOOP, find the induction variable it is testing against in the exit
1453 condition. Return the induction variable if found, NULL otherwise. */
1456 find_induction_var_from_exit_cond (struct loop
*loop
)
1458 gimple expr
= get_loop_exit_condition (loop
);
1460 tree test_lhs
, test_rhs
;
1463 if (gimple_code (expr
) != GIMPLE_COND
)
1465 test_lhs
= gimple_cond_lhs (expr
);
1466 test_rhs
= gimple_cond_rhs (expr
);
1468 /* Find the side that is invariant in this loop. The ivar must be the other
1471 if (expr_invariant_in_loop_p (loop
, test_lhs
))
1473 else if (expr_invariant_in_loop_p (loop
, test_rhs
))
1478 if (TREE_CODE (ivarop
) != SSA_NAME
)
/* Declare a GCC VEC of lambda_loop pointers, heap-allocated (VEC API
   boilerplate required before VEC(lambda_loop,heap) can be used below).  */
DEF_VEC_P(lambda_loop);
DEF_VEC_ALLOC_P(lambda_loop,heap);
1486 /* Generate a lambda loopnest from a gcc loopnest LOOP_NEST.
1487 Return the new loop nest.
1488 INDUCTIONVARS is a pointer to an array of induction variables for the
1489 loopnest that will be filled in during this process.
1490 INVARIANTS is a pointer to an array of invariants that will be filled in
1491 during this process. */
1494 gcc_loopnest_to_lambda_loopnest (struct loop
*loop_nest
,
1495 VEC(tree
,heap
) **inductionvars
,
1496 VEC(tree
,heap
) **invariants
,
1497 struct obstack
* lambda_obstack
)
1499 lambda_loopnest ret
= NULL
;
1500 struct loop
*temp
= loop_nest
;
1501 int depth
= depth_of_nest (loop_nest
);
1503 VEC(lambda_loop
,heap
) *loops
= NULL
;
1504 VEC(tree
,heap
) *uboundvars
= NULL
;
1505 VEC(tree
,heap
) *lboundvars
= NULL
;
1506 VEC(int,heap
) *steps
= NULL
;
1507 lambda_loop newloop
;
1508 tree inductionvar
= NULL
;
1509 bool perfect_nest
= perfect_nest_p (loop_nest
);
1511 if (!perfect_nest
&& !can_convert_to_perfect_nest (loop_nest
))
1516 newloop
= gcc_loop_to_lambda_loop (temp
, depth
, invariants
,
1517 &inductionvar
, *inductionvars
,
1518 &lboundvars
, &uboundvars
,
1519 &steps
, lambda_obstack
);
1523 VEC_safe_push (tree
, heap
, *inductionvars
, inductionvar
);
1524 VEC_safe_push (lambda_loop
, heap
, loops
, newloop
);
1530 if (!perfect_nestify (loop_nest
, lboundvars
, uboundvars
, steps
,
1535 "Not a perfect loop nest and couldn't convert to one.\n");
1540 "Successfully converted loop nest to perfect loop nest.\n");
1543 ret
= lambda_loopnest_new (depth
, 2 * depth
, lambda_obstack
);
1545 FOR_EACH_VEC_ELT (lambda_loop
, loops
, i
, newloop
)
1546 LN_LOOPS (ret
)[i
] = newloop
;
1549 VEC_free (lambda_loop
, heap
, loops
);
1550 VEC_free (tree
, heap
, uboundvars
);
1551 VEC_free (tree
, heap
, lboundvars
);
1552 VEC_free (int, heap
, steps
);
1557 /* Convert a lambda body vector LBV to a gcc tree, and return the new tree.
1558 STMTS_TO_INSERT is a pointer to a tree where the statements we need to be
1559 inserted for us are stored. INDUCTION_VARS is the array of induction
1560 variables for the loop this LBV is from. TYPE is the tree type to use for
1561 the variables and trees involved. */
1564 lbv_to_gcc_expression (lambda_body_vector lbv
,
1565 tree type
, VEC(tree
,heap
) *induction_vars
,
1566 gimple_seq
*stmts_to_insert
)
1570 tree expr
= build_linear_expr (type
, LBV_COEFFICIENTS (lbv
), induction_vars
);
1572 k
= LBV_DENOMINATOR (lbv
);
1573 gcc_assert (k
!= 0);
1575 expr
= fold_build2 (CEIL_DIV_EXPR
, type
, expr
, build_int_cst (type
, k
));
1577 resvar
= create_tmp_var (type
, "lbvtmp");
1578 add_referenced_var (resvar
);
1579 return force_gimple_operand (fold (expr
), stmts_to_insert
, true, resvar
);
1582 /* Convert a linear expression from coefficient and constant form to a
1584 Return the tree that represents the final value of the expression.
1585 LLE is the linear expression to convert.
1586 OFFSET is the linear offset to apply to the expression.
1587 TYPE is the tree type to use for the variables and math.
1588 INDUCTION_VARS is a vector of induction variables for the loops.
1589 INVARIANTS is a vector of the loop nest invariants.
1590 WRAP specifies what tree code to wrap the results in, if there is more than
1591 one (it is either MAX_EXPR, or MIN_EXPR).
1592 STMTS_TO_INSERT Is a pointer to the statement list we fill in with
1593 statements that need to be inserted for the linear expression. */
1596 lle_to_gcc_expression (lambda_linear_expression lle
,
1597 lambda_linear_expression offset
,
1599 VEC(tree
,heap
) *induction_vars
,
1600 VEC(tree
,heap
) *invariants
,
1601 enum tree_code wrap
, gimple_seq
*stmts_to_insert
)
1605 tree expr
= NULL_TREE
;
1606 VEC(tree
,heap
) *results
= NULL
;
1608 gcc_assert (wrap
== MAX_EXPR
|| wrap
== MIN_EXPR
);
1610 /* Build up the linear expressions. */
1611 for (; lle
!= NULL
; lle
= LLE_NEXT (lle
))
1613 expr
= build_linear_expr (type
, LLE_COEFFICIENTS (lle
), induction_vars
);
1614 expr
= fold_build2 (PLUS_EXPR
, type
, expr
,
1615 build_linear_expr (type
,
1616 LLE_INVARIANT_COEFFICIENTS (lle
),
1619 k
= LLE_CONSTANT (lle
);
1621 expr
= fold_build2 (PLUS_EXPR
, type
, expr
, build_int_cst (type
, k
));
1623 k
= LLE_CONSTANT (offset
);
1625 expr
= fold_build2 (PLUS_EXPR
, type
, expr
, build_int_cst (type
, k
));
1627 k
= LLE_DENOMINATOR (lle
);
1629 expr
= fold_build2 (wrap
== MAX_EXPR
? CEIL_DIV_EXPR
: FLOOR_DIV_EXPR
,
1630 type
, expr
, build_int_cst (type
, k
));
1633 VEC_safe_push (tree
, heap
, results
, expr
);
1638 /* We may need to wrap the results in a MAX_EXPR or MIN_EXPR. */
1639 if (VEC_length (tree
, results
) > 1)
1644 expr
= VEC_index (tree
, results
, 0);
1645 for (i
= 1; VEC_iterate (tree
, results
, i
, op
); i
++)
1646 expr
= fold_build2 (wrap
, type
, expr
, op
);
1649 VEC_free (tree
, heap
, results
);
1651 resvar
= create_tmp_var (type
, "lletmp");
1652 add_referenced_var (resvar
);
1653 return force_gimple_operand (fold (expr
), stmts_to_insert
, true, resvar
);
1656 /* Remove the induction variable defined at IV_STMT. */
1659 remove_iv (gimple iv_stmt
)
1661 gimple_stmt_iterator si
= gsi_for_stmt (iv_stmt
);
1663 if (gimple_code (iv_stmt
) == GIMPLE_PHI
)
1667 for (i
= 0; i
< gimple_phi_num_args (iv_stmt
); i
++)
1670 imm_use_iterator imm_iter
;
1671 tree arg
= gimple_phi_arg_def (iv_stmt
, i
);
1674 if (TREE_CODE (arg
) != SSA_NAME
)
1677 FOR_EACH_IMM_USE_STMT (stmt
, imm_iter
, arg
)
1678 if (stmt
!= iv_stmt
&& !is_gimple_debug (stmt
))
1682 remove_iv (SSA_NAME_DEF_STMT (arg
));
1685 remove_phi_node (&si
, true);
1689 gsi_remove (&si
, true);
1690 release_defs (iv_stmt
);
1694 /* Transform a lambda loopnest NEW_LOOPNEST, which had TRANSFORM applied to
1695 it, back into gcc code. This changes the
1696 loops, their induction variables, and their bodies, so that they
1697 match the transformed loopnest.
1698 OLD_LOOPNEST is the loopnest before we've replaced it with the new
1700 OLD_IVS is a vector of induction variables from the old loopnest.
1701 INVARIANTS is a vector of loop invariants from the old loopnest.
1702 NEW_LOOPNEST is the new lambda loopnest to replace OLD_LOOPNEST with.
1703 TRANSFORM is the matrix transform that was applied to OLD_LOOPNEST to get
1707 lambda_loopnest_to_gcc_loopnest (struct loop
*old_loopnest
,
1708 VEC(tree
,heap
) *old_ivs
,
1709 VEC(tree
,heap
) *invariants
,
1710 VEC(gimple
,heap
) **remove_ivs
,
1711 lambda_loopnest new_loopnest
,
1712 lambda_trans_matrix transform
,
1713 struct obstack
* lambda_obstack
)
1719 VEC(tree
,heap
) *new_ivs
= NULL
;
1721 gimple_stmt_iterator bsi
;
1723 transform
= lambda_trans_matrix_inverse (transform
, lambda_obstack
);
1727 fprintf (dump_file
, "Inverse of transformation matrix:\n");
1728 print_lambda_trans_matrix (dump_file
, transform
);
1730 depth
= depth_of_nest (old_loopnest
);
1731 temp
= old_loopnest
;
1735 lambda_loop newloop
;
1738 tree ivvar
, ivvarinced
;
1741 enum tree_code testtype
;
1742 tree newupperbound
, newlowerbound
;
1743 lambda_linear_expression offset
;
1748 oldiv
= VEC_index (tree
, old_ivs
, i
);
1749 type
= TREE_TYPE (oldiv
);
1751 /* First, build the new induction variable temporary */
1753 ivvar
= create_tmp_var (type
, "lnivtmp");
1754 add_referenced_var (ivvar
);
1756 VEC_safe_push (tree
, heap
, new_ivs
, ivvar
);
1758 newloop
= LN_LOOPS (new_loopnest
)[i
];
1760 /* Linear offset is a bit tricky to handle. Punt on the unhandled
1762 offset
= LL_LINEAR_OFFSET (newloop
);
1764 gcc_assert (LLE_DENOMINATOR (offset
) == 1 &&
1765 lambda_vector_zerop (LLE_COEFFICIENTS (offset
), depth
));
1767 /* Now build the new lower bounds, and insert the statements
1768 necessary to generate it on the loop preheader. */
1770 newlowerbound
= lle_to_gcc_expression (LL_LOWER_BOUND (newloop
),
1771 LL_LINEAR_OFFSET (newloop
),
1774 invariants
, MAX_EXPR
, &stmts
);
1778 gsi_insert_seq_on_edge (loop_preheader_edge (temp
), stmts
);
1779 gsi_commit_edge_inserts ();
1781 /* Build the new upper bound and insert its statements in the
1782 basic block of the exit condition */
1784 newupperbound
= lle_to_gcc_expression (LL_UPPER_BOUND (newloop
),
1785 LL_LINEAR_OFFSET (newloop
),
1788 invariants
, MIN_EXPR
, &stmts
);
1789 exit
= single_exit (temp
);
1790 exitcond
= get_loop_exit_condition (temp
);
1791 bb
= gimple_bb (exitcond
);
1792 bsi
= gsi_after_labels (bb
);
1794 gsi_insert_seq_before (&bsi
, stmts
, GSI_NEW_STMT
);
1796 /* Create the new iv. */
1798 standard_iv_increment_position (temp
, &bsi
, &insert_after
);
1799 create_iv (newlowerbound
,
1800 build_int_cst (type
, LL_STEP (newloop
)),
1801 ivvar
, temp
, &bsi
, insert_after
, &ivvar
,
1804 /* Unfortunately, the incremented ivvar that create_iv inserted may not
1805 dominate the block containing the exit condition.
1806 So we simply create our own incremented iv to use in the new exit
1807 test, and let redundancy elimination sort it out. */
1808 inc_stmt
= gimple_build_assign_with_ops (PLUS_EXPR
, SSA_NAME_VAR (ivvar
),
1810 build_int_cst (type
, LL_STEP (newloop
)));
1812 ivvarinced
= make_ssa_name (SSA_NAME_VAR (ivvar
), inc_stmt
);
1813 gimple_assign_set_lhs (inc_stmt
, ivvarinced
);
1814 bsi
= gsi_for_stmt (exitcond
);
1815 gsi_insert_before (&bsi
, inc_stmt
, GSI_SAME_STMT
);
1817 /* Replace the exit condition with the new upper bound
1820 testtype
= LL_STEP (newloop
) >= 0 ? LE_EXPR
: GE_EXPR
;
1822 /* We want to build a conditional where true means exit the loop, and
1823 false means continue the loop.
1824 So swap the testtype if this isn't the way things are.*/
1826 if (exit
->flags
& EDGE_FALSE_VALUE
)
1827 testtype
= swap_tree_comparison (testtype
);
1829 gimple_cond_set_condition (exitcond
, testtype
, newupperbound
, ivvarinced
);
1830 update_stmt (exitcond
);
1831 VEC_replace (tree
, new_ivs
, i
, ivvar
);
1837 /* Rewrite uses of the old ivs so that they are now specified in terms of
1840 FOR_EACH_VEC_ELT (tree
, old_ivs
, i
, oldiv
)
1842 imm_use_iterator imm_iter
;
1843 use_operand_p use_p
;
1845 gimple oldiv_stmt
= SSA_NAME_DEF_STMT (oldiv
);
1848 if (gimple_code (oldiv_stmt
) == GIMPLE_PHI
)
1849 oldiv_def
= PHI_RESULT (oldiv_stmt
);
1851 oldiv_def
= SINGLE_SSA_TREE_OPERAND (oldiv_stmt
, SSA_OP_DEF
);
1852 gcc_assert (oldiv_def
!= NULL_TREE
);
1854 FOR_EACH_IMM_USE_STMT (stmt
, imm_iter
, oldiv_def
)
1858 lambda_body_vector lbv
, newlbv
;
1860 if (is_gimple_debug (stmt
))
1863 /* Compute the new expression for the induction
1865 depth
= VEC_length (tree
, new_ivs
);
1866 lbv
= lambda_body_vector_new (depth
, lambda_obstack
);
1867 LBV_COEFFICIENTS (lbv
)[i
] = 1;
1869 newlbv
= lambda_body_vector_compute_new (transform
, lbv
,
1873 newiv
= lbv_to_gcc_expression (newlbv
, TREE_TYPE (oldiv
),
1876 if (stmts
&& gimple_code (stmt
) != GIMPLE_PHI
)
1878 bsi
= gsi_for_stmt (stmt
);
1879 gsi_insert_seq_before (&bsi
, stmts
, GSI_SAME_STMT
);
1882 FOR_EACH_IMM_USE_ON_STMT (use_p
, imm_iter
)
1883 propagate_value (use_p
, newiv
);
1885 if (stmts
&& gimple_code (stmt
) == GIMPLE_PHI
)
1886 for (j
= 0; j
< gimple_phi_num_args (stmt
); j
++)
1887 if (gimple_phi_arg_def (stmt
, j
) == newiv
)
1888 gsi_insert_seq_on_edge (gimple_phi_arg_edge (stmt
, j
), stmts
);
1893 /* Remove the now unused induction variable. */
1894 VEC_safe_push (gimple
, heap
, *remove_ivs
, oldiv_stmt
);
1896 VEC_free (tree
, heap
, new_ivs
);
1899 /* Return TRUE if this is not interesting statement from the perspective of
1900 determining if we have a perfect loop nest. */
1903 not_interesting_stmt (gimple stmt
)
1905 /* Note that COND_EXPR's aren't interesting because if they were exiting the
1906 loop, we would have already failed the number of exits tests. */
1907 if (gimple_code (stmt
) == GIMPLE_LABEL
1908 || gimple_code (stmt
) == GIMPLE_GOTO
1909 || gimple_code (stmt
) == GIMPLE_COND
1910 || is_gimple_debug (stmt
))
1915 /* Return TRUE if PHI uses DEF for it's in-the-loop edge for LOOP. */
1918 phi_loop_edge_uses_def (struct loop
*loop
, gimple phi
, tree def
)
1921 for (i
= 0; i
< gimple_phi_num_args (phi
); i
++)
1922 if (flow_bb_inside_loop_p (loop
, gimple_phi_arg_edge (phi
, i
)->src
))
1923 if (PHI_ARG_DEF (phi
, i
) == def
)
1928 /* Return TRUE if STMT is a use of PHI_RESULT. */
1931 stmt_uses_phi_result (gimple stmt
, tree phi_result
)
1933 tree use
= SINGLE_SSA_TREE_OPERAND (stmt
, SSA_OP_USE
);
1935 /* This is conservatively true, because we only want SIMPLE bumpers
1936 of the form x +- constant for our pass. */
1937 return (use
== phi_result
);
1940 /* STMT is a bumper stmt for LOOP if the version it defines is used in the
1941 in-loop-edge in a phi node, and the operand it uses is the result of that
1944 i_3 = PHI (0, i_29); */
1947 stmt_is_bumper_for_loop (struct loop
*loop
, gimple stmt
)
1951 imm_use_iterator iter
;
1952 use_operand_p use_p
;
1954 def
= SINGLE_SSA_TREE_OPERAND (stmt
, SSA_OP_DEF
);
1958 FOR_EACH_IMM_USE_FAST (use_p
, iter
, def
)
1960 use
= USE_STMT (use_p
);
1961 if (gimple_code (use
) == GIMPLE_PHI
)
1963 if (phi_loop_edge_uses_def (loop
, use
, def
))
1964 if (stmt_uses_phi_result (stmt
, PHI_RESULT (use
)))
1972 /* Return true if LOOP is a perfect loop nest.
1973 Perfect loop nests are those loop nests where all code occurs in the
1974 innermost loop body.
1975 If S is a program statement, then
1984 is not a perfect loop nest because of S1.
1992 is a perfect loop nest.
1994 Since we don't have high level loops anymore, we basically have to walk our
1995 statements and ignore those that are there because the loop needs them (IE
1996 the induction variable increment, and jump back to the top of the loop). */
1999 perfect_nest_p (struct loop
*loop
)
2005 /* Loops at depth 0 are perfect nests. */
2009 bbs
= get_loop_body (loop
);
2010 exit_cond
= get_loop_exit_condition (loop
);
2012 for (i
= 0; i
< loop
->num_nodes
; i
++)
2014 if (bbs
[i
]->loop_father
== loop
)
2016 gimple_stmt_iterator bsi
;
2018 for (bsi
= gsi_start_bb (bbs
[i
]); !gsi_end_p (bsi
); gsi_next (&bsi
))
2020 gimple stmt
= gsi_stmt (bsi
);
2022 if (gimple_code (stmt
) == GIMPLE_COND
2023 && exit_cond
!= stmt
)
2024 goto non_perfectly_nested
;
2026 if (stmt
== exit_cond
2027 || not_interesting_stmt (stmt
)
2028 || stmt_is_bumper_for_loop (loop
, stmt
))
2031 non_perfectly_nested
:
2040 return perfect_nest_p (loop
->inner
);
2043 /* Replace the USES of X in STMT, or uses with the same step as X with Y.
2044 YINIT is the initial value of Y, REPLACEMENTS is a hash table to
2045 avoid creating duplicate temporaries and FIRSTBSI is statement
2046 iterator where new temporaries should be inserted at the beginning
2047 of body basic block. */
2050 replace_uses_equiv_to_x_with_y (struct loop
*loop
, gimple stmt
, tree x
,
2051 int xstep
, tree y
, tree yinit
,
2052 htab_t replacements
,
2053 gimple_stmt_iterator
*firstbsi
)
2056 use_operand_p use_p
;
2058 FOR_EACH_SSA_USE_OPERAND (use_p
, stmt
, iter
, SSA_OP_USE
)
2060 tree use
= USE_FROM_PTR (use_p
);
2061 tree step
= NULL_TREE
;
2062 tree scev
, init
, val
, var
;
2064 struct tree_map
*h
, in
;
2067 /* Replace uses of X with Y right away. */
2074 scev
= instantiate_parameters (loop
,
2075 analyze_scalar_evolution (loop
, use
));
2077 if (scev
== NULL
|| scev
== chrec_dont_know
)
2080 step
= evolution_part_in_loop_num (scev
, loop
->num
);
2082 || step
== chrec_dont_know
2083 || TREE_CODE (step
) != INTEGER_CST
2084 || int_cst_value (step
) != xstep
)
2087 /* Use REPLACEMENTS hash table to cache already created
2089 in
.hash
= htab_hash_pointer (use
);
2091 h
= (struct tree_map
*) htab_find_with_hash (replacements
, &in
, in
.hash
);
2094 SET_USE (use_p
, h
->to
);
2098 /* USE which has the same step as X should be replaced
2099 with a temporary set to Y + YINIT - INIT. */
2100 init
= initial_condition_in_loop_num (scev
, loop
->num
);
2101 gcc_assert (init
!= NULL
&& init
!= chrec_dont_know
);
2102 if (TREE_TYPE (use
) == TREE_TYPE (y
))
2104 val
= fold_build2 (MINUS_EXPR
, TREE_TYPE (y
), init
, yinit
);
2105 val
= fold_build2 (PLUS_EXPR
, TREE_TYPE (y
), y
, val
);
2108 /* If X has the same type as USE, the same step
2109 and same initial value, it can be replaced by Y. */
2116 val
= fold_build2 (MINUS_EXPR
, TREE_TYPE (y
), y
, yinit
);
2117 val
= fold_convert (TREE_TYPE (use
), val
);
2118 val
= fold_build2 (PLUS_EXPR
, TREE_TYPE (use
), val
, init
);
2121 /* Create a temporary variable and insert it at the beginning
2122 of the loop body basic block, right after the PHI node
2124 var
= create_tmp_var (TREE_TYPE (use
), "perfecttmp");
2125 add_referenced_var (var
);
2126 val
= force_gimple_operand_gsi (firstbsi
, val
, false, NULL
,
2127 true, GSI_SAME_STMT
);
2128 setstmt
= gimple_build_assign (var
, val
);
2129 var
= make_ssa_name (var
, setstmt
);
2130 gimple_assign_set_lhs (setstmt
, var
);
2131 gsi_insert_before (firstbsi
, setstmt
, GSI_SAME_STMT
);
2132 update_stmt (setstmt
);
2133 SET_USE (use_p
, var
);
2134 h
= ggc_alloc_tree_map ();
2138 loc
= htab_find_slot_with_hash (replacements
, h
, in
.hash
, INSERT
);
2139 gcc_assert ((*(struct tree_map
**)loc
) == NULL
);
2140 *(struct tree_map
**) loc
= h
;
2144 /* Return true if STMT is an exit PHI for LOOP */
2147 exit_phi_for_loop_p (struct loop
*loop
, gimple stmt
)
2149 if (gimple_code (stmt
) != GIMPLE_PHI
2150 || gimple_phi_num_args (stmt
) != 1
2151 || gimple_bb (stmt
) != single_exit (loop
)->dest
)
2157 /* Return true if STMT can be put back into the loop INNER, by
2158 copying it to the beginning of that loop and changing the uses. */
2161 can_put_in_inner_loop (struct loop
*inner
, gimple stmt
)
2163 imm_use_iterator imm_iter
;
2164 use_operand_p use_p
;
2166 gcc_assert (is_gimple_assign (stmt
));
2167 if (gimple_vuse (stmt
)
2168 || !stmt_invariant_in_loop_p (inner
, stmt
))
2171 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, gimple_assign_lhs (stmt
))
2173 if (!exit_phi_for_loop_p (inner
, USE_STMT (use_p
)))
2175 basic_block immbb
= gimple_bb (USE_STMT (use_p
));
2177 if (!flow_bb_inside_loop_p (inner
, immbb
))
2184 /* Return true if STMT can be put *after* the inner loop of LOOP. */
2187 can_put_after_inner_loop (struct loop
*loop
, gimple stmt
)
2189 imm_use_iterator imm_iter
;
2190 use_operand_p use_p
;
2192 if (gimple_vuse (stmt
))
2195 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, gimple_assign_lhs (stmt
))
2197 if (!exit_phi_for_loop_p (loop
, USE_STMT (use_p
)))
2199 basic_block immbb
= gimple_bb (USE_STMT (use_p
));
2201 if (!dominated_by_p (CDI_DOMINATORS
,
2203 loop
->inner
->header
)
2204 && !can_put_in_inner_loop (loop
->inner
, stmt
))
2211 /* Return true when the induction variable IV is simple enough to be
2215 can_duplicate_iv (tree iv
, struct loop
*loop
)
2217 tree scev
= instantiate_parameters
2218 (loop
, analyze_scalar_evolution (loop
, iv
));
2220 if (!automatically_generated_chrec_p (scev
))
2222 tree step
= evolution_part_in_loop_num (scev
, loop
->num
);
2224 if (step
&& step
!= chrec_dont_know
&& TREE_CODE (step
) == INTEGER_CST
)
2231 /* If this is a scalar operation that can be put back into the inner
2232 loop, or after the inner loop, through copying, then do so. This
2233 works on the theory that any amount of scalar code we have to
2234 reduplicate into or after the loops is less expensive that the win
2235 we get from rearranging the memory walk the loop is doing so that
2236 it has better cache behavior. */
2239 cannot_convert_modify_to_perfect_nest (gimple stmt
, struct loop
*loop
)
2241 use_operand_p use_a
, use_b
;
2242 imm_use_iterator imm_iter
;
2243 ssa_op_iter op_iter
, op_iter1
;
2244 tree op0
= gimple_assign_lhs (stmt
);
2246 /* The statement should not define a variable used in the inner
2248 if (TREE_CODE (op0
) == SSA_NAME
2249 && !can_duplicate_iv (op0
, loop
))
2250 FOR_EACH_IMM_USE_FAST (use_a
, imm_iter
, op0
)
2251 if (gimple_bb (USE_STMT (use_a
))->loop_father
== loop
->inner
)
2254 FOR_EACH_SSA_USE_OPERAND (use_a
, stmt
, op_iter
, SSA_OP_USE
)
2257 tree op
= USE_FROM_PTR (use_a
);
2259 /* The variables should not be used in both loops. */
2260 if (!can_duplicate_iv (op
, loop
))
2261 FOR_EACH_IMM_USE_FAST (use_b
, imm_iter
, op
)
2262 if (gimple_bb (USE_STMT (use_b
))->loop_father
== loop
->inner
)
2265 /* The statement should not use the value of a scalar that was
2266 modified in the loop. */
2267 node
= SSA_NAME_DEF_STMT (op
);
2268 if (gimple_code (node
) == GIMPLE_PHI
)
2269 FOR_EACH_PHI_ARG (use_b
, node
, op_iter1
, SSA_OP_USE
)
2271 tree arg
= USE_FROM_PTR (use_b
);
2273 if (TREE_CODE (arg
) == SSA_NAME
)
2275 gimple arg_stmt
= SSA_NAME_DEF_STMT (arg
);
2277 if (gimple_bb (arg_stmt
)
2278 && (gimple_bb (arg_stmt
)->loop_father
== loop
->inner
))
2286 /* Return true when BB contains statements that can harm the transform
2287 to a perfect loop nest. */
2290 cannot_convert_bb_to_perfect_nest (basic_block bb
, struct loop
*loop
)
2292 gimple_stmt_iterator bsi
;
2293 gimple exit_condition
= get_loop_exit_condition (loop
);
2295 for (bsi
= gsi_start_bb (bb
); !gsi_end_p (bsi
); gsi_next (&bsi
))
2297 gimple stmt
= gsi_stmt (bsi
);
2299 if (stmt
== exit_condition
2300 || not_interesting_stmt (stmt
)
2301 || stmt_is_bumper_for_loop (loop
, stmt
))
2304 if (is_gimple_assign (stmt
))
2306 if (cannot_convert_modify_to_perfect_nest (stmt
, loop
))
2309 if (can_duplicate_iv (gimple_assign_lhs (stmt
), loop
))
2312 if (can_put_in_inner_loop (loop
->inner
, stmt
)
2313 || can_put_after_inner_loop (loop
, stmt
))
2317 /* If the bb of a statement we care about isn't dominated by the
2318 header of the inner loop, then we can't handle this case
2319 right now. This test ensures that the statement comes
2320 completely *after* the inner loop. */
2321 if (!dominated_by_p (CDI_DOMINATORS
,
2323 loop
->inner
->header
))
2331 /* Return TRUE if LOOP is an imperfect nest that we can convert to a
2332 perfect one. At the moment, we only handle imperfect nests of
2333 depth 2, where all of the statements occur after the inner loop. */
2336 can_convert_to_perfect_nest (struct loop
*loop
)
2340 gimple_stmt_iterator si
;
2342 /* Can't handle triply nested+ loops yet. */
2343 if (!loop
->inner
|| loop
->inner
->inner
)
2346 bbs
= get_loop_body (loop
);
2347 for (i
= 0; i
< loop
->num_nodes
; i
++)
2348 if (bbs
[i
]->loop_father
== loop
2349 && cannot_convert_bb_to_perfect_nest (bbs
[i
], loop
))
2352 /* We also need to make sure the loop exit only has simple copy phis in it,
2353 otherwise we don't know how to transform it into a perfect nest. */
2354 for (si
= gsi_start_phis (single_exit (loop
)->dest
);
2357 if (gimple_phi_num_args (gsi_stmt (si
)) != 1)
/* Declare a heap-allocated integer VEC of source_location, used below by
   perfect_nestify to remember the argument locations of the exit phis
   it moves.  */
DEF_VEC_I(source_location);
DEF_VEC_ALLOC_I(source_location,heap);
2372 /* Transform the loop nest into a perfect nest, if possible.
2373 LOOP is the loop nest to transform into a perfect nest
2374 LBOUNDS are the lower bounds for the loops to transform
2375 UBOUNDS are the upper bounds for the loops to transform
2376 STEPS is the STEPS for the loops to transform.
2377 LOOPIVS is the induction variables for the loops to transform.
2379 Basically, for the case of
2381 FOR (i = 0; i < 50; i++)
2383 FOR (j =0; j < 50; j++)
2390 This function will transform it into a perfect loop nest by splitting the
2391 outer loop into two loops, like so:
2393 FOR (i = 0; i < 50; i++)
2395 FOR (j = 0; j < 50; j++)
2401 FOR (i = 0; i < 50; i ++)
2406 Return FALSE if we can't make this loop into a perfect nest. */
2409 perfect_nestify (struct loop
*loop
,
2410 VEC(tree
,heap
) *lbounds
,
2411 VEC(tree
,heap
) *ubounds
,
2412 VEC(int,heap
) *steps
,
2413 VEC(tree
,heap
) *loopivs
)
2416 gimple exit_condition
;
2418 basic_block preheaderbb
, headerbb
, bodybb
, latchbb
, olddest
;
2420 gimple_stmt_iterator bsi
, firstbsi
;
2423 struct loop
*newloop
;
2427 tree oldivvar
, ivvar
, ivvarinced
;
2428 VEC(tree
,heap
) *phis
= NULL
;
2429 VEC(source_location
,heap
) *locations
= NULL
;
2430 htab_t replacements
= NULL
;
2432 /* Create the new loop. */
2433 olddest
= single_exit (loop
)->dest
;
2434 preheaderbb
= split_edge (single_exit (loop
));
2435 headerbb
= create_empty_bb (EXIT_BLOCK_PTR
->prev_bb
);
2437 /* Push the exit phi nodes that we are moving. */
2438 for (bsi
= gsi_start_phis (olddest
); !gsi_end_p (bsi
); gsi_next (&bsi
))
2440 phi
= gsi_stmt (bsi
);
2441 VEC_reserve (tree
, heap
, phis
, 2);
2442 VEC_reserve (source_location
, heap
, locations
, 1);
2443 VEC_quick_push (tree
, phis
, PHI_RESULT (phi
));
2444 VEC_quick_push (tree
, phis
, PHI_ARG_DEF (phi
, 0));
2445 VEC_quick_push (source_location
, locations
,
2446 gimple_phi_arg_location (phi
, 0));
2448 e
= redirect_edge_and_branch (single_succ_edge (preheaderbb
), headerbb
);
2450 /* Remove the exit phis from the old basic block. */
2451 for (bsi
= gsi_start_phis (olddest
); !gsi_end_p (bsi
); )
2452 remove_phi_node (&bsi
, false);
2454 /* and add them back to the new basic block. */
2455 while (VEC_length (tree
, phis
) != 0)
2459 source_location locus
;
2460 def
= VEC_pop (tree
, phis
);
2461 phiname
= VEC_pop (tree
, phis
);
2462 locus
= VEC_pop (source_location
, locations
);
2463 phi
= create_phi_node (phiname
, preheaderbb
);
2464 add_phi_arg (phi
, def
, single_pred_edge (preheaderbb
), locus
);
2466 flush_pending_stmts (e
);
2467 VEC_free (tree
, heap
, phis
);
2469 bodybb
= create_empty_bb (EXIT_BLOCK_PTR
->prev_bb
);
2470 latchbb
= create_empty_bb (EXIT_BLOCK_PTR
->prev_bb
);
2471 make_edge (headerbb
, bodybb
, EDGE_FALLTHRU
);
2472 cond_stmt
= gimple_build_cond (NE_EXPR
, integer_one_node
, integer_zero_node
,
2473 NULL_TREE
, NULL_TREE
);
2474 bsi
= gsi_start_bb (bodybb
);
2475 gsi_insert_after (&bsi
, cond_stmt
, GSI_NEW_STMT
);
2476 e
= make_edge (bodybb
, olddest
, EDGE_FALSE_VALUE
);
2477 make_edge (bodybb
, latchbb
, EDGE_TRUE_VALUE
);
2478 make_edge (latchbb
, headerbb
, EDGE_FALLTHRU
);
2480 /* Update the loop structures. */
2481 newloop
= duplicate_loop (loop
, olddest
->loop_father
);
2482 newloop
->header
= headerbb
;
2483 newloop
->latch
= latchbb
;
2484 add_bb_to_loop (latchbb
, newloop
);
2485 add_bb_to_loop (bodybb
, newloop
);
2486 add_bb_to_loop (headerbb
, newloop
);
2487 set_immediate_dominator (CDI_DOMINATORS
, bodybb
, headerbb
);
2488 set_immediate_dominator (CDI_DOMINATORS
, headerbb
, preheaderbb
);
2489 set_immediate_dominator (CDI_DOMINATORS
, preheaderbb
,
2490 single_exit (loop
)->src
);
2491 set_immediate_dominator (CDI_DOMINATORS
, latchbb
, bodybb
);
2492 set_immediate_dominator (CDI_DOMINATORS
, olddest
,
2493 recompute_dominator (CDI_DOMINATORS
, olddest
));
2494 /* Create the new iv. */
2495 oldivvar
= VEC_index (tree
, loopivs
, 0);
2496 ivvar
= create_tmp_var (TREE_TYPE (oldivvar
), "perfectiv");
2497 add_referenced_var (ivvar
);
2498 standard_iv_increment_position (newloop
, &bsi
, &insert_after
);
2499 create_iv (VEC_index (tree
, lbounds
, 0),
2500 build_int_cst (TREE_TYPE (oldivvar
), VEC_index (int, steps
, 0)),
2501 ivvar
, newloop
, &bsi
, insert_after
, &ivvar
, &ivvarinced
);
2503 /* Create the new upper bound. This may be not just a variable, so we copy
2504 it to one just in case. */
2506 exit_condition
= get_loop_exit_condition (newloop
);
2507 uboundvar
= create_tmp_var (TREE_TYPE (VEC_index (tree
, ubounds
, 0)),
2509 add_referenced_var (uboundvar
);
2510 stmt
= gimple_build_assign (uboundvar
, VEC_index (tree
, ubounds
, 0));
2511 uboundvar
= make_ssa_name (uboundvar
, stmt
);
2512 gimple_assign_set_lhs (stmt
, uboundvar
);
2515 gsi_insert_after (&bsi
, stmt
, GSI_SAME_STMT
);
2517 gsi_insert_before (&bsi
, stmt
, GSI_SAME_STMT
);
2519 gimple_cond_set_condition (exit_condition
, GE_EXPR
, uboundvar
, ivvarinced
);
2520 update_stmt (exit_condition
);
2521 replacements
= htab_create_ggc (20, tree_map_hash
,
2523 bbs
= get_loop_body_in_dom_order (loop
);
2524 /* Now move the statements, and replace the induction variable in the moved
2525 statements with the correct loop induction variable. */
2526 oldivvar
= VEC_index (tree
, loopivs
, 0);
2527 firstbsi
= gsi_start_bb (bodybb
);
2528 for (i
= loop
->num_nodes
- 1; i
>= 0 ; i
--)
2530 gimple_stmt_iterator tobsi
= gsi_last_bb (bodybb
);
2531 if (bbs
[i
]->loop_father
== loop
)
2533 /* If this is true, we are *before* the inner loop.
2534 If this isn't true, we are *after* it.
2536 The only time can_convert_to_perfect_nest returns true when we
2537 have statements before the inner loop is if they can be moved
2538 into the inner loop.
2540 The only time can_convert_to_perfect_nest returns true when we
2541 have statements after the inner loop is if they can be moved into
2542 the new split loop. */
2544 if (dominated_by_p (CDI_DOMINATORS
, loop
->inner
->header
, bbs
[i
]))
2546 gimple_stmt_iterator header_bsi
2547 = gsi_after_labels (loop
->inner
->header
);
2549 for (bsi
= gsi_start_bb (bbs
[i
]); !gsi_end_p (bsi
);)
2551 gimple stmt
= gsi_stmt (bsi
);
2553 if (stmt
== exit_condition
2554 || not_interesting_stmt (stmt
)
2555 || stmt_is_bumper_for_loop (loop
, stmt
))
2561 gsi_move_before (&bsi
, &header_bsi
);
2566 /* Note that the bsi only needs to be explicitly incremented
2567 when we don't move something, since it is automatically
2568 incremented when we do. */
2569 for (bsi
= gsi_start_bb (bbs
[i
]); !gsi_end_p (bsi
);)
2571 gimple stmt
= gsi_stmt (bsi
);
2573 if (stmt
== exit_condition
2574 || not_interesting_stmt (stmt
)
2575 || stmt_is_bumper_for_loop (loop
, stmt
))
2581 replace_uses_equiv_to_x_with_y
2582 (loop
, stmt
, oldivvar
, VEC_index (int, steps
, 0), ivvar
,
2583 VEC_index (tree
, lbounds
, 0), replacements
, &firstbsi
);
2585 gsi_move_before (&bsi
, &tobsi
);
2587 /* If the statement has any virtual operands, they may
2588 need to be rewired because the original loop may
2589 still reference them. */
2590 if (gimple_vuse (stmt
))
2591 mark_sym_for_renaming (gimple_vop (cfun
));
2599 htab_delete (replacements
);
2600 return perfect_nest_p (loop
);
2603 /* Return true if TRANS is a legal transformation matrix that respects
2604 the dependence vectors in DISTS and DIRS. The conservative answer
2607 "Wolfe proves that a unimodular transformation represented by the
2608 matrix T is legal when applied to a loop nest with a set of
2609 lexicographically non-negative distance vectors RDG if and only if
2610 for each vector d in RDG, (T.d >= 0) is lexicographically positive.
2611 i.e.: if and only if it transforms the lexicographically positive
2612 distance vectors to lexicographically positive vectors. Note that
2613 a unimodular matrix must transform the zero vector (and only it) to
2614 the zero vector." S.Muchnick. */
2617 lambda_transform_legal_p (lambda_trans_matrix trans
,
2619 VEC (ddr_p
, heap
) *dependence_relations
)
2622 lambda_vector distres
;
2623 struct data_dependence_relation
*ddr
;
2625 gcc_assert (LTM_COLSIZE (trans
) == nb_loops
2626 && LTM_ROWSIZE (trans
) == nb_loops
);
2628 /* When there are no dependences, the transformation is correct. */
2629 if (VEC_length (ddr_p
, dependence_relations
) == 0)
2632 ddr
= VEC_index (ddr_p
, dependence_relations
, 0);
2636 /* When there is an unknown relation in the dependence_relations, we
2637 know that it is no worth looking at this loop nest: give up. */
2638 if (DDR_ARE_DEPENDENT (ddr
) == chrec_dont_know
)
2641 distres
= lambda_vector_new (nb_loops
);
2643 /* For each distance vector in the dependence graph. */
2644 FOR_EACH_VEC_ELT (ddr_p
, dependence_relations
, i
, ddr
)
2646 /* Don't care about relations for which we know that there is no
2647 dependence, nor about read-read (aka. output-dependences):
2648 these data accesses can happen in any order. */
2649 if (DDR_ARE_DEPENDENT (ddr
) == chrec_known
2650 || (DR_IS_READ (DDR_A (ddr
)) && DR_IS_READ (DDR_B (ddr
))))
2653 /* Conservatively answer: "this transformation is not valid". */
2654 if (DDR_ARE_DEPENDENT (ddr
) == chrec_dont_know
)
2657 /* If the dependence could not be captured by a distance vector,
2658 conservatively answer that the transform is not valid. */
2659 if (DDR_NUM_DIST_VECTS (ddr
) == 0)
2662 /* Compute trans.dist_vect */
2663 for (j
= 0; j
< DDR_NUM_DIST_VECTS (ddr
); j
++)
2665 lambda_matrix_vector_mult (LTM_MATRIX (trans
), nb_loops
, nb_loops
,
2666 DDR_DIST_VECT (ddr
, j
), distres
);
2668 if (!lambda_vector_lexico_pos (distres
, nb_loops
))
2676 /* Collects parameters from affine function ACCESS_FUNCTION, and push
2677 them in PARAMETERS. */
2680 lambda_collect_parameters_from_af (tree access_function
,
2681 struct pointer_set_t
*param_set
,
2682 VEC (tree
, heap
) **parameters
)
2684 if (access_function
== NULL
)
2687 if (TREE_CODE (access_function
) == SSA_NAME
2688 && pointer_set_contains (param_set
, access_function
) == 0)
2690 pointer_set_insert (param_set
, access_function
);
2691 VEC_safe_push (tree
, heap
, *parameters
, access_function
);
2695 int i
, num_operands
= tree_operand_length (access_function
);
2697 for (i
= 0; i
< num_operands
; i
++)
2698 lambda_collect_parameters_from_af (TREE_OPERAND (access_function
, i
),
2699 param_set
, parameters
);
2703 /* Collects parameters from DATAREFS, and push them in PARAMETERS. */
2706 lambda_collect_parameters (VEC (data_reference_p
, heap
) *datarefs
,
2707 VEC (tree
, heap
) **parameters
)
2710 struct pointer_set_t
*parameter_set
= pointer_set_create ();
2711 data_reference_p data_reference
;
2713 FOR_EACH_VEC_ELT (data_reference_p
, datarefs
, i
, data_reference
)
2714 for (j
= 0; j
< DR_NUM_DIMENSIONS (data_reference
); j
++)
2715 lambda_collect_parameters_from_af (DR_ACCESS_FN (data_reference
, j
),
2716 parameter_set
, parameters
);
2717 pointer_set_destroy (parameter_set
);
2720 /* Translates BASE_EXPR to vector CY. AM is needed for inferring
2721 indexing positions in the data access vector. CST is the analyzed
2722 integer constant. */
2725 av_for_af_base (tree base_expr
, lambda_vector cy
, struct access_matrix
*am
,
2730 switch (TREE_CODE (base_expr
))
2733 /* Constant part. */
2734 cy
[AM_CONST_COLUMN_INDEX (am
)] += int_cst_value (base_expr
) * cst
;
2740 access_matrix_get_index_for_parameter (base_expr
, am
);
2742 if (param_index
>= 0)
2744 cy
[param_index
] = cst
+ cy
[param_index
];
2752 return av_for_af_base (TREE_OPERAND (base_expr
, 0), cy
, am
, cst
)
2753 && av_for_af_base (TREE_OPERAND (base_expr
, 1), cy
, am
, cst
);
2756 return av_for_af_base (TREE_OPERAND (base_expr
, 0), cy
, am
, cst
)
2757 && av_for_af_base (TREE_OPERAND (base_expr
, 1), cy
, am
, -1 * cst
);
2760 if (TREE_CODE (TREE_OPERAND (base_expr
, 0)) == INTEGER_CST
)
2761 result
= av_for_af_base (TREE_OPERAND (base_expr
, 1),
2763 int_cst_value (TREE_OPERAND (base_expr
, 0)));
2764 else if (TREE_CODE (TREE_OPERAND (base_expr
, 1)) == INTEGER_CST
)
2765 result
= av_for_af_base (TREE_OPERAND (base_expr
, 0),
2767 int_cst_value (TREE_OPERAND (base_expr
, 1)));
2774 return av_for_af_base (TREE_OPERAND (base_expr
, 0), cy
, am
, -1 * cst
);
2783 /* Translates ACCESS_FUN to vector CY. AM is needed for inferring
2784 indexing positions in the data access vector. */
2787 av_for_af (tree access_fun
, lambda_vector cy
, struct access_matrix
*am
)
2789 switch (TREE_CODE (access_fun
))
2791 case POLYNOMIAL_CHREC
:
2793 tree left
= CHREC_LEFT (access_fun
);
2794 tree right
= CHREC_RIGHT (access_fun
);
2797 if (TREE_CODE (right
) != INTEGER_CST
)
2800 var
= am_vector_index_for_loop (am
, CHREC_VARIABLE (access_fun
));
2801 cy
[var
] = int_cst_value (right
);
2803 if (TREE_CODE (left
) == POLYNOMIAL_CHREC
)
2804 return av_for_af (left
, cy
, am
);
2806 return av_for_af_base (left
, cy
, am
, 1);
2810 /* Constant part. */
2811 return av_for_af_base (access_fun
, cy
, am
, 1);
2818 /* Initializes the access matrix for DATA_REFERENCE. */
2821 build_access_matrix (data_reference_p data_reference
,
2822 VEC (tree
, heap
) *parameters
,
2823 VEC (loop_p
, heap
) *nest
,
2824 struct obstack
* lambda_obstack
)
2826 struct access_matrix
*am
= (struct access_matrix
*)
2827 obstack_alloc(lambda_obstack
, sizeof (struct access_matrix
));
2828 unsigned i
, ndim
= DR_NUM_DIMENSIONS (data_reference
);
2829 unsigned nivs
= VEC_length (loop_p
, nest
);
2830 unsigned lambda_nb_columns
;
2832 AM_LOOP_NEST (am
) = nest
;
2833 AM_NB_INDUCTION_VARS (am
) = nivs
;
2834 AM_PARAMETERS (am
) = parameters
;
2836 lambda_nb_columns
= AM_NB_COLUMNS (am
);
2837 AM_MATRIX (am
) = VEC_alloc (lambda_vector
, gc
, ndim
);
2839 for (i
= 0; i
< ndim
; i
++)
2841 lambda_vector access_vector
= lambda_vector_new (lambda_nb_columns
);
2842 tree access_function
= DR_ACCESS_FN (data_reference
, i
);
2844 if (!av_for_af (access_function
, access_vector
, am
))
2847 VEC_quick_push (lambda_vector
, AM_MATRIX (am
), access_vector
);
2850 DR_ACCESS_MATRIX (data_reference
) = am
;
2854 /* Returns false when one of the access matrices cannot be built. */
2857 lambda_compute_access_matrices (VEC (data_reference_p
, heap
) *datarefs
,
2858 VEC (tree
, heap
) *parameters
,
2859 VEC (loop_p
, heap
) *nest
,
2860 struct obstack
* lambda_obstack
)
2862 data_reference_p dataref
;
2865 FOR_EACH_VEC_ELT (data_reference_p
, datarefs
, ix
, dataref
)
2866 if (!build_access_matrix (dataref
, parameters
, nest
, lambda_obstack
))