/* Analysis Utilities for Loop Vectorization.
   Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Dorit Nuzman <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "tree-data-ref.h"
#include "tree-vectorizer.h"
/* Function prototypes */
static void vect_pattern_recog_1
  (gimple (* ) (gimple, tree *, tree *), gimple_stmt_iterator);
static bool widened_name_p (tree, gimple, tree *, gimple *);

/* Pattern recognition functions  */
static gimple vect_recog_widen_sum_pattern (gimple, tree *, tree *);
static gimple vect_recog_widen_mult_pattern (gimple, tree *, tree *);
static gimple vect_recog_dot_prod_pattern (gimple, tree *, tree *);
static gimple vect_recog_pow_pattern (gimple, tree *, tree *);
static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
	vect_recog_widen_mult_pattern,
	vect_recog_widen_sum_pattern,
	vect_recog_dot_prod_pattern,
	vect_recog_pow_pattern};
/* Function widened_name_p

   Check whether NAME, an ssa-name used in USE_STMT,
   is a result of a type-promotion, such that:
     DEF_STMT: NAME = NOP (name0)
   where the type of name0 (HALF_TYPE) is smaller than the type of NAME.  */
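/* For illustration only (a hypothetical source-level sketch, not from this
   file): given

     short s_2;
     int i_1 = (int) s_2;      <-- DEF_STMT: promotion from short to int
     ... = i_1 * ...;          <-- USE_STMT

   widened_name_p would return true for i_1 used in USE_STMT, setting
   HALF_TYPE to 'short' and DEF_STMT to the conversion stmt.  */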
static bool
widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt)
{
  tree dummy, def, oprnd0;
  gimple dummy_gimple;
  loop_vec_info loop_vinfo;
  stmt_vec_info stmt_vinfo;
  tree type = TREE_TYPE (name);
  enum vect_def_type dt;

  stmt_vinfo = vinfo_for_stmt (use_stmt);
  loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (!vect_is_simple_use (name, loop_vinfo, NULL, def_stmt, &def, &dt))
    return false;

  if (dt != vect_internal_def
      && dt != vect_external_def && dt != vect_constant_def)
    return false;

  if (!is_gimple_assign (*def_stmt))
    return false;

  if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR)
    return false;

  oprnd0 = gimple_assign_rhs1 (*def_stmt);

  *half_type = TREE_TYPE (oprnd0);
  if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type)
      || (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type))
      || (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2)))
    return false;

  if (!vect_is_simple_use (oprnd0, loop_vinfo, NULL, &dummy_gimple, &dummy,
                           &dt))
    return false;

  return true;
}
/* Helper to return a new temporary for pattern of TYPE for STMT.  If STMT
   is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var. */
static tree
vect_recog_temp_ssa_var (tree type, gimple stmt)
{
  tree var = create_tmp_var (type, "patt");

  add_referenced_var (var);
  var = make_ssa_name (var, stmt);
  return var;
}
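/* Typical use (a sketch based on the callers later in this file): when the
   defining statement does not exist yet, pass NULL and set the definition
   afterwards, e.g.

     var = vect_recog_temp_ssa_var (type, NULL);
     pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
                                                  oprnd0, oprnd1);
     SSA_NAME_DEF_STMT (var) = pattern_stmt;  */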
/* Function vect_recog_dot_prod_pattern

   Try to find the following pattern:

     type x_t, y_t;
     TYPE1 prod;
     TYPE2 sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = ...
     S2  y_t = ...
     S3  x_T = (TYPE1) x_t;
     S4  y_T = (TYPE1) y_t;
     S5  prod = x_T * y_T;
     [S6  prod = (TYPE2) prod;  #optional]
     S7  sum_1 = prod + sum_0;

   where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
   same size of 'TYPE1' or bigger. This is a special case of a reduction
   computation.

   * LAST_STMT: A stmt from which the pattern search begins. In the example,
   when this function is called with S7, the pattern {S3,S4,S5,S6,S7} will be
   detected.

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern. In this case it will be:
        WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>

   Note: The dot-prod idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results. It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */
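/* Illustrative only (hypothetical source code, not part of GCC): a loop of
   the kind the dot-product pattern is meant to catch, e.g.

     short a[N], b[N];
     int i, sum = 0;
     for (i = 0; i < N; i++)
       sum += a[i] * b[i];     <-- a[i] and b[i] are promoted to int,
                                   multiplied, and accumulated

   Here 'type' is short, while 'TYPE1' and 'TYPE2' are int.  */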
static gimple
vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  tree oprnd00, oprnd01;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  tree prod_type;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var, rhs;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);
  /* Look for the following pattern
          DX = (TYPE1) X;
          DY = (TYPE1) Y;
          DPROD = DX * DY;
          DDPROD = (TYPE2) DPROD;
          sum_1 = DDPROD + sum_0;
     In which
     - DX is double the size of X
     - DY is double the size of Y
     - DX, DY, DPROD all have the same type
     - sum is the same size of DPROD or bigger
     - sum has been recognized as a reduction variable.

     This is equivalent to:
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD w+ sum_0;  #widen summation
     or
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD + sum_0;   #summation  */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */
  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as widening-summation?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      type = gimple_expr_type (stmt);
      if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      half_type = TREE_TYPE (oprnd0);
    }
  else
    {
      gimple def_stmt;

      if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (last_stmt);
      oprnd1 = gimple_assign_rhs2 (last_stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), type)
          || !types_compatible_p (TREE_TYPE (oprnd1), type))
        return NULL;
      stmt = last_stmt;

      if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt))
        {
          stmt = def_stmt;
          oprnd0 = gimple_assign_rhs1 (stmt);
        }
      else
        half_type = type;
    }
  /* So far so good. Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a (widen_)mult_expr  */
  prod_type = half_type;
  stmt = SSA_NAME_DEF_STMT (oprnd0);
  /* FORNOW.  Can continue analyzing the def-use chain when this stmt is a phi
     inside the loop (in case we are analyzing an outer-loop).  */
  if (!is_gimple_assign (stmt))
    return NULL;
  stmt_vinfo = vinfo_for_stmt (stmt);
  gcc_assert (stmt_vinfo);
  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
    return NULL;
  if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
    return NULL;
  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as a widening multiplication?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
        return NULL;
      stmt_vinfo = vinfo_for_stmt (stmt);
      gcc_assert (stmt_vinfo);
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def);
      oprnd00 = gimple_assign_rhs1 (stmt);
      oprnd01 = gimple_assign_rhs2 (stmt);
    }
  else
    {
      tree half_type0, half_type1;
      gimple def_stmt;

      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type)
          || !types_compatible_p (TREE_TYPE (oprnd1), prod_type))
        return NULL;
      if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt))
        return NULL;
      oprnd00 = gimple_assign_rhs1 (def_stmt);
      if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt))
        return NULL;
      oprnd01 = gimple_assign_rhs1 (def_stmt);
      if (!types_compatible_p (half_type0, half_type1))
        return NULL;
      if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
        return NULL;
    }

  half_type = TREE_TYPE (oprnd00);
  *type_in = half_type;
  *type_out = type;
  /* Pattern detected. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  rhs = build3 (DOT_PROD_EXPR, type, oprnd00, oprnd01, oprnd1);
  pattern_stmt = gimple_build_assign (var, rhs);

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}
/* Function vect_recog_widen_mult_pattern

   Try to find the following pattern:

     type a_t, b_t;
     TYPE a_T, b_T, prod_T;

     S3  a_T = (TYPE) a_t;
     S4  b_T = (TYPE) b_t;
     S5  prod_T = a_T * b_T;

   where type 'TYPE' is at least double the size of type 'type'.

   * LAST_STMT: A stmt from which the pattern search begins. In the example,
   when this function is called with S5, the pattern {S3,S4,S5} will be
   detected.

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern. In this case it will be:
        WIDEN_MULT <a_t, b_t>  */
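/* Illustrative only (hypothetical source code, not part of GCC): a loop of
   the kind the widening-multiplication pattern is meant to catch, e.g.

     short a[N], b[N];
     int c[N];
     int i;
     for (i = 0; i < N; i++)
       c[i] = a[i] * b[i];     <-- both operands widened before the multiply

   Here 'type' is short and 'TYPE' is int.  */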
static gimple
vect_recog_widen_mult_pattern (gimple last_stmt,
                               tree *type_in,
                               tree *type_out)
{
  gimple def_stmt0, def_stmt1;
  tree oprnd0, oprnd1;
  tree type, half_type0, half_type1;
  gimple pattern_stmt;
  tree vectype;
  tree dummy;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  VEC (tree, heap) *dummy_vec;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);
  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* Check argument 0 */
  if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0))
    return NULL;
  oprnd0 = gimple_assign_rhs1 (def_stmt0);

  /* Check argument 1 */
  if (!widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1))
    return NULL;
  oprnd1 = gimple_assign_rhs1 (def_stmt1);

  if (!types_compatible_p (half_type0, half_type1))
    return NULL;
  /* Pattern detected.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");

  /* Check target support  */
  vectype = get_vectype_for_scalar_type (half_type0);
  if (!vectype
      || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt, vectype,
                                          &dummy, &dummy, &dummy_code,
                                          &dummy_code, &dummy_int, &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = NULL_TREE;
  /* Pattern supported. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
                                               oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);

  return pattern_stmt;
}
/* Function vect_recog_pow_pattern

   Try to find the following pattern:

     x = POW (y, N);

   with POW being one of pow, powf, powi, powif and N being
   either 2 or 0.5.

   * LAST_STMT: A stmt from which the pattern search begins.

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern. In this case it will be:
        x = x * x
   or
        x = sqrt (x)  */
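/* Illustrative only (hypothetical source code, not part of GCC): calls of
   the kind this pattern rewrites, e.g.

     double x, y, z;
     y = pow (x, 2.0);         <-- becomes y = x * x
     z = pow (x, 0.5);         <-- becomes z = sqrt (x), provided the target
                                   supports a vectorizable sqrt  */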
static gimple
vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  tree fn, base, exp = NULL;
  gimple stmt;
  tree var;

  if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
    return NULL;

  fn = gimple_call_fndecl (last_stmt);
  switch (DECL_FUNCTION_CODE (fn))
    {
    case BUILT_IN_POWIF:
    case BUILT_IN_POWI:
    case BUILT_IN_POWF:
    case BUILT_IN_POW:
      base = gimple_call_arg (last_stmt, 0);
      exp = gimple_call_arg (last_stmt, 1);
      if (TREE_CODE (exp) != REAL_CST
          && TREE_CODE (exp) != INTEGER_CST)
        return NULL;
      break;

    default:
      return NULL;
    }
  /* We now have a pow or powi builtin function call with a constant
     exponent.  */

  *type_out = NULL_TREE;
  /* Catch squaring.  */
  if ((host_integerp (exp, 0)
       && tree_low_cst (exp, 0) == 2)
      || (TREE_CODE (exp) == REAL_CST
          && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
    {
      *type_in = TREE_TYPE (base);

      var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
      stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
      SSA_NAME_DEF_STMT (var) = stmt;
      return stmt;
    }
  /* Catch square root.  */
  if (TREE_CODE (exp) == REAL_CST
      && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
    {
      tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
      *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
      if (*type_in)
        {
          gimple stmt = gimple_build_call (newfn, 1, base);
          if (vectorizable_function (stmt, *type_in, *type_in)
              != NULL_TREE)
            {
              var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
              gimple_call_set_lhs (stmt, var);
              return stmt;
            }
        }
    }

  return NULL;
}
/* Function vect_recog_widen_sum_pattern

   Try to find the following pattern:

     type x_t;
     TYPE x_T, sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = ...
     S2  x_T = (TYPE) x_t;
     S3  sum_1 = x_T + sum_0;

   where type 'TYPE' is at least double the size of type 'type', i.e - we're
   summing elements of type 'type' into an accumulator of type 'TYPE'. This is
   a special case of a reduction computation.

   * LAST_STMT: A stmt from which the pattern search begins. In the example,
   when this function is called with S3, the pattern {S2,S3} will be detected.

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern. In this case it will be:
        WIDEN_SUM <x_t, sum_0>

   Note: The widening-sum idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results. It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */
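/* Illustrative only (hypothetical source code, not part of GCC): a loop of
   the kind the widening-sum pattern is meant to catch, e.g.

     short a[N];
     int i, sum = 0;
     for (i = 0; i < N; i++)
       sum += a[i];            <-- each element widened before the add

   Here 'type' is short and 'TYPE' is int.  */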
static gimple
vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);
  /* Look for the following pattern
          DX = (TYPE) X;
          sum_1 = DX + sum_0;
     In which DX is at least double the size of X, and sum_1 has been
     recognized as a reduction variable.  */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */
  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;
  /* So far so good. Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a cast from type 'type' to type
     'TYPE'.  */

  if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt))
    return NULL;

  oprnd0 = gimple_assign_rhs1 (stmt);
  *type_in = half_type;
  *type_out = type;
  /* Pattern detected. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
                                               oprnd0, oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}
/* Function vect_pattern_recog_1

   PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
        computation pattern.
   STMT: A stmt from which the pattern search should start.

   If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
   expression that computes the same functionality and can be used to
   replace the sequence of stmts that are involved in the pattern.

   This function checks if the expression returned by PATTERN_RECOG_FUNC is
   supported in vector form by the target.  We use 'TYPE_IN' to obtain the
   relevant vector type. If 'TYPE_IN' is already a vector type, then this
   indicates that target support had already been checked by PATTERN_RECOG_FUNC.
   If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC, we check that it fits
   to the available target pattern.

   This function also does some bookkeeping, as explained in the documentation
   for vect_pattern_recog.  */
static void
vect_pattern_recog_1 (
        gimple (* vect_recog_func) (gimple, tree *, tree *),
        gimple_stmt_iterator si)
{
  gimple stmt = gsi_stmt (si), pattern_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info pattern_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree pattern_vectype;
  tree type_in, type_out;
  enum tree_code code;

  pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out);
  if (!pattern_stmt)
    return;
  if (VECTOR_MODE_P (TYPE_MODE (type_in)))
    {
      /* No need to check target support (already checked by the pattern
         recognition function).  */
      pattern_vectype = type_in;
    }
  else
    {
      enum machine_mode vec_mode;
      enum insn_code icode;
      optab optab;

      /* Check target support  */
      pattern_vectype = get_vectype_for_scalar_type (type_in);
      if (!pattern_vectype)
        return;

      if (is_gimple_assign (pattern_stmt))
        code = gimple_assign_rhs_code (pattern_stmt);
      else
        {
          gcc_assert (is_gimple_call (pattern_stmt));
          code = CALL_EXPR;
        }

      optab = optab_for_tree_code (code, pattern_vectype, optab_default);
      vec_mode = TYPE_MODE (pattern_vectype);
      if (!optab
          || (icode = optab_handler (optab, vec_mode)->insn_code) ==
              CODE_FOR_nothing
          || (type_out
              && (!get_vectype_for_scalar_type (type_out)
                  || (insn_data[icode].operand[0].mode !=
                      TYPE_MODE (get_vectype_for_scalar_type (type_out))))))
        return;
    }
  /* Found a vectorizable pattern.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "pattern recognized: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* Mark the stmts that are involved in the pattern. */
  gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (pattern_stmt,
                      new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL));
  pattern_stmt_info = vinfo_for_stmt (pattern_stmt);

  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info);
  STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
  STMT_VINFO_IN_PATTERN_P (stmt_info) = true;
  STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt;

  return;
}
/* Function vect_pattern_recog

   LOOP_VINFO - a struct_loop_info of a loop in which we want to look for
        computation idioms.

   Output - for each computation idiom that is detected we insert a new stmt
        that provides the same functionality and that can be vectorized. We
        also record some information in the struct_stmt_info of the relevant
        stmts, as explained below:

   At the entry to this function we have the following stmts, with the
   following initial value in the STMT_VINFO fields:

         stmt                     in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
         S4: a_0 = ..use(a_1)..         -       -               -
         S5: ... = ..use(a_0)..         -       -               -

   Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
   represented by a single stmt. We then:
   - create a new stmt S6 that will replace the pattern.
   - insert the new stmt S6 before the last stmt in the pattern
   - fill in the STMT_VINFO fields as follows:

                                  in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
       > S6: a_new = ....               -       S4              -
         S4: a_0 = ..use(a_1)..         true    S6              -
         S5: ... = ..use(a_0)..         -       -               -

   (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
    to each other through the RELATED_STMT field).

   S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
   of S4 because it will replace all its uses.  Stmts {S1,S2,S3} will
   remain irrelevant unless used by stmts other than S4.

   If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
   (because they are marked as irrelevant). It will vectorize S6, and record
   a pointer to the new vector stmt VS6 both from S6 (as usual), and also
   from S4. We do that so that when we get to vectorizing stmts that use the
   def of S4 (like S5 that uses a_0), we'll know where to take the relevant
   vector-def from. S4 will be skipped, and S5 will be vectorized as usual:

                                  in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
       > VS6: va_new = ....             -       -               -
         S6: a_new = ....               -       S4              VS6
         S4: a_0 = ..use(a_1)..         true    S6              VS6
       > VS5: ... = ..vuse(va_new)..    -       -               -
         S5: ... = ..use(a_0)..         -       -               -

   DCE could then get rid of {S1,S2,S3,S4,S5,S6} (if their defs are not used
   elsewhere), and we'll end up with:

        VS6: va_new = ....
        VS5: ... = ..vuse(va_new)..

   If vectorization does not succeed, DCE will clean S6 away (its def is
   not used), and we'll end up with the original sequence.  */
void
vect_pattern_recog (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int i, j;
  gimple (* vect_recog_func_ptr) (gimple, tree *, tree *);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_pattern_recog ===");

  /* Scan through the loop stmts, applying the pattern recognition
     functions starting at each stmt visited:  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          /* Scan over all generic vect_recog_xxx_pattern functions.  */
          for (j = 0; j < NUM_PATTERNS; j++)
            {
              vect_recog_func_ptr = vect_vect_recog_func_ptrs[j];
              vect_pattern_recog_1 (vect_recog_func_ptr, si);
            }
        }
    }
}