/* Analysis Utilities for Loop Vectorization.
   Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
   Contributed by Dorit Nuzman <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "expr.h"
#include "optabs.h"
#include "params.h"
#include "tree-data-ref.h"
#include "tree-vectorizer.h"
#include "recog.h"
#include "diagnostic-core.h"
/* Function prototypes */
static void vect_pattern_recog_1
  (gimple (* ) (gimple, tree *, tree *), gimple_stmt_iterator);
static bool widened_name_p (tree, gimple, tree *, gimple *);
/* Pattern recognition functions  */
static gimple vect_recog_widen_sum_pattern (gimple, tree *, tree *);
static gimple vect_recog_widen_mult_pattern (gimple, tree *, tree *);
static gimple vect_recog_dot_prod_pattern (gimple, tree *, tree *);
static gimple vect_recog_pow_pattern (gimple, tree *, tree *);
static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
  vect_recog_widen_mult_pattern,
  vect_recog_widen_sum_pattern,
  vect_recog_dot_prod_pattern,
  vect_recog_pow_pattern};
/* Function widened_name_p

   Check whether NAME, an ssa-name used in USE_STMT,
   is a result of a type-promotion, such that:
     DEF_STMT: NAME = NOP (name0)
   where the type of name0 (HALF_TYPE) is smaller than the type of NAME.  */
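
/* For example, given source-level code along the lines of

     short s;
     int i = (int) s;

   the ssa-name holding 'i' is a widened name: *HALF_TYPE is set to
   'short int' and *DEF_STMT to the conversion statement.  (Illustrative
   only; the variable names above are not taken from a real testcase.)  */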
static bool
widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt)
{
  tree dummy;
  gimple dummy_gimple;
  loop_vec_info loop_vinfo;
  stmt_vec_info stmt_vinfo;
  tree type = TREE_TYPE (name);
  tree oprnd0;
  enum vect_def_type dt;
  tree def;

  stmt_vinfo = vinfo_for_stmt (use_stmt);
  loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (!vect_is_simple_use (name, loop_vinfo, NULL, def_stmt, &def, &dt))
    return false;

  if (dt != vect_internal_def
      && dt != vect_external_def && dt != vect_constant_def)
    return false;

  if (! *def_stmt)
    return false;

  if (!is_gimple_assign (*def_stmt))
    return false;

  if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR)
    return false;

  oprnd0 = gimple_assign_rhs1 (*def_stmt);

  *half_type = TREE_TYPE (oprnd0);
  if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type)
      || (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type))
      || (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2)))
    return false;

  if (!vect_is_simple_use (oprnd0, loop_vinfo, NULL, &dummy_gimple, &dummy,
                           &dt))
    return false;

  return true;
}
/* Helper to return a new temporary for pattern of TYPE for STMT.  If STMT
   is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var.  */

static tree
vect_recog_temp_ssa_var (tree type, gimple stmt)
{
  tree var = create_tmp_var (type, "patt");

  add_referenced_var (var);
  var = make_ssa_name (var, stmt);
  return var;
}
/* Function vect_recog_dot_prod_pattern

   Try to find the following pattern:

     type x_t, y_t;
     TYPE1 prod;
     TYPE2 sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = ...
     S2  y_t = ...
     S3  x_T = (TYPE1) x_t;
     S4  y_T = (TYPE1) y_t;
     S5  prod = x_T * y_T;
     [S6  prod = (TYPE2) prod;  #optional]
     S7  sum_1 = prod + sum_0;

   where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
   same size as 'TYPE1' or bigger.  This is a special case of a reduction
   computation.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S7, the pattern {S3,S4,S5,S6,S7} will be
   detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>

   Note: The dot-prod idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results.  It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */
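
/* For illustration, a source loop of roughly this shape is what the pattern
   is meant to catch (the types are only an example; any pairing where TYPE1
   is twice the width of 'type' qualifies):

     short a[N], b[N];
     int sum = 0;
     for (i = 0; i < N; i++)
       sum += (int) a[i] * (int) b[i];

   On targets with a vector dot-product instruction this reduces to a single
   DOT_PROD_EXPR per vector iteration.  */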
static gimple
vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  tree oprnd00, oprnd01;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  tree prod_type;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var, rhs;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE1) X;
          DY = (TYPE1) Y;
          DPROD = DX * DY;
          DDPROD = (TYPE2) DPROD;
          sum_1 = DDPROD + sum_0;
     In which
     - DX is double the size of X
     - DY is double the size of Y
     - DX, DY, DPROD all have the same type
     - sum is the same size of DPROD or bigger
     - sum has been recognized as a reduction variable.

     This is equivalent to:
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD w+ sum_0;  #widen summation
     or
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD + sum_0;   #summation
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as widening-summation?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      type = gimple_expr_type (stmt);
      if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      half_type = TREE_TYPE (oprnd0);
    }
  else
    {
      gimple def_stmt;

      if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (last_stmt);
      oprnd1 = gimple_assign_rhs2 (last_stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), type)
          || !types_compatible_p (TREE_TYPE (oprnd1), type))
        return NULL;
      stmt = last_stmt;

      if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt))
        {
          stmt = def_stmt;
          oprnd0 = gimple_assign_rhs1 (stmt);
        }
      else
        half_type = type;
    }

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a (widen_)mult_expr  */

  prod_type = half_type;
  stmt = SSA_NAME_DEF_STMT (oprnd0);

  /* It could not be the dot_prod pattern if the stmt is outside the loop.  */
  if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
    return NULL;

  /* FORNOW.  Can continue analyzing the def-use chain when this stmt is a phi
     inside the loop (in case we are analyzing an outer-loop).  */
  if (!is_gimple_assign (stmt))
    return NULL;
  stmt_vinfo = vinfo_for_stmt (stmt);
  gcc_assert (stmt_vinfo);
  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
    return NULL;
  if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
    return NULL;
  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as a widening multiplication?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
        return NULL;
      stmt_vinfo = vinfo_for_stmt (stmt);
      gcc_assert (stmt_vinfo);
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def);
      oprnd00 = gimple_assign_rhs1 (stmt);
      oprnd01 = gimple_assign_rhs2 (stmt);
    }
  else
    {
      tree half_type0, half_type1;
      gimple def_stmt;
      tree oprnd0, oprnd1;

      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type)
          || !types_compatible_p (TREE_TYPE (oprnd1), prod_type))
        return NULL;
      if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt))
        return NULL;
      oprnd00 = gimple_assign_rhs1 (def_stmt);
      if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt))
        return NULL;
      oprnd01 = gimple_assign_rhs1 (def_stmt);
      if (!types_compatible_p (half_type0, half_type1))
        return NULL;
      if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
        return NULL;
    }

  half_type = TREE_TYPE (oprnd00);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  rhs = build3 (DOT_PROD_EXPR, type, oprnd00, oprnd01, oprnd1);
  pattern_stmt = gimple_build_assign (var, rhs);

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}
/* Function vect_recog_widen_mult_pattern

   Try to find the following pattern:

     type a_t, b_t;
     TYPE a_T, b_T, prod_T;

     S1  a_t = ...
     S2  b_t = ...
     S3  a_T = (TYPE) a_t;
     S4  b_T = (TYPE) b_t;
     S5  prod_T = a_T * b_T;

   where type 'TYPE' is at least double the size of type 'type'.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S5, the pattern {S3,S4,S5} will be
   detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_MULT <a_t, b_t>
*/
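
/* For illustration, source code such as

     short a, b;
     int prod = (int) a * (int) b;

   inside a vectorizable loop matches this pattern, assuming the target
   supports a widening vector multiply (checked below via
   supportable_widening_operation).  */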
static gimple
vect_recog_widen_mult_pattern (gimple last_stmt,
                               tree *type_in,
                               tree *type_out)
{
  gimple def_stmt0, def_stmt1;
  tree oprnd0, oprnd1;
  tree type, half_type0, half_type1;
  gimple pattern_stmt;
  tree vectype, vectype_out;
  tree dummy;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  VEC (tree, heap) *dummy_vec;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* Check argument 0 */
  if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0))
    return NULL;
  oprnd0 = gimple_assign_rhs1 (def_stmt0);

  /* Check argument 1 */
  if (!widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1))
    return NULL;
  oprnd1 = gimple_assign_rhs1 (def_stmt1);

  if (!types_compatible_p (half_type0, half_type1))
    return NULL;

  /* Pattern detected.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");

  /* Check target support  */
  vectype = get_vectype_for_scalar_type (half_type0);
  vectype_out = get_vectype_for_scalar_type (type);
  if (!vectype || !vectype_out
      || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt,
                                          vectype_out, vectype,
                                          &dummy, &dummy, &dummy_code,
                                          &dummy_code, &dummy_int, &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = vectype_out;

  /* Pattern supported.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
                                               oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);

  return pattern_stmt;
}
/* Function vect_recog_pow_pattern

   Try to find the following pattern:

     x = POW (y, N);

   with POW being one of pow, powf, powi, powif and N being
   either 2 or 0.5.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        x = x * x
   or
        x = sqrt (x)
*/
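
/* For illustration:

     pow (x, 2.0)  is replaced by  x * x
     pow (x, 0.5)  is replaced by  sqrt (x)

   the latter only when a vectorizable sqrt exists for the base type
   (see the vectorizable_function check below).  */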
static gimple
vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  tree fn, base, exp = NULL;
  gimple stmt;
  tree var;

  if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
    return NULL;

  fn = gimple_call_fndecl (last_stmt);
  switch (DECL_FUNCTION_CODE (fn))
    {
    case BUILT_IN_POWIF:
    case BUILT_IN_POWI:
    case BUILT_IN_POWF:
    case BUILT_IN_POW:
      base = gimple_call_arg (last_stmt, 0);
      exp = gimple_call_arg (last_stmt, 1);
      if (TREE_CODE (exp) != REAL_CST
          && TREE_CODE (exp) != INTEGER_CST)
        return NULL;
      break;

    default:
      return NULL;
    }

  /* We now have a pow or powi builtin function call with a constant
     exponent.  */

  *type_out = NULL_TREE;

  /* Catch squaring.  */
  if ((host_integerp (exp, 0)
       && tree_low_cst (exp, 0) == 2)
      || (TREE_CODE (exp) == REAL_CST
          && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
    {
      *type_in = TREE_TYPE (base);

      var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
      stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
      SSA_NAME_DEF_STMT (var) = stmt;
      return stmt;
    }

  /* Catch square root.  */
  if (TREE_CODE (exp) == REAL_CST
      && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
    {
      tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
      *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
      if (*type_in)
        {
          gimple stmt = gimple_build_call (newfn, 1, base);
          if (vectorizable_function (stmt, *type_in, *type_in)
              != NULL_TREE)
            {
              var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
              gimple_call_set_lhs (stmt, var);
              return stmt;
            }
        }
    }

  return NULL;
}
/* Function vect_recog_widen_sum_pattern

   Try to find the following pattern:

     type x_t;
     TYPE x_T, sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = *p;
     S2  x_T = (TYPE) x_t;
     S3  sum_1 = x_T + sum_0;

   where type 'TYPE' is at least double the size of type 'type', i.e. we're
   summing elements of type 'type' into an accumulator of type 'TYPE'.  This
   is a special case of a reduction computation.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S3, the pattern {S2,S3} will be detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_SUM <x_t, sum_0>

   Note: The widening-sum idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results.  It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */
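
/* For illustration, a source loop such as

     short a[N];
     int sum = 0;
     for (i = 0; i < N; i++)
       sum += a[i];

   matches this pattern: the short elements are widened and accumulated into
   the int 'sum' through WIDEN_SUM.  */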
static gimple
vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE) X;
          sum_1 = DX + sum_0;
     In which DX is at least double the size of X, and sum_1 has been
     recognized as a reduction variable.
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a cast from type 'type' to type
     'TYPE'.  */

  if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt))
    return NULL;

  oprnd0 = gimple_assign_rhs1 (stmt);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
                                               oprnd0, oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}
/* Function vect_pattern_recog_1

   Input:
   PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
        computation pattern.
   STMT: A stmt from which the pattern search should start.

   If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
   expression that computes the same functionality and can be used to
   replace the sequence of stmts that are involved in the pattern.

   Output:
   This function checks if the expression returned by PATTERN_RECOG_FUNC is
   supported in vector form by the target.  We use 'TYPE_IN' to obtain the
   relevant vector type.  If 'TYPE_IN' is already a vector type, then this
   indicates that target support had already been checked by
   PATTERN_RECOG_FUNC.  If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC,
   we check that it fits to the available target pattern.

   This function also does some bookkeeping, as explained in the documentation
   for vect_pattern_recog.  */
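
/* For example, vect_recog_pow_pattern above already returns a vector type in
   TYPE_IN for the sqrt case (it had to query vectorizable_function itself),
   so the target-support check below is skipped for it.  */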
static void
vect_pattern_recog_1 (
        gimple (* vect_recog_func) (gimple, tree *, tree *),
        gimple_stmt_iterator si)
{
  gimple stmt = gsi_stmt (si), pattern_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info pattern_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree pattern_vectype;
  tree type_in, type_out;
  enum tree_code code;
  int i;
  gimple next;

  pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out);
  if (!pattern_stmt)
    return;

  if (VECTOR_MODE_P (TYPE_MODE (type_in)))
    {
      /* No need to check target support (already checked by the pattern
         recognition function).  */
      if (type_out)
        gcc_assert (VECTOR_MODE_P (TYPE_MODE (type_out)));
      pattern_vectype = type_out ? type_out : type_in;
    }
  else
    {
      enum machine_mode vec_mode;
      enum insn_code icode;
      optab optab;

      /* Check target support  */
      type_in = get_vectype_for_scalar_type (type_in);
      if (!type_in)
        return;
      if (type_out)
        type_out = get_vectype_for_scalar_type (type_out);
      else
        type_out = type_in;
      if (!type_out)
        return;
      pattern_vectype = type_out;

      if (is_gimple_assign (pattern_stmt))
        code = gimple_assign_rhs_code (pattern_stmt);
      else
        {
          gcc_assert (is_gimple_call (pattern_stmt));
          code = CALL_EXPR;
        }

      optab = optab_for_tree_code (code, type_in, optab_default);
      vec_mode = TYPE_MODE (type_in);
      if (!optab
          || (icode = optab_handler (optab, vec_mode)) == CODE_FOR_nothing
          || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out)))
        return;
    }

  /* Found a vectorizable pattern.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "pattern recognized: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* Mark the stmts that are involved in the pattern.  */
  gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (pattern_stmt,
                      new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL));
  pattern_stmt_info = vinfo_for_stmt (pattern_stmt);

  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info);
  STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
  STMT_VINFO_IN_PATTERN_P (stmt_info) = true;
  STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt;

  /* Patterns cannot be vectorized using SLP, because they change the order of
     computation.  */
  FOR_EACH_VEC_ELT (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i, next)
    if (next == stmt)
      VEC_ordered_remove (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i);
}
/* Function vect_pattern_recog

   Input:
   LOOP_VINFO - a struct_loop_info of a loop in which we want to look for
        computation idioms.

   Output - for each computation idiom that is detected we insert a new stmt
        that provides the same functionality and that can be vectorized.  We
        also record some information in the struct_stmt_info of the relevant
        stmts, as explained below:

   At the entry to this function we have the following stmts, with the
   following initial value in the STMT_VINFO fields:

         stmt                     in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
         S4: a_0 = ..use(a_1)..         -       -               -
         S5: ... = ..use(a_0)..         -       -               -

   Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
   represented by a single stmt.  We then:
   - create a new stmt S6 that will replace the pattern.
   - insert the new stmt S6 before the last stmt in the pattern
   - fill in the STMT_VINFO fields as follows:

                                  in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
       > S6: a_new = ....               -       S4              -
         S4: a_0 = ..use(a_1)..         true    S6              -
         S5: ... = ..use(a_0)..         -       -               -

   (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
    to each other through the RELATED_STMT field).

   S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
   of S4 because it will replace all its uses.  Stmts {S1,S2,S3} will
   remain irrelevant unless used by stmts other than S4.

   If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
   (because they are marked as irrelevant).  It will vectorize S6, and record
   a pointer to the new vector stmt VS6 both from S6 (as usual), and also
   from S4.  We do that so that when we get to vectorizing stmts that use the
   def of S4 (like S5 that uses a_0), we'll know where to take the relevant
   vector-def from.  S4 will be skipped, and S5 will be vectorized as usual:

                                  in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
       > VS6: va_new = ....             -       -               -
         S6: a_new = ....               -       S4              VS6
         S4: a_0 = ..use(a_1)..         true    S6              VS6
       > VS5: ... = ..vuse(va_new)..    -       -               -
         S5: ... = ..use(a_0)..         -       -               -

   DCE could then get rid of {S1,S2,S3,S4,S5,S6} (if their defs are not used
   elsewhere), and we'll end up with:

        VS6: va_new = ....
        VS5: ... = ..vuse(va_new)..

   If vectorization does not succeed, DCE will clean S6 away (its def is
   not used), and we'll end up with the original sequence.  */
void
vect_pattern_recog (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int i, j;
  gimple (* vect_recog_func_ptr) (gimple, tree *, tree *);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_pattern_recog ===");

  /* Scan through the loop stmts, applying the pattern recognition
     functions starting at each stmt visited:  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          /* Scan over all generic vect_recog_xxx_pattern functions.  */
          for (j = 0; j < NUM_PATTERNS; j++)
            {
              vect_recog_func_ptr = vect_vect_recog_func_ptrs[j];
              vect_pattern_recog_1 (vect_recog_func_ptr, si);
            }
        }
    }
}