2010-07-27 Paolo Carlini <paolo.carlini@oracle.com>
[official-gcc/alias-decl.git] / gcc / tree-vect-patterns.c
blob19f0ae67a883f964d5b712a485bf0792c83a98d1
1 /* Analysis Utilities for Loop Vectorization.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
3 Contributed by Dorit Nuzman <dorit@il.ibm.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "ggc.h"
26 #include "tree.h"
27 #include "target.h"
28 #include "basic-block.h"
29 #include "gimple-pretty-print.h"
30 #include "tree-flow.h"
31 #include "tree-dump.h"
32 #include "cfgloop.h"
33 #include "expr.h"
34 #include "optabs.h"
35 #include "params.h"
36 #include "tree-data-ref.h"
37 #include "tree-vectorizer.h"
38 #include "recog.h"
39 #include "diagnostic-core.h"
40 #include "toplev.h"
/* Function prototypes */
static void vect_pattern_recog_1
  (gimple (* ) (gimple, tree *, tree *), gimple_stmt_iterator);
static bool widened_name_p (tree, gimple, tree *, gimple *);

/* Pattern recognition functions */
static gimple vect_recog_widen_sum_pattern (gimple, tree *, tree *);
static gimple vect_recog_widen_mult_pattern (gimple, tree *, tree *);
static gimple vect_recog_dot_prod_pattern (gimple, tree *, tree *);
static gimple vect_recog_pow_pattern (gimple, tree *, tree *);

/* Dispatch table of pattern recognizers, tried in order for each stmt by
   vect_pattern_recog.  NOTE(review): NUM_PATTERNS is defined elsewhere
   (presumably tree-vectorizer.h) and must equal the number of entries
   listed here — verify when adding a new recognizer.  */
static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
	vect_recog_widen_mult_pattern,
	vect_recog_widen_sum_pattern,
	vect_recog_dot_prod_pattern,
	vect_recog_pow_pattern};
/* Function widened_name_p

   Check whether NAME, an ssa-name used in USE_STMT,
   is a result of a type-promotion, such that:
     DEF_STMT: NAME = NOP (name0)
   where the type of name0 (HALF_TYPE) is smaller than the type of NAME.

   On success, *HALF_TYPE is set to the narrower (pre-promotion) type and
   *DEF_STMT to the promoting NOP_EXPR assignment; returns false otherwise.
   Only integral promotions that at least double the precision and preserve
   signedness are accepted.  */

static bool
widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt)
{
  tree dummy;
  gimple dummy_gimple;
  loop_vec_info loop_vinfo;
  stmt_vec_info stmt_vinfo;
  tree type = TREE_TYPE (name);
  tree oprnd0;
  enum vect_def_type dt;
  tree def;

  stmt_vinfo = vinfo_for_stmt (use_stmt);
  loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  /* NAME must have a def recognized by the vectorizer's dataflow analysis.  */
  if (!vect_is_simple_use (name, loop_vinfo, NULL, def_stmt, &def, &dt))
    return false;

  /* Only internal, external or constant defs are candidates.  */
  if (dt != vect_internal_def
      && dt != vect_external_def && dt != vect_constant_def)
    return false;

  if (! *def_stmt)
    return false;

  if (!is_gimple_assign (*def_stmt))
    return false;

  /* The defining stmt must be a conversion (NOP_EXPR).  */
  if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR)
    return false;

  oprnd0 = gimple_assign_rhs1 (*def_stmt);

  /* Require an integral promotion that keeps signedness and at least
     doubles the precision — the widening patterns rely on this.  */
  *half_type = TREE_TYPE (oprnd0);
  if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type)
      || (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type))
      || (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2)))
    return false;

  /* The narrow operand itself must also be analyzable.  */
  if (!vect_is_simple_use (oprnd0, loop_vinfo, NULL, &dummy_gimple, &dummy,
                           &dt))
    return false;

  return true;
}
113 /* Helper to return a new temporary for pattern of TYPE for STMT. If STMT
114 is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var. */
116 static tree
117 vect_recog_temp_ssa_var (tree type, gimple stmt)
119 tree var = create_tmp_var (type, "patt");
121 add_referenced_var (var);
122 var = make_ssa_name (var, stmt);
123 return var;
126 /* Function vect_recog_dot_prod_pattern
128 Try to find the following pattern:
130 type x_t, y_t;
131 TYPE1 prod;
132 TYPE2 sum = init;
133 loop:
134 sum_0 = phi <init, sum_1>
135 S1 x_t = ...
136 S2 y_t = ...
137 S3 x_T = (TYPE1) x_t;
138 S4 y_T = (TYPE1) y_t;
139 S5 prod = x_T * y_T;
140 [S6 prod = (TYPE2) prod; #optional]
141 S7 sum_1 = prod + sum_0;
143 where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
144 same size of 'TYPE1' or bigger. This is a special case of a reduction
145 computation.
147 Input:
149 * LAST_STMT: A stmt from which the pattern search begins. In the example,
150 when this function is called with S7, the pattern {S3,S4,S5,S6,S7} will be
151 detected.
153 Output:
155 * TYPE_IN: The type of the input arguments to the pattern.
157 * TYPE_OUT: The type of the output of this pattern.
159 * Return value: A new stmt that will be used to replace the sequence of
160 stmts that constitute the pattern. In this case it will be:
161 WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>
163 Note: The dot-prod idiom is a widening reduction pattern that is
164 vectorized without preserving all the intermediate results. It
165 produces only N/2 (widened) results (by summing up pairs of
166 intermediate results) rather than all N results. Therefore, we
167 cannot allow this pattern when we want to get all the results and in
168 the correct order (as is the case when this computation is in an
169 inner-loop nested in an outer-loop that us being vectorized). */
171 static gimple
172 vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
174 gimple stmt;
175 tree oprnd0, oprnd1;
176 tree oprnd00, oprnd01;
177 stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
178 tree type, half_type;
179 gimple pattern_stmt;
180 tree prod_type;
181 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
182 struct loop *loop = LOOP_VINFO_LOOP (loop_info);
183 tree var, rhs;
185 if (!is_gimple_assign (last_stmt))
186 return NULL;
188 type = gimple_expr_type (last_stmt);
190 /* Look for the following pattern
191 DX = (TYPE1) X;
192 DY = (TYPE1) Y;
193 DPROD = DX * DY;
194 DDPROD = (TYPE2) DPROD;
195 sum_1 = DDPROD + sum_0;
196 In which
197 - DX is double the size of X
198 - DY is double the size of Y
199 - DX, DY, DPROD all have the same type
200 - sum is the same size of DPROD or bigger
201 - sum has been recognized as a reduction variable.
203 This is equivalent to:
204 DPROD = X w* Y; #widen mult
205 sum_1 = DPROD w+ sum_0; #widen summation
207 DPROD = X w* Y; #widen mult
208 sum_1 = DPROD + sum_0; #summation
211 /* Starting from LAST_STMT, follow the defs of its uses in search
212 of the above pattern. */
214 if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
215 return NULL;
217 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
219 /* Has been detected as widening-summation? */
221 stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
222 type = gimple_expr_type (stmt);
223 if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
224 return NULL;
225 oprnd0 = gimple_assign_rhs1 (stmt);
226 oprnd1 = gimple_assign_rhs2 (stmt);
227 half_type = TREE_TYPE (oprnd0);
229 else
231 gimple def_stmt;
233 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
234 return NULL;
235 oprnd0 = gimple_assign_rhs1 (last_stmt);
236 oprnd1 = gimple_assign_rhs2 (last_stmt);
237 if (!types_compatible_p (TREE_TYPE (oprnd0), type)
238 || !types_compatible_p (TREE_TYPE (oprnd1), type))
239 return NULL;
240 stmt = last_stmt;
242 if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt))
244 stmt = def_stmt;
245 oprnd0 = gimple_assign_rhs1 (stmt);
247 else
248 half_type = type;
251 /* So far so good. Since last_stmt was detected as a (summation) reduction,
252 we know that oprnd1 is the reduction variable (defined by a loop-header
253 phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
254 Left to check that oprnd0 is defined by a (widen_)mult_expr */
256 prod_type = half_type;
257 stmt = SSA_NAME_DEF_STMT (oprnd0);
258 /* FORNOW. Can continue analyzing the def-use chain when this stmt in a phi
259 inside the loop (in case we are analyzing an outer-loop). */
260 if (!is_gimple_assign (stmt))
261 return NULL;
262 stmt_vinfo = vinfo_for_stmt (stmt);
263 gcc_assert (stmt_vinfo);
264 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
265 return NULL;
266 if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
267 return NULL;
268 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
270 /* Has been detected as a widening multiplication? */
272 stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
273 if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
274 return NULL;
275 stmt_vinfo = vinfo_for_stmt (stmt);
276 gcc_assert (stmt_vinfo);
277 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def);
278 oprnd00 = gimple_assign_rhs1 (stmt);
279 oprnd01 = gimple_assign_rhs2 (stmt);
281 else
283 tree half_type0, half_type1;
284 gimple def_stmt;
285 tree oprnd0, oprnd1;
287 oprnd0 = gimple_assign_rhs1 (stmt);
288 oprnd1 = gimple_assign_rhs2 (stmt);
289 if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type)
290 || !types_compatible_p (TREE_TYPE (oprnd1), prod_type))
291 return NULL;
292 if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt))
293 return NULL;
294 oprnd00 = gimple_assign_rhs1 (def_stmt);
295 if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt))
296 return NULL;
297 oprnd01 = gimple_assign_rhs1 (def_stmt);
298 if (!types_compatible_p (half_type0, half_type1))
299 return NULL;
300 if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
301 return NULL;
304 half_type = TREE_TYPE (oprnd00);
305 *type_in = half_type;
306 *type_out = type;
308 /* Pattern detected. Create a stmt to be used to replace the pattern: */
309 var = vect_recog_temp_ssa_var (type, NULL);
310 rhs = build3 (DOT_PROD_EXPR, type, oprnd00, oprnd01, oprnd1),
311 pattern_stmt = gimple_build_assign (var, rhs);
313 if (vect_print_dump_info (REPORT_DETAILS))
315 fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: ");
316 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
319 /* We don't allow changing the order of the computation in the inner-loop
320 when doing outer-loop vectorization. */
321 gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));
323 return pattern_stmt;
326 /* Function vect_recog_widen_mult_pattern
328 Try to find the following pattern:
330 type a_t, b_t;
331 TYPE a_T, b_T, prod_T;
333 S1 a_t = ;
334 S2 b_t = ;
335 S3 a_T = (TYPE) a_t;
336 S4 b_T = (TYPE) b_t;
337 S5 prod_T = a_T * b_T;
339 where type 'TYPE' is at least double the size of type 'type'.
341 Input:
343 * LAST_STMT: A stmt from which the pattern search begins. In the example,
344 when this function is called with S5, the pattern {S3,S4,S5} is be detected.
346 Output:
348 * TYPE_IN: The type of the input arguments to the pattern.
350 * TYPE_OUT: The type of the output of this pattern.
352 * Return value: A new stmt that will be used to replace the sequence of
353 stmts that constitute the pattern. In this case it will be:
354 WIDEN_MULT <a_t, b_t>
357 static gimple
358 vect_recog_widen_mult_pattern (gimple last_stmt,
359 tree *type_in,
360 tree *type_out)
362 gimple def_stmt0, def_stmt1;
363 tree oprnd0, oprnd1;
364 tree type, half_type0, half_type1;
365 gimple pattern_stmt;
366 tree vectype, vectype_out;
367 tree dummy;
368 tree var;
369 enum tree_code dummy_code;
370 int dummy_int;
371 VEC (tree, heap) *dummy_vec;
373 if (!is_gimple_assign (last_stmt))
374 return NULL;
376 type = gimple_expr_type (last_stmt);
378 /* Starting from LAST_STMT, follow the defs of its uses in search
379 of the above pattern. */
381 if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
382 return NULL;
384 oprnd0 = gimple_assign_rhs1 (last_stmt);
385 oprnd1 = gimple_assign_rhs2 (last_stmt);
386 if (!types_compatible_p (TREE_TYPE (oprnd0), type)
387 || !types_compatible_p (TREE_TYPE (oprnd1), type))
388 return NULL;
390 /* Check argument 0 */
391 if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0))
392 return NULL;
393 oprnd0 = gimple_assign_rhs1 (def_stmt0);
395 /* Check argument 1 */
396 if (!widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1))
397 return NULL;
398 oprnd1 = gimple_assign_rhs1 (def_stmt1);
400 if (!types_compatible_p (half_type0, half_type1))
401 return NULL;
403 /* Pattern detected. */
404 if (vect_print_dump_info (REPORT_DETAILS))
405 fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");
407 /* Check target support */
408 vectype = get_vectype_for_scalar_type (half_type0);
409 vectype_out = get_vectype_for_scalar_type (type);
410 if (!vectype
411 || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt,
412 vectype_out, vectype,
413 &dummy, &dummy, &dummy_code,
414 &dummy_code, &dummy_int, &dummy_vec))
415 return NULL;
417 *type_in = vectype;
418 *type_out = vectype_out;
420 /* Pattern supported. Create a stmt to be used to replace the pattern: */
421 var = vect_recog_temp_ssa_var (type, NULL);
422 pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
423 oprnd1);
424 SSA_NAME_DEF_STMT (var) = pattern_stmt;
426 if (vect_print_dump_info (REPORT_DETAILS))
427 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
429 return pattern_stmt;
433 /* Function vect_recog_pow_pattern
435 Try to find the following pattern:
437 x = POW (y, N);
439 with POW being one of pow, powf, powi, powif and N being
440 either 2 or 0.5.
442 Input:
444 * LAST_STMT: A stmt from which the pattern search begins.
446 Output:
448 * TYPE_IN: The type of the input arguments to the pattern.
450 * TYPE_OUT: The type of the output of this pattern.
452 * Return value: A new stmt that will be used to replace the sequence of
453 stmts that constitute the pattern. In this case it will be:
454 x = x * x
456 x = sqrt (x)
459 static gimple
460 vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
462 tree fn, base, exp = NULL;
463 gimple stmt;
464 tree var;
466 if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
467 return NULL;
469 fn = gimple_call_fndecl (last_stmt);
470 switch (DECL_FUNCTION_CODE (fn))
472 case BUILT_IN_POWIF:
473 case BUILT_IN_POWI:
474 case BUILT_IN_POWF:
475 case BUILT_IN_POW:
476 base = gimple_call_arg (last_stmt, 0);
477 exp = gimple_call_arg (last_stmt, 1);
478 if (TREE_CODE (exp) != REAL_CST
479 && TREE_CODE (exp) != INTEGER_CST)
480 return NULL;
481 break;
483 default:
484 return NULL;
487 /* We now have a pow or powi builtin function call with a constant
488 exponent. */
490 *type_out = NULL_TREE;
492 /* Catch squaring. */
493 if ((host_integerp (exp, 0)
494 && tree_low_cst (exp, 0) == 2)
495 || (TREE_CODE (exp) == REAL_CST
496 && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
498 *type_in = TREE_TYPE (base);
500 var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
501 stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
502 SSA_NAME_DEF_STMT (var) = stmt;
503 return stmt;
506 /* Catch square root. */
507 if (TREE_CODE (exp) == REAL_CST
508 && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
510 tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
511 *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
512 if (*type_in)
514 gimple stmt = gimple_build_call (newfn, 1, base);
515 if (vectorizable_function (stmt, *type_in, *type_in)
516 != NULL_TREE)
518 var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
519 gimple_call_set_lhs (stmt, var);
520 return stmt;
525 return NULL;
/* Function vect_recog_widen_sum_pattern

   Try to find the following pattern:

     type x_t;
     TYPE x_T, sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = *p;
     S2  x_T = (TYPE) x_t;
     S3  sum_1 = x_T + sum_0;

   where type 'TYPE' is at least double the size of type 'type', i.e - we're
   summing elements of type 'type' into an accumulator of type 'TYPE'. This is
   a special case of a reduction computation.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins. In the example,
   when this function is called with S3, the pattern {S2,S3} will be detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern. In this case it will be:
        WIDEN_SUM <x_t, sum_0>

   Note: The widening-sum idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results. It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */

static gimple
vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE) X;
          sum_1 = DX + sum_0;
     In which DX is at least double the size of X, and sum_1 has been
     recognized as a reduction variable.
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  /* The addition must have been classified as a reduction beforehand.  */
  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* So far so good. Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a cast from type 'type' to type
     'TYPE'.  */

  if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt))
    return NULL;

  /* Use the narrow (pre-cast) operand in the replacement stmt.  */
  oprnd0 = gimple_assign_rhs1 (stmt);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
                                               oprnd0, oprnd1);
  /* gimple_build_assign_with_ops does not link the SSA name to its def.  */
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}
/* Function vect_pattern_recog_1

   Input:
   PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
        computation pattern.
   STMT: A stmt from which the pattern search should start.

   If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
   expression that computes the same functionality and can be used to
   replace the sequence of stmts that are involved in the pattern.

   Output:
   This function checks if the expression returned by PATTERN_RECOG_FUNC is
   supported in vector form by the target.  We use 'TYPE_IN' to obtain the
   relevant vector type. If 'TYPE_IN' is already a vector type, then this
   indicates that target support had already been checked by PATTERN_RECOG_FUNC.
   If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC, we check that it fits
   to the available target pattern.

   This function also does some bookkeeping, as explained in the documentation
   for vect_recog_pattern.  */

static void
vect_pattern_recog_1 (
        gimple (* vect_recog_func) (gimple, tree *, tree *),
        gimple_stmt_iterator si)
{
  gimple stmt = gsi_stmt (si), pattern_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info pattern_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree pattern_vectype;
  tree type_in, type_out;
  enum tree_code code;
  int i;
  gimple next;

  /* Run the recognizer; a NULL result means no pattern rooted here.  */
  pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out);
  if (!pattern_stmt)
    return;

  if (VECTOR_MODE_P (TYPE_MODE (type_in)))
    {
      /* No need to check target support (already checked by the pattern
         recognition function).  */
      if (type_out)
        gcc_assert (VECTOR_MODE_P (TYPE_MODE (type_out)));
      pattern_vectype = type_out ? type_out : type_in;
    }
  else
    {
      enum machine_mode vec_mode;
      enum insn_code icode;
      optab optab;

      /* Check target support: map the scalar types to vector types and
         verify an optab handler exists for the pattern's operation.  */
      type_in = get_vectype_for_scalar_type (type_in);
      if (!type_in)
        return;
      if (type_out)
        type_out = get_vectype_for_scalar_type (type_out);
      else
        type_out = type_in;
      if (!type_out)
        return;
      pattern_vectype = type_out;

      if (is_gimple_assign (pattern_stmt))
        code = gimple_assign_rhs_code (pattern_stmt);
      else
        {
          gcc_assert (is_gimple_call (pattern_stmt));
          code = CALL_EXPR;
        }

      optab = optab_for_tree_code (code, type_in, optab_default);
      vec_mode = TYPE_MODE (type_in);
      if (!optab
          || (icode = optab_handler (optab, vec_mode)) == CODE_FOR_nothing
          || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out)))
        return;
    }

  /* Found a vectorizable pattern.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "pattern recognized: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* Mark the stmts that are involved in the pattern: insert the new stmt
     before the pattern root and cross-link the two via RELATED_STMT.  */
  gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (pattern_stmt,
                      new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL));
  pattern_stmt_info = vinfo_for_stmt (pattern_stmt);

  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info);
  STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
  STMT_VINFO_IN_PATTERN_P (stmt_info) = true;
  STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt;

  /* Patterns cannot be vectorized using SLP, because they change the order of
     computation: drop the original stmt from the reduction list.  */
  for (i = 0; VEC_iterate (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i,
                           next);
       i++)
    if (next == stmt)
      VEC_ordered_remove (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i);
}
752 /* Function vect_pattern_recog
754 Input:
755 LOOP_VINFO - a struct_loop_info of a loop in which we want to look for
756 computation idioms.
758 Output - for each computation idiom that is detected we insert a new stmt
759 that provides the same functionality and that can be vectorized. We
760 also record some information in the struct_stmt_info of the relevant
761 stmts, as explained below:
763 At the entry to this function we have the following stmts, with the
764 following initial value in the STMT_VINFO fields:
766 stmt in_pattern_p related_stmt vec_stmt
767 S1: a_i = .... - - -
768 S2: a_2 = ..use(a_i).. - - -
769 S3: a_1 = ..use(a_2).. - - -
770 S4: a_0 = ..use(a_1).. - - -
771 S5: ... = ..use(a_0).. - - -
773 Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
774 represented by a single stmt. We then:
775 - create a new stmt S6 that will replace the pattern.
776 - insert the new stmt S6 before the last stmt in the pattern
777 - fill in the STMT_VINFO fields as follows:
779 in_pattern_p related_stmt vec_stmt
780 S1: a_i = .... - - -
781 S2: a_2 = ..use(a_i).. - - -
782 S3: a_1 = ..use(a_2).. - - -
783 > S6: a_new = .... - S4 -
784 S4: a_0 = ..use(a_1).. true S6 -
785 S5: ... = ..use(a_0).. - - -
787 (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
788 to each other through the RELATED_STMT field).
790 S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
791 of S4 because it will replace all its uses. Stmts {S1,S2,S3} will
792 remain irrelevant unless used by stmts other than S4.
794 If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
795 (because they are marked as irrelevant). It will vectorize S6, and record
796 a pointer to the new vector stmt VS6 both from S6 (as usual), and also
797 from S4. We do that so that when we get to vectorizing stmts that use the
798 def of S4 (like S5 that uses a_0), we'll know where to take the relevant
799 vector-def from. S4 will be skipped, and S5 will be vectorized as usual:
801 in_pattern_p related_stmt vec_stmt
802 S1: a_i = .... - - -
803 S2: a_2 = ..use(a_i).. - - -
804 S3: a_1 = ..use(a_2).. - - -
805 > VS6: va_new = .... - - -
806 S6: a_new = .... - S4 VS6
807 S4: a_0 = ..use(a_1).. true S6 VS6
808 > VS5: ... = ..vuse(va_new).. - - -
809 S5: ... = ..use(a_0).. - - -
811 DCE could then get rid of {S1,S2,S3,S4,S5,S6} (if their defs are not used
812 elsewhere), and we'll end up with:
814 VS6: va_new = ....
815 VS5: ... = ..vuse(va_new)..
817 If vectorization does not succeed, DCE will clean S6 away (its def is
818 not used), and we'll end up with the original sequence.
821 void
822 vect_pattern_recog (loop_vec_info loop_vinfo)
824 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
825 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
826 unsigned int nbbs = loop->num_nodes;
827 gimple_stmt_iterator si;
828 unsigned int i, j;
829 gimple (* vect_recog_func_ptr) (gimple, tree *, tree *);
831 if (vect_print_dump_info (REPORT_DETAILS))
832 fprintf (vect_dump, "=== vect_pattern_recog ===");
834 /* Scan through the loop stmts, applying the pattern recognition
835 functions starting at each stmt visited: */
836 for (i = 0; i < nbbs; i++)
838 basic_block bb = bbs[i];
839 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
841 /* Scan over all generic vect_recog_xxx_pattern functions. */
842 for (j = 0; j < NUM_PATTERNS; j++)
844 vect_recog_func_ptr = vect_vect_recog_func_ptrs[j];
845 vect_pattern_recog_1 (vect_recog_func_ptr, si);