/* Analysis Utilities for Loop Vectorization.
   Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
   Contributed by Dorit Nuzman <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "expr.h"
#include "optabs.h"
#include "params.h"
#include "tree-data-ref.h"
#include "tree-vectorizer.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "toplev.h"
/* Function prototypes */
static void vect_pattern_recog_1
  (gimple (* ) (gimple, tree *, tree *), gimple_stmt_iterator);
static bool widened_name_p (tree, gimple, tree *, gimple *);

/* Pattern recognition functions  */
static gimple vect_recog_widen_sum_pattern (gimple, tree *, tree *);
static gimple vect_recog_widen_mult_pattern (gimple, tree *, tree *);
static gimple vect_recog_dot_prod_pattern (gimple, tree *, tree *);
static gimple vect_recog_pow_pattern (gimple, tree *, tree *);
static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
  vect_recog_widen_mult_pattern,
  vect_recog_widen_sum_pattern,
  vect_recog_dot_prod_pattern,
  vect_recog_pow_pattern};
/* Function widened_name_p

   Check whether NAME, an ssa-name used in USE_STMT,
   is a result of a type-promotion, such that:
     DEF_STMT: NAME = NOP (name0)
   where the type of name0 (HALF_TYPE) is smaller than the type of NAME.  */
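/* For example (an illustrative source fragment, not taken from this file),
   given

     unsigned char c;
     unsigned int i = (unsigned int) c;

   the cast becomes a NOP_EXPR assignment in GIMPLE, and widened_name_p
   called on the SSA name defined by it reports HALF_TYPE as
   'unsigned char': both types are integral, have the same signedness,
   and the precision of 'unsigned int' is at least twice that of
   'unsigned char'.  */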
static bool
widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt)
{
  tree dummy;
  gimple dummy_gimple;
  loop_vec_info loop_vinfo;
  stmt_vec_info stmt_vinfo;
  tree type = TREE_TYPE (name);
  tree oprnd0;
  enum vect_def_type dt;
  tree def;

  stmt_vinfo = vinfo_for_stmt (use_stmt);
  loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (!vect_is_simple_use (name, loop_vinfo, NULL, def_stmt, &def, &dt))
    return false;

  if (dt != vect_internal_def
      && dt != vect_external_def && dt != vect_constant_def)
    return false;

  if (! *def_stmt)
    return false;

  if (!is_gimple_assign (*def_stmt))
    return false;

  if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR)
    return false;

  oprnd0 = gimple_assign_rhs1 (*def_stmt);

  *half_type = TREE_TYPE (oprnd0);
  if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type)
      || (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type))
      || (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2)))
    return false;

  if (!vect_is_simple_use (oprnd0, loop_vinfo, NULL, &dummy_gimple, &dummy,
                           &dt))
    return false;

  return true;
}
/* Helper to return a new temporary for pattern of TYPE for STMT.  If STMT
   is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var. */
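/* For instance, vect_recog_widen_mult_pattern below passes a NULL stmt and
   later sets SSA_NAME_DEF_STMT (var) to the newly built pattern stmt, while
   the square-root case of vect_recog_pow_pattern passes the new call stmt
   directly and only needs to set the variable as the call's lhs.  */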
static tree
vect_recog_temp_ssa_var (tree type, gimple stmt)
{
  tree var = create_tmp_var (type, "patt");

  add_referenced_var (var);
  var = make_ssa_name (var, stmt);
  return var;
}
/* Function vect_recog_dot_prod_pattern

   Try to find the following pattern:

     type x_t, y_t;
     TYPE1 prod;
     TYPE2 sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = ...
     S2  y_t = ...
     S3  x_T = (TYPE1) x_t;
     S4  y_T = (TYPE1) y_t;
     S5  prod = x_T * y_T;
     [S6  prod = (TYPE2) prod;  #optional]
     S7  sum_1 = prod + sum_0;

   where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
   same size as 'TYPE1' or bigger.  This is a special case of a reduction
   computation.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S7, the pattern {S3,S4,S5,S6,S7} will be
   detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>

   Note: The dot-prod idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results.  It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */
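/* A concrete source-level sketch of a loop this pattern targets (illustrative
   only; the array names and the bound N are arbitrary):

     #define N 64
     short a[N], b[N];

     int
     sdot (void)
     {
       int i, sum = 0;
       for (i = 0; i < N; i++)
         sum += a[i] * b[i];
       return sum;
     }

   The short operands are promoted to int (S3, S4), multiplied in int (S5),
   and accumulated into the int reduction variable (S7), so the sequence can
   be replaced by the single dot-product statement shown above.  */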
static gimple
vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  tree oprnd00, oprnd01;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  tree prod_type;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var, rhs;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE1) X;
          DY = (TYPE1) Y;
          DPROD = DX * DY;
          DDPROD = (TYPE2) DPROD;
          sum_1 = DDPROD + sum_0;
     In which
     - DX is double the size of X
     - DY is double the size of Y
     - DX, DY, DPROD all have the same type
     - sum is the same size as DPROD or bigger
     - sum has been recognized as a reduction variable.

     This is equivalent to:
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD w+ sum_0;  #widen summation
     or
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD + sum_0;   #summation
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as widening-summation?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      type = gimple_expr_type (stmt);
      if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      half_type = TREE_TYPE (oprnd0);
    }
  else
    {
      gimple def_stmt;

      if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (last_stmt);
      oprnd1 = gimple_assign_rhs2 (last_stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), type)
          || !types_compatible_p (TREE_TYPE (oprnd1), type))
        return NULL;
      stmt = last_stmt;

      if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt))
        {
          stmt = def_stmt;
          oprnd0 = gimple_assign_rhs1 (stmt);
        }
      else
        half_type = type;
    }

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a (widen_)mult_expr  */

  prod_type = half_type;
  stmt = SSA_NAME_DEF_STMT (oprnd0);

  /* It could not be the dot_prod pattern if the stmt is outside the loop.  */
  if (!gimple_bb (stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
    return NULL;

  /* FORNOW.  Can continue analyzing the def-use chain when this stmt is a phi
     inside the loop (in case we are analyzing an outer-loop).  */
  if (!is_gimple_assign (stmt))
    return NULL;
  stmt_vinfo = vinfo_for_stmt (stmt);
  gcc_assert (stmt_vinfo);
  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
    return NULL;
  if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
    return NULL;
  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as a widening multiplication?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
        return NULL;
      stmt_vinfo = vinfo_for_stmt (stmt);
      gcc_assert (stmt_vinfo);
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def);
      oprnd00 = gimple_assign_rhs1 (stmt);
      oprnd01 = gimple_assign_rhs2 (stmt);
    }
  else
    {
      tree half_type0, half_type1;
      gimple def_stmt;
      tree oprnd0, oprnd1;

      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type)
          || !types_compatible_p (TREE_TYPE (oprnd1), prod_type))
        return NULL;
      if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt))
        return NULL;
      oprnd00 = gimple_assign_rhs1 (def_stmt);
      if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt))
        return NULL;
      oprnd01 = gimple_assign_rhs1 (def_stmt);
      if (!types_compatible_p (half_type0, half_type1))
        return NULL;
      if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
        return NULL;
    }

  half_type = TREE_TYPE (oprnd00);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  rhs = build3 (DOT_PROD_EXPR, type, oprnd00, oprnd01, oprnd1);
  pattern_stmt = gimple_build_assign (var, rhs);

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}
/* Function vect_recog_widen_mult_pattern

   Try to find the following pattern:

     type a_t, b_t;
     TYPE a_T, b_T, prod_T;

     S1  a_t = ;
     S2  b_t = ;
     S3  a_T = (TYPE) a_t;
     S4  b_T = (TYPE) b_t;
     S5  prod_T = a_T * b_T;

   where type 'TYPE' is at least double the size of type 'type'.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S5, the pattern {S3,S4,S5} will be
   detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_MULT <a_t, b_t>
*/
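/* A source-level sketch of a loop this pattern targets (illustrative only;
   the array names and the bound N are arbitrary):

     #define N 64
     short a[N], b[N];
     int c[N];

     void
     widen_mult (void)
     {
       int i;
       for (i = 0; i < N; i++)
         c[i] = a[i] * b[i];
     }

   Each short operand is widened to int before the multiplication (S3, S4),
   so S3-S5 can be replaced by a single WIDEN_MULT_EXPR when the target
   supports a widening multiply.  */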
static gimple
vect_recog_widen_mult_pattern (gimple last_stmt,
                               tree *type_in,
                               tree *type_out)
{
  gimple def_stmt0, def_stmt1;
  tree oprnd0, oprnd1;
  tree type, half_type0, half_type1;
  gimple pattern_stmt;
  tree vectype, vectype_out;
  tree dummy;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  VEC (tree, heap) *dummy_vec;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* Check argument 0 */
  if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0))
    return NULL;
  oprnd0 = gimple_assign_rhs1 (def_stmt0);

  /* Check argument 1 */
  if (!widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1))
    return NULL;
  oprnd1 = gimple_assign_rhs1 (def_stmt1);

  if (!types_compatible_p (half_type0, half_type1))
    return NULL;

  /* Pattern detected.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");

  /* Check target support  */
  vectype = get_vectype_for_scalar_type (half_type0);
  vectype_out = get_vectype_for_scalar_type (type);
  if (!vectype
      || !vectype_out
      || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt,
                                          vectype_out, vectype,
                                          &dummy, &dummy, &dummy_code,
                                          &dummy_code, &dummy_int, &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = vectype_out;

  /* Pattern supported.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
                                               oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);

  return pattern_stmt;
}
/* Function vect_recog_pow_pattern

   Try to find the following pattern:

     x = POW (y, N);

   with POW being one of pow, powf, powi, powif and N being
   either 2 or 0.5.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        x = x * x
   or
        x = sqrt (x)
*/
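/* A source-level sketch of the calls this pattern looks for (illustrative
   only; names are arbitrary, and earlier folding may already simplify such
   calls depending on the compilation flags):

     #include <math.h>
     #define N 64
     double x[N], y[N];

     void
     pow_loop (void)
     {
       int i;
       for (i = 0; i < N; i++)
         x[i] = pow (y[i], 0.5);
     }

   A call with a constant exponent of 2 is rewritten as y[i] * y[i]; with an
   exponent of 0.5, as here, it is turned into a call to sqrt (y[i]) provided
   the target can vectorize sqrt.  */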
static gimple
vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  tree fn, base, exp = NULL;
  gimple stmt;
  tree var;

  if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
    return NULL;

  fn = gimple_call_fndecl (last_stmt);
  switch (DECL_FUNCTION_CODE (fn))
    {
    case BUILT_IN_POWIF:
    case BUILT_IN_POWI:
    case BUILT_IN_POWF:
    case BUILT_IN_POW:
      base = gimple_call_arg (last_stmt, 0);
      exp = gimple_call_arg (last_stmt, 1);
      if (TREE_CODE (exp) != REAL_CST
          && TREE_CODE (exp) != INTEGER_CST)
        return NULL;
      break;

    default:
      return NULL;
    }

  /* We now have a pow or powi builtin function call with a constant
     exponent.  */

  *type_out = NULL_TREE;

  /* Catch squaring.  */
  if ((host_integerp (exp, 0)
       && tree_low_cst (exp, 0) == 2)
      || (TREE_CODE (exp) == REAL_CST
          && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
    {
      *type_in = TREE_TYPE (base);

      var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
      stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
      SSA_NAME_DEF_STMT (var) = stmt;
      return stmt;
    }

  /* Catch square root.  */
  if (TREE_CODE (exp) == REAL_CST
      && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
    {
      tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
      *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
      if (*type_in)
        {
          gimple stmt = gimple_build_call (newfn, 1, base);
          if (vectorizable_function (stmt, *type_in, *type_in)
              != NULL_TREE)
            {
              var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
              gimple_call_set_lhs (stmt, var);
              return stmt;
            }
        }
    }

  return NULL;
}
/* Function vect_recog_widen_sum_pattern

   Try to find the following pattern:

     type x_t;
     TYPE x_T, sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = *p;
     S2  x_T = (TYPE) x_t;
     S3  sum_1 = x_T + sum_0;

   where type 'TYPE' is at least double the size of type 'type', i.e., we're
   summing elements of type 'type' into an accumulator of type 'TYPE'.  This
   is a special case of a reduction computation.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S3, the pattern {S2,S3} will be detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_SUM <x_t, sum_0>

   Note: The widening-sum idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results.  It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */
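/* A source-level sketch of the idiom (illustrative only; names and the bound
   N are arbitrary):

     #define N 64
     short in[N];

     int
     widen_sum (void)
     {
       int i, sum = 0;
       for (i = 0; i < N; i++)
         sum += in[i];
       return sum;
     }

   Each short element is converted to int before being added to the
   accumulator, which is exactly S2/S3 above, so the pair can be replaced by
   a single WIDEN_SUM_EXPR.  */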
static gimple
vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE) X;
          sum_1 = DX + sum_0;
     In which DX is at least double the size of X, and sum_1 has been
     recognized as a reduction variable.
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a cast from type 'type' to type
     'TYPE'.  */

  if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt))
    return NULL;

  oprnd0 = gimple_assign_rhs1 (stmt);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
                                               oprnd0, oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}
/* Function vect_pattern_recog_1

   Input:
   PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
        computation pattern.
   STMT: A stmt from which the pattern search should start.

   If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
   expression that computes the same functionality and can be used to
   replace the sequence of stmts that are involved in the pattern.

   Output:
   This function checks if the expression returned by PATTERN_RECOG_FUNC is
   supported in vector form by the target.  We use 'TYPE_IN' to obtain the
   relevant vector type.  If 'TYPE_IN' is already a vector type, then this
   indicates that target support had already been checked by
   PATTERN_RECOG_FUNC.  If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC,
   we check that it fits to the available target pattern.

   This function also does some bookkeeping, as explained in the documentation
   for vect_pattern_recog.  */
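/* For example, vect_recog_widen_mult_pattern above checks target support
   itself and returns vector types, so TYPE_IN already has a vector mode and
   the first branch below simply uses it; vect_recog_dot_prod_pattern and
   vect_recog_widen_sum_pattern return scalar types, so the optab-based check
   in the else-branch decides whether the target supports the pattern.  */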
static void
vect_pattern_recog_1 (
        gimple (* vect_recog_func) (gimple, tree *, tree *),
        gimple_stmt_iterator si)
{
  gimple stmt = gsi_stmt (si), pattern_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info pattern_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree pattern_vectype;
  tree type_in, type_out;
  enum tree_code code;
  int i;
  gimple next;

  pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out);
  if (!pattern_stmt)
    return;

  if (VECTOR_MODE_P (TYPE_MODE (type_in)))
    {
      /* No need to check target support (already checked by the pattern
         recognition function).  */
      if (type_out)
        gcc_assert (VECTOR_MODE_P (TYPE_MODE (type_out)));
      pattern_vectype = type_out ? type_out : type_in;
    }
  else
    {
      enum machine_mode vec_mode;
      enum insn_code icode;
      optab optab;

      /* Check target support  */
      type_in = get_vectype_for_scalar_type (type_in);
      if (!type_in)
        return;
      if (type_out)
        type_out = get_vectype_for_scalar_type (type_out);
      else
        type_out = type_in;
      if (!type_out)
        return;
      pattern_vectype = type_out;

      if (is_gimple_assign (pattern_stmt))
        code = gimple_assign_rhs_code (pattern_stmt);
      else
        {
          gcc_assert (is_gimple_call (pattern_stmt));
          code = CALL_EXPR;
        }

      optab = optab_for_tree_code (code, type_in, optab_default);
      vec_mode = TYPE_MODE (type_in);
      if (!optab
          || (icode = optab_handler (optab, vec_mode)) == CODE_FOR_nothing
          || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out)))
        return;
    }

  /* Found a vectorizable pattern.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "pattern recognized: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* Mark the stmts that are involved in the pattern.  */
  gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (pattern_stmt,
                      new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL));
  pattern_stmt_info = vinfo_for_stmt (pattern_stmt);

  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info);
  STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
  STMT_VINFO_IN_PATTERN_P (stmt_info) = true;
  STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt;

  /* Patterns cannot be vectorized using SLP, because they change the order of
     computation.  */
  FOR_EACH_VEC_ELT (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i, next)
    if (next == stmt)
      VEC_ordered_remove (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i);
}
/* Function vect_pattern_recog

   Input:
   LOOP_VINFO - a struct_loop_info of a loop in which we want to look for
        computation idioms.

   Output - for each computation idiom that is detected we insert a new stmt
        that provides the same functionality and that can be vectorized.  We
        also record some information in the struct_stmt_info of the relevant
        stmts, as explained below:

   At the entry to this function we have the following stmts, with the
   following initial value in the STMT_VINFO fields:

         stmt                     in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
         S4: a_0 = ..use(a_1)..         -       -               -
         S5: ... = ..use(a_0)..         -       -               -

   Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
   represented by a single stmt.  We then:
   - create a new stmt S6 that will replace the pattern.
   - insert the new stmt S6 before the last stmt in the pattern
   - fill in the STMT_VINFO fields as follows:

                                  in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
       > S6: a_new = ....               -       S4              -
         S4: a_0 = ..use(a_1)..         true    S6              -
         S5: ... = ..use(a_0)..         -       -               -

   (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
   to each other through the RELATED_STMT field).

   S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
   of S4 because it will replace all its uses.  Stmts {S1,S2,S3} will
   remain irrelevant unless used by stmts other than S4.

   If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
   (because they are marked as irrelevant).  It will vectorize S6, and record
   a pointer to the new vector stmt VS6 both from S6 (as usual), and also
   from S4.  We do that so that when we get to vectorizing stmts that use the
   def of S4 (like S5 that uses a_0), we'll know where to take the relevant
   vector-def from.  S4 will be skipped, and S5 will be vectorized as usual:

                                  in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
       > VS6: va_new = ....             -       -               -
         S6: a_new = ....               -       S4              VS6
         S4: a_0 = ..use(a_1)..         true    S6              VS6
       > VS5: ... = ..vuse(va_new)..    -       -               -
         S5: ... = ..use(a_0)..         -       -               -

   DCE could then get rid of {S1,S2,S3,S4,S5,S6} (if their defs are not used
   elsewhere), and we'll end up with:

         VS6: va_new = ....
         VS5: ... = ..vuse(va_new)..

   If vectorization does not succeed, DCE will clean S6 away (its def is
   not used), and we'll end up with the original sequence.  */
void
vect_pattern_recog (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int i, j;
  gimple (* vect_recog_func_ptr) (gimple, tree *, tree *);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_pattern_recog ===");

  /* Scan through the loop stmts, applying the pattern recognition
     functions starting at each stmt visited:  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          /* Scan over all generic vect_recog_xxx_pattern functions.  */
          for (j = 0; j < NUM_PATTERNS; j++)
            {
              vect_recog_func_ptr = vect_vect_recog_func_ptrs[j];
              vect_pattern_recog_1 (vect_recog_func_ptr, si);
            }
        }
    }
}