/* Analysis Utilities for Loop Vectorization.
   Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
   Contributed by Dorit Nuzman <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "expr.h"
#include "optabs.h"
#include "params.h"
#include "tree-data-ref.h"
#include "tree-vectorizer.h"
#include "recog.h"
#include "diagnostic-core.h"
/* Function prototypes */
static void vect_pattern_recog_1
  (gimple (* ) (gimple, tree *, tree *), gimple_stmt_iterator);
static bool widened_name_p (tree, gimple, tree *, gimple *);

/* Pattern recognition functions  */
static gimple vect_recog_widen_sum_pattern (gimple, tree *, tree *);
static gimple vect_recog_widen_mult_pattern (gimple, tree *, tree *);
static gimple vect_recog_dot_prod_pattern (gimple, tree *, tree *);
static gimple vect_recog_pow_pattern (gimple, tree *, tree *);
static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
        vect_recog_widen_mult_pattern,
        vect_recog_widen_sum_pattern,
        vect_recog_dot_prod_pattern,
        vect_recog_pow_pattern};
/* Function widened_name_p

   Check whether NAME, an ssa-name used in USE_STMT,
   is a result of a type-promotion, such that:
     DEF_STMT: NAME = NOP (name0)
   where the type of name0 (HALF_TYPE) is smaller than the type of NAME.  */
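
/* For illustration (a hypothetical example, not taken from any testcase):
   if a loop contains

     i_3 = (int) s_2;         <-- s_2 has type 'short int'
     sum_4 = i_3 + sum_1;

   then for NAME == i_3, used in the second stmt, the function returns true
   and sets HALF_TYPE to 'short int'.  */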

static bool
widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt)
{
  tree dummy;
  gimple dummy_gimple;
  loop_vec_info loop_vinfo;
  stmt_vec_info stmt_vinfo;
  tree type = TREE_TYPE (name);
  tree oprnd0;
  enum vect_def_type dt;
  tree def;

  stmt_vinfo = vinfo_for_stmt (use_stmt);
  loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (!vect_is_simple_use (name, loop_vinfo, NULL, def_stmt, &def, &dt))
    return false;

  if (dt != vect_internal_def
      && dt != vect_external_def && dt != vect_constant_def)
    return false;

  if (! *def_stmt)
    return false;

  if (!is_gimple_assign (*def_stmt))
    return false;

  if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR)
    return false;

  oprnd0 = gimple_assign_rhs1 (*def_stmt);

  *half_type = TREE_TYPE (oprnd0);
  if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type)
      || (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type))
      || (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2)))
    return false;

  if (!vect_is_simple_use (oprnd0, loop_vinfo, NULL, &dummy_gimple, &dummy,
                           &dt))
    return false;

  return true;
}

/* Helper to return a new temporary for pattern of TYPE for STMT.  If STMT
   is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var. */

static tree
vect_recog_temp_ssa_var (tree type, gimple stmt)
{
  tree var = create_tmp_var (type, "patt");

  add_referenced_var (var);
  var = make_ssa_name (var, stmt);
  return var;
}

/* Function vect_recog_dot_prod_pattern

   Try to find the following pattern:

     type x_t, y_t;
     TYPE1 prod;
     TYPE2 sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = ...
     S2  y_t = ...
     S3  x_T = (TYPE1) x_t;
     S4  y_T = (TYPE1) y_t;
     S5  prod = x_T * y_T;
     [S6  prod = (TYPE2) prod;  #optional]
     S7  sum_1 = prod + sum_0;

   where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
   same size as 'TYPE1' or bigger.  This is a special case of a reduction
   computation.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S7, the pattern {S3,S4,S5,S6,S7} will be
   detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>

   Note: The dot-prod idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results.  It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */
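
/* For illustration, a hypothetical source function containing such a loop
   (names and types here are only an example):

     int
     dot_prod (short *a, short *b, int n)
     {
       int i, sum = 0;
       for (i = 0; i < n; i++)
         sum += a[i] * b[i];
       return sum;
     }

   The 'short int' elements are widened, multiplied, and accumulated into the
   'int' variable 'sum', which is what DOT_PROD_EXPR expresses.  */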

static gimple
vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  tree oprnd00, oprnd01;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  tree prod_type;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var, rhs;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE1) X;
          DY = (TYPE1) Y;
          DPROD = DX * DY;
          DDPROD = (TYPE2) DPROD;
          sum_1 = DDPROD + sum_0;
     In which
     - DX is double the size of X
     - DY is double the size of Y
     - DX, DY, DPROD all have the same type
     - sum is the same size as DPROD or bigger
     - sum has been recognized as a reduction variable.

     This is equivalent to:
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD w+ sum_0;  #widen summation
     or
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD + sum_0;   #summation
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as widening-summation?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      type = gimple_expr_type (stmt);
      if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      half_type = TREE_TYPE (oprnd0);
    }
  else
    {
      gimple def_stmt;

      if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (last_stmt);
      oprnd1 = gimple_assign_rhs2 (last_stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), type)
          || !types_compatible_p (TREE_TYPE (oprnd1), type))
        return NULL;
      stmt = last_stmt;

      if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt))
        {
          stmt = def_stmt;
          oprnd0 = gimple_assign_rhs1 (stmt);
        }
      else
        half_type = type;
    }

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a (widen_)mult_expr.  */

  prod_type = half_type;
  stmt = SSA_NAME_DEF_STMT (oprnd0);

  /* It could not be the dot_prod pattern if the stmt is outside the loop.  */
  if (!gimple_bb (stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
    return NULL;

  /* FORNOW.  Can continue analyzing the def-use chain when this stmt is in a
     phi inside the loop (in case we are analyzing an outer-loop).  */
  if (!is_gimple_assign (stmt))
    return NULL;
  stmt_vinfo = vinfo_for_stmt (stmt);
  gcc_assert (stmt_vinfo);
  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
    return NULL;
  if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
    return NULL;
  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as a widening multiplication?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
        return NULL;
      stmt_vinfo = vinfo_for_stmt (stmt);
      gcc_assert (stmt_vinfo);
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def);
      oprnd00 = gimple_assign_rhs1 (stmt);
      oprnd01 = gimple_assign_rhs2 (stmt);
    }
  else
    {
      tree half_type0, half_type1;
      gimple def_stmt;
      tree oprnd0, oprnd1;

      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type)
          || !types_compatible_p (TREE_TYPE (oprnd1), prod_type))
        return NULL;
      if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt))
        return NULL;
      oprnd00 = gimple_assign_rhs1 (def_stmt);
      if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt))
        return NULL;
      oprnd01 = gimple_assign_rhs1 (def_stmt);
      if (!types_compatible_p (half_type0, half_type1))
        return NULL;
      if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
        return NULL;
    }

  half_type = TREE_TYPE (oprnd00);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  rhs = build3 (DOT_PROD_EXPR, type, oprnd00, oprnd01, oprnd1);
  pattern_stmt = gimple_build_assign (var, rhs);

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}

/* Function vect_recog_widen_mult_pattern

   Try to find the following pattern:

     type a_t, b_t;
     TYPE a_T, b_T, prod_T;

     S1  a_t = ;
     S2  b_t = ;
     S3  a_T = (TYPE) a_t;
     S4  b_T = (TYPE) b_t;
     S5  prod_T = a_T * b_T;

   where type 'TYPE' is at least double the size of type 'type'.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S5, the pattern {S3,S4,S5} will be
   detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_MULT <a_t, b_t>
*/
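
/* For illustration, a hypothetical source function containing such a
   computation (names and types here are only an example):

     void
     widen_mult (short *a, short *b, int *c, int n)
     {
       int i;
       for (i = 0; i < n; i++)
         c[i] = a[i] * b[i];
     }

   Both 'short int' operands are promoted before the multiplication, so each
   product can be computed directly as WIDEN_MULT <a[i], b[i]>.  */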

static gimple
vect_recog_widen_mult_pattern (gimple last_stmt,
                               tree *type_in,
                               tree *type_out)
{
  gimple def_stmt0, def_stmt1;
  tree oprnd0, oprnd1;
  tree type, half_type0, half_type1;
  gimple pattern_stmt;
  tree vectype, vectype_out;
  tree dummy;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  VEC (tree, heap) *dummy_vec;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* Check argument 0 */
  if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0))
    return NULL;
  oprnd0 = gimple_assign_rhs1 (def_stmt0);

  /* Check argument 1 */
  if (!widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1))
    return NULL;
  oprnd1 = gimple_assign_rhs1 (def_stmt1);

  if (!types_compatible_p (half_type0, half_type1))
    return NULL;

  /* Pattern detected.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");

  /* Check target support  */
  vectype = get_vectype_for_scalar_type (half_type0);
  vectype_out = get_vectype_for_scalar_type (type);
  if (!vectype
      || !vectype_out
      || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt,
                                          vectype_out, vectype,
                                          &dummy, &dummy, &dummy_code,
                                          &dummy_code, &dummy_int, &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = vectype_out;

  /* Pattern supported.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
                                               oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);

  return pattern_stmt;
}

/* Function vect_recog_pow_pattern

   Try to find the following pattern:

     x = POW (y, N);

   with POW being one of pow, powf, powi, powif and N being
   either 2 or 0.5.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        x = x * x
   or
        x = sqrt (x)
*/
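
/* For illustration, hypothetical calls that this pattern handles (names are
   only an example):

     y = pow (x, 2.0);      is replaced by   y = x * x
     z = pow (x, 0.5);      is replaced by   z = sqrt (x)

   The sqrt form is used only if the target provides a vectorizable sqrt for
   the type of 'x'.  */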

static gimple
vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  tree fn, base, exp = NULL;
  gimple stmt;
  tree var;

  if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
    return NULL;

  fn = gimple_call_fndecl (last_stmt);
  if (fn == NULL_TREE || DECL_BUILT_IN_CLASS (fn) != BUILT_IN_NORMAL)
    return NULL;

  switch (DECL_FUNCTION_CODE (fn))
    {
    case BUILT_IN_POWIF:
    case BUILT_IN_POWI:
    case BUILT_IN_POWF:
    case BUILT_IN_POW:
      base = gimple_call_arg (last_stmt, 0);
      exp = gimple_call_arg (last_stmt, 1);
      if (TREE_CODE (exp) != REAL_CST
          && TREE_CODE (exp) != INTEGER_CST)
        return NULL;
      break;

    default:
      return NULL;
    }

  /* We now have a pow or powi builtin function call with a constant
     exponent.  */

  *type_out = NULL_TREE;

  /* Catch squaring.  */
  if ((host_integerp (exp, 0)
       && tree_low_cst (exp, 0) == 2)
      || (TREE_CODE (exp) == REAL_CST
          && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
    {
      *type_in = TREE_TYPE (base);

      var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
      stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
      SSA_NAME_DEF_STMT (var) = stmt;
      return stmt;
    }

  /* Catch square root.  */
  if (TREE_CODE (exp) == REAL_CST
      && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
    {
      tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
      *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
      if (*type_in)
        {
          gimple stmt = gimple_build_call (newfn, 1, base);
          if (vectorizable_function (stmt, *type_in, *type_in)
              != NULL_TREE)
            {
              var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
              gimple_call_set_lhs (stmt, var);
              return stmt;
            }
        }
    }

  return NULL;
}

/* Function vect_recog_widen_sum_pattern

   Try to find the following pattern:

     type x_t;
     TYPE x_T, sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = *p;
     S2  x_T = (TYPE) x_t;
     S3  sum_1 = x_T + sum_0;

   where type 'TYPE' is at least double the size of type 'type', i.e. we're
   summing elements of type 'type' into an accumulator of type 'TYPE'.  This
   is a special case of a reduction computation.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S3, the pattern {S2,S3} will be detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_SUM <x_t, sum_0>

   Note: The widening-sum idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results.  It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */
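
/* For illustration, a hypothetical source function containing such a loop
   (names and types here are only an example):

     int
     widen_sum (short *a, int n)
     {
       int i, sum = 0;
       for (i = 0; i < n; i++)
         sum += a[i];
       return sum;
     }

   Each 'short int' element is widened before being added to the 'int'
   accumulator, which is what WIDEN_SUM expresses.  */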

static gimple
vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE) X;
          sum_1 = DX + sum_0;
     In which DX is at least double the size of X, and sum_1 has been
     recognized as a reduction variable.
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a cast from type 'type' to type
     'TYPE'.  */

  if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt))
    return NULL;

  oprnd0 = gimple_assign_rhs1 (stmt);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
                                               oprnd0, oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}

/* Function vect_pattern_recog_1

   Input:
   PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
        computation pattern.
   STMT: A stmt from which the pattern search should start.

   If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
   expression that computes the same functionality and can be used to
   replace the sequence of stmts that are involved in the pattern.

   Output:
   This function checks if the expression returned by PATTERN_RECOG_FUNC is
   supported in vector form by the target.  We use 'TYPE_IN' to obtain the
   relevant vector type.  If 'TYPE_IN' is already a vector type, then this
   indicates that target support had already been checked by
   PATTERN_RECOG_FUNC.  If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC,
   we check that it fits to the available target pattern.

   This function also does some bookkeeping, as explained in the documentation
   for vect_pattern_recog.  */

static void
vect_pattern_recog_1 (
        gimple (* vect_recog_func) (gimple, tree *, tree *),
        gimple_stmt_iterator si)
{
  gimple stmt = gsi_stmt (si), pattern_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info pattern_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree pattern_vectype;
  tree type_in, type_out;
  enum tree_code code;
  int i;
  gimple next;

  pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out);
  if (!pattern_stmt)
    return;

  if (VECTOR_MODE_P (TYPE_MODE (type_in)))
    {
      /* No need to check target support (already checked by the pattern
         recognition function).  */
      if (type_out)
        gcc_assert (VECTOR_MODE_P (TYPE_MODE (type_out)));
      pattern_vectype = type_out ? type_out : type_in;
    }
  else
    {
      enum machine_mode vec_mode;
      enum insn_code icode;
      optab optab;

      /* Check target support  */
      type_in = get_vectype_for_scalar_type (type_in);
      if (!type_in)
        return;
      if (type_out)
        type_out = get_vectype_for_scalar_type (type_out);
      else
        type_out = type_in;
      if (!type_out)
        return;
      pattern_vectype = type_out;

      if (is_gimple_assign (pattern_stmt))
        code = gimple_assign_rhs_code (pattern_stmt);
      else
        {
          gcc_assert (is_gimple_call (pattern_stmt));
          code = CALL_EXPR;
        }

      optab = optab_for_tree_code (code, type_in, optab_default);
      vec_mode = TYPE_MODE (type_in);
      if (!optab
          || (icode = optab_handler (optab, vec_mode)) == CODE_FOR_nothing
          || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out)))
        return;
    }

  /* Found a vectorizable pattern.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "pattern recognized: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* Mark the stmts that are involved in the pattern.  */
  gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (pattern_stmt,
                      new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL));
  pattern_stmt_info = vinfo_for_stmt (pattern_stmt);

  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info);
  STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
  STMT_VINFO_IN_PATTERN_P (stmt_info) = true;
  STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt;

  /* Patterns cannot be vectorized using SLP, because they change the order of
     computation.  */
  FOR_EACH_VEC_ELT (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i, next)
    if (next == stmt)
      VEC_ordered_remove (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i);
}

/* Function vect_pattern_recog

   Input:
   LOOP_VINFO - a struct_loop_info of a loop in which we want to look for
        computation idioms.

   Output - for each computation idiom that is detected we insert a new stmt
        that provides the same functionality and that can be vectorized.  We
        also record some information in the struct_stmt_info of the relevant
        stmts, as explained below:

   At the entry to this function we have the following stmts, with the
   following initial value in the STMT_VINFO fields:

         stmt                       in_pattern_p  related_stmt  vec_stmt
         S1: a_i = ....                   -             -            -
         S2: a_2 = ..use(a_i)..           -             -            -
         S3: a_1 = ..use(a_2)..           -             -            -
         S4: a_0 = ..use(a_1)..           -             -            -
         S5: ... = ..use(a_0)..           -             -            -

   Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
   represented by a single stmt.  We then:
   - create a new stmt S6 that will replace the pattern.
   - insert the new stmt S6 before the last stmt in the pattern
   - fill in the STMT_VINFO fields as follows:

                                          in_pattern_p  related_stmt  vec_stmt
         S1: a_i = ....                   -             -            -
         S2: a_2 = ..use(a_i)..           -             -            -
         S3: a_1 = ..use(a_2)..           -             -            -
       > S6: a_new = ....                 -             S4           -
         S4: a_0 = ..use(a_1)..           true          S6           -
         S5: ... = ..use(a_0)..           -             -            -

   (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
   to each other through the RELATED_STMT field).

   S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
   of S4 because it will replace all its uses.  Stmts {S1,S2,S3} will
   remain irrelevant unless used by stmts other than S4.

   If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
   (because they are marked as irrelevant).  It will vectorize S6, and record
   a pointer to the new vector stmt VS6 both from S6 (as usual), and also
   from S4.  We do that so that when we get to vectorizing stmts that use the
   def of S4 (like S5 that uses a_0), we'll know where to take the relevant
   vector-def from.  S4 will be skipped, and S5 will be vectorized as usual:

                                          in_pattern_p  related_stmt  vec_stmt
         S1: a_i = ....                   -             -            -
         S2: a_2 = ..use(a_i)..           -             -            -
         S3: a_1 = ..use(a_2)..           -             -            -
       > VS6: va_new = ....               -             -            -
         S6: a_new = ....                 -             S4           VS6
         S4: a_0 = ..use(a_1)..           true          S6           VS6
       > VS5: ... = ..vuse(va_new)..      -             -            -
         S5: ... = ..use(a_0)..           -             -            -

   DCE could then get rid of {S1,S2,S3,S4,S5,S6} (if their defs are not used
   elsewhere), and we'll end up with:

         VS6: va_new = ....
         VS5: ... = ..vuse(va_new)..

   If vectorization does not succeed, DCE will clean S6 away (its def is
   not used), and we'll end up with the original sequence.  */

void
vect_pattern_recog (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int i, j;
  gimple (* vect_recog_func_ptr) (gimple, tree *, tree *);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_pattern_recog ===");

  /* Scan through the loop stmts, applying the pattern recognition
     functions starting at each stmt visited:  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          /* Scan over all generic vect_recog_xxx_pattern functions.  */
          for (j = 0; j < NUM_PATTERNS; j++)
            {
              vect_recog_func_ptr = vect_vect_recog_func_ptrs[j];
              vect_pattern_recog_1 (vect_recog_func_ptr, si);
            }
        }
    }
}