/* Analysis Utilities for Loop Vectorization.
   Copyright (C) 2006-2016 Free Software Foundation, Inc.
   Contributed by Dorit Nuzman <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "tree-vectorizer.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
/* Pattern recognition functions  */
static gimple *vect_recog_widen_sum_pattern (vec<gimple *> *, tree *,
					     tree *);
static gimple *vect_recog_widen_mult_pattern (vec<gimple *> *, tree *,
					      tree *);
static gimple *vect_recog_dot_prod_pattern (vec<gimple *> *, tree *,
					    tree *);
static gimple *vect_recog_sad_pattern (vec<gimple *> *, tree *,
				       tree *);
static gimple *vect_recog_pow_pattern (vec<gimple *> *, tree *, tree *);
static gimple *vect_recog_over_widening_pattern (vec<gimple *> *, tree *,
						 tree *);
static gimple *vect_recog_widen_shift_pattern (vec<gimple *> *,
					       tree *, tree *);
static gimple *vect_recog_rotate_pattern (vec<gimple *> *, tree *, tree *);
static gimple *vect_recog_vector_vector_shift_pattern (vec<gimple *> *,
						       tree *, tree *);
static gimple *vect_recog_divmod_pattern (vec<gimple *> *,
					  tree *, tree *);
static gimple *vect_recog_mult_pattern (vec<gimple *> *,
					tree *, tree *);
static gimple *vect_recog_mixed_size_cond_pattern (vec<gimple *> *,
						   tree *, tree *);
static gimple *vect_recog_bool_pattern (vec<gimple *> *, tree *, tree *);
static gimple *vect_recog_mask_conversion_pattern (vec<gimple *> *, tree *,
						   tree *);
struct vect_recog_func
{
  vect_recog_func_ptr fn;
  const char *name;
};
/* Note that ordering matters - the first pattern matching on a stmt
   is taken which means usually the more complex one needs to precede
   the less complex ones (widen_sum only after dot_prod or sad for example).  */
static vect_recog_func vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
      { vect_recog_widen_mult_pattern, "widen_mult" },
      { vect_recog_dot_prod_pattern, "dot_prod" },
      { vect_recog_sad_pattern, "sad" },
      { vect_recog_widen_sum_pattern, "widen_sum" },
      { vect_recog_pow_pattern, "pow" },
      { vect_recog_widen_shift_pattern, "widen_shift" },
      { vect_recog_over_widening_pattern, "over_widening" },
      { vect_recog_rotate_pattern, "rotate" },
      { vect_recog_vector_vector_shift_pattern, "vector_vector_shift" },
      { vect_recog_divmod_pattern, "divmod" },
      { vect_recog_mult_pattern, "mult" },
      { vect_recog_mixed_size_cond_pattern, "mixed_size_cond" },
      { vect_recog_bool_pattern, "bool" },
      { vect_recog_mask_conversion_pattern, "mask_conversion" }
};
static inline void
append_pattern_def_seq (stmt_vec_info stmt_info, gimple *stmt)
{
  gimple_seq_add_stmt_without_update (&STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
				      stmt);
}

static inline void
new_pattern_def_seq (stmt_vec_info stmt_info, gimple *stmt)
{
  STMT_VINFO_PATTERN_DEF_SEQ (stmt_info) = NULL;
  append_pattern_def_seq (stmt_info, stmt);
}
/* Check whether STMT2 is in the same loop or basic block as STMT1.
   Which of the two applies depends on whether we're currently doing
   loop-based or basic-block-based vectorization, as determined by
   the vinfo_for_stmt for STMT1 (which must be defined).

   If this returns true, vinfo_for_stmt for STMT2 is guaranteed
   to be defined as well.  */

static bool
vect_same_loop_or_bb_p (gimple *stmt1, gimple *stmt2)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt1);
  return vect_stmt_in_region_p (stmt_vinfo->vinfo, stmt2);
}
/* If the LHS of DEF_STMT has a single use, and that statement is
   in the same loop or basic block, return it.  */

static gimple *
vect_single_imm_use (gimple *def_stmt)
{
  tree lhs = gimple_assign_lhs (def_stmt);
  use_operand_p use_p;
  gimple *use_stmt;

  if (!single_imm_use (lhs, &use_p, &use_stmt))
    return NULL;

  if (!vect_same_loop_or_bb_p (def_stmt, use_stmt))
    return NULL;

  return use_stmt;
}
/* Check whether NAME, an ssa-name used in USE_STMT,
   is a result of a type promotion, such that:
     DEF_STMT: NAME = NOP (name0)
   If CHECK_SIGN is TRUE, check that either both types are signed or both are
   unsigned.  */

static bool
type_conversion_p (tree name, gimple *use_stmt, bool check_sign,
		   tree *orig_type, gimple **def_stmt, bool *promotion)
{
  gimple *dummy_gimple;
  stmt_vec_info stmt_vinfo;
  tree type = TREE_TYPE (name);
  tree oprnd0;
  enum vect_def_type dt;

  stmt_vinfo = vinfo_for_stmt (use_stmt);
  if (!vect_is_simple_use (name, stmt_vinfo->vinfo, def_stmt, &dt))
    return false;

  if (dt != vect_internal_def
      && dt != vect_external_def && dt != vect_constant_def)
    return false;

  if (!*def_stmt)
    return false;

  if (dt == vect_internal_def)
    {
      stmt_vec_info def_vinfo = vinfo_for_stmt (*def_stmt);
      if (STMT_VINFO_IN_PATTERN_P (def_vinfo))
	return false;
    }

  if (!is_gimple_assign (*def_stmt))
    return false;

  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (*def_stmt)))
    return false;

  oprnd0 = gimple_assign_rhs1 (*def_stmt);

  *orig_type = TREE_TYPE (oprnd0);
  if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*orig_type)
      || ((TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*orig_type)) && check_sign))
    return false;

  if (TYPE_PRECISION (type) >= (TYPE_PRECISION (*orig_type) * 2))
    *promotion = true;
  else
    *promotion = false;

  if (!vect_is_simple_use (oprnd0, stmt_vinfo->vinfo, &dummy_gimple, &dt))
    return false;

  return true;
}
/* Helper to return a new temporary for pattern of TYPE for STMT.  If STMT
   is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var.  */

static tree
vect_recog_temp_ssa_var (tree type, gimple *stmt)
{
  return make_temp_ssa_name (type, stmt, "patt");
}
/* Function vect_recog_dot_prod_pattern

   Try to find the following pattern:

     type x_t, y_t;
     TYPE1 prod;
     TYPE2 sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S3  x_T = (TYPE1) x_t;
     S4  y_T = (TYPE1) y_t;
     S5  prod = x_T * y_T;
     [S6  prod = (TYPE2) prod;  #optional]
     S7  sum_1 = prod + sum_0;

   where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
   same size of 'TYPE1' or bigger.  This is a special case of a reduction
   computation.

   Input:

   * STMTS: Contains a stmt from which the pattern search begins.  In the
   example, when this function is called with S7, the pattern {S3,S4,S5,S6,S7}
   will be detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>

   Note: The dot-prod idiom is a widening reduction pattern that is
	 vectorized without preserving all the intermediate results.  It
	 produces only N/2 (widened) results (by summing up pairs of
	 intermediate results) rather than all N results.  Therefore, we
	 cannot allow this pattern when we want to get all the results and in
	 the correct order (as is the case when this computation is in an
	 inner-loop nested in an outer-loop that is being vectorized).  */
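
/* Illustrative sketch only (not part of the vectorizer itself): a C source
   loop of the shape this recognizer looks for.  The array names and the
   iteration count N are invented for the example.

     short x[N], y[N];
     int sum = 0;
     for (int i = 0; i < N; i++)
       sum += x[i] * y[i];	// S3-S7 above; replaced by DOT_PROD_EXPR

   On targets that provide a dot-product instruction the widening multiply
   and the accumulation are then emitted as a single vector operation.  */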
static gimple *
vect_recog_dot_prod_pattern (vec<gimple *> *stmts, tree *type_in,
			     tree *type_out)
{
  gimple *stmt, *last_stmt = (*stmts)[0];
  tree oprnd0, oprnd1;
  tree oprnd00, oprnd01;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple *pattern_stmt;
  tree prod_type;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop;
  tree var;
  bool promotion;

  if (!loop_info)
    return NULL;

  loop = LOOP_VINFO_LOOP (loop_info);

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  if (loop && nested_in_vect_loop_p (loop, last_stmt))
    return NULL;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE1) X;
          DY = (TYPE1) Y;
          DPROD = DX * DY;
          DDPROD = (TYPE2) DPROD;
          sum_1 = DDPROD + sum_0;
     In which
     - DX is double the size of X
     - DY is double the size of Y
     - DX, DY, DPROD all have the same type
     - sum is the same size of DPROD or bigger
     - sum has been recognized as a reduction variable.

     This is equivalent to:
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD w+ sum_0;  #widen summation
     or
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD + sum_0;   #summation
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as widening-summation?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      type = gimple_expr_type (stmt);
      if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
	return NULL;
      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      half_type = TREE_TYPE (oprnd0);
    }
  else
    {
      gimple *def_stmt;

      if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def
	  && ! STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_vinfo))
	return NULL;
      oprnd0 = gimple_assign_rhs1 (last_stmt);
      oprnd1 = gimple_assign_rhs2 (last_stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), type)
	  || !types_compatible_p (TREE_TYPE (oprnd1), type))
	return NULL;
      stmt = last_stmt;

      if (type_conversion_p (oprnd0, stmt, true, &half_type, &def_stmt,
			     &promotion)
	  && promotion)
	{
	  stmt = def_stmt;
	  oprnd0 = gimple_assign_rhs1 (stmt);
	}
      else
	half_type = type;
    }

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a (widen_)mult_expr  */
  if (TREE_CODE (oprnd0) != SSA_NAME)
    return NULL;

  prod_type = half_type;
  stmt = SSA_NAME_DEF_STMT (oprnd0);

  /* It could not be the dot_prod pattern if the stmt is outside the loop.  */
  if (!gimple_bb (stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
    return NULL;

  /* FORNOW.  Can continue analyzing the def-use chain when this stmt is in a
     phi inside the loop (in case we are analyzing an outer-loop).  */
  if (!is_gimple_assign (stmt))
    return NULL;
  stmt_vinfo = vinfo_for_stmt (stmt);
  gcc_assert (stmt_vinfo);
  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
    return NULL;
  if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
    return NULL;
  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as a widening multiplication?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
	return NULL;
      stmt_vinfo = vinfo_for_stmt (stmt);
      gcc_assert (stmt_vinfo);
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def);
      oprnd00 = gimple_assign_rhs1 (stmt);
      oprnd01 = gimple_assign_rhs2 (stmt);
      STMT_VINFO_PATTERN_DEF_SEQ (vinfo_for_stmt (last_stmt))
	  = STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo);
    }
  else
    {
      tree half_type0, half_type1;
      gimple *def_stmt;
      tree oprnd0, oprnd1;

      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type)
	  || !types_compatible_p (TREE_TYPE (oprnd1), prod_type))
	return NULL;
      if (!type_conversion_p (oprnd0, stmt, true, &half_type0, &def_stmt,
			      &promotion)
	  || !promotion)
	return NULL;
      oprnd00 = gimple_assign_rhs1 (def_stmt);
      if (!type_conversion_p (oprnd1, stmt, true, &half_type1, &def_stmt,
			      &promotion)
	  || !promotion)
	return NULL;
      oprnd01 = gimple_assign_rhs1 (def_stmt);
      if (!types_compatible_p (half_type0, half_type1))
	return NULL;
      if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
	return NULL;
    }

  half_type = TREE_TYPE (oprnd00);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign (var, DOT_PROD_EXPR,
				      oprnd00, oprnd01, oprnd1);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_recog_dot_prod_pattern: detected: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  return pattern_stmt;
}
/* Function vect_recog_sad_pattern

   Try to find the following Sum of Absolute Difference (SAD) pattern:

     unsigned type x_t, y_t;
     signed TYPE1 diff, abs_diff;
     TYPE2 sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S3  x_T = (TYPE1) x_t;
     S4  y_T = (TYPE1) y_t;
     S5  diff = x_T - y_T;
     S6  abs_diff = ABS_EXPR <diff>;
     [S7  abs_diff = (TYPE2) abs_diff;  #optional]
     S8  sum_1 = abs_diff + sum_0;

   where 'TYPE1' is at least double the size of type 'type', and 'TYPE2' is the
   same size of 'TYPE1' or bigger.  This is a special case of a reduction
   computation.

   Input:

   * STMTS: Contains a stmt from which the pattern search begins.  In the
   example, when this function is called with S8, the pattern
   {S3,S4,S5,S6,S7,S8} will be detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        SAD_EXPR <x_t, y_t, sum_0>
  */
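
/* Illustrative sketch only: a typical source form of the SAD idiom (the
   names are invented for the example).

     unsigned char x[N], y[N];
     int sum = 0;
     for (int i = 0; i < N; i++)
       sum += abs (x[i] - y[i]);	// S3-S8 above; replaced by SAD_EXPR

   The subtraction happens in a wide enough signed type (here int, via the
   usual C promotions); targets with a sum-of-absolute-differences
   instruction map SAD_EXPR to it directly.  */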
static gimple *
vect_recog_sad_pattern (vec<gimple *> *stmts, tree *type_in,
			tree *type_out)
{
  gimple *last_stmt = (*stmts)[0];
  tree sad_oprnd0, sad_oprnd1;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree half_type;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop;
  bool promotion;

  if (!loop_info)
    return NULL;

  loop = LOOP_VINFO_LOOP (loop_info);

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  if (loop && nested_in_vect_loop_p (loop, last_stmt))
    return NULL;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  tree sum_type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE1) X;
          DY = (TYPE1) Y;
          DDIFF = DX - DY;
          DAD = ABS_EXPR <DDIFF>;
          DAD = (TYPE2) DAD;
          sum_1 = DAD + sum_0;
     In which
     - DX is at least double the size of X
     - DY is at least double the size of Y
     - DX, DY, DDIFF, DAD all have the same type
     - sum is the same size of DAD or bigger
     - sum has been recognized as a reduction variable.

     This is equivalent to:
       DDIFF = X w- Y;          #widen sub
       DAD = ABS_EXPR <DDIFF>;
       sum_1 = DAD w+ sum_0;    #widen summation
     or
       DDIFF = X w- Y;          #widen sub
       DAD = ABS_EXPR <DDIFF>;
       sum_1 = DAD + sum_0;     #summation
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  tree plus_oprnd0, plus_oprnd1;

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as widening-summation?  */

      gimple *stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      sum_type = gimple_expr_type (stmt);
      if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
	return NULL;
      plus_oprnd0 = gimple_assign_rhs1 (stmt);
      plus_oprnd1 = gimple_assign_rhs2 (stmt);
      half_type = TREE_TYPE (plus_oprnd0);
    }
  else
    {
      gimple *def_stmt;

      if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def
	  && ! STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_vinfo))
	return NULL;
      plus_oprnd0 = gimple_assign_rhs1 (last_stmt);
      plus_oprnd1 = gimple_assign_rhs2 (last_stmt);
      if (!types_compatible_p (TREE_TYPE (plus_oprnd0), sum_type)
	  || !types_compatible_p (TREE_TYPE (plus_oprnd1), sum_type))
	return NULL;

      /* The type conversion could be promotion, demotion,
	 or just signed -> unsigned.  */
      if (type_conversion_p (plus_oprnd0, last_stmt, false,
			     &half_type, &def_stmt, &promotion))
	plus_oprnd0 = gimple_assign_rhs1 (def_stmt);
      else
	half_type = sum_type;
    }

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that plus_oprnd1 is the reduction variable (defined by a
     loop-header phi), and plus_oprnd0 is an ssa-name defined by a stmt in the
     loop body.
     Then check that plus_oprnd0 is defined by an abs_expr.  */

  if (TREE_CODE (plus_oprnd0) != SSA_NAME)
    return NULL;

  tree abs_type = half_type;
  gimple *abs_stmt = SSA_NAME_DEF_STMT (plus_oprnd0);

  /* It could not be the sad pattern if the abs_stmt is outside the loop.  */
  if (!gimple_bb (abs_stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (abs_stmt)))
    return NULL;

  /* FORNOW.  Can continue analyzing the def-use chain when this stmt is in a
     phi inside the loop (in case we are analyzing an outer-loop).  */
  if (!is_gimple_assign (abs_stmt))
    return NULL;

  stmt_vec_info abs_stmt_vinfo = vinfo_for_stmt (abs_stmt);
  gcc_assert (abs_stmt_vinfo);
  if (STMT_VINFO_DEF_TYPE (abs_stmt_vinfo) != vect_internal_def)
    return NULL;
  if (gimple_assign_rhs_code (abs_stmt) != ABS_EXPR)
    return NULL;

  tree abs_oprnd = gimple_assign_rhs1 (abs_stmt);
  if (!types_compatible_p (TREE_TYPE (abs_oprnd), abs_type))
    return NULL;
  if (TYPE_UNSIGNED (abs_type))
    return NULL;

  /* We then detect if the operand of abs_expr is defined by a minus_expr.  */

  if (TREE_CODE (abs_oprnd) != SSA_NAME)
    return NULL;

  gimple *diff_stmt = SSA_NAME_DEF_STMT (abs_oprnd);

  /* It could not be the sad pattern if the diff_stmt is outside the loop.  */
  if (!gimple_bb (diff_stmt)
      || !flow_bb_inside_loop_p (loop, gimple_bb (diff_stmt)))
    return NULL;

  /* FORNOW.  Can continue analyzing the def-use chain when this stmt is in a
     phi inside the loop (in case we are analyzing an outer-loop).  */
  if (!is_gimple_assign (diff_stmt))
    return NULL;

  stmt_vec_info diff_stmt_vinfo = vinfo_for_stmt (diff_stmt);
  gcc_assert (diff_stmt_vinfo);
  if (STMT_VINFO_DEF_TYPE (diff_stmt_vinfo) != vect_internal_def)
    return NULL;
  if (gimple_assign_rhs_code (diff_stmt) != MINUS_EXPR)
    return NULL;

  tree half_type0, half_type1;
  gimple *def_stmt;

  tree minus_oprnd0 = gimple_assign_rhs1 (diff_stmt);
  tree minus_oprnd1 = gimple_assign_rhs2 (diff_stmt);

  if (!types_compatible_p (TREE_TYPE (minus_oprnd0), abs_type)
      || !types_compatible_p (TREE_TYPE (minus_oprnd1), abs_type))
    return NULL;
  if (!type_conversion_p (minus_oprnd0, diff_stmt, false,
			  &half_type0, &def_stmt, &promotion)
      || !promotion)
    return NULL;
  sad_oprnd0 = gimple_assign_rhs1 (def_stmt);

  if (!type_conversion_p (minus_oprnd1, diff_stmt, false,
			  &half_type1, &def_stmt, &promotion)
      || !promotion)
    return NULL;
  sad_oprnd1 = gimple_assign_rhs1 (def_stmt);

  if (!types_compatible_p (half_type0, half_type1))
    return NULL;
  if (TYPE_PRECISION (abs_type) < TYPE_PRECISION (half_type0) * 2
      || TYPE_PRECISION (sum_type) < TYPE_PRECISION (half_type0) * 2)
    return NULL;

  *type_in = TREE_TYPE (sad_oprnd0);
  *type_out = sum_type;

  /* Pattern detected. Create a stmt to be used to replace the pattern: */
  tree var = vect_recog_temp_ssa_var (sum_type, NULL);
  gimple *pattern_stmt = gimple_build_assign (var, SAD_EXPR, sad_oprnd0,
					      sad_oprnd1, plus_oprnd1);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_recog_sad_pattern: detected: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  return pattern_stmt;
}
/* Handle widening operation by a constant.  At the moment we support MULT_EXPR
   and LSHIFT_EXPR.

   For MULT_EXPR we check that CONST_OPRND fits HALF_TYPE, and for LSHIFT_EXPR
   we check that CONST_OPRND is less than or equal to the size of HALF_TYPE.

   Otherwise, if the type of the result (TYPE) is at least 4 times bigger than
   HALF_TYPE, and there is an intermediate type (2 times smaller than TYPE)
   that satisfies the above restrictions, we can perform a widening operation
   from the intermediate type to TYPE and replace a_T = (TYPE) a_t;
   with a_it = (interm_type) a_t;  Store such operation in *WSTMT.  */
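
/* Worked example, a sketch with invented values: with HALF_TYPE ==
   unsigned char and TYPE == unsigned int (4 times wider), the constant 300
   does not fit unsigned char but does fit the intermediate unsigned short.
   We then rewrite

     a_T = (unsigned int) a_t;		// original promotion
     prod_T = a_T * 300;

   by storing a_it = (unsigned short) a_t in *WSTMT and letting the caller
   build a widening operation from unsigned short to unsigned int.  */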
static bool
vect_handle_widen_op_by_const (gimple *stmt, enum tree_code code,
			       tree const_oprnd, tree *oprnd,
			       gimple **wstmt, tree type,
			       tree *half_type, gimple *def_stmt)
{
  tree new_type, new_oprnd;

  if (code != MULT_EXPR && code != LSHIFT_EXPR)
    return false;

  if (((code == MULT_EXPR && int_fits_type_p (const_oprnd, *half_type))
	|| (code == LSHIFT_EXPR
	    && compare_tree_int (const_oprnd, TYPE_PRECISION (*half_type))
	       != 1))
      && TYPE_PRECISION (type) == (TYPE_PRECISION (*half_type) * 2))
    {
      /* CONST_OPRND is a constant of HALF_TYPE.  */
      *oprnd = gimple_assign_rhs1 (def_stmt);
      return true;
    }

  if (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 4))
    return false;

  if (!vect_same_loop_or_bb_p (stmt, def_stmt))
    return false;

  /* TYPE is 4 times bigger than HALF_TYPE, try widening operation for
     a type 2 times bigger than HALF_TYPE.  */
  new_type = build_nonstandard_integer_type (TYPE_PRECISION (type) / 2,
					     TYPE_UNSIGNED (type));
  if ((code == MULT_EXPR && !int_fits_type_p (const_oprnd, new_type))
      || (code == LSHIFT_EXPR
	  && compare_tree_int (const_oprnd, TYPE_PRECISION (new_type)) == 1))
    return false;

  /* Use NEW_TYPE for widening operation and create a_T = (NEW_TYPE) a_t;  */
  *oprnd = gimple_assign_rhs1 (def_stmt);
  new_oprnd = make_ssa_name (new_type);
  *wstmt = gimple_build_assign (new_oprnd, NOP_EXPR, *oprnd);
  *oprnd = new_oprnd;

  *half_type = new_type;
  return true;
}
/* Function vect_recog_widen_mult_pattern

   Try to find the following pattern:

     type1 a_t;
     type2 b_t;
     TYPE a_T, b_T, prod_T;

     S3  a_T = (TYPE) a_t;
     S4  b_T = (TYPE) b_t;
     S5  prod_T = a_T * b_T;

   where type 'TYPE' is at least double the size of type 'type1' and 'type2'.

   Also detect unsigned cases:

     unsigned type1 a_t;
     unsigned type2 b_t;
     unsigned TYPE u_prod_T;
     TYPE a_T, b_T, prod_T;

     S3  a_T = (TYPE) a_t;
     S4  b_T = (TYPE) b_t;
     S5  prod_T = a_T * b_T;
     S6  u_prod_T = (unsigned TYPE) prod_T;

   and multiplication by constants:

     type a_t;
     TYPE a_T, prod_T;

     S3  a_T = (TYPE) a_t;
     S5  prod_T = a_T * CONST;

   A special case of multiplication by constants is when 'TYPE' is 4 times
   bigger than 'type', but CONST fits an intermediate type 2 times smaller
   than 'TYPE'.  In that case we create an additional pattern stmt for S3
   to create a variable of the intermediate type, and perform widen-mult
   on the intermediate type as well:

     type a_t;
     interm_type a_it;
     TYPE a_T, prod_T, prod_T';

     S3  a_T = (TYPE) a_t;
	   '--> a_it = (interm_type) a_t;
     S5  prod_T = a_T * CONST;
	   '--> prod_T' = a_it w* CONST;

   Input/Output:

   * STMTS: Contains a stmt from which the pattern search begins.  In the
   example, when this function is called with S5, the pattern {S3,S4,S5,(S6)}
   is detected.  In case of unsigned widen-mult, the original stmt (S5) is
   replaced with S6 in STMTS.  In case of multiplication by a constant
   of an intermediate type (the last case above), STMTS also contains S3
   (inserted before S5).

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
	WIDEN_MULT <a_t, b_t>
   If the result of WIDEN_MULT needs to be converted to a larger type, the
   returned stmt will be this type conversion stmt.
*/
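
/* Illustrative sketch only: the canonical source loop for this pattern
   (the names are invented for the example).

     short a[N], b[N];
     int c[N];
     for (int i = 0; i < N; i++)
       c[i] = a[i] * b[i];	// S3-S5 above; becomes WIDEN_MULT_EXPR

   The vectorizer can then use the target's widening-multiply instructions
   instead of unpacking both operands to full-width ints first.  */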
static gimple *
vect_recog_widen_mult_pattern (vec<gimple *> *stmts,
			       tree *type_in, tree *type_out)
{
  gimple *last_stmt = stmts->pop ();
  gimple *def_stmt0, *def_stmt1;
  tree oprnd0, oprnd1;
  tree type, half_type0, half_type1;
  gimple *new_stmt = NULL, *pattern_stmt = NULL;
  tree vectype, vecitype;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  vec<tree> dummy_vec;
  bool op1_ok;
  bool promotion;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* Check argument 0.  */
  if (!type_conversion_p (oprnd0, last_stmt, false, &half_type0, &def_stmt0,
			  &promotion)
      || !promotion)
    return NULL;
  /* Check argument 1.  */
  op1_ok = type_conversion_p (oprnd1, last_stmt, false, &half_type1,
			      &def_stmt1, &promotion);

  if (op1_ok && promotion)
    {
      oprnd0 = gimple_assign_rhs1 (def_stmt0);
      oprnd1 = gimple_assign_rhs1 (def_stmt1);
    }
  else
    {
      if (TREE_CODE (oprnd1) == INTEGER_CST
	  && TREE_CODE (half_type0) == INTEGER_TYPE
	  && vect_handle_widen_op_by_const (last_stmt, MULT_EXPR, oprnd1,
					    &oprnd0, &new_stmt, type,
					    &half_type0, def_stmt0))
	{
	  half_type1 = half_type0;
	  oprnd1 = fold_convert (half_type1, oprnd1);
	}
      else
	return NULL;
    }

  /* If the two arguments have different sizes, convert the one with
     the smaller type into the larger type.  */
  if (TYPE_PRECISION (half_type0) != TYPE_PRECISION (half_type1))
    {
      /* If we already used up the single-stmt slot give up.  */
      if (new_stmt)
	return NULL;

      tree *oprnd = NULL;
      gimple *def_stmt = NULL;

      if (TYPE_PRECISION (half_type0) < TYPE_PRECISION (half_type1))
	{
	  def_stmt = def_stmt0;
	  half_type0 = half_type1;
	  oprnd = &oprnd0;
	}
      else
	{
	  def_stmt = def_stmt1;
	  half_type1 = half_type0;
	  oprnd = &oprnd1;
	}

      tree old_oprnd = gimple_assign_rhs1 (def_stmt);
      tree new_oprnd = make_ssa_name (half_type0);
      new_stmt = gimple_build_assign (new_oprnd, NOP_EXPR, old_oprnd);
      *oprnd = new_oprnd;
    }

  /* Handle unsigned case.  Look for
     S6  u_prod_T = (unsigned TYPE) prod_T;
     Use unsigned TYPE as the type for WIDEN_MULT_EXPR.  */
  if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (half_type0))
    {
      gimple *use_stmt;
      tree use_lhs;
      tree use_type;

      if (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (half_type1))
	return NULL;

      use_stmt = vect_single_imm_use (last_stmt);
      if (!use_stmt || !is_gimple_assign (use_stmt)
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_stmt)))
	return NULL;

      use_lhs = gimple_assign_lhs (use_stmt);
      use_type = TREE_TYPE (use_lhs);
      if (!INTEGRAL_TYPE_P (use_type)
	  || (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (use_type))
	  || (TYPE_PRECISION (type) != TYPE_PRECISION (use_type)))
	return NULL;

      type = use_type;
      last_stmt = use_stmt;
    }

  if (!types_compatible_p (half_type0, half_type1))
    return NULL;

  /* If TYPE is more than twice larger than HALF_TYPE, we use WIDEN_MULT
     to get an intermediate result of type ITYPE.  In this case we need
     to build a statement to convert this intermediate result to type TYPE.  */
  tree itype = type;
  if (TYPE_PRECISION (type) > TYPE_PRECISION (half_type0) * 2)
    itype = build_nonstandard_integer_type
	      (GET_MODE_BITSIZE (TYPE_MODE (half_type0)) * 2,
	       TYPE_UNSIGNED (type));

  /* Pattern detected.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_recog_widen_mult_pattern: detected:\n");

  /* Check target support  */
  vectype = get_vectype_for_scalar_type (half_type0);
  vecitype = get_vectype_for_scalar_type (itype);
  if (!vectype
      || !vecitype
      || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt,
					  vecitype, vectype,
					  &dummy_code, &dummy_code,
					  &dummy_int, &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = get_vectype_for_scalar_type (type);

  /* Pattern supported. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (itype, NULL);
  pattern_stmt = gimple_build_assign (var, WIDEN_MULT_EXPR, oprnd0, oprnd1);

  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo) = NULL;

  /* If the original two operands have different sizes, we may need to convert
     the smaller one into the larger type.  If this is the case, at this point
     the new stmt is already built.  */
  if (new_stmt)
    {
      append_pattern_def_seq (stmt_vinfo, new_stmt);
      stmt_vec_info new_stmt_info
	= new_stmt_vec_info (new_stmt, stmt_vinfo->vinfo);
      set_vinfo_for_stmt (new_stmt, new_stmt_info);
      STMT_VINFO_VECTYPE (new_stmt_info) = vectype;
    }

  /* If ITYPE is not TYPE, we need to build a type conversion stmt to convert
     the result of the widen-mult operation into type TYPE.  */
  if (itype != type)
    {
      append_pattern_def_seq (stmt_vinfo, pattern_stmt);
      stmt_vec_info pattern_stmt_info
	= new_stmt_vec_info (pattern_stmt, stmt_vinfo->vinfo);
      set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
      STMT_VINFO_VECTYPE (pattern_stmt_info) = vecitype;
      pattern_stmt = gimple_build_assign (vect_recog_temp_ssa_var (type, NULL),
					  NOP_EXPR,
					  gimple_assign_lhs (pattern_stmt));
    }

  if (dump_enabled_p ())
    dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);

  stmts->safe_push (last_stmt);
  return pattern_stmt;
}
/* Function vect_recog_pow_pattern

   Try to find the following pattern:

     x = POW (y, N);

   with POW being one of pow, powf, powi, powif and N being
   either 2 or 0.5.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
	x = x * x
   or
	x = sqrt (x)
*/
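
/* Illustrative sketch only: source forms this recognizer rewrites (the
   array names are invented for the example).

     for (int i = 0; i < N; i++)
       q[i] = pow (p[i], 2.0);	// becomes p[i] * p[i]

     for (int i = 0; i < N; i++)
       q[i] = pow (p[i], 0.5);	// becomes sqrt (p[i]), via IFN_SQRT

   The call with a constant exponent is replaced before vectorization, so no
   vectorized pow is ever required.  */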
static gimple *
vect_recog_pow_pattern (vec<gimple *> *stmts, tree *type_in,
			tree *type_out)
{
  gimple *last_stmt = (*stmts)[0];
  tree base, exp = NULL;
  gimple *stmt;
  tree var;

  if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
    return NULL;

  switch (gimple_call_combined_fn (last_stmt))
    {
    CASE_CFN_POW:
    CASE_CFN_POWI:
      base = gimple_call_arg (last_stmt, 0);
      exp = gimple_call_arg (last_stmt, 1);
      if (TREE_CODE (exp) != REAL_CST
	  && TREE_CODE (exp) != INTEGER_CST)
	return NULL;
      break;

    default:
      return NULL;
    }

  /* We now have a pow or powi builtin function call with a constant
     exponent.  */

  *type_out = NULL_TREE;

  /* Catch squaring.  */
  if ((tree_fits_shwi_p (exp)
       && tree_to_shwi (exp) == 2)
      || (TREE_CODE (exp) == REAL_CST
	  && real_equal (&TREE_REAL_CST (exp), &dconst2)))
    {
      *type_in = TREE_TYPE (base);

      var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
      stmt = gimple_build_assign (var, MULT_EXPR, base, base);
      return stmt;
    }

  /* Catch square root.  */
  if (TREE_CODE (exp) == REAL_CST
      && real_equal (&TREE_REAL_CST (exp), &dconsthalf))
    {
      *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
      if (*type_in
	  && direct_internal_fn_supported_p (IFN_SQRT, *type_in,
					     OPTIMIZE_FOR_SPEED))
	{
	  gcall *stmt = gimple_build_call_internal (IFN_SQRT, 1, base);
	  var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
	  gimple_call_set_lhs (stmt, var);
	  return stmt;
	}
    }

  return NULL;
}
/* Function vect_recog_widen_sum_pattern

   Try to find the following pattern:

     type x_t;
     TYPE x_T, sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S2  x_T = (TYPE) x_t;
     S3  sum_1 = x_T + sum_0;

   where type 'TYPE' is at least double the size of type 'type', i.e - we're
   summing elements of type 'type' into an accumulator of type 'TYPE'.  This is
   a special case of a reduction computation.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S3, the pattern {S2,S3} will be detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
	WIDEN_SUM <x_t, sum_0>

   Note: The widening-sum idiom is a widening reduction pattern that is
	 vectorized without preserving all the intermediate results.  It
	 produces only N/2 (widened) results (by summing up pairs of
	 intermediate results) rather than all N results.  Therefore, we
	 cannot allow this pattern when we want to get all the results and in
	 the correct order (as is the case when this computation is in an
	 inner-loop nested in an outer-loop that is being vectorized).  */
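
/* Illustrative sketch only: a minimal source loop for the widening
   summation (the names are invented for the example).

     unsigned char x[N];
     unsigned int sum = 0;
     for (int i = 0; i < N; i++)
       sum += x[i];		// S2-S3 above; becomes WIDEN_SUM_EXPR

   The per-element promotions disappear from the vector loop; only the
   (widened) partial sums are kept.  */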
static gimple *
vect_recog_widen_sum_pattern (vec<gimple *> *stmts, tree *type_in,
			      tree *type_out)
{
  gimple *stmt, *last_stmt = (*stmts)[0];
  tree oprnd0, oprnd1;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple *pattern_stmt;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop;
  tree var;
  bool promotion;

  if (!loop_info)
    return NULL;

  loop = LOOP_VINFO_LOOP (loop_info);

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  if (loop && nested_in_vect_loop_p (loop, last_stmt))
    return NULL;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
	  DX = (TYPE) X;
	  sum_1 = DX + sum_0;
     In which DX is at least double the size of X, and sum_1 has been
     recognized as a reduction variable.
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def
      && ! STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_vinfo))
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a cast from type 'type' to type
     'TYPE'.  */

  if (!type_conversion_p (oprnd0, last_stmt, true, &half_type, &stmt,
			  &promotion)
      || !promotion)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (stmt);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign (var, WIDEN_SUM_EXPR, oprnd0, oprnd1);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_recog_widen_sum_pattern: detected: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  return pattern_stmt;
}
/* Return TRUE if the operation in STMT can be performed on a smaller type.

   Input:
   STMT - a statement to check.
   DEF - we support operations with two operands, one of which is constant.
	 The other operand can be defined by a demotion operation, or by a
	 previous statement in a sequence of over-promoted operations.  In the
	 latter case DEF is used to replace that operand.  (It is defined by a
	 pattern statement we created for the previous statement in the
	 sequence).

   Input/output:
   NEW_TYPE - Output: a smaller type that we are trying to use.  Input: if not
	 NULL, it's the type of DEF.
   STMTS - additional pattern statements.  If a pattern statement (type
	 conversion) is created in this function, its original statement is
	 added to STMTS.

   Output:
   OP0, OP1 - if the operation fits a smaller type, OP0 and OP1 are the new
	 operands to use in the new pattern statement for STMT (will be created
	 in vect_recog_over_widening_pattern ()).
   NEW_DEF_STMT - in case DEF has to be promoted, we create two pattern
	 statements for STMT: the first one is a type promotion and the second
	 one is the operation itself.  We return the type promotion statement
	 in NEW_DEF_STMT and further store it in STMT_VINFO_PATTERN_DEF_SEQ of
	 the second pattern statement.  */
static bool
vect_operation_fits_smaller_type (gimple *stmt, tree def, tree *new_type,
				  tree *op0, tree *op1, gimple **new_def_stmt,
				  vec<gimple *> *stmts)
{
  enum tree_code code;
  tree const_oprnd, oprnd;
  tree interm_type = NULL_TREE, half_type, new_oprnd, type;
  gimple *def_stmt, *new_stmt;
  bool first = false;
  bool promotion;

  *op0 = NULL_TREE;
  *op1 = NULL_TREE;
  *new_def_stmt = NULL;

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != LSHIFT_EXPR && code != RSHIFT_EXPR
      && code != BIT_IOR_EXPR && code != BIT_XOR_EXPR && code != BIT_AND_EXPR)
    return false;

  oprnd = gimple_assign_rhs1 (stmt);
  const_oprnd = gimple_assign_rhs2 (stmt);
  type = gimple_expr_type (stmt);

  if (TREE_CODE (oprnd) != SSA_NAME
      || TREE_CODE (const_oprnd) != INTEGER_CST)
    return false;

  /* If oprnd has other uses besides that in stmt we cannot mark it
     as being part of a pattern only.  */
  if (!has_single_use (oprnd))
    return false;

  /* If we are in the middle of a sequence, we use DEF from a previous
     statement.  Otherwise, OPRND has to be a result of type promotion.  */
  if (*new_type)
    {
      half_type = *new_type;
      oprnd = def;
    }
  else
    {
      first = true;
      if (!type_conversion_p (oprnd, stmt, false, &half_type, &def_stmt,
			      &promotion)
	  || !promotion
	  || !vect_same_loop_or_bb_p (stmt, def_stmt))
	return false;
    }

  /* Can we perform the operation on a smaller type?  */
  switch (code)
    {
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
	if (!int_fits_type_p (const_oprnd, half_type))
	  {
	    /* HALF_TYPE is not enough.  Try a bigger type if possible.  */
	    if (TYPE_PRECISION (type) < (TYPE_PRECISION (half_type) * 4))
	      return false;

	    interm_type = build_nonstandard_integer_type (
			TYPE_PRECISION (half_type) * 2, TYPE_UNSIGNED (type));
	    if (!int_fits_type_p (const_oprnd, interm_type))
	      return false;
	  }

	break;

      case LSHIFT_EXPR:
	/* Try intermediate type - HALF_TYPE is not enough for sure.  */
	if (TYPE_PRECISION (type) < (TYPE_PRECISION (half_type) * 4))
	  return false;

	/* Check that HALF_TYPE size + shift amount <= INTERM_TYPE size.
	  (e.g., if the original value was char, the shift amount is at most 8
	   if we want to use short).  */
	if (compare_tree_int (const_oprnd, TYPE_PRECISION (half_type)) == 1)
	  return false;

	interm_type = build_nonstandard_integer_type (
			TYPE_PRECISION (half_type) * 2, TYPE_UNSIGNED (type));

	if (!vect_supportable_shift (code, interm_type))
	  return false;

	break;

      case RSHIFT_EXPR:
	if (vect_supportable_shift (code, half_type))
	  break;

	/* Try intermediate type - HALF_TYPE is not supported.  */
	if (TYPE_PRECISION (type) < (TYPE_PRECISION (half_type) * 4))
	  return false;

	interm_type = build_nonstandard_integer_type (
			TYPE_PRECISION (half_type) * 2, TYPE_UNSIGNED (type));

	if (!vect_supportable_shift (code, interm_type))
	  return false;

	break;

      default:
	gcc_unreachable ();
    }

  /* There are four possible cases:
     1. OPRND is defined by a type promotion (in that case FIRST is TRUE, it's
	the first statement in the sequence)
	a. The original, HALF_TYPE, is not enough - we replace the promotion
	   from HALF_TYPE to TYPE with a promotion to INTERM_TYPE.
	b. HALF_TYPE is sufficient, OPRND is set as the RHS of the original
	   promotion.
     2. OPRND is defined by a pattern statement we created.
	a. Its type is not sufficient for the operation, we create a new stmt:
	   a type conversion for OPRND from HALF_TYPE to INTERM_TYPE.  We store
	   this statement in NEW_DEF_STMT, and it is later put in
	   STMT_VINFO_PATTERN_DEF_SEQ of the pattern statement for STMT.
	b. OPRND is good to use in the new statement.  */
  if (first)
    {
      if (interm_type)
	{
	  /* Replace the original type conversion HALF_TYPE->TYPE with
	     HALF_TYPE->INTERM_TYPE.  */
	  if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)))
	    {
	      new_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
	      /* Check if the already created pattern stmt is what we need.  */
	      if (!is_gimple_assign (new_stmt)
		  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (new_stmt))
		  || TREE_TYPE (gimple_assign_lhs (new_stmt)) != interm_type)
		return false;

	      stmts->safe_push (def_stmt);
	      oprnd = gimple_assign_lhs (new_stmt);
	    }
	  else
	    {
	      /* Create NEW_OPRND = (INTERM_TYPE) OPRND.  */
	      oprnd = gimple_assign_rhs1 (def_stmt);
	      new_oprnd = make_ssa_name (interm_type);
	      new_stmt = gimple_build_assign (new_oprnd, NOP_EXPR, oprnd);
	      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)) = new_stmt;
	      stmts->safe_push (def_stmt);
	      oprnd = new_oprnd;
	    }
	}
      else
	{
	  /* Retrieve the operand before the type promotion.  */
	  oprnd = gimple_assign_rhs1 (def_stmt);
	}
    }
  else
    {
      if (interm_type)
	{
	  /* Create a type conversion HALF_TYPE->INTERM_TYPE.  */
	  new_oprnd = make_ssa_name (interm_type);
	  new_stmt = gimple_build_assign (new_oprnd, NOP_EXPR, oprnd);
	  oprnd = new_oprnd;
	  *new_def_stmt = new_stmt;
	}

      /* Otherwise, OPRND is already set.  */
    }

  if (interm_type)
    *new_type = interm_type;
  else
    *new_type = half_type;

  *op0 = oprnd;
  *op1 = fold_convert (*new_type, const_oprnd);

  return true;
}
/* Try to find a statement or a sequence of statements that can be performed
   on a smaller type:

     type x_t;
     TYPE x_T, res0_T, res1_T;
   loop:
     S1  x_t = *p;
     S2  x_T = (TYPE) x_t;
     S3  res0_T = op (x_T, C0);
     S4  res1_T = op (res0_T, C1);
     S5  ... = () res1_T;  - type demotion

   where type 'TYPE' is at least double the size of type 'type', C0 and C1 are
   constants.
   Check if S3 and S4 can be done on a smaller type than 'TYPE', it can either
   be 'type' or some intermediate type.  For now, we expect S5 to be a type
   demotion operation.  We also check that S3 and S4 have only one use.  */
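
/* Illustrative sketch only: C integer promotion rules over-widen this kind
   of code (the names are invented for the example).

     unsigned char in[N], out[N];
     for (int i = 0; i < N; i++)
       out[i] = (in[i] << 1) | 3;	// computed in int, demoted back

   Here the implicit char->int promotion, the shift, the ior and the final
   demotion correspond to S2-S5 above; the pattern lets all of it be done in
   (unsigned) char or short vector lanes instead of int lanes.  */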
static gimple *
vect_recog_over_widening_pattern (vec<gimple *> *stmts,
				  tree *type_in, tree *type_out)
{
  gimple *stmt = stmts->pop ();
  gimple *pattern_stmt = NULL, *new_def_stmt, *prev_stmt = NULL,
	 *use_stmt = NULL;
  tree op0, op1, vectype = NULL_TREE, use_lhs, use_type;
  tree var = NULL_TREE, new_type = NULL_TREE, new_oprnd;
  bool first;
  tree type = NULL;

  first = true;
  while (1)
    {
      if (!vinfo_for_stmt (stmt)
	  || STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (stmt)))
	return NULL;

      new_def_stmt = NULL;
      if (!vect_operation_fits_smaller_type (stmt, var, &new_type,
					     &op0, &op1, &new_def_stmt,
					     stmts))
	{
	  if (first)
	    return NULL;
	  else
	    break;
	}

      /* STMT can be performed on a smaller type.  Check its uses.  */
      use_stmt = vect_single_imm_use (stmt);
      if (!use_stmt || !is_gimple_assign (use_stmt))
	return NULL;

      /* Create pattern statement for STMT.  */
      vectype = get_vectype_for_scalar_type (new_type);
      if (!vectype)
	return NULL;

      /* We want to collect all the statements for which we create pattern
	 statements, except for the case when the last statement in the
	 sequence doesn't have a corresponding pattern statement.  In such
	 case we associate the last pattern statement with the last statement
	 in the sequence.  Therefore, we only add the original statement to
	 the list if we know that it is not the last.  */
      if (prev_stmt)
	stmts->safe_push (prev_stmt);

      var = vect_recog_temp_ssa_var (new_type, NULL);
      pattern_stmt
	= gimple_build_assign (var, gimple_assign_rhs_code (stmt), op0, op1);
      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)) = pattern_stmt;
      new_pattern_def_seq (vinfo_for_stmt (stmt), new_def_stmt);

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "created pattern stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
	  dump_printf (MSG_NOTE, "\n");
	}

      type = gimple_expr_type (stmt);
      prev_stmt = stmt;
      stmt = use_stmt;

      first = false;
    }

  /* We got a sequence.  We expect it to end with a type demotion operation.
     Otherwise, we quit (for now).  There are three possible cases: the
     conversion is to NEW_TYPE (we don't do anything), the conversion is to
     a type bigger than NEW_TYPE and/or the signedness of USE_TYPE and
     NEW_TYPE differs (we create a new conversion statement).  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_stmt)))
    {
      use_lhs = gimple_assign_lhs (use_stmt);
      use_type = TREE_TYPE (use_lhs);
      /* Support only type demotion or signedness change.  */
      if (!INTEGRAL_TYPE_P (use_type)
	  || TYPE_PRECISION (type) <= TYPE_PRECISION (use_type))
	return NULL;

      /* Check that NEW_TYPE is not bigger than the conversion result.  */
      if (TYPE_PRECISION (new_type) > TYPE_PRECISION (use_type))
	return NULL;

      if (TYPE_UNSIGNED (new_type) != TYPE_UNSIGNED (use_type)
	  || TYPE_PRECISION (new_type) != TYPE_PRECISION (use_type))
	{
	  /* Create NEW_TYPE->USE_TYPE conversion.  */
	  new_oprnd = make_ssa_name (use_type);
	  pattern_stmt = gimple_build_assign (new_oprnd, NOP_EXPR, var);
	  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt)) = pattern_stmt;

	  *type_in = get_vectype_for_scalar_type (new_type);
	  *type_out = get_vectype_for_scalar_type (use_type);

	  /* We created a pattern statement for the last statement in the
	     sequence, so we don't need to associate it with the pattern
	     statement created for PREV_STMT.  Therefore, we add PREV_STMT
	     to the list in order to mark it later in vect_pattern_recog_1.  */
	  if (prev_stmt)
	    stmts->safe_push (prev_stmt);
	}
      else
	{
	  if (prev_stmt)
	    STMT_VINFO_PATTERN_DEF_SEQ (vinfo_for_stmt (use_stmt))
	       = STMT_VINFO_PATTERN_DEF_SEQ (vinfo_for_stmt (prev_stmt));

	  *type_in = vectype;
	  *type_out = NULL_TREE;
	}

      stmts->safe_push (use_stmt);
    }
  else
    /* TODO: support general case, create a conversion to the correct type.  */
    return NULL;

  /* Pattern detected.  */
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_recog_over_widening_pattern: detected: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  return pattern_stmt;
}
/* Detect widening shift pattern:

   type a_t;
   TYPE a_T, res_T;

   S2 a_T = (TYPE) a_t;
   S3 res_T = a_T << CONST;

  where type 'TYPE' is at least double the size of type 'type'.

  Also detect cases where the shift result is immediately converted
  to another type 'result_type' that is no larger in size than 'TYPE'.
  In those cases we perform a widen-shift that directly results in
  'result_type', to avoid a possible over-widening situation:

  type a_t;
  TYPE a_T, res_T;
  result_type res_result;

  S2 a_T = (TYPE) a_t;
  S3 res_T = a_T << CONST;
  S4 res_result = (result_type) res_T;
      '--> res_result' = a_t w<< CONST;

  And a case when 'TYPE' is 4 times bigger than 'type'.  In that case we
  create an additional pattern stmt for S2 to create a variable of an
  intermediate type, and perform widen-shift on the intermediate type:

  type a_t;
  interm_type a_it;
  TYPE a_T, res_T, res_T';

  S2 a_T = (TYPE) a_t;
      '--> a_it = (interm_type) a_t;
  S3 res_T = a_T << CONST;
      '--> res_T' = a_it <<* CONST;

  Input/Output:

  * STMTS: Contains a stmt from which the pattern search begins.
    In case of unsigned widen-shift, the original stmt (S3) is replaced with S4
    in STMTS.  When an intermediate type is used and a pattern statement is
    created for S2, we also put S2 here (before S3).

  Output:

  * TYPE_IN: The type of the input arguments to the pattern.

  * TYPE_OUT: The type of the output of this pattern.

  * Return value: A new stmt that will be used to replace the sequence of
    stmts that constitute the pattern.  In this case it will be:
    WIDEN_LSHIFT_EXPR <a_t, CONST>.  */
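
/* Illustrative sketch only: a source loop with the widening shift idiom
   (the names are invented for the example).

     unsigned short in[N];
     unsigned int out[N];
     for (int i = 0; i < N; i++)
       out[i] = (unsigned int) in[i] << 8;	// S2-S3 above

   Targets with widening shift instructions can then avoid unpacking IN to
   full-width ints before shifting.  */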
static gimple *
vect_recog_widen_shift_pattern (vec<gimple *> *stmts,
				tree *type_in, tree *type_out)
{
  gimple *last_stmt = stmts->pop ();
  gimple *def_stmt0;
  tree oprnd0, oprnd1;
  tree type, half_type0;
  gimple *pattern_stmt;
  tree vectype, vectype_out = NULL_TREE;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  vec<tree> dummy_vec;
  gimple *use_stmt;
  bool promotion;

  if (!is_gimple_assign (last_stmt) || !vinfo_for_stmt (last_stmt))
    return NULL;

  if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (last_stmt)))
    return NULL;

  if (gimple_assign_rhs_code (last_stmt) != LSHIFT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (TREE_CODE (oprnd0) != SSA_NAME || TREE_CODE (oprnd1) != INTEGER_CST)
    return NULL;

  /* Check operand 0: it has to be defined by a type promotion.  */
  if (!type_conversion_p (oprnd0, last_stmt, false, &half_type0, &def_stmt0,
			  &promotion)
      || !promotion)
    return NULL;

  /* Check operand 1: has to be positive.  We check that it fits the type
     in vect_handle_widen_op_by_const ().  */
  if (tree_int_cst_compare (oprnd1, size_zero_node) <= 0)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (def_stmt0);
  type = gimple_expr_type (last_stmt);

  /* Check for subsequent conversion to another type.  */
  use_stmt = vect_single_imm_use (last_stmt);
  if (use_stmt && is_gimple_assign (use_stmt)
      && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_stmt))
      && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
    {
      tree use_lhs = gimple_assign_lhs (use_stmt);
      tree use_type = TREE_TYPE (use_lhs);

      if (INTEGRAL_TYPE_P (use_type)
	  && TYPE_PRECISION (use_type) <= TYPE_PRECISION (type))
	{
	  last_stmt = use_stmt;
	  type = use_type;
	}
    }

  /* Check if this is a widening operation.  */
  gimple *wstmt = NULL;
  if (!vect_handle_widen_op_by_const (last_stmt, LSHIFT_EXPR, oprnd1,
				      &oprnd0, &wstmt,
				      type, &half_type0, def_stmt0))
    return NULL;

  /* Pattern detected.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_recog_widen_shift_pattern: detected:\n");

  /* Check target support.  */
  vectype = get_vectype_for_scalar_type (half_type0);
  vectype_out = get_vectype_for_scalar_type (type);

  if (!vectype
      || !vectype_out
      || !supportable_widening_operation (WIDEN_LSHIFT_EXPR, last_stmt,
					  vectype_out, vectype,
					  &dummy_code, &dummy_code,
					  &dummy_int, &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = vectype_out;

  /* Pattern supported.  Create a stmt to be used to replace the pattern.  */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt
    = gimple_build_assign (var, WIDEN_LSHIFT_EXPR, oprnd0, oprnd1);
  if (wstmt)
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
      new_pattern_def_seq (stmt_vinfo, wstmt);
      stmt_vec_info new_stmt_info
	= new_stmt_vec_info (wstmt, stmt_vinfo->vinfo);
      set_vinfo_for_stmt (wstmt, new_stmt_info);
      STMT_VINFO_VECTYPE (new_stmt_info) = vectype;
    }

  if (dump_enabled_p ())
    dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);

  stmts->safe_push (last_stmt);
  return pattern_stmt;
}
/* Detect a rotate pattern that wouldn't be otherwise vectorized:

   type a_t, b_t, c_t;

   S0 a_t = b_t r<< c_t;

  Input/Output:

  * STMTS: Contains a stmt from which the pattern search begins,
    i.e. the shift/rotate stmt.  The original stmt (S0) is replaced
    with a sequence:

    S1 d_t = -c_t;
    S2 e_t = d_t & (B - 1);
    S3 f_t = b_t << c_t;
    S4 g_t = b_t >> e_t;
    S0 a_t = f_t | g_t;

    where B is element bitsize of type.

  Output:

  * TYPE_IN: The type of the input arguments to the pattern.

  * TYPE_OUT: The type of the output of this pattern.

  * Return value: A new stmt that will be used to replace the rotate
    S0 stmt.  */
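
/* Illustrative sketch only: the rewrite for a left-rotate of a 32-bit
   unsigned value (B == 32; the names are invented for the example).

     a = b r<< c;

   becomes

     d = -c;
     e = d & 31;	// e == (32 - c) & 31
     f = b << c;
     g = b >> e;
     a = f | g;

   which is correct for any c in [0, 31], because the negated shift count is
   masked to the element width, and needs only vector shifts and ior.  */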
static gimple *
vect_recog_rotate_pattern (vec<gimple *> *stmts, tree *type_in, tree *type_out)
{
  gimple *last_stmt = stmts->pop ();
  tree oprnd0, oprnd1, lhs, var, var1, var2, vectype, type, stype, def, def2;
  gimple *pattern_stmt, *def_stmt;
  enum tree_code rhs_code;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  vec_info *vinfo = stmt_vinfo->vinfo;
  enum vect_def_type dt;
  optab optab1, optab2;
  edge ext_def = NULL;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  rhs_code = gimple_assign_rhs_code (last_stmt);
  switch (rhs_code)
    {
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      break;
    default:
      return NULL;
    }

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    return NULL;

  lhs = gimple_assign_lhs (last_stmt);
  oprnd0 = gimple_assign_rhs1 (last_stmt);
  type = TREE_TYPE (oprnd0);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (TREE_CODE (oprnd0) != SSA_NAME
      || TYPE_PRECISION (TREE_TYPE (lhs)) != TYPE_PRECISION (type)
      || !INTEGRAL_TYPE_P (type)
      || !TYPE_UNSIGNED (type))
    return NULL;

  if (!vect_is_simple_use (oprnd1, vinfo, &def_stmt, &dt))
    return NULL;

  if (dt != vect_internal_def
      && dt != vect_constant_def
      && dt != vect_external_def)
    return NULL;

  vectype = get_vectype_for_scalar_type (type);
  if (vectype == NULL_TREE)
    return NULL;

  /* If vector/vector or vector/scalar rotate is supported by the target,
     don't do anything here.  */
  optab1 = optab_for_tree_code (rhs_code, vectype, optab_vector);
  if (optab1
      && optab_handler (optab1, TYPE_MODE (vectype)) != CODE_FOR_nothing)
    return NULL;

  if (is_a <bb_vec_info> (vinfo) || dt != vect_internal_def)
    {
      optab2 = optab_for_tree_code (rhs_code, vectype, optab_scalar);
      if (optab2
	  && optab_handler (optab2, TYPE_MODE (vectype)) != CODE_FOR_nothing)
	return NULL;
    }

  /* If vector/vector or vector/scalar shifts aren't supported by the target,
     don't do anything here either.  */
  optab1 = optab_for_tree_code (LSHIFT_EXPR, vectype, optab_vector);
  optab2 = optab_for_tree_code (RSHIFT_EXPR, vectype, optab_vector);
  if (!optab1
      || optab_handler (optab1, TYPE_MODE (vectype)) == CODE_FOR_nothing
      || !optab2
      || optab_handler (optab2, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      if (! is_a <bb_vec_info> (vinfo) && dt == vect_internal_def)
	return NULL;
      optab1 = optab_for_tree_code (LSHIFT_EXPR, vectype, optab_scalar);
      optab2 = optab_for_tree_code (RSHIFT_EXPR, vectype, optab_scalar);
      if (!optab1
	  || optab_handler (optab1, TYPE_MODE (vectype)) == CODE_FOR_nothing
	  || !optab2
	  || optab_handler (optab2, TYPE_MODE (vectype)) == CODE_FOR_nothing)
	return NULL;
    }

  *type_in = vectype;
  *type_out = vectype;
  if (*type_in == NULL_TREE)
    return NULL;

  if (dt == vect_external_def
      && TREE_CODE (oprnd1) == SSA_NAME
      && is_a <loop_vec_info> (vinfo))
    {
      struct loop *loop = as_a <loop_vec_info> (vinfo)->loop;
      ext_def = loop_preheader_edge (loop);
      if (!SSA_NAME_IS_DEFAULT_DEF (oprnd1))
	{
	  basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (oprnd1));
	  if (bb == NULL
	      || !dominated_by_p (CDI_DOMINATORS, ext_def->dest, bb))
	    ext_def = NULL;
	}
    }

  def = NULL_TREE;
  if (TREE_CODE (oprnd1) == INTEGER_CST
      || TYPE_MODE (TREE_TYPE (oprnd1)) == TYPE_MODE (type))
    def = oprnd1;
  else if (def_stmt && gimple_assign_cast_p (def_stmt))
    {
      tree rhs1 = gimple_assign_rhs1 (def_stmt);
      if (TYPE_MODE (TREE_TYPE (rhs1)) == TYPE_MODE (type)
	  && TYPE_PRECISION (TREE_TYPE (rhs1))
	     == TYPE_PRECISION (type))
	def = rhs1;
    }

  STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo) = NULL;
  if (def == NULL_TREE)
    {
      def = vect_recog_temp_ssa_var (type, NULL);
      def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd1);
      if (ext_def)
	{
	  basic_block new_bb
	    = gsi_insert_on_edge_immediate (ext_def, def_stmt);
	  gcc_assert (!new_bb);
	}
      else
	append_pattern_def_seq (stmt_vinfo, def_stmt);
    }
  stype = TREE_TYPE (def);

  if (TREE_CODE (def) == INTEGER_CST)
    {
      if (!tree_fits_uhwi_p (def)
	  || tree_to_uhwi (def) >= GET_MODE_PRECISION (TYPE_MODE (type))
	  || integer_zerop (def))
	return NULL;
      def2 = build_int_cst (stype,
			    GET_MODE_PRECISION (TYPE_MODE (type))
			    - tree_to_uhwi (def));
    }
  else
    {
      tree vecstype = get_vectype_for_scalar_type (stype);
      stmt_vec_info def_stmt_vinfo;

      if (vecstype == NULL_TREE)
	return NULL;
      def2 = vect_recog_temp_ssa_var (stype, NULL);
      def_stmt = gimple_build_assign (def2, NEGATE_EXPR, def);
      if (ext_def)
	{
	  basic_block new_bb
	    = gsi_insert_on_edge_immediate (ext_def, def_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  def_stmt_vinfo = new_stmt_vec_info (def_stmt, vinfo);
	  set_vinfo_for_stmt (def_stmt, def_stmt_vinfo);
	  STMT_VINFO_VECTYPE (def_stmt_vinfo) = vecstype;
	  append_pattern_def_seq (stmt_vinfo, def_stmt);
	}

      def2 = vect_recog_temp_ssa_var (stype, NULL);
      tree mask
	= build_int_cst (stype, GET_MODE_PRECISION (TYPE_MODE (stype)) - 1);
      def_stmt = gimple_build_assign (def2, BIT_AND_EXPR,
				      gimple_assign_lhs (def_stmt), mask);
      if (ext_def)
	{
	  basic_block new_bb
	    = gsi_insert_on_edge_immediate (ext_def, def_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  def_stmt_vinfo = new_stmt_vec_info (def_stmt, vinfo);
	  set_vinfo_for_stmt (def_stmt, def_stmt_vinfo);
	  STMT_VINFO_VECTYPE (def_stmt_vinfo) = vecstype;
	  append_pattern_def_seq (stmt_vinfo, def_stmt);
	}
    }

  var1 = vect_recog_temp_ssa_var (type, NULL);
  def_stmt = gimple_build_assign (var1, rhs_code == LROTATE_EXPR
					? LSHIFT_EXPR : RSHIFT_EXPR,
				  oprnd0, def);
  append_pattern_def_seq (stmt_vinfo, def_stmt);

  var2 = vect_recog_temp_ssa_var (type, NULL);
  def_stmt = gimple_build_assign (var2, rhs_code == LROTATE_EXPR
					? RSHIFT_EXPR : LSHIFT_EXPR,
				  oprnd0, def2);
  append_pattern_def_seq (stmt_vinfo, def_stmt);

  /* Pattern detected.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_recog_rotate_pattern: detected:\n");

  /* Pattern supported.  Create a stmt to be used to replace the pattern.  */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign (var, BIT_IOR_EXPR, var1, var2);

  if (dump_enabled_p ())
    dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);

  stmts->safe_push (last_stmt);
  return pattern_stmt;
}
/* Detect a vector by vector shift pattern that wouldn't be otherwise
   vectorized:

   type a_t;
   TYPE b_T, res_T;

   S1 a_t = ;
   S2 b_T = ;
   S3 res_T = b_T op a_t;

  where type 'TYPE' is a type with different size than 'type',
  and op is <<, >> or rotate.

  Also detect cases:

   type a_t;
   TYPE b_T, c_T, res_T;

   S0 c_T = ;
   S1 a_t = (type) c_T;
   S2 b_T = ;
   S3 res_T = b_T op a_t;

  Input/Output:

  * STMTS: Contains a stmt from which the pattern search begins,
    i.e. the shift/rotate stmt.  The original stmt (S3) is replaced
    with a shift/rotate which has same type on both operands, in the
    second case just b_T op c_T, in the first case with added cast
    from a_t to c_T in STMT_VINFO_PATTERN_DEF_SEQ.

  Output:

  * TYPE_IN: The type of the input arguments to the pattern.

  * TYPE_OUT: The type of the output of this pattern.

  * Return value: A new stmt that will be used to replace the shift/rotate
    S3 stmt.  */
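
/* Illustrative sketch only: a source loop whose shift count has a different
   type than the shifted value (the names are invented for the example).

     long long b[N];
     int a[N];
     for (int i = 0; i < N; i++)
       b[i] = b[i] << a[i];	// S3 above: the count is narrower than b

   Vector shift instructions want both operands in the same vector type, so
   the count is cast (and, when it came from a narrowing cast, masked) to
   the type of the shifted operand.  */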
static gimple *
vect_recog_vector_vector_shift_pattern (vec<gimple *> *stmts,
					tree *type_in, tree *type_out)
{
  gimple *last_stmt = stmts->pop ();
  tree oprnd0, oprnd1, lhs, var;
  gimple *pattern_stmt, *def_stmt;
  enum tree_code rhs_code;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  vec_info *vinfo = stmt_vinfo->vinfo;
  enum vect_def_type dt;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  rhs_code = gimple_assign_rhs_code (last_stmt);
  switch (rhs_code)
    {
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      break;
    default:
      return NULL;
    }

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    return NULL;

  lhs = gimple_assign_lhs (last_stmt);
  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (TREE_CODE (oprnd0) != SSA_NAME
      || TREE_CODE (oprnd1) != SSA_NAME
      || TYPE_MODE (TREE_TYPE (oprnd0)) == TYPE_MODE (TREE_TYPE (oprnd1))
      || TYPE_PRECISION (TREE_TYPE (oprnd1))
	 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (oprnd1)))
      || TYPE_PRECISION (TREE_TYPE (lhs))
	 != TYPE_PRECISION (TREE_TYPE (oprnd0)))
    return NULL;

  if (!vect_is_simple_use (oprnd1, vinfo, &def_stmt, &dt))
    return NULL;

  if (dt != vect_internal_def)
    return NULL;

  *type_in = get_vectype_for_scalar_type (TREE_TYPE (oprnd0));
  *type_out = *type_in;
  if (*type_in == NULL_TREE)
    return NULL;

  tree def = NULL_TREE;
  stmt_vec_info def_vinfo = vinfo_for_stmt (def_stmt);
  if (!STMT_VINFO_IN_PATTERN_P (def_vinfo) && gimple_assign_cast_p (def_stmt))
    {
      tree rhs1 = gimple_assign_rhs1 (def_stmt);
      if (TYPE_MODE (TREE_TYPE (rhs1)) == TYPE_MODE (TREE_TYPE (oprnd0))
	  && TYPE_PRECISION (TREE_TYPE (rhs1))
	     == TYPE_PRECISION (TREE_TYPE (oprnd0)))
	{
	  if (TYPE_PRECISION (TREE_TYPE (oprnd1))
	      >= TYPE_PRECISION (TREE_TYPE (rhs1)))
	    def = rhs1;
	  else
	    {
	      tree mask
		= build_low_bits_mask (TREE_TYPE (rhs1),
				       TYPE_PRECISION (TREE_TYPE (oprnd1)));
	      def = vect_recog_temp_ssa_var (TREE_TYPE (rhs1), NULL);
	      def_stmt = gimple_build_assign (def, BIT_AND_EXPR, rhs1, mask);
	      new_pattern_def_seq (stmt_vinfo, def_stmt);
	    }
	}
    }

  if (def == NULL_TREE)
    {
      def = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL);
      def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd1);
      new_pattern_def_seq (stmt_vinfo, def_stmt);
    }

  /* Pattern detected.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_recog_vector_vector_shift_pattern: detected:\n");

  /* Pattern supported.  Create a stmt to be used to replace the pattern.  */
  var = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL);
  pattern_stmt = gimple_build_assign (var, rhs_code, oprnd0, def);

  if (dump_enabled_p ())
    dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);

  stmts->safe_push (last_stmt);
  return pattern_stmt;
}
/* Detect multiplication by constants that are positive or negative
   powers of 2, and convert them to shift patterns.

   Mult with constants that are a positive power of two.
   type a_t;
   type b_t
   S1: b_t = a_t * n

   or

   Mult with constants that are a negative power of two.
   S2: b_t = a_t * -n

   Input/Output:

   STMTS: Contains a stmt from which the pattern search begins,
   i.e. the mult stmt.  Convert the mult operation to LSHIFT if
   constant operand is a power of 2.
   type a_t, b_t
   S1': b_t = a_t << log2 (n)

   Convert the mult operation to an LSHIFT followed by a NEGATE
   if constant operand is a negative power of 2.
   type a_t, b_t, res_T;
   S2': b_t = a_t << log2 (n)
   S3': res_T = - (b_t)

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the multiplication
     S1 or S2 stmt.  */
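
/* Example (an illustrative sketch, not part of the original file):

     void f (int *a, int n)
     {
       for (int i = 0; i < n; i++)
	 a[i] = a[i] * 8;
     }

   On a target with vector shifts but no vector multiply, the multiply
   becomes a[i] << 3.  With a multiplier of -8 the recognizer instead
   emits the shift followed by a NEGATE_EXPR, i.e. -(a[i] << 3).  */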
static gimple *
vect_recog_mult_pattern (vec<gimple *> *stmts,
			 tree *type_in, tree *type_out)
{
  gimple *last_stmt = stmts->pop ();
  tree oprnd0, oprnd1, vectype, itype;
  gimple *pattern_stmt, *def_stmt;
  optab optab;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  int power2_val, power2_neg_val;
  tree shift;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  itype = TREE_TYPE (oprnd0);

  if (TREE_CODE (oprnd0) != SSA_NAME
      || TREE_CODE (oprnd1) != INTEGER_CST
      || !INTEGRAL_TYPE_P (itype)
      || TYPE_PRECISION (itype) != GET_MODE_PRECISION (TYPE_MODE (itype)))
    return NULL;

  vectype = get_vectype_for_scalar_type (itype);
  if (vectype == NULL_TREE)
    return NULL;

  /* If the target can handle vectorized multiplication natively,
     don't attempt to optimize this.  */
  optab = optab_for_tree_code (MULT_EXPR, vectype, optab_default);
  if (optab != unknown_optab)
    {
      machine_mode vec_mode = TYPE_MODE (vectype);
      int icode = (int) optab_handler (optab, vec_mode);
      if (icode != CODE_FOR_nothing)
	return NULL;
    }

  /* If target cannot handle vector left shift then we cannot
     optimize and bail out.  */
  optab = optab_for_tree_code (LSHIFT_EXPR, vectype, optab_vector);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    return NULL;

  power2_val = wi::exact_log2 (oprnd1);
  power2_neg_val = wi::exact_log2 (wi::neg (oprnd1));

  /* Handle constant operands that are positive or negative powers of 2.  */
  if (power2_val != -1)
    {
      shift = build_int_cst (itype, power2_val);
      pattern_stmt
	= gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
			       LSHIFT_EXPR, oprnd0, shift);
    }
  else if (power2_neg_val != -1)
    {
      /* If the target cannot handle vector NEGATE then we cannot
	 do the optimization.  */
      optab = optab_for_tree_code (NEGATE_EXPR, vectype, optab_vector);
      if (!optab
	  || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
	return NULL;

      shift = build_int_cst (itype, power2_neg_val);
      def_stmt
	= gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
			       LSHIFT_EXPR, oprnd0, shift);
      new_pattern_def_seq (stmt_vinfo, def_stmt);
      pattern_stmt
	= gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
			       NEGATE_EXPR, gimple_assign_lhs (def_stmt));
    }
  else
    return NULL;

  /* Pattern detected.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_recog_mult_pattern: detected:\n");

  if (dump_enabled_p ())
    dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM,
			  pattern_stmt, 0);

  stmts->safe_push (last_stmt);
  *type_in = vectype;
  *type_out = vectype;

  return pattern_stmt;
}
/* Detect a signed division by a constant that wouldn't be
   otherwise vectorized:

   type a_t, b_t;

   S1 a_t = b_t / N;

   where type 'type' is an integral type and N is a constant.

   Similarly handle modulo by a constant:

   S4 a_t = b_t % N;

   Input/Output:

   * STMTS: Contains a stmt from which the pattern search begins,
     i.e. the division stmt.  S1 is replaced by if N is a power
     of two constant and type is signed:
   S3  y_t = b_t < 0 ? N - 1 : 0;
   S2  x_t = b_t + y_t;
   S1' a_t = x_t >> log2 (N);

   S4 is replaced if N is a power of two constant and
   type is signed by (where *_T temporaries have unsigned type):
   S9  y_T = b_t < 0 ? -1U : 0U;
   S8  z_T = y_T >> (sizeof (type_t) * CHAR_BIT - log2 (N));
   S7  z_t = (type) z_T;
   S6  w_t = b_t + z_t;
   S5  x_t = w_t & (N - 1);
   S4' a_t = x_t - z_t;

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the division
     S1 or modulo S4 stmt.  */
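
/* Example (an illustrative sketch, not part of the original file): for

     void f (int *a, int n)
     {
       for (int i = 0; i < n; i++)
	 a[i] = a[i] / 4;
     }

   N = 4 is a power of two and the type is signed, so S1 above becomes
   the round-toward-zero sequence

     y = a[i] < 0 ? 3 : 0;
     x = a[i] + y;
     a[i] = x >> 2;

   For non-power-of-two N the multiply-highpart paths below are used
   instead, mirroring the scalar choose_multiplier technique.  */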
static gimple *
vect_recog_divmod_pattern (vec<gimple *> *stmts,
			   tree *type_in, tree *type_out)
{
  gimple *last_stmt = stmts->pop ();
  tree oprnd0, oprnd1, vectype, itype, cond;
  gimple *pattern_stmt, *def_stmt;
  enum tree_code rhs_code;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  vec_info *vinfo = stmt_vinfo->vinfo;
  optab optab;
  tree q;
  int dummy_int, prec;
  stmt_vec_info def_stmt_vinfo;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  rhs_code = gimple_assign_rhs_code (last_stmt);
  switch (rhs_code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:
      break;
    default:
      return NULL;
    }

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  itype = TREE_TYPE (oprnd0);
  if (TREE_CODE (oprnd0) != SSA_NAME
      || TREE_CODE (oprnd1) != INTEGER_CST
      || TREE_CODE (itype) != INTEGER_TYPE
      || TYPE_PRECISION (itype) != GET_MODE_PRECISION (TYPE_MODE (itype)))
    return NULL;

  vectype = get_vectype_for_scalar_type (itype);
  if (vectype == NULL_TREE)
    return NULL;

  /* If the target can handle vectorized division or modulo natively,
     don't attempt to optimize this.  */
  optab = optab_for_tree_code (rhs_code, vectype, optab_default);
  if (optab != unknown_optab)
    {
      machine_mode vec_mode = TYPE_MODE (vectype);
      int icode = (int) optab_handler (optab, vec_mode);
      if (icode != CODE_FOR_nothing)
	return NULL;
    }

  prec = TYPE_PRECISION (itype);
  if (integer_pow2p (oprnd1))
    {
      if (TYPE_UNSIGNED (itype) || tree_int_cst_sgn (oprnd1) != 1)
	return NULL;

      /* Pattern detected.  */
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_recog_divmod_pattern: detected:\n");

      cond = build2 (LT_EXPR, boolean_type_node, oprnd0,
		     build_int_cst (itype, 0));
      if (rhs_code == TRUNC_DIV_EXPR)
	{
	  tree var = vect_recog_temp_ssa_var (itype, NULL);
	  tree shift;
	  def_stmt
	    = gimple_build_assign (var, COND_EXPR, cond,
				   fold_build2 (MINUS_EXPR, itype, oprnd1,
						build_int_cst (itype, 1)),
				   build_int_cst (itype, 0));
	  new_pattern_def_seq (stmt_vinfo, def_stmt);
	  var = vect_recog_temp_ssa_var (itype, NULL);
	  def_stmt
	    = gimple_build_assign (var, PLUS_EXPR, oprnd0,
				   gimple_assign_lhs (def_stmt));
	  append_pattern_def_seq (stmt_vinfo, def_stmt);

	  shift = build_int_cst (itype, tree_log2 (oprnd1));
	  pattern_stmt
	    = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
				   RSHIFT_EXPR, var, shift);
	}
      else
	{
	  tree signmask;
	  STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo) = NULL;
	  if (compare_tree_int (oprnd1, 2) == 0)
	    {
	      signmask = vect_recog_temp_ssa_var (itype, NULL);
	      def_stmt = gimple_build_assign (signmask, COND_EXPR, cond,
					      build_int_cst (itype, 1),
					      build_int_cst (itype, 0));
	      append_pattern_def_seq (stmt_vinfo, def_stmt);
	    }
	  else
	    {
	      tree utype
		= build_nonstandard_integer_type (prec, 1);
	      tree vecutype = get_vectype_for_scalar_type (utype);
	      tree shift
		= build_int_cst (utype, GET_MODE_BITSIZE (TYPE_MODE (itype))
					- tree_log2 (oprnd1));
	      tree var = vect_recog_temp_ssa_var (utype, NULL);

	      def_stmt = gimple_build_assign (var, COND_EXPR, cond,
					      build_int_cst (utype, -1),
					      build_int_cst (utype, 0));
	      def_stmt_vinfo = new_stmt_vec_info (def_stmt, vinfo);
	      set_vinfo_for_stmt (def_stmt, def_stmt_vinfo);
	      STMT_VINFO_VECTYPE (def_stmt_vinfo) = vecutype;
	      append_pattern_def_seq (stmt_vinfo, def_stmt);
	      var = vect_recog_temp_ssa_var (utype, NULL);
	      def_stmt = gimple_build_assign (var, RSHIFT_EXPR,
					      gimple_assign_lhs (def_stmt),
					      shift);
	      def_stmt_vinfo = new_stmt_vec_info (def_stmt, vinfo);
	      set_vinfo_for_stmt (def_stmt, def_stmt_vinfo);
	      STMT_VINFO_VECTYPE (def_stmt_vinfo) = vecutype;
	      append_pattern_def_seq (stmt_vinfo, def_stmt);
	      signmask = vect_recog_temp_ssa_var (itype, NULL);
	      def_stmt
		= gimple_build_assign (signmask, NOP_EXPR, var);
	      append_pattern_def_seq (stmt_vinfo, def_stmt);
	    }
	  def_stmt
	    = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
				   PLUS_EXPR, oprnd0, signmask);
	  append_pattern_def_seq (stmt_vinfo, def_stmt);
	  def_stmt
	    = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
				   BIT_AND_EXPR, gimple_assign_lhs (def_stmt),
				   fold_build2 (MINUS_EXPR, itype, oprnd1,
						build_int_cst (itype, 1)));
	  append_pattern_def_seq (stmt_vinfo, def_stmt);

	  pattern_stmt
	    = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
				   MINUS_EXPR, gimple_assign_lhs (def_stmt),
				   signmask);
	}

      if (dump_enabled_p ())
	dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt,
			      0);

      stmts->safe_push (last_stmt);

      *type_in = vectype;
      *type_out = vectype;
      return pattern_stmt;
    }

  if (prec > HOST_BITS_PER_WIDE_INT
      || integer_zerop (oprnd1))
    return NULL;

  if (!can_mult_highpart_p (TYPE_MODE (vectype), TYPE_UNSIGNED (itype)))
    return NULL;

  STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo) = NULL;

  if (TYPE_UNSIGNED (itype))
    {
      unsigned HOST_WIDE_INT mh, ml;
      int pre_shift, post_shift;
      unsigned HOST_WIDE_INT d = (TREE_INT_CST_LOW (oprnd1)
				  & GET_MODE_MASK (TYPE_MODE (itype)));
      tree t1, t2, t3, t4;

      if (d >= ((unsigned HOST_WIDE_INT) 1 << (prec - 1)))
	/* FIXME: Can transform this into oprnd0 >= oprnd1 ? 1 : 0.  */
	return NULL;

      /* Find a suitable multiplier and right shift count
	 instead of multiplying with D.  */
      mh = choose_multiplier (d, prec, prec, &ml, &post_shift, &dummy_int);

      /* If the suggested multiplier is more than SIZE bits, we can do better
	 for even divisors, using an initial right shift.  */
      if (mh != 0 && (d & 1) == 0)
	{
	  pre_shift = floor_log2 (d & -d);
	  mh = choose_multiplier (d >> pre_shift, prec, prec - pre_shift,
				  &ml, &post_shift, &dummy_int);
	  gcc_assert (!mh);
	}
      else
	pre_shift = 0;

      if (mh != 0)
	{
	  if (post_shift - 1 >= prec)
	    return NULL;

	  /* t1 = oprnd0 h* ml;
	     t2 = oprnd0 - t1;
	     t3 = t2 >> 1;
	     t4 = t1 + t3;
	     q = t4 >> (post_shift - 1);  */
	  t1 = vect_recog_temp_ssa_var (itype, NULL);
	  def_stmt = gimple_build_assign (t1, MULT_HIGHPART_EXPR, oprnd0,
					  build_int_cst (itype, ml));
	  append_pattern_def_seq (stmt_vinfo, def_stmt);

	  t2 = vect_recog_temp_ssa_var (itype, NULL);
	  def_stmt
	    = gimple_build_assign (t2, MINUS_EXPR, oprnd0, t1);
	  append_pattern_def_seq (stmt_vinfo, def_stmt);

	  t3 = vect_recog_temp_ssa_var (itype, NULL);
	  def_stmt
	    = gimple_build_assign (t3, RSHIFT_EXPR, t2, integer_one_node);
	  append_pattern_def_seq (stmt_vinfo, def_stmt);

	  t4 = vect_recog_temp_ssa_var (itype, NULL);
	  def_stmt
	    = gimple_build_assign (t4, PLUS_EXPR, t1, t3);

	  if (post_shift != 1)
	    {
	      append_pattern_def_seq (stmt_vinfo, def_stmt);

	      q = vect_recog_temp_ssa_var (itype, NULL);
	      pattern_stmt
		= gimple_build_assign (q, RSHIFT_EXPR, t4,
				       build_int_cst (itype, post_shift - 1));
	    }
	  else
	    {
	      q = t4;
	      pattern_stmt = def_stmt;
	    }
	}
      else
	{
	  if (pre_shift >= prec || post_shift >= prec)
	    return NULL;

	  /* t1 = oprnd0 >> pre_shift;
	     t2 = t1 h* ml;
	     q = t2 >> post_shift;  */
	  if (pre_shift)
	    {
	      t1 = vect_recog_temp_ssa_var (itype, NULL);
	      def_stmt
		= gimple_build_assign (t1, RSHIFT_EXPR, oprnd0,
				       build_int_cst (NULL, pre_shift));
	      append_pattern_def_seq (stmt_vinfo, def_stmt);
	    }
	  else
	    t1 = oprnd0;

	  t2 = vect_recog_temp_ssa_var (itype, NULL);
	  def_stmt = gimple_build_assign (t2, MULT_HIGHPART_EXPR, t1,
					  build_int_cst (itype, ml));

	  if (post_shift)
	    {
	      append_pattern_def_seq (stmt_vinfo, def_stmt);

	      q = vect_recog_temp_ssa_var (itype, NULL);
	      pattern_stmt
		= gimple_build_assign (q, RSHIFT_EXPR, t2,
				       build_int_cst (itype, post_shift));
	    }
	  else
	    {
	      q = t2;
	      pattern_stmt = def_stmt;
	    }
	}
    }
  else
    {
      unsigned HOST_WIDE_INT ml;
      int post_shift;
      HOST_WIDE_INT d = TREE_INT_CST_LOW (oprnd1);
      unsigned HOST_WIDE_INT abs_d;
      bool add = false;
      tree t1, t2, t3, t4;

      /* Give up for -1.  */
      if (d == -1)
	return NULL;

      /* Since d might be INT_MIN, we have to cast to
	 unsigned HOST_WIDE_INT before negating to avoid
	 undefined signed overflow.  */
      abs_d = (d >= 0
	       ? (unsigned HOST_WIDE_INT) d
	       : - (unsigned HOST_WIDE_INT) d);

      /* n rem d = n rem -d */
      if (rhs_code == TRUNC_MOD_EXPR && d < 0)
	{
	  d = abs_d;
	  oprnd1 = build_int_cst (itype, abs_d);
	}
      else if (HOST_BITS_PER_WIDE_INT >= prec
	       && abs_d == (unsigned HOST_WIDE_INT) 1 << (prec - 1))
	/* This case is not handled correctly below.  */
	return NULL;

      choose_multiplier (abs_d, prec, prec - 1, &ml, &post_shift, &dummy_int);
      if (ml >= (unsigned HOST_WIDE_INT) 1 << (prec - 1))
	{
	  add = true;
	  ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
	}
      if (post_shift >= prec)
	return NULL;

      /* t1 = oprnd0 h* ml;  */
      t1 = vect_recog_temp_ssa_var (itype, NULL);
      def_stmt = gimple_build_assign (t1, MULT_HIGHPART_EXPR, oprnd0,
				      build_int_cst (itype, ml));

      if (add)
	{
	  /* t2 = t1 + oprnd0;  */
	  append_pattern_def_seq (stmt_vinfo, def_stmt);
	  t2 = vect_recog_temp_ssa_var (itype, NULL);
	  def_stmt = gimple_build_assign (t2, PLUS_EXPR, t1, oprnd0);
	}
      else
	t2 = t1;

      if (post_shift)
	{
	  /* t3 = t2 >> post_shift;  */
	  append_pattern_def_seq (stmt_vinfo, def_stmt);
	  t3 = vect_recog_temp_ssa_var (itype, NULL);
	  def_stmt = gimple_build_assign (t3, RSHIFT_EXPR, t2,
					  build_int_cst (itype, post_shift));
	}
      else
	t3 = t2;

      wide_int oprnd0_min, oprnd0_max;
      int msb = 1;
      if (get_range_info (oprnd0, &oprnd0_min, &oprnd0_max) == VR_RANGE)
	{
	  if (!wi::neg_p (oprnd0_min, TYPE_SIGN (itype)))
	    msb = 0;
	  else if (wi::neg_p (oprnd0_max, TYPE_SIGN (itype)))
	    msb = -1;
	}

      if (msb == 0 && d >= 0)
	{
	  /* q = t3;  */
	  q = t3;
	  pattern_stmt = def_stmt;
	}
      else
	{
	  /* t4 = oprnd0 >> (prec - 1);
	     or if we know from VRP that oprnd0 >= 0
	     t4 = 0;
	     or if we know from VRP that oprnd0 < 0
	     t4 = -1;  */
	  append_pattern_def_seq (stmt_vinfo, def_stmt);
	  t4 = vect_recog_temp_ssa_var (itype, NULL);
	  if (msb != 1)
	    def_stmt = gimple_build_assign (t4, INTEGER_CST,
					    build_int_cst (itype, msb));
	  else
	    def_stmt = gimple_build_assign (t4, RSHIFT_EXPR, oprnd0,
					    build_int_cst (itype, prec - 1));
	  append_pattern_def_seq (stmt_vinfo, def_stmt);

	  /* q = t3 - t4;  or q = t4 - t3;  */
	  q = vect_recog_temp_ssa_var (itype, NULL);
	  pattern_stmt = gimple_build_assign (q, MINUS_EXPR, d < 0 ? t4 : t3,
					      d < 0 ? t3 : t4);
	}
    }

  if (rhs_code == TRUNC_MOD_EXPR)
    {
      tree r, t1;

      /* We divided.  Now finish by:
	 t1 = q * oprnd1;
	 r = oprnd0 - t1;  */
      append_pattern_def_seq (stmt_vinfo, pattern_stmt);

      t1 = vect_recog_temp_ssa_var (itype, NULL);
      def_stmt = gimple_build_assign (t1, MULT_EXPR, q, oprnd1);
      append_pattern_def_seq (stmt_vinfo, def_stmt);

      r = vect_recog_temp_ssa_var (itype, NULL);
      pattern_stmt = gimple_build_assign (r, MINUS_EXPR, oprnd0, t1);
    }

  /* Pattern detected.  */
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_recog_divmod_pattern: detected: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  stmts->safe_push (last_stmt);

  *type_in = vectype;
  *type_out = vectype;
  return pattern_stmt;
}
/* Function vect_recog_mixed_size_cond_pattern

   Try to find the following pattern:

     type x_t, y_t;
     TYPE a_T, b_T, c_T;
   loop:
     S1  a_T = x_t CMP y_t ? b_T : c_T;

   where type 'TYPE' is an integral type which has different size
   from 'type'.  b_T and c_T are either constants (and if 'TYPE' is wider
   than 'type', the constants need to fit into an integer type
   with the same width as 'type') or results of conversion from 'type'.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the pattern.
	Additionally a def_stmt is added.

	a_it = x_t CMP y_t ? b_it : c_it;
	a_T = (TYPE) a_it;  */
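
/* Example (an illustrative sketch, not part of the original file):

     void f (short *x, short *y, long long *r, int n)
     {
       for (int i = 0; i < n; i++)
	 r[i] = x[i] < y[i] ? 17 : 42;
     }

   The comparison is on 16-bit elements but the result is 64-bit, so
   the COND_EXPR is first computed in a 16-bit integer type (17 and 42
   fit) and then widened, matching the a_it/a_T pair above.  */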
static gimple *
vect_recog_mixed_size_cond_pattern (vec<gimple *> *stmts, tree *type_in,
				    tree *type_out)
{
  gimple *last_stmt = (*stmts)[0];
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt), def_stmt_info;
  tree type, vectype, comp_vectype, itype = NULL_TREE, vecitype;
  gimple *pattern_stmt, *def_stmt;
  vec_info *vinfo = stmt_vinfo->vinfo;
  tree orig_type0 = NULL_TREE, orig_type1 = NULL_TREE;
  gimple *def_stmt0 = NULL, *def_stmt1 = NULL;
  bool promotion;
  tree comp_scalar_type;

  if (!is_gimple_assign (last_stmt)
      || gimple_assign_rhs_code (last_stmt) != COND_EXPR
      || STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
    return NULL;

  cond_expr = gimple_assign_rhs1 (last_stmt);
  then_clause = gimple_assign_rhs2 (last_stmt);
  else_clause = gimple_assign_rhs3 (last_stmt);

  if (!COMPARISON_CLASS_P (cond_expr))
    return NULL;

  comp_scalar_type = TREE_TYPE (TREE_OPERAND (cond_expr, 0));
  comp_vectype = get_vectype_for_scalar_type (comp_scalar_type);
  if (comp_vectype == NULL_TREE)
    return NULL;

  type = gimple_expr_type (last_stmt);
  if (types_compatible_p (type, comp_scalar_type)
      || ((TREE_CODE (then_clause) != INTEGER_CST
	   || TREE_CODE (else_clause) != INTEGER_CST)
	  && !INTEGRAL_TYPE_P (comp_scalar_type))
      || !INTEGRAL_TYPE_P (type))
    return NULL;

  if ((TREE_CODE (then_clause) != INTEGER_CST
       && !type_conversion_p (then_clause, last_stmt, false, &orig_type0,
			      &def_stmt0, &promotion))
      || (TREE_CODE (else_clause) != INTEGER_CST
	  && !type_conversion_p (else_clause, last_stmt, false, &orig_type1,
				 &def_stmt1, &promotion)))
    return NULL;

  if (orig_type0 && orig_type1
      && !types_compatible_p (orig_type0, orig_type1))
    return NULL;

  if (orig_type0)
    {
      if (!types_compatible_p (orig_type0, comp_scalar_type))
	return NULL;
      then_clause = gimple_assign_rhs1 (def_stmt0);
      itype = orig_type0;
    }

  if (orig_type1)
    {
      if (!types_compatible_p (orig_type1, comp_scalar_type))
	return NULL;
      else_clause = gimple_assign_rhs1 (def_stmt1);
      itype = orig_type1;
    }

  HOST_WIDE_INT cmp_mode_size
    = GET_MODE_UNIT_BITSIZE (TYPE_MODE (comp_vectype));

  if (GET_MODE_BITSIZE (TYPE_MODE (type)) == cmp_mode_size)
    return NULL;

  vectype = get_vectype_for_scalar_type (type);
  if (vectype == NULL_TREE)
    return NULL;

  if (expand_vec_cond_expr_p (vectype, comp_vectype))
    return NULL;

  if (itype == NULL_TREE)
    itype = build_nonstandard_integer_type (cmp_mode_size,
					    TYPE_UNSIGNED (type));

  if (itype == NULL_TREE
      || GET_MODE_BITSIZE (TYPE_MODE (itype)) != cmp_mode_size)
    return NULL;

  vecitype = get_vectype_for_scalar_type (itype);
  if (vecitype == NULL_TREE)
    return NULL;

  if (!expand_vec_cond_expr_p (vecitype, comp_vectype))
    return NULL;

  if (GET_MODE_BITSIZE (TYPE_MODE (type)) > cmp_mode_size)
    {
      if ((TREE_CODE (then_clause) == INTEGER_CST
	   && !int_fits_type_p (then_clause, itype))
	  || (TREE_CODE (else_clause) == INTEGER_CST
	      && !int_fits_type_p (else_clause, itype)))
	return NULL;
    }

  def_stmt = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
				  COND_EXPR, unshare_expr (cond_expr),
				  fold_convert (itype, then_clause),
				  fold_convert (itype, else_clause));
  pattern_stmt = gimple_build_assign (vect_recog_temp_ssa_var (type, NULL),
				      NOP_EXPR, gimple_assign_lhs (def_stmt));

  new_pattern_def_seq (stmt_vinfo, def_stmt);
  def_stmt_info = new_stmt_vec_info (def_stmt, vinfo);
  set_vinfo_for_stmt (def_stmt, def_stmt_info);
  STMT_VINFO_VECTYPE (def_stmt_info) = vecitype;
  *type_in = vecitype;
  *type_out = vectype;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_recog_mixed_size_cond_pattern: detected:\n");

  return pattern_stmt;
}
/* Helper function of vect_recog_bool_pattern.  Called recursively, return
   true if bool VAR can and should be optimized that way.  Assume it shouldn't
   in case it's a result of a comparison which can be directly vectorized into
   a vector comparison.  */
static bool
check_bool_pattern (tree var, vec_info *vinfo)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  tree rhs1;
  enum tree_code rhs_code;

  if (!vect_is_simple_use (var, vinfo, &def_stmt, &dt))
    return false;

  if (dt != vect_internal_def)
    return false;

  if (!is_gimple_assign (def_stmt))
    return false;

  if (!has_single_use (var))
    return false;

  rhs1 = gimple_assign_rhs1 (def_stmt);
  rhs_code = gimple_assign_rhs_code (def_stmt);
  switch (rhs_code)
    {
    case SSA_NAME:
      return check_bool_pattern (rhs1, vinfo);

    CASE_CONVERT:
      if ((TYPE_PRECISION (TREE_TYPE (rhs1)) != 1
	   || !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
	  && TREE_CODE (TREE_TYPE (rhs1)) != BOOLEAN_TYPE)
	return false;
      return check_bool_pattern (rhs1, vinfo);

    case BIT_NOT_EXPR:
      return check_bool_pattern (rhs1, vinfo);

    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      if (!check_bool_pattern (rhs1, vinfo))
	return false;
      return check_bool_pattern (gimple_assign_rhs2 (def_stmt), vinfo);

    default:
      if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
	{
	  tree vecitype, comp_vectype, mask_type;

	  /* If the comparison can throw, then is_gimple_condexpr will be
	     false and we can't make a COND_EXPR/VEC_COND_EXPR out of it.  */
	  if (stmt_could_throw_p (def_stmt))
	    return false;

	  comp_vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
	  if (comp_vectype == NULL_TREE)
	    return false;

	  mask_type = get_mask_type_for_scalar_type (TREE_TYPE (rhs1));
	  if (mask_type
	      && expand_vec_cmp_expr_p (comp_vectype, mask_type))
	    return false;

	  if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE)
	    {
	      machine_mode mode = TYPE_MODE (TREE_TYPE (rhs1));
	      tree itype
		= build_nonstandard_integer_type (GET_MODE_BITSIZE (mode), 1);
	      vecitype = get_vectype_for_scalar_type (itype);
	      if (vecitype == NULL_TREE)
		return false;
	    }
	  else
	    vecitype = comp_vectype;
	  return expand_vec_cond_expr_p (vecitype, comp_vectype);
	}
      return false;
    }
}
/* Helper function of adjust_bool_pattern.  Add a cast to TYPE to a previous
   stmt (SSA_NAME_DEF_STMT of VAR) by moving the COND_EXPR from RELATED_STMT
   to PATTERN_DEF_SEQ and adding a cast as RELATED_STMT.  */
static tree
adjust_bool_pattern_cast (tree type, tree var)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (SSA_NAME_DEF_STMT (var));
  gimple *cast_stmt, *pattern_stmt;

  gcc_assert (!STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo));
  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
  new_pattern_def_seq (stmt_vinfo, pattern_stmt);
  cast_stmt = gimple_build_assign (vect_recog_temp_ssa_var (type, NULL),
				   NOP_EXPR, gimple_assign_lhs (pattern_stmt));
  STMT_VINFO_RELATED_STMT (stmt_vinfo) = cast_stmt;
  return gimple_assign_lhs (cast_stmt);
}
/* Helper function of vect_recog_bool_pattern.  Do the actual transformations,
   recursively.  VAR is an SSA_NAME that should be transformed from bool
   to a wider integer type, OUT_TYPE is the desired final integer type of
   the whole pattern, TRUEVAL should be NULL unless optimizing
   BIT_AND_EXPR into a COND_EXPR with one integer from one of the operands
   in the then_clause, STMTS is where statements with added pattern stmts
   should be pushed to.  */
static tree
adjust_bool_pattern (tree var, tree out_type, tree trueval,
		     vec<gimple *> *stmts)
{
  gimple *stmt = SSA_NAME_DEF_STMT (var);
  enum tree_code rhs_code, def_rhs_code;
  tree itype, cond_expr, rhs1, rhs2, irhs1, irhs2;
  location_t loc;
  gimple *pattern_stmt, *def_stmt;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);
  rhs_code = gimple_assign_rhs_code (stmt);
  loc = gimple_location (stmt);
  switch (rhs_code)
    {
    case SSA_NAME:
    CASE_CONVERT:
      irhs1 = adjust_bool_pattern (rhs1, out_type, NULL_TREE, stmts);
      itype = TREE_TYPE (irhs1);
      pattern_stmt
	= gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
			       SSA_NAME, irhs1);
      break;

    case BIT_NOT_EXPR:
      irhs1 = adjust_bool_pattern (rhs1, out_type, NULL_TREE, stmts);
      itype = TREE_TYPE (irhs1);
      pattern_stmt
	= gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
			       BIT_XOR_EXPR, irhs1, build_int_cst (itype, 1));
      break;

    case BIT_AND_EXPR:
      /* Try to optimize x = y & (a < b ? 1 : 0); into
	 x = (a < b ? y : 0);

	 E.g. for:
	   bool a_b, b_b, c_b;
	   TYPE d_T;

	   S1  a_b = x1 CMP1 y1;
	   S2  b_b = x2 CMP2 y2;
	   S3  c_b = a_b & b_b;
	   S4  d_T = (TYPE) c_b;

	 we would normally emit:

	   S1'  a_T = x1 CMP1 y1 ? 1 : 0;
	   S2'  b_T = x2 CMP2 y2 ? 1 : 0;
	   S3'  c_T = a_T & b_T;
	   S4'  d_T = c_T;

	 but we can save one stmt by using the
	 result of one of the COND_EXPRs in the other COND_EXPR and leave
	 BIT_AND_EXPR stmt out:

	   S1'  a_T = x1 CMP1 y1 ? 1 : 0;
	   S3'  c_T = x2 CMP2 y2 ? a_T : 0;
	   S4'  d_T = c_T;

	 At least when VEC_COND_EXPR is implemented using masks
	 cond ? 1 : 0 is as expensive as cond ? var : 0, in both cases it
	 computes the comparison masks and ands it, in one case with
	 all ones vector, in the other case with a vector register.
	 Don't do this for BIT_IOR_EXPR, because cond ? 1 : var; is
	 often more expensive.  */
      def_stmt = SSA_NAME_DEF_STMT (rhs2);
      def_rhs_code = gimple_assign_rhs_code (def_stmt);
      if (TREE_CODE_CLASS (def_rhs_code) == tcc_comparison)
	{
	  tree def_rhs1 = gimple_assign_rhs1 (def_stmt);
	  irhs1 = adjust_bool_pattern (rhs1, out_type, NULL_TREE, stmts);
	  if (TYPE_PRECISION (TREE_TYPE (irhs1))
	      == GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (def_rhs1))))
	    {
	      gimple *tstmt;
	      stmt_vec_info stmt_def_vinfo = vinfo_for_stmt (def_stmt);
	      irhs2 = adjust_bool_pattern (rhs2, out_type, irhs1, stmts);
	      tstmt = stmts->pop ();
	      gcc_assert (tstmt == def_stmt);
	      stmts->quick_push (stmt);
	      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt))
		= STMT_VINFO_RELATED_STMT (stmt_def_vinfo);
	      gcc_assert (!STMT_VINFO_PATTERN_DEF_SEQ (stmt_def_vinfo));
	      STMT_VINFO_RELATED_STMT (stmt_def_vinfo) = NULL;
	      return irhs2;
	    }
	  else
	    irhs2 = adjust_bool_pattern (rhs2, out_type, NULL_TREE, stmts);
	  goto and_ior_xor;
	}
      def_stmt = SSA_NAME_DEF_STMT (rhs1);
      def_rhs_code = gimple_assign_rhs_code (def_stmt);
      if (TREE_CODE_CLASS (def_rhs_code) == tcc_comparison)
	{
	  tree def_rhs1 = gimple_assign_rhs1 (def_stmt);
	  irhs2 = adjust_bool_pattern (rhs2, out_type, NULL_TREE, stmts);
	  if (TYPE_PRECISION (TREE_TYPE (irhs2))
	      == GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (def_rhs1))))
	    {
	      gimple *tstmt;
	      stmt_vec_info stmt_def_vinfo = vinfo_for_stmt (def_stmt);
	      irhs1 = adjust_bool_pattern (rhs1, out_type, irhs2, stmts);
	      tstmt = stmts->pop ();
	      gcc_assert (tstmt == def_stmt);
	      stmts->quick_push (stmt);
	      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt))
		= STMT_VINFO_RELATED_STMT (stmt_def_vinfo);
	      gcc_assert (!STMT_VINFO_PATTERN_DEF_SEQ (stmt_def_vinfo));
	      STMT_VINFO_RELATED_STMT (stmt_def_vinfo) = NULL;
	      return irhs1;
	    }
	  else
	    irhs1 = adjust_bool_pattern (rhs1, out_type, NULL_TREE, stmts);
	  goto and_ior_xor;
	}
      /* FALLTHRU */
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      irhs1 = adjust_bool_pattern (rhs1, out_type, NULL_TREE, stmts);
      irhs2 = adjust_bool_pattern (rhs2, out_type, NULL_TREE, stmts);
    and_ior_xor:
      if (TYPE_PRECISION (TREE_TYPE (irhs1))
	  != TYPE_PRECISION (TREE_TYPE (irhs2)))
	{
	  int prec1 = TYPE_PRECISION (TREE_TYPE (irhs1));
	  int prec2 = TYPE_PRECISION (TREE_TYPE (irhs2));
	  int out_prec = TYPE_PRECISION (out_type);
	  if (absu_hwi (out_prec - prec1) < absu_hwi (out_prec - prec2))
	    irhs2 = adjust_bool_pattern_cast (TREE_TYPE (irhs1), rhs2);
	  else if (absu_hwi (out_prec - prec1) > absu_hwi (out_prec - prec2))
	    irhs1 = adjust_bool_pattern_cast (TREE_TYPE (irhs2), rhs1);
	  else
	    {
	      irhs1 = adjust_bool_pattern_cast (out_type, rhs1);
	      irhs2 = adjust_bool_pattern_cast (out_type, rhs2);
	    }
	}
      itype = TREE_TYPE (irhs1);
      pattern_stmt
	= gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
			       rhs_code, irhs1, irhs2);
      break;

    default:
      gcc_assert (TREE_CODE_CLASS (rhs_code) == tcc_comparison);
      if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE
	  || !TYPE_UNSIGNED (TREE_TYPE (rhs1))
	  || (TYPE_PRECISION (TREE_TYPE (rhs1))
	      != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1)))))
	{
	  machine_mode mode = TYPE_MODE (TREE_TYPE (rhs1));
	  itype
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (mode), 1);
	}
      else
	itype = TREE_TYPE (rhs1);
      cond_expr = build2_loc (loc, rhs_code, itype, rhs1, rhs2);
      if (trueval == NULL_TREE)
	trueval = build_int_cst (itype, 1);
      else
	gcc_checking_assert (useless_type_conversion_p (itype,
							TREE_TYPE (trueval)));
      pattern_stmt
	= gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
			       COND_EXPR, cond_expr, trueval,
			       build_int_cst (itype, 0));
      break;
    }

  stmts->safe_push (stmt);
  gimple_set_location (pattern_stmt, loc);
  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)) = pattern_stmt;
  return gimple_assign_lhs (pattern_stmt);
}
/* Return the proper type for converting bool VAR into
   an integer value or NULL_TREE if no such type exists.
   The type is chosen so that converted value has the
   same number of elements as VAR's vector type.  */
static tree
search_type_for_mask (tree var, vec_info *vinfo)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  tree rhs1;
  enum tree_code rhs_code;
  tree res = NULL_TREE, res2;

  if (TREE_CODE (var) != SSA_NAME)
    return NULL_TREE;

  if ((TYPE_PRECISION (TREE_TYPE (var)) != 1
       || !TYPE_UNSIGNED (TREE_TYPE (var)))
      && TREE_CODE (TREE_TYPE (var)) != BOOLEAN_TYPE)
    return NULL_TREE;

  if (!vect_is_simple_use (var, vinfo, &def_stmt, &dt))
    return NULL_TREE;

  if (dt != vect_internal_def)
    return NULL_TREE;

  if (!is_gimple_assign (def_stmt))
    return NULL_TREE;

  rhs_code = gimple_assign_rhs_code (def_stmt);
  rhs1 = gimple_assign_rhs1 (def_stmt);

  switch (rhs_code)
    {
    case SSA_NAME:
    case BIT_NOT_EXPR:
    CASE_CONVERT:
      res = search_type_for_mask (rhs1, vinfo);
      break;

    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      res = search_type_for_mask (rhs1, vinfo);
      res2 = search_type_for_mask (gimple_assign_rhs2 (def_stmt), vinfo);
      if (!res || (res2 && TYPE_PRECISION (res) > TYPE_PRECISION (res2)))
	res = res2;
      break;

    default:
      if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
	{
	  tree comp_vectype, mask_type;

	  if (TREE_CODE (TREE_TYPE (rhs1)) == BOOLEAN_TYPE)
	    {
	      res = search_type_for_mask (rhs1, vinfo);
	      res2 = search_type_for_mask (gimple_assign_rhs2 (def_stmt),
					   vinfo);
	      if (!res
		  || (res2 && TYPE_PRECISION (res) > TYPE_PRECISION (res2)))
		res = res2;
	      break;
	    }

	  comp_vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
	  if (comp_vectype == NULL_TREE)
	    return NULL_TREE;

	  mask_type = get_mask_type_for_scalar_type (TREE_TYPE (rhs1));
	  if (!mask_type
	      || !expand_vec_cmp_expr_p (comp_vectype, mask_type))
	    return NULL_TREE;

	  if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE
	      || !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
	    {
	      machine_mode mode = TYPE_MODE (TREE_TYPE (rhs1));
	      res = build_nonstandard_integer_type (GET_MODE_BITSIZE (mode),
						    1);
	    }
	  else
	    res = TREE_TYPE (rhs1);
	}
    }

  return res;
}
/* Function vect_recog_bool_pattern

   Try to find pattern like following:

     bool a_b, b_b, c_b, d_b, e_b;
     TYPE f_T;
   loop:
     S1  a_b = x1 CMP1 y1;
     S2  b_b = x2 CMP2 y2;
     S3  c_b = a_b & b_b;
     S4  d_b = x3 CMP3 y3;
     S5  e_b = c_b | d_b;
     S6  f_T = (TYPE) e_b;

   where type 'TYPE' is an integral type.  Or a similar pattern
   ending in

     S6  f_Y = e_b ? r_Y : s_Y;

   as results from if-conversion of a complex condition.

   Input:

   * LAST_STMT: A stmt at the end from which the pattern
		search begins, i.e. cast of a bool to
		an integer type.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the pattern.

	Assuming size of TYPE is the same as size of all comparisons
	(otherwise some casts would be added where needed), the above
	sequence we create related pattern stmts:
	S1'  a_T = x1 CMP1 y1 ? 1 : 0;
	S3'  c_T = x2 CMP2 y2 ? a_T : 0;
	S4'  d_T = x3 CMP3 y3 ? 1 : 0;
	S5'  e_T = c_T | d_T;
	S6'  f_T = e_T;

	Instead of the above S3' we could emit:
	S2'  b_T = x2 CMP2 y2 ? 1 : 0;
	S3'  c_T = a_T | b_T;
	but the above is more efficient.  */
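
/* Example (an illustrative sketch, not part of the original file):

     void f (int *x, int *y, int *out, int n)
     {
       for (int i = 0; i < n; i++)
	 out[i] = (x[i] < 3) | (y[i] > 7);
     }

   If-conversion yields bool temporaries for the two comparisons and
   the OR; this recognizer rewrites each bool as an integer COND_EXPR
   (cmp ? 1 : 0) of the width of 'out', which is what the S1'..S5'
   sequence in the comment above shows.  */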
static gimple *
vect_recog_bool_pattern (vec<gimple *> *stmts, tree *type_in,
			 tree *type_out)
{
  gimple *last_stmt = stmts->pop ();
  enum tree_code rhs_code;
  tree var, lhs, rhs, vectype;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  stmt_vec_info new_stmt_info;
  vec_info *vinfo = stmt_vinfo->vinfo;
  gimple *pattern_stmt;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  var = gimple_assign_rhs1 (last_stmt);
  lhs = gimple_assign_lhs (last_stmt);

  if ((TYPE_PRECISION (TREE_TYPE (var)) != 1
       || !TYPE_UNSIGNED (TREE_TYPE (var)))
      && TREE_CODE (TREE_TYPE (var)) != BOOLEAN_TYPE)
    return NULL;

  rhs_code = gimple_assign_rhs_code (last_stmt);
  if (CONVERT_EXPR_CODE_P (rhs_code))
    {
      if (TREE_CODE (TREE_TYPE (lhs)) != INTEGER_TYPE
	  || TYPE_PRECISION (TREE_TYPE (lhs)) == 1)
	return NULL;
      vectype = get_vectype_for_scalar_type (TREE_TYPE (lhs));
      if (vectype == NULL_TREE)
	return NULL;

      if (check_bool_pattern (var, vinfo))
	{
	  rhs = adjust_bool_pattern (var, TREE_TYPE (lhs), NULL_TREE, stmts);
	  lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
	  if (useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	    pattern_stmt = gimple_build_assign (lhs, SSA_NAME, rhs);
	  else
	    pattern_stmt
	      = gimple_build_assign (lhs, NOP_EXPR, rhs);
	}
      else
	{
	  tree type = search_type_for_mask (var, vinfo);
	  tree cst0, cst1, tmp;

	  if (!type)
	    return NULL;

	  /* We may directly use cond with narrowed type to avoid
	     multiple cond exprs with following result packing and
	     perform single cond with packed mask instead.  In case
	     of widening we better make cond first and then extract
	     results.  */
	  if (TYPE_MODE (type) == TYPE_MODE (TREE_TYPE (lhs)))
	    type = TREE_TYPE (lhs);

	  cst0 = build_int_cst (type, 0);
	  cst1 = build_int_cst (type, 1);
	  tmp = vect_recog_temp_ssa_var (type, NULL);
	  pattern_stmt = gimple_build_assign (tmp, COND_EXPR, var, cst1, cst0);

	  if (!useless_type_conversion_p (type, TREE_TYPE (lhs)))
	    {
	      tree new_vectype = get_vectype_for_scalar_type (type);
	      new_stmt_info = new_stmt_vec_info (pattern_stmt, vinfo);
	      set_vinfo_for_stmt (pattern_stmt, new_stmt_info);
	      STMT_VINFO_VECTYPE (new_stmt_info) = new_vectype;
	      new_pattern_def_seq (stmt_vinfo, pattern_stmt);

	      lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
	      pattern_stmt = gimple_build_assign (lhs, CONVERT_EXPR, tmp);
	    }
	}

      *type_out = vectype;
      *type_in = vectype;
      stmts->safe_push (last_stmt);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_recog_bool_pattern: detected:\n");

      return pattern_stmt;
    }
  else if (rhs_code == COND_EXPR
	   && TREE_CODE (var) == SSA_NAME)
    {
      vectype = get_vectype_for_scalar_type (TREE_TYPE (lhs));
      if (vectype == NULL_TREE)
	return NULL;

      /* Build a scalar type for the boolean result that when
	 vectorized matches the vector type of the result in
	 size and number of elements.  */
      unsigned prec
	= wi::udiv_trunc (TYPE_SIZE (vectype),
			  TYPE_VECTOR_SUBPARTS (vectype)).to_uhwi ();
      tree type
	= build_nonstandard_integer_type (prec,
					  TYPE_UNSIGNED (TREE_TYPE (var)));
      if (get_vectype_for_scalar_type (type) == NULL_TREE)
	return NULL;

      if (!check_bool_pattern (var, vinfo))
	return NULL;

      rhs = adjust_bool_pattern (var, type, NULL_TREE, stmts);

      lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
      pattern_stmt
	= gimple_build_assign (lhs, COND_EXPR,
			       build2 (NE_EXPR, boolean_type_node,
				       rhs, build_int_cst (type, 0)),
			       gimple_assign_rhs2 (last_stmt),
			       gimple_assign_rhs3 (last_stmt));
      *type_out = vectype;
      *type_in = vectype;
      stmts->safe_push (last_stmt);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_recog_bool_pattern: detected:\n");

      return pattern_stmt;
    }
  else if (rhs_code == SSA_NAME
	   && STMT_VINFO_DATA_REF (stmt_vinfo))
    {
      stmt_vec_info pattern_stmt_info;
      vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      gcc_assert (vectype != NULL_TREE);
      if (!VECTOR_MODE_P (TYPE_MODE (vectype)))
	return NULL;

      if (check_bool_pattern (var, vinfo))
	rhs = adjust_bool_pattern (var, TREE_TYPE (vectype),
				   NULL_TREE, stmts);
      else
	{
	  tree type = search_type_for_mask (var, vinfo);
	  tree cst0, cst1, new_vectype;

	  if (!type)
	    return NULL;

	  if (TYPE_MODE (type) == TYPE_MODE (TREE_TYPE (vectype)))
	    type = TREE_TYPE (vectype);

	  cst0 = build_int_cst (type, 0);
	  cst1 = build_int_cst (type, 1);
	  new_vectype = get_vectype_for_scalar_type (type);

	  rhs = vect_recog_temp_ssa_var (type, NULL);
	  pattern_stmt = gimple_build_assign (rhs, COND_EXPR, var, cst1, cst0);

	  pattern_stmt_info = new_stmt_vec_info (pattern_stmt, vinfo);
	  set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
	  STMT_VINFO_VECTYPE (pattern_stmt_info) = new_vectype;
	  append_pattern_def_seq (stmt_vinfo, pattern_stmt);
	}

      lhs = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vectype), lhs);
      if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	{
	  tree rhs2 = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
	  gimple *cast_stmt = gimple_build_assign (rhs2, NOP_EXPR, rhs);
	  append_pattern_def_seq (stmt_vinfo, cast_stmt);
	  rhs = rhs2;
	}
      pattern_stmt = gimple_build_assign (lhs, SSA_NAME, rhs);
      pattern_stmt_info = new_stmt_vec_info (pattern_stmt, vinfo);
      set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
      STMT_VINFO_DATA_REF (pattern_stmt_info)
	= STMT_VINFO_DATA_REF (stmt_vinfo);
      STMT_VINFO_DR_BASE_ADDRESS (pattern_stmt_info)
	= STMT_VINFO_DR_BASE_ADDRESS (stmt_vinfo);
      STMT_VINFO_DR_INIT (pattern_stmt_info) = STMT_VINFO_DR_INIT (stmt_vinfo);
      STMT_VINFO_DR_OFFSET (pattern_stmt_info)
	= STMT_VINFO_DR_OFFSET (stmt_vinfo);
      STMT_VINFO_DR_STEP (pattern_stmt_info) = STMT_VINFO_DR_STEP (stmt_vinfo);
      STMT_VINFO_DR_ALIGNED_TO (pattern_stmt_info)
	= STMT_VINFO_DR_ALIGNED_TO (stmt_vinfo);
      DR_STMT (STMT_VINFO_DATA_REF (stmt_vinfo)) = pattern_stmt;
      *type_out = vectype;
      *type_in = vectype;
      stmts->safe_push (last_stmt);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_recog_bool_pattern: detected:\n");
      return pattern_stmt;
    }

  return NULL;
}
/* A helper for vect_recog_mask_conversion_pattern.  Build
   conversion of MASK to a type suitable for masking VECTYPE.
   Built statement gets required vectype and is appended to
   a pattern sequence of STMT_VINFO.

   Return converted mask.  */
static tree
build_mask_conversion (tree mask, tree vectype, stmt_vec_info stmt_vinfo,
		       vec_info *vinfo)
{
  gimple *stmt;
  tree masktype, tmp;
  stmt_vec_info new_stmt_info;

  masktype = build_same_sized_truth_vector_type (vectype);
  tmp = vect_recog_temp_ssa_var (TREE_TYPE (masktype), NULL);
  stmt = gimple_build_assign (tmp, CONVERT_EXPR, mask);
  new_stmt_info = new_stmt_vec_info (stmt, vinfo);
  set_vinfo_for_stmt (stmt, new_stmt_info);
  STMT_VINFO_VECTYPE (new_stmt_info) = masktype;
  append_pattern_def_seq (stmt_vinfo, stmt);

  return tmp;
}
/* Function vect_recog_mask_conversion_pattern

   Try to find statements which require boolean type
   conversion.  Additional conversion statements are
   added to handle such cases.  For example, with masks
   m_1 and m_2 produced by comparisons of different widths:

   S3   m_3 = m_1 & m_2;
   S4   c_1 = m_3 ? c_2 : c_3;

   Will be transformed into:

   S3'' m_2' = (_Bool[bitsize=32])m_2
   S3'  m_3' = m_1 & m_2';
   S4'' m_3'' = (_Bool[bitsize=8])m_3'
   S4'  c_1' = m_3'' ? c_2 : c_3;  */
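
/* Example (an illustrative sketch, not part of the original file):

     void f (double *d, char *c, char *r, int n)
     {
       for (int i = 0; i < n; i++)
	 r[i] = (d[i] < 1.0) & (c[i] < 10) ? c[i] : 0;
     }

   The double comparison yields a wide mask while the char comparison
   yields a narrow one; the conversions inserted here give the
   BIT_AND_EXPR and the COND_EXPR operands with matching numbers of
   vector elements.  */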
static gimple *
vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_in,
				    tree *type_out)
{
  gimple *last_stmt = stmts->pop ();
  enum tree_code rhs_code;
  tree lhs, rhs1, rhs2, tmp, rhs1_type, rhs2_type, vectype1, vectype2;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  stmt_vec_info pattern_stmt_info;
  vec_info *vinfo = stmt_vinfo->vinfo;
  gimple *pattern_stmt;

  /* Check for MASK_LOAD and MASK_STORE calls requiring mask conversion.  */
  if (is_gimple_call (last_stmt)
      && gimple_call_internal_p (last_stmt)
      && (gimple_call_internal_fn (last_stmt) == IFN_MASK_STORE
	  || gimple_call_internal_fn (last_stmt) == IFN_MASK_LOAD))
    {
      bool load = (gimple_call_internal_fn (last_stmt) == IFN_MASK_LOAD);

      if (load)
	{
	  lhs = gimple_call_lhs (last_stmt);
	  vectype1 = get_vectype_for_scalar_type (TREE_TYPE (lhs));
	}
      else
	{
	  rhs2 = gimple_call_arg (last_stmt, 3);
	  vectype1 = get_vectype_for_scalar_type (TREE_TYPE (rhs2));
	}

      rhs1 = gimple_call_arg (last_stmt, 2);
      rhs1_type = search_type_for_mask (rhs1, vinfo);
      if (!rhs1_type)
	return NULL;
      vectype2 = get_mask_type_for_scalar_type (rhs1_type);

      if (!vectype1 || !vectype2
	  || TYPE_VECTOR_SUBPARTS (vectype1) == TYPE_VECTOR_SUBPARTS (vectype2))
	return NULL;

      tmp = build_mask_conversion (rhs1, vectype1, stmt_vinfo, vinfo);

      if (load)
	{
	  lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
	  pattern_stmt
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3,
					  gimple_call_arg (last_stmt, 0),
					  gimple_call_arg (last_stmt, 1),
					  tmp);
	  gimple_call_set_lhs (pattern_stmt, lhs);
	}
      else
	pattern_stmt
	  = gimple_build_call_internal (IFN_MASK_STORE, 4,
					gimple_call_arg (last_stmt, 0),
					gimple_call_arg (last_stmt, 1),
					tmp,
					gimple_call_arg (last_stmt, 3));

      pattern_stmt_info = new_stmt_vec_info (pattern_stmt, vinfo);
      set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
      STMT_VINFO_DATA_REF (pattern_stmt_info)
	= STMT_VINFO_DATA_REF (stmt_vinfo);
      STMT_VINFO_DR_BASE_ADDRESS (pattern_stmt_info)
	= STMT_VINFO_DR_BASE_ADDRESS (stmt_vinfo);
      STMT_VINFO_DR_INIT (pattern_stmt_info) = STMT_VINFO_DR_INIT (stmt_vinfo);
      STMT_VINFO_DR_OFFSET (pattern_stmt_info)
	= STMT_VINFO_DR_OFFSET (stmt_vinfo);
      STMT_VINFO_DR_STEP (pattern_stmt_info) = STMT_VINFO_DR_STEP (stmt_vinfo);
      STMT_VINFO_DR_ALIGNED_TO (pattern_stmt_info)
	= STMT_VINFO_DR_ALIGNED_TO (stmt_vinfo);
      DR_STMT (STMT_VINFO_DATA_REF (stmt_vinfo)) = pattern_stmt;

      *type_out = vectype1;
      *type_in = vectype1;
      stmts->safe_push (last_stmt);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_recog_mask_conversion_pattern: detected:\n");

      return pattern_stmt;
    }

  if (!is_gimple_assign (last_stmt))
    return NULL;

  lhs = gimple_assign_lhs (last_stmt);
  rhs1 = gimple_assign_rhs1 (last_stmt);
  rhs_code = gimple_assign_rhs_code (last_stmt);

  /* Check for cond expression requiring mask conversion.  */
  if (rhs_code == COND_EXPR)
    {
      /* vect_recog_mixed_size_cond_pattern could apply.
	 Do nothing then.  */
      if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
	return NULL;

      vectype1 = get_vectype_for_scalar_type (TREE_TYPE (lhs));

      if (TREE_CODE (rhs1) == SSA_NAME)
	{
	  rhs1_type = search_type_for_mask (rhs1, vinfo);
	  if (!rhs1_type)
	    return NULL;
	}
      else
	rhs1_type = TREE_TYPE (TREE_OPERAND (rhs1, 0));

      vectype2 = get_mask_type_for_scalar_type (rhs1_type);

      if (!vectype1 || !vectype2
	  || TYPE_VECTOR_SUBPARTS (vectype1) == TYPE_VECTOR_SUBPARTS (vectype2))
	return NULL;

      /* If rhs1 is a comparison we need to move it into a
	 separate statement.  */
      if (TREE_CODE (rhs1) != SSA_NAME)
	{
	  tmp = vect_recog_temp_ssa_var (TREE_TYPE (rhs1), NULL);
	  pattern_stmt = gimple_build_assign (tmp, rhs1);
	  rhs1 = tmp;

	  pattern_stmt_info = new_stmt_vec_info (pattern_stmt, vinfo);
	  set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
	  STMT_VINFO_VECTYPE (pattern_stmt_info) = vectype2;
	  append_pattern_def_seq (stmt_vinfo, pattern_stmt);
	}

      tmp = build_mask_conversion (rhs1, vectype1, stmt_vinfo, vinfo);

      lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
      pattern_stmt = gimple_build_assign (lhs, COND_EXPR, tmp,
					  gimple_assign_rhs2 (last_stmt),
					  gimple_assign_rhs3 (last_stmt));

      *type_out = vectype1;
      *type_in = vectype1;
      stmts->safe_push (last_stmt);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_recog_mask_conversion_pattern: detected:\n");

      return pattern_stmt;
    }

  /* Now check for binary boolean operations requiring conversion for
     one of operands.  */
  if (TREE_CODE (TREE_TYPE (lhs)) != BOOLEAN_TYPE)
    return NULL;

  if (rhs_code != BIT_IOR_EXPR
      && rhs_code != BIT_XOR_EXPR
      && rhs_code != BIT_AND_EXPR)
    return NULL;

  rhs2 = gimple_assign_rhs2 (last_stmt);

  rhs1_type = search_type_for_mask (rhs1, vinfo);
  rhs2_type = search_type_for_mask (rhs2, vinfo);

  if (!rhs1_type || !rhs2_type
      || TYPE_PRECISION (rhs1_type) == TYPE_PRECISION (rhs2_type))
    return NULL;

  if (TYPE_PRECISION (rhs1_type) < TYPE_PRECISION (rhs2_type))
    {
      vectype1 = get_mask_type_for_scalar_type (rhs1_type);
      if (!vectype1)
	return NULL;
      rhs2 = build_mask_conversion (rhs2, vectype1, stmt_vinfo, vinfo);
    }
  else
    {
      vectype1 = get_mask_type_for_scalar_type (rhs2_type);
      if (!vectype1)
	return NULL;
      rhs1 = build_mask_conversion (rhs1, vectype1, stmt_vinfo, vinfo);
    }

  lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
  pattern_stmt = gimple_build_assign (lhs, rhs_code, rhs1, rhs2);

  *type_out = vectype1;
  *type_in = vectype1;
  stmts->safe_push (last_stmt);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_recog_mask_conversion_pattern: detected:\n");

  return pattern_stmt;
}
/* Mark statements that are involved in a pattern.  */
static inline void
vect_mark_pattern_stmts (gimple *orig_stmt, gimple *pattern_stmt,
			 tree pattern_vectype)
{
  stmt_vec_info pattern_stmt_info, def_stmt_info;
  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
  vec_info *vinfo = orig_stmt_info->vinfo;
  gimple *def_stmt;

  pattern_stmt_info = vinfo_for_stmt (pattern_stmt);
  if (pattern_stmt_info == NULL)
    {
      pattern_stmt_info = new_stmt_vec_info (pattern_stmt, vinfo);
      set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
    }
  gimple_set_bb (pattern_stmt, gimple_bb (orig_stmt));

  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = orig_stmt;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info)
    = STMT_VINFO_DEF_TYPE (orig_stmt_info);
  STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
  STMT_VINFO_IN_PATTERN_P (orig_stmt_info) = true;
  STMT_VINFO_RELATED_STMT (orig_stmt_info) = pattern_stmt;
  STMT_VINFO_PATTERN_DEF_SEQ (pattern_stmt_info)
    = STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt_info);
  if (STMT_VINFO_PATTERN_DEF_SEQ (pattern_stmt_info))
    {
      gimple_stmt_iterator si;
      for (si = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (pattern_stmt_info));
	   !gsi_end_p (si); gsi_next (&si))
	{
	  def_stmt = gsi_stmt (si);
	  def_stmt_info = vinfo_for_stmt (def_stmt);
	  if (def_stmt_info == NULL)
	    {
	      def_stmt_info = new_stmt_vec_info (def_stmt, vinfo);
	      set_vinfo_for_stmt (def_stmt, def_stmt_info);
	    }
	  gimple_set_bb (def_stmt, gimple_bb (orig_stmt));
	  STMT_VINFO_RELATED_STMT (def_stmt_info) = orig_stmt;
	  STMT_VINFO_DEF_TYPE (def_stmt_info) = vect_internal_def;
	  if (STMT_VINFO_VECTYPE (def_stmt_info) == NULL_TREE)
	    STMT_VINFO_VECTYPE (def_stmt_info) = pattern_vectype;
	}
    }
}
/* Function vect_pattern_recog_1

   Input:
   PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
	computation pattern.
   STMT: A stmt from which the pattern search should start.

   If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
   expression that computes the same functionality and can be used to
   replace the sequence of stmts that are involved in the pattern.

   Output:
   This function checks if the expression returned by PATTERN_RECOG_FUNC is
   supported in vector form by the target.  We use 'TYPE_IN' to obtain the
   relevant vector type.  If 'TYPE_IN' is already a vector type, then this
   indicates that target support had already been checked by PATTERN_RECOG_FUNC.
   If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC, we check that it fits
   to the available target pattern.

   This function also does some bookkeeping, as explained in the documentation
   for vect_recog_pattern.  */
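
/* Illustrative sketch (not part of the original file, names hypothetical)
   of the contract a recognition function must follow:

     static gimple *
     vect_recog_example_pattern (vec<gimple *> *stmts, tree *type_in,
				 tree *type_out)
     {
       gimple *last_stmt = stmts->pop ();
       ... match last_stmt, possibly walking its operands' def stmts ...
       *type_in = ...;    scalar or vector type of the pattern inputs
       *type_out = ...;   scalar or vector type of the pattern result
       stmts->safe_push (last_stmt);
       return pattern_stmt;    or NULL if the stmt does not match
     }

   Returning a vector type in *type_in signals that target support has
   already been verified, as described above.  */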
static bool
vect_pattern_recog_1 (vect_recog_func *recog_func,
		      gimple_stmt_iterator si,
		      vec<gimple *> *stmts_to_replace)
{
  gimple *stmt = gsi_stmt (si), *pattern_stmt;
  stmt_vec_info stmt_info;
  loop_vec_info loop_vinfo;
  tree pattern_vectype;
  tree type_in, type_out;
  enum tree_code code;
  int i;
  gimple *next;

  stmts_to_replace->truncate (0);
  stmts_to_replace->quick_push (stmt);
  pattern_stmt = recog_func->fn (stmts_to_replace, &type_in, &type_out);
  if (!pattern_stmt)
    return false;

  stmt = stmts_to_replace->last ();
  stmt_info = vinfo_for_stmt (stmt);
  loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);

  if (VECTOR_BOOLEAN_TYPE_P (type_in)
      || VECTOR_MODE_P (TYPE_MODE (type_in)))
    {
      /* No need to check target support (already checked by the pattern
	 recognition function).  */
      pattern_vectype = type_out ? type_out : type_in;
    }
  else
    {
      machine_mode vec_mode;
      enum insn_code icode;
      optab optab;

      /* Check target support  */
      type_in = get_vectype_for_scalar_type (type_in);
      if (!type_in)
	return false;
      if (type_out)
	type_out = get_vectype_for_scalar_type (type_out);
      else
	type_out = type_in;
      if (!type_out)
	return false;
      pattern_vectype = type_out;

      if (is_gimple_assign (pattern_stmt))
	code = gimple_assign_rhs_code (pattern_stmt);
      else
	{
	  gcc_assert (is_gimple_call (pattern_stmt));
	  code = CALL_EXPR;
	}

      optab = optab_for_tree_code (code, type_in, optab_default);
      vec_mode = TYPE_MODE (type_in);
      if (!optab
	  || (icode = optab_handler (optab, vec_mode)) == CODE_FOR_nothing
	  || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out)))
	return false;
    }

  /* Found a vectorizable pattern.  */
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "%s pattern recognized: ", recog_func->name);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
    }

  /* Mark the stmts that are involved in the pattern. */
  vect_mark_pattern_stmts (stmt, pattern_stmt, pattern_vectype);

  /* Patterns cannot be vectorized using SLP, because they change the order of
     computation.  */
  if (loop_vinfo)
    FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTIONS (loop_vinfo), i, next)
      if (next == stmt)
	LOOP_VINFO_REDUCTIONS (loop_vinfo).ordered_remove (i);

  /* It is possible that additional pattern stmts are created and inserted in
     STMTS_TO_REPLACE.  We create a stmt_info for each of them, and mark the
     relevant statements.  */
  for (i = 0; stmts_to_replace->iterate (i, &stmt)
	      && (unsigned) i < (stmts_to_replace->length () - 1);
       i++)
    {
      stmt_info = vinfo_for_stmt (stmt);
      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "additional pattern stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
	}

      vect_mark_pattern_stmts (stmt, pattern_stmt, NULL_TREE);
    }

  return true;
}
/* Function vect_pattern_recog

   Input:
   LOOP_VINFO - a struct_loop_info of a loop in which we want to look for
	computation idioms.

   Output - for each computation idiom that is detected we create a new stmt
	that provides the same functionality and that can be vectorized.  We
	also record some information in the struct_stmt_info of the relevant
	stmts, as explained below:

   At the entry to this function we have the following stmts, with the
   following initial value in the STMT_VINFO fields:

	 stmt                     in_pattern_p  related_stmt    vec_stmt
	 S1: a_i = ....                 -       -               -
	 S2: a_2 = ..use(a_i)..         -       -               -
	 S3: a_1 = ..use(a_2)..         -       -               -
	 S4: a_0 = ..use(a_1)..         -       -               -
	 S5: ... = ..use(a_0)..         -       -               -

   Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
   represented by a single stmt.  We then:
   - create a new stmt S6 equivalent to the pattern (the stmt is not
     inserted into the code)
   - fill in the STMT_VINFO fields as follows:

				  in_pattern_p  related_stmt    vec_stmt
	 S1: a_i = ....                 -       -               -
	 S2: a_2 = ..use(a_i)..         -       -               -
	 S3: a_1 = ..use(a_2)..         -       -               -
	 S4: a_0 = ..use(a_1)..         true    S6              -
	   '---> S6: a_new = ....       -       S4              -
	 S5: ... = ..use(a_0)..         -       -               -

   (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
   to each other through the RELATED_STMT field).

   S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
   of S4 because it will replace all its uses.  Stmts {S1,S2,S3} will
   remain irrelevant unless used by stmts other than S4.

   If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
   (because they are marked as irrelevant).  It will vectorize S6, and record
   a pointer to the new vector stmt VS6 from S6 (as usual).
   S4 will be skipped, and S5 will be vectorized as usual:

				  in_pattern_p  related_stmt    vec_stmt
	 S1: a_i = ....                 -       -               -
	 S2: a_2 = ..use(a_i)..         -       -               -
	 S3: a_1 = ..use(a_2)..         -       -               -
       > VS6: va_new = ....             -       -               -
	 S4: a_0 = ..use(a_1)..         true    S6              VS6
	   '---> S6: a_new = ....       -       S4              VS6
       > VS5: ... = ..vuse(va_new)..    -       -               -
	 S5: ... = ..use(a_0)..         -       -               -

   DCE could then get rid of {S1,S2,S3,S4,S5} (if their defs are not used
   elsewhere), and we'll end up with:

	VS6: va_new = ....
	VS5: ... = ..vuse(va_new)..

   In case of more than one pattern statements, e.g., widen-mult with
   intermediate type:

     S1  a_t = ;
     S2  a_T = (TYPE) a_t;
	   '--> S3: a_it = (interm_type) a_t;
     S4  prod_T = a_T * CONST;
	   '--> S5: prod_T' = a_it w* CONST;

   there may be other users of a_T outside the pattern.  In that case S2 will
   be marked as relevant (as well as S3), and both S2 and S3 will be analyzed
   and vectorized.  The vector stmt VS2 will be recorded in S2, and VS3 will
   be recorded in S3.  */
void
vect_pattern_recog (vec_info *vinfo)
{
  struct loop *loop;
  basic_block *bbs;
  unsigned int nbbs;
  gimple_stmt_iterator si;
  unsigned int i, j;
  auto_vec<gimple *, 1> stmts_to_replace;
  gimple *stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_pattern_recog ===\n");

  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      bbs = LOOP_VINFO_BBS (loop_vinfo);
      nbbs = loop->num_nodes;

      /* Scan through the loop stmts, applying the pattern recognition
	 functions starting at each stmt visited:  */
      for (i = 0; i < nbbs; i++)
	{
	  basic_block bb = bbs[i];
	  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	    {
	      /* Scan over all generic vect_recog_xxx_pattern functions.  */
	      for (j = 0; j < NUM_PATTERNS; j++)
		if (vect_pattern_recog_1 (&vect_vect_recog_func_ptrs[j], si,
					  &stmts_to_replace))
		  break;
	    }
	}
    }
  else
    {
      bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
      for (si = bb_vinfo->region_begin;
	   gsi_stmt (si) != gsi_stmt (bb_vinfo->region_end); gsi_next (&si))
	{
	  if ((stmt = gsi_stmt (si))
	      && vinfo_for_stmt (stmt)
	      && !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
	    continue;

	  /* Scan over all generic vect_recog_xxx_pattern functions.  */
	  for (j = 0; j < NUM_PATTERNS; j++)
	    if (vect_pattern_recog_1 (&vect_vect_recog_func_ptrs[j], si,
				      &stmts_to_replace))
	      break;
	}
    }
}