/* Analysis Utilities for Loop Vectorization.
   Copyright (C) 2006-2013 Free Software Foundation, Inc.
   Contributed by Dorit Nuzman <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-data-ref.h"
#include "tree-vectorizer.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "diagnostic-core.h"
/* Pattern recognition functions  */
static gimple vect_recog_widen_sum_pattern (vec<gimple> *, tree *, tree *);
static gimple vect_recog_widen_mult_pattern (vec<gimple> *, tree *, tree *);
static gimple vect_recog_dot_prod_pattern (vec<gimple> *, tree *, tree *);
static gimple vect_recog_pow_pattern (vec<gimple> *, tree *, tree *);
static gimple vect_recog_over_widening_pattern (vec<gimple> *, tree *, tree *);
static gimple vect_recog_widen_shift_pattern (vec<gimple> *, tree *, tree *);
static gimple vect_recog_rotate_pattern (vec<gimple> *, tree *, tree *);
static gimple vect_recog_vector_vector_shift_pattern (vec<gimple> *,
						      tree *, tree *);
static gimple vect_recog_divmod_pattern (vec<gimple> *, tree *, tree *);
static gimple vect_recog_mixed_size_cond_pattern (vec<gimple> *, tree *,
						  tree *);
static gimple vect_recog_bool_pattern (vec<gimple> *, tree *, tree *);

static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
  vect_recog_widen_mult_pattern,
  vect_recog_widen_sum_pattern,
  vect_recog_dot_prod_pattern,
  vect_recog_pow_pattern,
  vect_recog_widen_shift_pattern,
  vect_recog_over_widening_pattern,
  vect_recog_rotate_pattern,
  vect_recog_vector_vector_shift_pattern,
  vect_recog_divmod_pattern,
  vect_recog_mixed_size_cond_pattern,
  vect_recog_bool_pattern
};
static inline void
append_pattern_def_seq (stmt_vec_info stmt_info, gimple stmt)
{
  gimple_seq_add_stmt_without_update (&STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
				      stmt);
}

static inline void
new_pattern_def_seq (stmt_vec_info stmt_info, gimple stmt)
{
  STMT_VINFO_PATTERN_DEF_SEQ (stmt_info) = NULL;
  append_pattern_def_seq (stmt_info, stmt);
}
/* Check whether STMT2 is in the same loop or basic block as STMT1.
   Which of the two applies depends on whether we're currently doing
   loop-based or basic-block-based vectorization, as determined by
   the vinfo_for_stmt for STMT1 (which must be defined).

   If this returns true, vinfo_for_stmt for STMT2 is guaranteed
   to be defined as well.  */

static bool
vect_same_loop_or_bb_p (gimple stmt1, gimple stmt2)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt1);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);

  if (!gimple_bb (stmt2))
    return false;

  if (loop_vinfo)
    {
      struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
      if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt2)))
	return false;
    }
  else
    {
      if (gimple_bb (stmt2) != BB_VINFO_BB (bb_vinfo)
	  || gimple_code (stmt2) == GIMPLE_PHI)
	return false;
    }

  gcc_assert (vinfo_for_stmt (stmt2));
  return true;
}
/* If the LHS of DEF_STMT has a single use, and that statement is
   in the same loop or basic block, return it.  */

static gimple
vect_single_imm_use (gimple def_stmt)
{
  tree lhs = gimple_assign_lhs (def_stmt);
  use_operand_p use_p;
  gimple use_stmt;

  if (!single_imm_use (lhs, &use_p, &use_stmt))
    return NULL;

  if (!vect_same_loop_or_bb_p (def_stmt, use_stmt))
    return NULL;

  return use_stmt;
}
/* Check whether NAME, an ssa-name used in USE_STMT,
   is a result of a type promotion or demotion, such that:
     DEF_STMT: NAME = NOP (name0)
   where the type of name0 (ORIG_TYPE) is smaller/bigger than the type of NAME.
   If CHECK_SIGN is TRUE, check that either both types are signed or both are
   unsigned.  */

static bool
type_conversion_p (tree name, gimple use_stmt, bool check_sign,
		   tree *orig_type, gimple *def_stmt, bool *promotion)
{
  tree dummy;
  gimple dummy_gimple;
  loop_vec_info loop_vinfo;
  stmt_vec_info stmt_vinfo;
  tree type = TREE_TYPE (name);
  tree oprnd0;
  enum vect_def_type dt;
  tree def;
  bb_vec_info bb_vinfo;

  stmt_vinfo = vinfo_for_stmt (use_stmt);
  loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
  if (!vect_is_simple_use (name, use_stmt, loop_vinfo, bb_vinfo, def_stmt,
			   &def, &dt))
    return false;

  if (dt != vect_internal_def
      && dt != vect_external_def && dt != vect_constant_def)
    return false;

  if (!is_gimple_assign (*def_stmt))
    return false;

  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (*def_stmt)))
    return false;

  oprnd0 = gimple_assign_rhs1 (*def_stmt);

  *orig_type = TREE_TYPE (oprnd0);
  if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*orig_type)
      || ((TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*orig_type)) && check_sign))
    return false;

  if (TYPE_PRECISION (type) >= (TYPE_PRECISION (*orig_type) * 2))
    *promotion = true;
  else if (TYPE_PRECISION (*orig_type) >= (TYPE_PRECISION (type) * 2))
    *promotion = false;
  else
    return false;

  if (!vect_is_simple_use (oprnd0, *def_stmt, loop_vinfo,
			   bb_vinfo, &dummy_gimple, &dummy, &dt))
    return false;

  return true;
}
/* Helper to return a new temporary for pattern of TYPE for STMT.  If STMT
   is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var.  */

static tree
vect_recog_temp_ssa_var (tree type, gimple stmt)
{
  return make_temp_ssa_name (type, stmt, "patt");
}
/* Function vect_recog_dot_prod_pattern

   Try to find the following pattern:

     sum_0 = phi <init, sum_1>
     S3  x_T = (TYPE1) x_t;
     S4  y_T = (TYPE1) y_t;
     [S6  prod = (TYPE2) prod;  #optional]
     S7  sum_1 = prod + sum_0;

   where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
   same size of 'TYPE1' or bigger.  This is a special case of a reduction
   computation.

   Input:

   * STMTS: Contains a stmt from which the pattern search begins.  In the
   example, when this function is called with S7, the pattern {S3,S4,S5,S6,S7}
   will be detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>

   Note: The dot-prod idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results.  It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */
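/* Illustrative example (not part of the original source): a scalar loop of
   the following shape is the kind of input this recognizer is meant to
   match, assuming narrow (short) elements and a wider (int) accumulator:

     short a[256], b[256];

     int
     dot_product (void)
     {
       int sum = 0;
       for (int i = 0; i < 256; i++)
	 sum += a[i] * b[i];    // S3-S7 above; summed as DOT_PROD_EXPR
       return sum;
     }
*/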
static gimple
vect_recog_dot_prod_pattern (vec<gimple> *stmts, tree *type_in,
			     tree *type_out)
{
  gimple stmt, last_stmt = (*stmts)[0];
  tree oprnd0, oprnd1;
  tree oprnd00, oprnd01;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  tree prod_type;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop;
  tree var;
  bool promotion;

  if (!loop_info)
    return NULL;

  loop = LOOP_VINFO_LOOP (loop_info);

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE1) X;
          DY = (TYPE1) Y;
          DPROD = DX * DY;
          DDPROD = (TYPE2) DPROD;
          sum_1 = DDPROD + sum_0;
     In which
     - DX is double the size of X
     - DY is double the size of Y
     - DX, DY, DPROD all have the same type
     - sum is the same size of DPROD or bigger
     - sum has been recognized as a reduction variable.

     This is equivalent to:
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD w+ sum_0;  #widen summation
     or
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD + sum_0;   #summation
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as widening-summation?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      type = gimple_expr_type (stmt);
      if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      half_type = TREE_TYPE (oprnd0);
    }
  else
    {
      gimple def_stmt;

      if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (last_stmt);
      oprnd1 = gimple_assign_rhs2 (last_stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), type)
	  || !types_compatible_p (TREE_TYPE (oprnd1), type))
        return NULL;
      stmt = last_stmt;

      if (type_conversion_p (oprnd0, stmt, true, &half_type, &def_stmt,
			     &promotion)
	  && promotion)
        {
          stmt = def_stmt;
          oprnd0 = gimple_assign_rhs1 (stmt);
        }
      else
        half_type = type;
    }

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a (widen_)mult_expr  */
  if (TREE_CODE (oprnd0) != SSA_NAME)
    return NULL;

  prod_type = half_type;
  stmt = SSA_NAME_DEF_STMT (oprnd0);

  /* It could not be the dot_prod pattern if the stmt is outside the loop.  */
  if (!gimple_bb (stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
    return NULL;

  /* FORNOW.  Can continue analyzing the def-use chain when this stmt in a phi
     inside the loop (in case we are analyzing an outer-loop).  */
  if (!is_gimple_assign (stmt))
    return NULL;
  stmt_vinfo = vinfo_for_stmt (stmt);
  gcc_assert (stmt_vinfo);
  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
    return NULL;
  if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
    return NULL;
  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as a widening multiplication?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
        return NULL;
      stmt_vinfo = vinfo_for_stmt (stmt);
      gcc_assert (stmt_vinfo);
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def);
      oprnd00 = gimple_assign_rhs1 (stmt);
      oprnd01 = gimple_assign_rhs2 (stmt);
    }
  else
    {
      tree half_type0, half_type1;
      gimple def_stmt;

      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type)
          || !types_compatible_p (TREE_TYPE (oprnd1), prod_type))
        return NULL;
      if (!type_conversion_p (oprnd0, stmt, true, &half_type0, &def_stmt,
			      &promotion)
	  || !promotion)
        return NULL;
      oprnd00 = gimple_assign_rhs1 (def_stmt);
      if (!type_conversion_p (oprnd1, stmt, true, &half_type1, &def_stmt,
			      &promotion)
	  || !promotion)
        return NULL;
      oprnd01 = gimple_assign_rhs1 (def_stmt);
      if (!types_compatible_p (half_type0, half_type1))
        return NULL;
      if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
        return NULL;
    }

  half_type = TREE_TYPE (oprnd00);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (DOT_PROD_EXPR, var,
					       oprnd00, oprnd01, oprnd1);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_recog_dot_prod_pattern: detected: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}
/* Handle widening operation by a constant.  At the moment we support MULT_EXPR
   and LSHIFT_EXPR.

   For MULT_EXPR we check that CONST_OPRND fits HALF_TYPE, and for LSHIFT_EXPR
   we check that CONST_OPRND is less or equal to the size of HALF_TYPE.

   Otherwise, if the type of the result (TYPE) is at least 4 times bigger than
   HALF_TYPE, and there is an intermediate type (2 times smaller than TYPE)
   that satisfies the above restrictions, we can perform a widening operation
   from the intermediate type to TYPE and replace a_T = (TYPE) a_t;
   with a_it = (interm_type) a_t;  */

static bool
vect_handle_widen_op_by_const (gimple stmt, enum tree_code code,
		               tree const_oprnd, tree *oprnd,
   		               vec<gimple> *stmts, tree type,
			       tree *half_type, gimple def_stmt)
{
  tree new_type, new_oprnd;
  gimple new_stmt;

  if (code != MULT_EXPR && code != LSHIFT_EXPR)
    return false;

  if (((code == MULT_EXPR && int_fits_type_p (const_oprnd, *half_type))
        || (code == LSHIFT_EXPR
            && compare_tree_int (const_oprnd, TYPE_PRECISION (*half_type))
	       != 1))
      && TYPE_PRECISION (type) == (TYPE_PRECISION (*half_type) * 2))
    {
      /* CONST_OPRND is a constant of HALF_TYPE.  */
      *oprnd = gimple_assign_rhs1 (def_stmt);
      return true;
    }

  if (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 4))
    return false;

  if (!vect_same_loop_or_bb_p (stmt, def_stmt))
    return false;

  /* TYPE is 4 times bigger than HALF_TYPE, try widening operation for
     a type 2 times bigger than HALF_TYPE.  */
  new_type = build_nonstandard_integer_type (TYPE_PRECISION (type) / 2,
                                             TYPE_UNSIGNED (type));
  if ((code == MULT_EXPR && !int_fits_type_p (const_oprnd, new_type))
      || (code == LSHIFT_EXPR
          && compare_tree_int (const_oprnd, TYPE_PRECISION (new_type)) == 1))
    return false;

  /* Use NEW_TYPE for widening operation.  */
  if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)))
    {
      new_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
      /* Check if the already created pattern stmt is what we need.  */
      if (!is_gimple_assign (new_stmt)
          || gimple_assign_rhs_code (new_stmt) != NOP_EXPR
          || TREE_TYPE (gimple_assign_lhs (new_stmt)) != new_type)
        return false;

      stmts->safe_push (def_stmt);
      *oprnd = gimple_assign_lhs (new_stmt);
    }
  else
    {
      /* Create a_T = (NEW_TYPE) a_t;  */
      *oprnd = gimple_assign_rhs1 (def_stmt);
      new_oprnd = make_ssa_name (new_type, NULL);
      new_stmt = gimple_build_assign_with_ops (NOP_EXPR, new_oprnd, *oprnd,
					       NULL_TREE);
      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)) = new_stmt;
      stmts->safe_push (def_stmt);
      *oprnd = new_oprnd;
    }

  *half_type = new_type;
  return true;
}
/* Function vect_recog_widen_mult_pattern

   Try to find the following pattern:

     TYPE a_T, b_T, prod_T;

     S5  prod_T = a_T * b_T;

   where type 'TYPE' is at least double the size of type 'type'.

   Also detect unsigned cases:

     unsigned type a_t, b_t;
     unsigned TYPE u_prod_T;
     TYPE a_T, b_T, prod_T;

     S5  prod_T = a_T * b_T;
     S6  u_prod_T = (unsigned TYPE) prod_T;

   and multiplication by constants:

     S5  prod_T = a_T * CONST;

   A special case of multiplication by constants is when 'TYPE' is 4 times
   bigger than 'type', but CONST fits an intermediate type 2 times smaller
   than 'TYPE'.  In that case we create an additional pattern stmt for S3
   to create a variable of the intermediate type, and perform widen-mult
   on the intermediate type as well:

     TYPE a_T, prod_T, prod_T';

     '--> a_it = (interm_type) a_t;
     S5  prod_T = a_T * CONST;
     '--> prod_T' = a_it w* CONST;

   Input/Output:

   * STMTS: Contains a stmt from which the pattern search begins.  In the
   example, when this function is called with S5, the pattern {S3,S4,S5,(S6)}
   is detected.  In case of unsigned widen-mult, the original stmt (S5) is
   replaced with S6 in STMTS.  In case of multiplication by a constant
   of an intermediate type (the last case above), STMTS also contains S3
   (inserted before S5).

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_MULT <a_t, b_t>  */
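/* Illustrative example (not part of the original source): assuming 16-bit
   inputs and a 32-bit product, a loop such as the one below exhibits the
   promote-then-multiply sequence described above:

     short a[256], b[256];
     int c[256];

     void
     widen_mult (void)
     {
       for (int i = 0; i < 256; i++)
	 c[i] = a[i] * b[i];    // a_T = (int) a_t; b_T = (int) b_t; prod_T = a_T * b_T
     }
*/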
static gimple
vect_recog_widen_mult_pattern (vec<gimple> *stmts,
                               tree *type_in, tree *type_out)
{
  gimple last_stmt = stmts->pop ();
  gimple def_stmt0, def_stmt1;
  tree oprnd0, oprnd1;
  tree type, half_type0, half_type1;
  gimple pattern_stmt;
  tree vectype, vectype_out = NULL_TREE;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  vec<tree> dummy_vec;
  bool op1_ok;
  bool promotion;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* Check argument 0.  */
  if (!type_conversion_p (oprnd0, last_stmt, false, &half_type0, &def_stmt0,
                          &promotion)
      || !promotion)
    return NULL;
  /* Check argument 1.  */
  op1_ok = type_conversion_p (oprnd1, last_stmt, false, &half_type1,
                              &def_stmt1, &promotion);

  if (op1_ok && promotion)
    {
      oprnd0 = gimple_assign_rhs1 (def_stmt0);
      oprnd1 = gimple_assign_rhs1 (def_stmt1);
    }
  else
    {
      if (TREE_CODE (oprnd1) == INTEGER_CST
          && TREE_CODE (half_type0) == INTEGER_TYPE
          && vect_handle_widen_op_by_const (last_stmt, MULT_EXPR, oprnd1,
					    &oprnd0, stmts, type,
					    &half_type0, def_stmt0))
	half_type1 = half_type0;
      else
        return NULL;
    }

  /* Handle unsigned case.  Look for
     S6  u_prod_T = (unsigned TYPE) prod_T;
     Use unsigned TYPE as the type for WIDEN_MULT_EXPR.  */
  if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (half_type0))
    {
      gimple use_stmt;
      tree use_lhs;
      tree use_type;

      if (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (half_type1))
        return NULL;

      use_stmt = vect_single_imm_use (last_stmt);
      if (!use_stmt || !is_gimple_assign (use_stmt)
          || gimple_assign_rhs_code (use_stmt) != NOP_EXPR)
        return NULL;

      use_lhs = gimple_assign_lhs (use_stmt);
      use_type = TREE_TYPE (use_lhs);
      if (!INTEGRAL_TYPE_P (use_type)
          || (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (use_type))
          || (TYPE_PRECISION (type) != TYPE_PRECISION (use_type)))
        return NULL;

      type = use_type;
      last_stmt = use_stmt;
    }

  if (!types_compatible_p (half_type0, half_type1))
    return NULL;

  /* Pattern detected.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_recog_widen_mult_pattern: detected: ");

  /* Check target support  */
  vectype = get_vectype_for_scalar_type (half_type0);
  vectype_out = get_vectype_for_scalar_type (type);
  if (!vectype
      || !vectype_out
      || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt,
					  vectype_out, vectype,
					  &dummy_code, &dummy_code,
					  &dummy_int, &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = vectype_out;

  /* Pattern supported.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
					       oprnd1);

  if (dump_enabled_p ())
    dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);

  stmts->safe_push (last_stmt);
  return pattern_stmt;
}
/* Function vect_recog_pow_pattern

   Try to find the following pattern:

     x = POW (y, N);

   with POW being one of pow, powf, powi, powif and N being
   either 2 or 0.5.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        x = x * x
   or
	x = sqrt (x)  */
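/* Illustrative example (not part of the original source): with a constant
   exponent of 2 the builtin call is replaced by a plain multiply, e.g.:

     #include <math.h>

     void
     square_all (double *x, int n)
     {
       for (int i = 0; i < n; i++)
	 x[i] = pow (x[i], 2.0);    // becomes x[i] * x[i]
     }
*/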
static gimple
vect_recog_pow_pattern (vec<gimple> *stmts, tree *type_in,
			tree *type_out)
{
  gimple last_stmt = (*stmts)[0];
  tree fn, base, exp = NULL;
  gimple stmt;
  tree var;

  if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
    return NULL;

  fn = gimple_call_fndecl (last_stmt);
  if (fn == NULL_TREE || DECL_BUILT_IN_CLASS (fn) != BUILT_IN_NORMAL)
   return NULL;

  switch (DECL_FUNCTION_CODE (fn))
    {
    case BUILT_IN_POWIF:
    case BUILT_IN_POWI:
    case BUILT_IN_POWF:
    case BUILT_IN_POW:
      base = gimple_call_arg (last_stmt, 0);
      exp = gimple_call_arg (last_stmt, 1);
      if (TREE_CODE (exp) != REAL_CST
	  && TREE_CODE (exp) != INTEGER_CST)
        return NULL;
      break;

    default:
      return NULL;
    }

  /* We now have a pow or powi builtin function call with a constant
     exponent.  */

  *type_out = NULL_TREE;

  /* Catch squaring.  */
  if ((host_integerp (exp, 0)
       && tree_low_cst (exp, 0) == 2)
      || (TREE_CODE (exp) == REAL_CST
          && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
    {
      *type_in = TREE_TYPE (base);

      var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
      stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
      return stmt;
    }

  /* Catch square root.  */
  if (TREE_CODE (exp) == REAL_CST
      && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
    {
      tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
      *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
      if (*type_in)
	{
	  gimple stmt = gimple_build_call (newfn, 1, base);
	  if (vectorizable_function (stmt, *type_in, *type_in)
	      != NULL_TREE)
	    {
	      var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
	      gimple_call_set_lhs (stmt, var);
	      return stmt;
	    }
	}
    }

  return NULL;
}
/* Function vect_recog_widen_sum_pattern

   Try to find the following pattern:

     TYPE x_T, sum = init;

     sum_0 = phi <init, sum_1>
     S3  sum_1 = x_T + sum_0;

   where type 'TYPE' is at least double the size of type 'type', i.e - we're
   summing elements of type 'type' into an accumulator of type 'TYPE'.  This is
   a special case of a reduction computation.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S3, the pattern {S2,S3} will be detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_SUM <x_t, sum_0>

   Note: The widening-sum idiom is a widening reduction pattern that is
	 vectorized without preserving all the intermediate results.  It
         produces only N/2 (widened) results (by summing up pairs of
	 intermediate results) rather than all N results.  Therefore, we
	 cannot allow this pattern when we want to get all the results and in
	 the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */
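/* Illustrative example (not part of the original source): summing narrow
   elements into a wider accumulator, matching the S2/S3 sequence above:

     short x[256];

     int
     widen_sum (void)
     {
       int sum = 0;
       for (int i = 0; i < 256; i++)
	 sum += x[i];    // x_T = (int) x_t; sum_1 = x_T + sum_0
       return sum;
     }
*/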
static gimple
vect_recog_widen_sum_pattern (vec<gimple> *stmts, tree *type_in,
			      tree *type_out)
{
  gimple stmt, last_stmt = (*stmts)[0];
  tree oprnd0, oprnd1;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop;
  tree var;
  bool promotion;

  if (!loop_info)
    return NULL;

  loop = LOOP_VINFO_LOOP (loop_info);

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE) X;
          sum_1 = DX + sum_0;
     In which DX is at least double the size of X, and sum_1 has been
     recognized as a reduction variable.
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a cast from type 'type' to type
     'TYPE'.  */

  if (!type_conversion_p (oprnd0, last_stmt, true, &half_type, &stmt,
                          &promotion)
      || !promotion)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (stmt);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
					       oprnd0, oprnd1);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_recog_widen_sum_pattern: detected: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}
/* Return TRUE if the operation in STMT can be performed on a smaller type.

   Input:
   STMT - a statement to check.
   DEF - we support operations with two operands, one of which is constant.
         The other operand can be defined by a demotion operation, or by a
         previous statement in a sequence of over-promoted operations.  In the
         latter case DEF is used to replace that operand.  (It is defined by a
         pattern statement we created for the previous statement in the
         sequence).

   Input/output:
   NEW_TYPE - Output: a smaller type that we are trying to use.  Input: if not
         NULL, it's the type of DEF.
   STMTS - additional pattern statements.  If a pattern statement (type
         conversion) is created in this function, its original statement is
         added to STMTS.

   Output:
   OP0, OP1 - if the operation fits a smaller type, OP0 and OP1 are the new
         operands to use in the new pattern statement for STMT (will be created
         in vect_recog_over_widening_pattern ()).
   NEW_DEF_STMT - in case DEF has to be promoted, we create two pattern
         statements for STMT: the first one is a type promotion and the second
         one is the operation itself.  We return the type promotion statement
	 in NEW_DEF_STMT and further store it in STMT_VINFO_PATTERN_DEF_SEQ of
         the second pattern statement.  */
static bool
vect_operation_fits_smaller_type (gimple stmt, tree def, tree *new_type,
				  tree *op0, tree *op1, gimple *new_def_stmt,
				  vec<gimple> *stmts)
{
  enum tree_code code;
  tree const_oprnd, oprnd;
  tree interm_type = NULL_TREE, half_type, new_oprnd, type;
  gimple def_stmt, new_stmt;
  bool first = false;
  bool promotion;

  *new_def_stmt = NULL;

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != LSHIFT_EXPR && code != RSHIFT_EXPR
      && code != BIT_IOR_EXPR && code != BIT_XOR_EXPR && code != BIT_AND_EXPR)
    return false;

  oprnd = gimple_assign_rhs1 (stmt);
  const_oprnd = gimple_assign_rhs2 (stmt);
  type = gimple_expr_type (stmt);

  if (TREE_CODE (oprnd) != SSA_NAME
      || TREE_CODE (const_oprnd) != INTEGER_CST)
    return false;

  /* If oprnd has other uses besides that in stmt we cannot mark it
     as being part of a pattern only.  */
  if (!has_single_use (oprnd))
    return false;

  /* If we are in the middle of a sequence, we use DEF from a previous
     statement.  Otherwise, OPRND has to be a result of type promotion.  */
  if (*new_type && TREE_TYPE (oprnd) == *new_type)
    {
      half_type = *new_type;
      oprnd = def;
    }
  else
    {
      first = true;
      if (!type_conversion_p (oprnd, stmt, false, &half_type, &def_stmt,
			      &promotion)
	  || !promotion
	  || !vect_same_loop_or_bb_p (stmt, def_stmt))
	return false;
    }

  /* Can we perform the operation on a smaller type?  */
  switch (code)
    {
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_AND_EXPR:
      if (!int_fits_type_p (const_oprnd, half_type))
	{
	  /* HALF_TYPE is not enough.  Try a bigger type if possible.  */
	  if (TYPE_PRECISION (type) < (TYPE_PRECISION (half_type) * 4))
	    return false;

	  interm_type = build_nonstandard_integer_type (
			  TYPE_PRECISION (half_type) * 2, TYPE_UNSIGNED (type));
	  if (!int_fits_type_p (const_oprnd, interm_type))
	    return false;
	}
      break;

    case LSHIFT_EXPR:
      /* Try intermediate type - HALF_TYPE is not enough for sure.  */
      if (TYPE_PRECISION (type) < (TYPE_PRECISION (half_type) * 4))
	return false;

      /* Check that HALF_TYPE size + shift amount <= INTERM_TYPE size.
	 (e.g., if the original value was char, the shift amount is at most 8
	 if we want to use short).  */
      if (compare_tree_int (const_oprnd, TYPE_PRECISION (half_type)) == 1)
	return false;

      interm_type = build_nonstandard_integer_type (
		      TYPE_PRECISION (half_type) * 2, TYPE_UNSIGNED (type));
      if (!vect_supportable_shift (code, interm_type))
	return false;
      break;

    case RSHIFT_EXPR:
      if (vect_supportable_shift (code, half_type))
	break;

      /* Try intermediate type - HALF_TYPE is not supported.  */
      if (TYPE_PRECISION (type) < (TYPE_PRECISION (half_type) * 4))
	return false;

      interm_type = build_nonstandard_integer_type (
		      TYPE_PRECISION (half_type) * 2, TYPE_UNSIGNED (type));
      if (!vect_supportable_shift (code, interm_type))
	return false;
      break;

    default:
      gcc_unreachable ();
    }

  /* There are four possible cases:
     1. OPRND is defined by a type promotion (in that case FIRST is TRUE, it's
	the first statement in the sequence)
	a. The original, HALF_TYPE, is not enough - we replace the promotion
	   from HALF_TYPE to TYPE with a promotion to INTERM_TYPE.
	b. HALF_TYPE is sufficient, OPRND is set as the RHS of the original
	   promotion.
     2. OPRND is defined by a pattern statement we created.
	a. Its type is not sufficient for the operation, we create a new stmt:
	   a type conversion for OPRND from HALF_TYPE to INTERM_TYPE.  We store
	   this statement in NEW_DEF_STMT, and it is later put in
	   STMT_VINFO_PATTERN_DEF_SEQ of the pattern statement for STMT.
	b. OPRND is good to use in the new statement.  */
  if (first)
    {
      if (interm_type)
	{
	  /* Replace the original type conversion HALF_TYPE->TYPE with
	     HALF_TYPE->INTERM_TYPE.  */
	  if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)))
	    {
	      new_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
	      /* Check if the already created pattern stmt is what we need.  */
	      if (!is_gimple_assign (new_stmt)
		  || gimple_assign_rhs_code (new_stmt) != NOP_EXPR
		  || TREE_TYPE (gimple_assign_lhs (new_stmt)) != interm_type)
		return false;

	      stmts->safe_push (def_stmt);
	      oprnd = gimple_assign_lhs (new_stmt);
	    }
	  else
	    {
	      /* Create NEW_OPRND = (INTERM_TYPE) OPRND.  */
	      oprnd = gimple_assign_rhs1 (def_stmt);
	      new_oprnd = make_ssa_name (interm_type, NULL);
	      new_stmt = gimple_build_assign_with_ops (NOP_EXPR, new_oprnd,
						       oprnd, NULL_TREE);
	      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)) = new_stmt;
	      stmts->safe_push (def_stmt);
	      oprnd = new_oprnd;
	    }
	}
      else
	{
	  /* Retrieve the operand before the type promotion.  */
	  oprnd = gimple_assign_rhs1 (def_stmt);
	}
    }
  else
    {
      if (interm_type)
	{
	  /* Create a type conversion HALF_TYPE->INTERM_TYPE.  */
	  new_oprnd = make_ssa_name (interm_type, NULL);
	  new_stmt = gimple_build_assign_with_ops (NOP_EXPR, new_oprnd,
						   oprnd, NULL_TREE);
	  oprnd = new_oprnd;
	  *new_def_stmt = new_stmt;
	}

      /* Otherwise, OPRND is already set.  */
    }

  if (interm_type)
    *new_type = interm_type;
  else
    *new_type = half_type;

  *op0 = oprnd;
  *op1 = fold_convert (*new_type, const_oprnd);

  return true;
}
/* Try to find a statement or a sequence of statements that can be performed
   on a smaller type:

     TYPE x_T, res0_T, res1_T;

     S2  x_T = (TYPE) x_t;
     S3  res0_T = op (x_T, C0);
     S4  res1_T = op (res0_T, C1);
     S5  ... = () res1_T;  - type demotion

   where type 'TYPE' is at least double the size of type 'type', C0 and C1 are
   constants.
   Check if S3 and S4 can be done on a smaller type than 'TYPE', it can either
   be 'type' or some intermediate type.  For now, we expect S5 to be a type
   demotion operation.  We also check that S3 and S4 have only one use.  */
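/* Illustrative example (not part of the original source): an over-widened
   sequence in which the intermediate operations (S3/S4 above) can be done in
   a narrower type, because the result is demoted again at S5:

     unsigned char in[256], out[256];

     void
     over_widen (void)
     {
       for (int i = 0; i < 256; i++)
	 out[i] = (in[i] >> 1) & 0x7f;   // computed in int, result fits a char
     }
*/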
static gimple
vect_recog_over_widening_pattern (vec<gimple> *stmts,
                                  tree *type_in, tree *type_out)
{
  gimple stmt = stmts->pop ();
  gimple pattern_stmt = NULL, new_def_stmt, prev_stmt = NULL, use_stmt = NULL;
  tree op0, op1, vectype = NULL_TREE, use_lhs, use_type;
  tree var = NULL_TREE, new_type = NULL_TREE, new_oprnd;
  tree type = NULL_TREE;
  bool first = true;

  while (1)
    {
      if (!vinfo_for_stmt (stmt)
          || STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (stmt)))
        return NULL;

      new_def_stmt = NULL;
      if (!vect_operation_fits_smaller_type (stmt, var, &new_type,
                                             &op0, &op1, &new_def_stmt,
                                             stmts))
        {
          if (first)
            return NULL;
          else
            break;
        }

      /* STMT can be performed on a smaller type.  Check its uses.  */
      use_stmt = vect_single_imm_use (stmt);
      if (!use_stmt || !is_gimple_assign (use_stmt))
        return NULL;

      /* Create pattern statement for STMT.  */
      vectype = get_vectype_for_scalar_type (new_type);
      if (!vectype)
        return NULL;

      /* We want to collect all the statements for which we create pattern
         statetments, except for the case when the last statement in the
         sequence doesn't have a corresponding pattern statement.  In such
         case we associate the last pattern statement with the last statement
         in the sequence.  Therefore, we only add the original statement to
         the list if we know that it is not the last.  */
      if (prev_stmt)
        stmts->safe_push (prev_stmt);

      var = vect_recog_temp_ssa_var (new_type, NULL);
      pattern_stmt
	= gimple_build_assign_with_ops (gimple_assign_rhs_code (stmt), var,
					op0, op1);
      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)) = pattern_stmt;
      new_pattern_def_seq (vinfo_for_stmt (stmt), new_def_stmt);

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "created pattern stmt: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
        }

      type = gimple_expr_type (stmt);
      prev_stmt = stmt;
      stmt = use_stmt;

      first = false;
    }

  /* We got a sequence.  We expect it to end with a type demotion operation.
     Otherwise, we quit (for now).  There are three possible cases: the
     conversion is to NEW_TYPE (we don't do anything), the conversion is to
     a type bigger than NEW_TYPE and/or the signedness of USE_TYPE and
     NEW_TYPE differs (we create a new conversion statement).  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_stmt)))
    {
      use_lhs = gimple_assign_lhs (use_stmt);
      use_type = TREE_TYPE (use_lhs);
      /* Support only type demotion or signedess change.  */
      if (!INTEGRAL_TYPE_P (use_type)
	  || TYPE_PRECISION (type) <= TYPE_PRECISION (use_type))
        return NULL;

      /* Check that NEW_TYPE is not bigger than the conversion result.  */
      if (TYPE_PRECISION (new_type) > TYPE_PRECISION (use_type))
	return NULL;

      if (TYPE_UNSIGNED (new_type) != TYPE_UNSIGNED (use_type)
          || TYPE_PRECISION (new_type) != TYPE_PRECISION (use_type))
        {
          /* Create NEW_TYPE->USE_TYPE conversion.  */
	  new_oprnd = make_ssa_name (use_type, NULL);
	  pattern_stmt = gimple_build_assign_with_ops (NOP_EXPR, new_oprnd,
						       var, NULL_TREE);
          STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt)) = pattern_stmt;

          *type_in = get_vectype_for_scalar_type (new_type);
          *type_out = get_vectype_for_scalar_type (use_type);

          /* We created a pattern statement for the last statement in the
             sequence, so we don't need to associate it with the pattern
             statement created for PREV_STMT.  Therefore, we add PREV_STMT
             to the list in order to mark it later in vect_pattern_recog_1.  */
          if (prev_stmt)
            stmts->safe_push (prev_stmt);
        }
      else
        {
          if (prev_stmt)
	    STMT_VINFO_PATTERN_DEF_SEQ (vinfo_for_stmt (use_stmt))
	      = STMT_VINFO_PATTERN_DEF_SEQ (vinfo_for_stmt (prev_stmt));

          *type_in = vectype;
          *type_out = NULL_TREE;
        }

      stmts->safe_push (use_stmt);
    }
  else
    /* TODO: support general case, create a conversion to the correct type.  */
    return NULL;

  /* Pattern detected.  */
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_recog_over_widening_pattern: detected: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
    }

  return pattern_stmt;
}
/* Detect widening shift pattern:

     S2 a_T = (TYPE) a_t;
     S3 res_T = a_T << CONST;

  where type 'TYPE' is at least double the size of type 'type'.

  Also detect cases where the shift result is immediately converted
  to another type 'result_type' that is no larger in size than 'TYPE'.
  In those cases we perform a widen-shift that directly results in
  'result_type', to avoid a possible over-widening situation:

     result_type res_result;

     S2 a_T = (TYPE) a_t;
     S3 res_T = a_T << CONST;
     S4 res_result = (result_type) res_T;
         '--> res_result' = a_t w<< CONST;

  And a case when 'TYPE' is 4 times bigger than 'type'.  In that case we
  create an additional pattern stmt for S2 to create a variable of an
  intermediate type, and perform widen-shift on the intermediate type:

     TYPE a_T, res_T, res_T';

     S2 a_T = (TYPE) a_t;
         '--> a_it = (interm_type) a_t;
     S3 res_T = a_T << CONST;
         '--> res_T' = a_it <<* CONST;

  Input/Output:

  * STMTS: Contains a stmt from which the pattern search begins.
    In case of unsigned widen-shift, the original stmt (S3) is replaced with S4
    in STMTS.  When an intermediate type is used and a pattern statement is
    created for S2, we also put S2 here (before S3).

  Output:

  * TYPE_IN: The type of the input arguments to the pattern.

  * TYPE_OUT: The type of the output of this pattern.

  * Return value: A new stmt that will be used to replace the sequence of
    stmts that constitute the pattern.  In this case it will be:
    WIDEN_LSHIFT_EXPR <a_t, CONST>.  */
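/* Illustrative example (not part of the original source): a left shift whose
   operand is promoted first, matching S2/S3 above, with the result then
   narrowed again (the S4 case):

     unsigned char a[256];
     unsigned short b[256];

     void
     widen_shift (void)
     {
       for (int i = 0; i < 256; i++)
	 b[i] = a[i] << 7;    // a_T = (TYPE) a_t; res_T = a_T << CONST
     }
*/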
static gimple
vect_recog_widen_shift_pattern (vec<gimple> *stmts,
				tree *type_in, tree *type_out)
{
  gimple last_stmt = stmts->pop ();
  gimple def_stmt0;
  tree oprnd0, oprnd1;
  tree type, half_type0;
  gimple pattern_stmt;
  tree vectype, vectype_out = NULL_TREE;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  vec<tree> dummy_vec;
  gimple use_stmt;
  bool promotion;

  if (!is_gimple_assign (last_stmt) || !vinfo_for_stmt (last_stmt))
    return NULL;

  if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (last_stmt)))
    return NULL;

  if (gimple_assign_rhs_code (last_stmt) != LSHIFT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (TREE_CODE (oprnd0) != SSA_NAME || TREE_CODE (oprnd1) != INTEGER_CST)
    return NULL;

  /* Check operand 0: it has to be defined by a type promotion.  */
  if (!type_conversion_p (oprnd0, last_stmt, false, &half_type0, &def_stmt0,
			  &promotion)
      || !promotion)
    return NULL;

  /* Check operand 1: has to be positive.  We check that it fits the type
     in vect_handle_widen_op_by_const ().  */
  if (tree_int_cst_compare (oprnd1, size_zero_node) <= 0)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (def_stmt0);
  type = gimple_expr_type (last_stmt);

  /* Check for subsequent conversion to another type.  */
  use_stmt = vect_single_imm_use (last_stmt);
  if (use_stmt && is_gimple_assign (use_stmt)
      && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_stmt))
      && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
    {
      tree use_lhs = gimple_assign_lhs (use_stmt);
      tree use_type = TREE_TYPE (use_lhs);

      if (INTEGRAL_TYPE_P (use_type)
	  && TYPE_PRECISION (use_type) <= TYPE_PRECISION (type))
	{
	  last_stmt = use_stmt;
	  type = use_type;
	}
    }

  /* Check if this a widening operation.  */
  if (!vect_handle_widen_op_by_const (last_stmt, LSHIFT_EXPR, oprnd1,
				      &oprnd0, stmts,
				      type, &half_type0, def_stmt0))
    return NULL;

  /* Pattern detected.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_recog_widen_shift_pattern: detected: ");

  /* Check target support.  */
  vectype = get_vectype_for_scalar_type (half_type0);
  vectype_out = get_vectype_for_scalar_type (type);

  if (!vectype
      || !vectype_out
      || !supportable_widening_operation (WIDEN_LSHIFT_EXPR, last_stmt,
					  vectype_out, vectype,
					  &dummy_code, &dummy_code,
					  &dummy_int, &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = vectype_out;

  /* Pattern supported.  Create a stmt to be used to replace the pattern.  */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt =
    gimple_build_assign_with_ops (WIDEN_LSHIFT_EXPR, var, oprnd0, oprnd1);

  if (dump_enabled_p ())
    dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);

  stmts->safe_push (last_stmt);
  return pattern_stmt;
}
/* Detect a rotate pattern that wouldn't be otherwise vectorized:

   S0 a_t = b_t r<< c_t;

  Input/Output:

  * STMTS: Contains a stmt from which the pattern search begins,
    i.e. the shift/rotate stmt.  The original stmt (S0) is replaced
    with a sequence:

    S1 d_t = -c_t;
    S2 e_t = d_t & (B - 1);
    S3 f_t = b_t << c_t;
    S4 g_t = b_t >> e_t;
    S0' a_t = f_t | g_t;

    where B is element bitsize of type.

  Output:

  * TYPE_IN: The type of the input arguments to the pattern.

  * TYPE_OUT: The type of the output of this pattern.

  * Return value: A new stmt that will be used to replace the rotate
    S0 stmt.  */
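/* Illustrative example (not part of the original source): a rotate idiom
   that GIMPLE represents as "a_t = b_t r<< c_t" (S0 above); when the target
   lacks a vector rotate, this recognizer re-expands it into the shift/or
   sequence S1-S4/S0'.  Assuming 32-bit unsigned int elements:

     unsigned int a[256], s[256];

     void
     rotate_all (void)
     {
       for (int i = 0; i < 256; i++)
	 a[i] = (a[i] << s[i]) | (a[i] >> ((-s[i]) & 31));   // a[i] r<< s[i]
     }
*/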
static gimple
vect_recog_rotate_pattern (vec<gimple> *stmts, tree *type_in, tree *type_out)
{
  gimple last_stmt = stmts->pop ();
  tree oprnd0, oprnd1, lhs, var, var1, var2, vectype, type, stype, def, def2;
  gimple pattern_stmt, def_stmt;
  enum tree_code rhs_code;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
  enum vect_def_type dt;
  optab optab1, optab2;
  edge ext_def = NULL;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  rhs_code = gimple_assign_rhs_code (last_stmt);
  switch (rhs_code)
    {
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      break;
    default:
      return NULL;
    }

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    return NULL;

  lhs = gimple_assign_lhs (last_stmt);
  oprnd0 = gimple_assign_rhs1 (last_stmt);
  type = TREE_TYPE (oprnd0);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (TREE_CODE (oprnd0) != SSA_NAME
      || TYPE_PRECISION (TREE_TYPE (lhs)) != TYPE_PRECISION (type)
      || !INTEGRAL_TYPE_P (type)
      || !TYPE_UNSIGNED (type))
    return NULL;

  if (!vect_is_simple_use (oprnd1, last_stmt, loop_vinfo, bb_vinfo, &def_stmt,
			   &def, &dt))
    return NULL;

  if (dt != vect_internal_def
      && dt != vect_constant_def
      && dt != vect_external_def)
    return NULL;

  vectype = get_vectype_for_scalar_type (type);
  if (vectype == NULL_TREE)
    return NULL;

  /* If vector/vector or vector/scalar rotate is supported by the target,
     don't do anything here.  */
  optab1 = optab_for_tree_code (rhs_code, vectype, optab_vector);
  if (optab1
      && optab_handler (optab1, TYPE_MODE (vectype)) != CODE_FOR_nothing)
    return NULL;

  if (bb_vinfo != NULL || dt != vect_internal_def)
    {
      optab2 = optab_for_tree_code (rhs_code, vectype, optab_scalar);
      if (optab2
	  && optab_handler (optab2, TYPE_MODE (vectype)) != CODE_FOR_nothing)
	return NULL;
    }

  /* If vector/vector or vector/scalar shifts aren't supported by the target,
     don't do anything here either.  */
  optab1 = optab_for_tree_code (LSHIFT_EXPR, vectype, optab_vector);
  optab2 = optab_for_tree_code (RSHIFT_EXPR, vectype, optab_vector);
  if (!optab1
      || optab_handler (optab1, TYPE_MODE (vectype)) == CODE_FOR_nothing
      || !optab2
      || optab_handler (optab2, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      if (bb_vinfo == NULL && dt == vect_internal_def)
	return NULL;
      optab1 = optab_for_tree_code (LSHIFT_EXPR, vectype, optab_scalar);
      optab2 = optab_for_tree_code (RSHIFT_EXPR, vectype, optab_scalar);
      if (!optab1
	  || optab_handler (optab1, TYPE_MODE (vectype)) == CODE_FOR_nothing
	  || !optab2
	  || optab_handler (optab2, TYPE_MODE (vectype)) == CODE_FOR_nothing)
	return NULL;
    }

  *type_in = vectype;
  *type_out = vectype;
  if (*type_in == NULL_TREE)
    return NULL;

  def = NULL_TREE;
  if (dt == vect_external_def
      && TREE_CODE (oprnd1) == SSA_NAME
      && loop_vinfo)
    {
      struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
      ext_def = loop_preheader_edge (loop);
      if (!SSA_NAME_IS_DEFAULT_DEF (oprnd1))
	{
	  basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (oprnd1));
	  if (bb == NULL
	      || !dominated_by_p (CDI_DOMINATORS, ext_def->dest, bb))
	    ext_def = NULL;
	}
    }

  if (TREE_CODE (oprnd1) == INTEGER_CST
      || TYPE_MODE (TREE_TYPE (oprnd1)) == TYPE_MODE (type))
    def = oprnd1;
  else if (def_stmt && gimple_assign_cast_p (def_stmt))
    {
      tree rhs1 = gimple_assign_rhs1 (def_stmt);
      if (TYPE_MODE (TREE_TYPE (rhs1)) == TYPE_MODE (type)
	  && TYPE_PRECISION (TREE_TYPE (rhs1))
	     == TYPE_PRECISION (type))
	def = rhs1;
    }

  STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo) = NULL;
  if (def == NULL_TREE)
    {
      def = vect_recog_temp_ssa_var (type, NULL);
      def_stmt = gimple_build_assign_with_ops (NOP_EXPR, def, oprnd1,
					       NULL_TREE);
      if (ext_def)
	{
	  basic_block new_bb
	    = gsi_insert_on_edge_immediate (ext_def, def_stmt);
	  gcc_assert (!new_bb);
	}
      else
	append_pattern_def_seq (stmt_vinfo, def_stmt);
    }
  stype = TREE_TYPE (def);

  if (TREE_CODE (def) == INTEGER_CST)
    {
      if (!host_integerp (def, 1)
	  || (unsigned HOST_WIDE_INT) tree_low_cst (def, 1)
	     >= GET_MODE_PRECISION (TYPE_MODE (type))
	  || integer_zerop (def))
	return NULL;
      def2 = build_int_cst (stype,
			    GET_MODE_PRECISION (TYPE_MODE (type))
			    - tree_low_cst (def, 1));
    }
  else
    {
      tree vecstype = get_vectype_for_scalar_type (stype);
      stmt_vec_info def_stmt_vinfo;

      if (vecstype == NULL_TREE)
	return NULL;
      def2 = vect_recog_temp_ssa_var (stype, NULL);
      def_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, def2, def,
					       NULL_TREE);
      if (ext_def)
	{
	  basic_block new_bb
	    = gsi_insert_on_edge_immediate (ext_def, def_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  def_stmt_vinfo = new_stmt_vec_info (def_stmt, loop_vinfo, bb_vinfo);
	  set_vinfo_for_stmt (def_stmt, def_stmt_vinfo);
	  STMT_VINFO_VECTYPE (def_stmt_vinfo) = vecstype;
	  append_pattern_def_seq (stmt_vinfo, def_stmt);
	}

      def2 = vect_recog_temp_ssa_var (stype, NULL);
      tree mask
	= build_int_cst (stype, GET_MODE_PRECISION (TYPE_MODE (stype)) - 1);
      def_stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, def2,
					       gimple_assign_lhs (def_stmt),
					       mask);
      if (ext_def)
	{
	  basic_block new_bb
	    = gsi_insert_on_edge_immediate (ext_def, def_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  def_stmt_vinfo = new_stmt_vec_info (def_stmt, loop_vinfo, bb_vinfo);
	  set_vinfo_for_stmt (def_stmt, def_stmt_vinfo);
	  STMT_VINFO_VECTYPE (def_stmt_vinfo) = vecstype;
	  append_pattern_def_seq (stmt_vinfo, def_stmt);
	}
    }

  var1 = vect_recog_temp_ssa_var (type, NULL);
  def_stmt = gimple_build_assign_with_ops (rhs_code == LROTATE_EXPR
					   ? LSHIFT_EXPR : RSHIFT_EXPR,
					   var1, oprnd0, def);
  append_pattern_def_seq (stmt_vinfo, def_stmt);

  var2 = vect_recog_temp_ssa_var (type, NULL);
  def_stmt = gimple_build_assign_with_ops (rhs_code == LROTATE_EXPR
					   ? RSHIFT_EXPR : LSHIFT_EXPR,
					   var2, oprnd0, def2);
  append_pattern_def_seq (stmt_vinfo, def_stmt);

  /* Pattern detected.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_recog_rotate_pattern: detected: ");

  /* Pattern supported.  Create a stmt to be used to replace the pattern.  */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (BIT_IOR_EXPR, var, var1, var2);

  if (dump_enabled_p ())
    dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);

  stmts->safe_push (last_stmt);
  return pattern_stmt;
}
/* Detect a vector by vector shift pattern that wouldn't be otherwise
   vectorized:

   S3 res_T = b_T op a_t;

  where type 'TYPE' is a type with different size than 'type',
  and op is <<, >> or rotate.

   TYPE b_T, c_T, res_T;

   S1 a_t = (type) c_T;
   S3 res_T = b_T op a_t;

  Input/Output:

  * STMTS: Contains a stmt from which the pattern search begins,
    i.e. the shift/rotate stmt.  The original stmt (S3) is replaced
    with a shift/rotate which has same type on both operands, in the
    second case just b_T op c_T, in the first case with added cast
    from a_t to c_T in STMT_VINFO_PATTERN_DEF_SEQ.

  Output:

  * TYPE_IN: The type of the input arguments to the pattern.

  * TYPE_OUT: The type of the output of this pattern.

  * Return value: A new stmt that will be used to replace the shift/rotate
    S3 stmt.  */
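/* Illustrative example (not part of the original source): a shift whose
   count operand has a different (narrower) type than the shifted value,
   as in S3 above:

     long long b[256], r[256];
     int s[256];

     void
     var_shift (void)
     {
       for (int i = 0; i < 256; i++)
	 r[i] = b[i] << s[i];    // res_T = b_T op a_t, with a_t narrower than b_T
     }
*/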
static gimple
vect_recog_vector_vector_shift_pattern (vec<gimple> *stmts,
					tree *type_in, tree *type_out)
{
  gimple last_stmt = stmts->pop ();
  tree oprnd0, oprnd1, lhs, var;
  gimple pattern_stmt, def_stmt;
  enum tree_code rhs_code;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
  enum vect_def_type dt;
  tree def;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  rhs_code = gimple_assign_rhs_code (last_stmt);
  switch (rhs_code)
    {
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      break;
    default:
      return NULL;
    }

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    return NULL;

  lhs = gimple_assign_lhs (last_stmt);
  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (TREE_CODE (oprnd0) != SSA_NAME
      || TREE_CODE (oprnd1) != SSA_NAME
      || TYPE_MODE (TREE_TYPE (oprnd0)) == TYPE_MODE (TREE_TYPE (oprnd1))
      || TYPE_PRECISION (TREE_TYPE (oprnd1))
	 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (oprnd1)))
      || TYPE_PRECISION (TREE_TYPE (lhs))
	 != TYPE_PRECISION (TREE_TYPE (oprnd0)))
    return NULL;

  if (!vect_is_simple_use (oprnd1, last_stmt, loop_vinfo, bb_vinfo, &def_stmt,
			   &def, &dt))
    return NULL;

  if (dt != vect_internal_def)
    return NULL;

  *type_in = get_vectype_for_scalar_type (TREE_TYPE (oprnd0));
  *type_out = *type_in;
  if (*type_in == NULL_TREE)
    return NULL;

  def = NULL_TREE;
  if (gimple_assign_cast_p (def_stmt))
    {
      tree rhs1 = gimple_assign_rhs1 (def_stmt);
      if (TYPE_MODE (TREE_TYPE (rhs1)) == TYPE_MODE (TREE_TYPE (oprnd0))
	  && TYPE_PRECISION (TREE_TYPE (rhs1))
	     == TYPE_PRECISION (TREE_TYPE (oprnd0)))
	def = rhs1;
    }

  if (def == NULL_TREE)
    {
      def = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL);
      def_stmt = gimple_build_assign_with_ops (NOP_EXPR, def, oprnd1,
					       NULL_TREE);
      new_pattern_def_seq (stmt_vinfo, def_stmt);
    }

  /* Pattern detected.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_recog_vector_vector_shift_pattern: detected: ");

  /* Pattern supported.  Create a stmt to be used to replace the pattern.  */
  var = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL);
  pattern_stmt = gimple_build_assign_with_ops (rhs_code, var, oprnd0, def);

  if (dump_enabled_p ())
    dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);

  stmts->safe_push (last_stmt);
  return pattern_stmt;
}
/* Detect a signed division by a constant that wouldn't be
   otherwise vectorized:

     S1 a_t = b_t / N;

  where type 'type' is an integral type and N is a constant.

  Similarly handle modulo by a constant:

     S4 a_t = b_t % N;

  Input/Output:

  * STMTS: Contains a stmt from which the pattern search begins,
    i.e. the division stmt.  S1 is replaced, if N is a power
    of two constant and type is signed, by:
    S3  y_t = b_t < 0 ? N - 1 : 0;
    S2  x_t = b_t + y_t;
    S1' a_t = x_t >> log2 (N);

    S4 is replaced if N is a power of two constant and
    type is signed by (where *_T temporaries have unsigned type):
    S9  y_T = b_t < 0 ? -1U : 0U;
    S8  z_T = y_T >> (sizeof (type_t) * CHAR_BIT - log2 (N));
    S7  z_t = (type) z_T;
    S6  w_t = b_t + z_t;
    S5  x_t = w_t & (N - 1);
    S4' a_t = x_t - z_t;

  Output:

  * TYPE_IN: The type of the input arguments to the pattern.

  * TYPE_OUT: The type of the output of this pattern.

  * Return value: A new stmt that will be used to replace the division
    S1 or modulo S4 stmt.  */
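/* Illustrative example (not part of the original source): signed division
   and modulo by a power-of-two constant, which this recognizer rewrites into
   the shift and mask sequences shown above:

     int a[256], q[256], r[256];

     void
     div_mod_by_8 (void)
     {
       for (int i = 0; i < 256; i++)
	 {
	   q[i] = a[i] / 8;    // S1: becomes (a[i] + (a[i] < 0 ? 7 : 0)) >> 3
	   r[i] = a[i] % 8;    // S4: becomes the S9..S4' sequence above
	 }
     }
*/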
1878 vect_recog_divmod_pattern (vec
<gimple
> *stmts
,
1879 tree
*type_in
, tree
*type_out
)
1881 gimple last_stmt
= stmts
->pop ();
1882 tree oprnd0
, oprnd1
, vectype
, itype
, cond
;
1883 gimple pattern_stmt
, def_stmt
;
1884 enum tree_code rhs_code
;
1885 stmt_vec_info stmt_vinfo
= vinfo_for_stmt (last_stmt
);
1886 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_vinfo
);
1887 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_vinfo
);
1890 int dummy_int
, prec
;
1891 stmt_vec_info def_stmt_vinfo
;
1893 if (!is_gimple_assign (last_stmt
))
1896 rhs_code
= gimple_assign_rhs_code (last_stmt
);
1899 case TRUNC_DIV_EXPR
:
1900 case TRUNC_MOD_EXPR
:
1906 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo
))
1909 oprnd0
= gimple_assign_rhs1 (last_stmt
);
1910 oprnd1
= gimple_assign_rhs2 (last_stmt
);
1911 itype
= TREE_TYPE (oprnd0
);
1912 if (TREE_CODE (oprnd0
) != SSA_NAME
1913 || TREE_CODE (oprnd1
) != INTEGER_CST
1914 || TREE_CODE (itype
) != INTEGER_TYPE
1915 || TYPE_PRECISION (itype
) != GET_MODE_PRECISION (TYPE_MODE (itype
)))
1918 vectype
= get_vectype_for_scalar_type (itype
);
1919 if (vectype
== NULL_TREE
)
1922 /* If the target can handle vectorized division or modulo natively,
1923 don't attempt to optimize this. */
1924 optab
= optab_for_tree_code (rhs_code
, vectype
, optab_default
);
1925 if (optab
!= unknown_optab
)
1927 enum machine_mode vec_mode
= TYPE_MODE (vectype
);
1928 int icode
= (int) optab_handler (optab
, vec_mode
);
1929 if (icode
!= CODE_FOR_nothing
)
1933 prec
= TYPE_PRECISION (itype
);
1934 if (integer_pow2p (oprnd1
))
1936 if (TYPE_UNSIGNED (itype
) || tree_int_cst_sgn (oprnd1
) != 1)
1939 /* Pattern detected. */
1940 if (dump_enabled_p ())
1941 dump_printf_loc (MSG_NOTE
, vect_location
,
1942 "vect_recog_divmod_pattern: detected: ");
1944 cond
= build2 (LT_EXPR
, boolean_type_node
, oprnd0
,
1945 build_int_cst (itype
, 0));
1946 if (rhs_code
== TRUNC_DIV_EXPR
)
1948 tree var
= vect_recog_temp_ssa_var (itype
, NULL
);
1951 = gimple_build_assign_with_ops (COND_EXPR
, var
, cond
,
1952 fold_build2 (MINUS_EXPR
, itype
,
1954 build_int_cst (itype
,
1956 build_int_cst (itype
, 0));
1957 new_pattern_def_seq (stmt_vinfo
, def_stmt
);
1958 var
= vect_recog_temp_ssa_var (itype
, NULL
);
1960 = gimple_build_assign_with_ops (PLUS_EXPR
, var
, oprnd0
,
1961 gimple_assign_lhs (def_stmt
));
1962 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
1964 shift
= build_int_cst (itype
, tree_log2 (oprnd1
));
1966 = gimple_build_assign_with_ops (RSHIFT_EXPR
,
1967 vect_recog_temp_ssa_var (itype
,
1974 STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo
) = NULL
;
1975 if (compare_tree_int (oprnd1
, 2) == 0)
1977 signmask
= vect_recog_temp_ssa_var (itype
, NULL
);
1979 = gimple_build_assign_with_ops (COND_EXPR
, signmask
, cond
,
1980 build_int_cst (itype
, 1),
1981 build_int_cst (itype
, 0));
1982 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
1987 = build_nonstandard_integer_type (prec
, 1);
1988 tree vecutype
= get_vectype_for_scalar_type (utype
);
1990 = build_int_cst (utype
, GET_MODE_BITSIZE (TYPE_MODE (itype
))
1991 - tree_log2 (oprnd1
));
1992 tree var
= vect_recog_temp_ssa_var (utype
, NULL
);
1995 = gimple_build_assign_with_ops (COND_EXPR
, var
, cond
,
1996 build_int_cst (utype
, -1),
1997 build_int_cst (utype
, 0));
1999 = new_stmt_vec_info (def_stmt
, loop_vinfo
, bb_vinfo
);
2000 set_vinfo_for_stmt (def_stmt
, def_stmt_vinfo
);
2001 STMT_VINFO_VECTYPE (def_stmt_vinfo
) = vecutype
;
2002 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2003 var
= vect_recog_temp_ssa_var (utype
, NULL
);
2005 = gimple_build_assign_with_ops (RSHIFT_EXPR
, var
,
2006 gimple_assign_lhs (def_stmt
),
2009 = new_stmt_vec_info (def_stmt
, loop_vinfo
, bb_vinfo
);
2010 set_vinfo_for_stmt (def_stmt
, def_stmt_vinfo
);
2011 STMT_VINFO_VECTYPE (def_stmt_vinfo
) = vecutype
;
2012 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2013 signmask
= vect_recog_temp_ssa_var (itype
, NULL
);
2015 = gimple_build_assign_with_ops (NOP_EXPR
, signmask
, var
,
2017 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2020 = gimple_build_assign_with_ops (PLUS_EXPR
,
2021 vect_recog_temp_ssa_var (itype
,
2024 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2026 = gimple_build_assign_with_ops (BIT_AND_EXPR
,
2027 vect_recog_temp_ssa_var (itype
,
2029 gimple_assign_lhs (def_stmt
),
2030 fold_build2 (MINUS_EXPR
, itype
,
2032 build_int_cst (itype
,
2034 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2037 = gimple_build_assign_with_ops (MINUS_EXPR
,
2038 vect_recog_temp_ssa_var (itype
,
2040 gimple_assign_lhs (def_stmt
),
2044 if (dump_enabled_p ())
2045 dump_gimple_stmt_loc (MSG_NOTE
, vect_location
, TDF_SLIM
, pattern_stmt
,
2048 stmts
->safe_push (last_stmt
);
2051 *type_out
= vectype
;
2052 return pattern_stmt
;
2055 if (!host_integerp (oprnd1
, TYPE_UNSIGNED (itype
))
2056 || integer_zerop (oprnd1
)
2057 || prec
> HOST_BITS_PER_WIDE_INT
)
2060 if (!can_mult_highpart_p (TYPE_MODE (vectype
), TYPE_UNSIGNED (itype
)))
2063 STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo
) = NULL
;
2065 if (TYPE_UNSIGNED (itype
))
2067 unsigned HOST_WIDE_INT mh
, ml
;
2068 int pre_shift
, post_shift
;
2069 unsigned HOST_WIDE_INT d
= tree_low_cst (oprnd1
, 1)
2070 & GET_MODE_MASK (TYPE_MODE (itype
));
2071 tree t1
, t2
, t3
, t4
;
2073 if (d
>= ((unsigned HOST_WIDE_INT
) 1 << (prec
- 1)))
2074 /* FIXME: Can transform this into oprnd0 >= oprnd1 ? 1 : 0. */
2077 /* Find a suitable multiplier and right shift count
2078 instead of multiplying with D. */
2079 mh
= choose_multiplier (d
, prec
, prec
, &ml
, &post_shift
, &dummy_int
);
2081 /* If the suggested multiplier is more than SIZE bits, we can do better
2082 for even divisors, using an initial right shift. */
2083 if (mh
!= 0 && (d
& 1) == 0)
2085 pre_shift
= floor_log2 (d
& -d
);
2086 mh
= choose_multiplier (d
>> pre_shift
, prec
, prec
- pre_shift
,
2087 &ml
, &post_shift
, &dummy_int
);
2095 if (post_shift
- 1 >= prec
)
2098 /* t1 = oprnd0 h* ml;
2102 q = t4 >> (post_shift - 1); */
2103 t1
= vect_recog_temp_ssa_var (itype
, NULL
);
2105 = gimple_build_assign_with_ops (MULT_HIGHPART_EXPR
, t1
, oprnd0
,
2106 build_int_cst (itype
, ml
));
2107 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2109 t2
= vect_recog_temp_ssa_var (itype
, NULL
);
2111 = gimple_build_assign_with_ops (MINUS_EXPR
, t2
, oprnd0
, t1
);
2112 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2114 t3
= vect_recog_temp_ssa_var (itype
, NULL
);
2116 = gimple_build_assign_with_ops (RSHIFT_EXPR
, t3
, t2
,
2118 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2120 t4
= vect_recog_temp_ssa_var (itype
, NULL
);
2122 = gimple_build_assign_with_ops (PLUS_EXPR
, t4
, t1
, t3
);
2124 if (post_shift
!= 1)
2126 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2128 q
= vect_recog_temp_ssa_var (itype
, NULL
);
2130 = gimple_build_assign_with_ops (RSHIFT_EXPR
, q
, t4
,
2131 build_int_cst (itype
,
2138 pattern_stmt
= def_stmt
;
2143 if (pre_shift
>= prec
|| post_shift
>= prec
)
2146 /* t1 = oprnd0 >> pre_shift;
2148 q = t2 >> post_shift; */
2151 t1
= vect_recog_temp_ssa_var (itype
, NULL
);
2153 = gimple_build_assign_with_ops (RSHIFT_EXPR
, t1
, oprnd0
,
2154 build_int_cst (NULL
,
2156 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2161 t2
= vect_recog_temp_ssa_var (itype
, NULL
);
2163 = gimple_build_assign_with_ops (MULT_HIGHPART_EXPR
, t2
, t1
,
2164 build_int_cst (itype
, ml
));
2168 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2170 q
= vect_recog_temp_ssa_var (itype
, NULL
);
2172 = gimple_build_assign_with_ops (RSHIFT_EXPR
, q
, t2
,
2173 build_int_cst (itype
,
2179 pattern_stmt
= def_stmt
;
      unsigned HOST_WIDE_INT ml;
      int post_shift;
      HOST_WIDE_INT d = tree_low_cst (oprnd1, 0);
      unsigned HOST_WIDE_INT abs_d;
      bool add = false;
      tree t1, t2, t3, t4;

      /* Give up for -1.  */
      if (d == -1)
	return NULL;

      /* Since d might be INT_MIN, we have to cast to
	 unsigned HOST_WIDE_INT before negating to avoid
	 undefined signed overflow.  */
      abs_d = (d >= 0
	       ? (unsigned HOST_WIDE_INT) d
	       : - (unsigned HOST_WIDE_INT) d);

      /* n rem d = n rem -d */
      if (rhs_code == TRUNC_MOD_EXPR && d < 0)
	{
	  d = abs_d;
	  oprnd1 = build_int_cst (itype, abs_d);
	}
      else if (HOST_BITS_PER_WIDE_INT >= prec
	       && abs_d == (unsigned HOST_WIDE_INT) 1 << (prec - 1))
	/* This case is not handled correctly below.  */
	return NULL;

      choose_multiplier (abs_d, prec, prec - 1, &ml, &post_shift, &dummy_int);
      if (ml >= (unsigned HOST_WIDE_INT) 1 << (prec - 1))
	{
	  add = true;
	  ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
	}
      if (post_shift >= prec)
	return NULL;

      /* t1 = oprnd0 h* ml;  */
      t1 = vect_recog_temp_ssa_var (itype, NULL);
      def_stmt
	= gimple_build_assign_with_ops (MULT_HIGHPART_EXPR, t1, oprnd0,
					build_int_cst (itype, ml));
      append_pattern_def_seq (stmt_vinfo, def_stmt);

      if (add)
	{
	  /* t2 = t1 + oprnd0;  */
	  t2 = vect_recog_temp_ssa_var (itype, NULL);
	  def_stmt
	    = gimple_build_assign_with_ops (PLUS_EXPR, t2, t1, oprnd0);
	  append_pattern_def_seq (stmt_vinfo, def_stmt);
	}
      else
	t2 = t1;

      if (post_shift)
	{
	  /* t3 = t2 >> post_shift;  */
	  t3 = vect_recog_temp_ssa_var (itype, NULL);
	  def_stmt
	    = gimple_build_assign_with_ops (RSHIFT_EXPR, t3, t2,
					    build_int_cst (itype, post_shift));
	  append_pattern_def_seq (stmt_vinfo, def_stmt);
	}
      else
	t3 = t2;

      /* t4 = oprnd0 >> (prec - 1);  */
      t4 = vect_recog_temp_ssa_var (itype, NULL);
      def_stmt
	= gimple_build_assign_with_ops (RSHIFT_EXPR, t4, oprnd0,
					build_int_cst (itype, prec - 1));
      append_pattern_def_seq (stmt_vinfo, def_stmt);

      /* q = t3 - t4;  or q = t4 - t3;  */
      q = vect_recog_temp_ssa_var (itype, NULL);
      pattern_stmt
	= gimple_build_assign_with_ops (MINUS_EXPR, q, d < 0 ? t4 : t3,
					d < 0 ? t3 : t4);
    }

  if (rhs_code == TRUNC_MOD_EXPR)
    {
      tree r, t1;

      /* We divided.  Now finish by:
	 t1 = q * oprnd1;
	 r = oprnd0 - t1;  */
      append_pattern_def_seq (stmt_vinfo, pattern_stmt);

      t1 = vect_recog_temp_ssa_var (itype, NULL);
      def_stmt
	= gimple_build_assign_with_ops (MULT_EXPR, t1, q, oprnd1);
      append_pattern_def_seq (stmt_vinfo, def_stmt);

      r = vect_recog_temp_ssa_var (itype, NULL);
      pattern_stmt
	= gimple_build_assign_with_ops (MINUS_EXPR, r, oprnd0, t1);
    }

  /* Pattern detected.  */
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_recog_divmod_pattern: detected: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
    }

  stmts->safe_push (last_stmt);

  *type_in = vectype;
  *type_out = vectype;
  return pattern_stmt;
}
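/* Illustrative note, not taken from the original sources: for an unsigned
   32-bit division by the odd constant 3, choose_multiplier yields
   ml == 0xaaaaaaab with post_shift == 1 and mh == 0, so the pattern built
   above reduces to the scalar equivalent

       t2 = n h* 0xaaaaaaab;    (high 32 bits of the widened product)
       q  = t2 >> 1;

   and q == n / 3 holds for every 32-bit unsigned n.  */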
/* Function vect_recog_mixed_size_cond_pattern

   Try to find the following pattern:

     type x_t, y_t;
     TYPE a_T, b_T, c_T;
   loop:
     S1  a_T = x_t CMP y_t ? b_T : c_T;

   where type 'TYPE' is an integral type which has different size
   from 'type'.  b_T and c_T are either constants (and if 'TYPE' is wider
   than 'type', the constants need to fit into an integer type
   with the same width as 'type') or results of conversion from 'type'.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the pattern.
	Additionally a def_stmt is added.

	a_it = x_t CMP y_t ? b_it : c_it;
	a_T = (TYPE) a_it;  */
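/* Illustrative example, not taken from the original sources: with

     int x_t, y_t;
     short b_T, c_T;
     S1  a_T = x_t < y_t ? b_T : c_T;

   the comparison is done on ints while the selected values are shorts, so
   the pattern below redoes the selection in an int-sized integer type that
   matches the comparison and then converts the result to short.  */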
static gimple
vect_recog_mixed_size_cond_pattern (vec<gimple> *stmts, tree *type_in,
				    tree *type_out)
{
  gimple last_stmt = (*stmts)[0];
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt), def_stmt_info;
  tree type, vectype, comp_vectype, itype = NULL_TREE, vecitype;
  enum machine_mode cmpmode;
  gimple pattern_stmt, def_stmt;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
  tree orig_type0 = NULL_TREE, orig_type1 = NULL_TREE;
  gimple def_stmt0 = NULL, def_stmt1 = NULL;
  bool promotion;
  tree comp_scalar_type;

  if (!is_gimple_assign (last_stmt)
      || gimple_assign_rhs_code (last_stmt) != COND_EXPR
      || STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
    return NULL;

  cond_expr = gimple_assign_rhs1 (last_stmt);
  then_clause = gimple_assign_rhs2 (last_stmt);
  else_clause = gimple_assign_rhs3 (last_stmt);

  if (!COMPARISON_CLASS_P (cond_expr))
    return NULL;

  comp_scalar_type = TREE_TYPE (TREE_OPERAND (cond_expr, 0));
  comp_vectype = get_vectype_for_scalar_type (comp_scalar_type);
  if (comp_vectype == NULL_TREE)
    return NULL;

  type = gimple_expr_type (last_stmt);
  if (types_compatible_p (type, comp_scalar_type)
      || ((TREE_CODE (then_clause) != INTEGER_CST
	   || TREE_CODE (else_clause) != INTEGER_CST)
	  && !INTEGRAL_TYPE_P (comp_scalar_type))
      || !INTEGRAL_TYPE_P (type))
    return NULL;

  if ((TREE_CODE (then_clause) != INTEGER_CST
       && !type_conversion_p (then_clause, last_stmt, false, &orig_type0,
			      &def_stmt0, &promotion))
      || (TREE_CODE (else_clause) != INTEGER_CST
	  && !type_conversion_p (else_clause, last_stmt, false, &orig_type1,
				 &def_stmt1, &promotion)))
    return NULL;

  if (orig_type0 && orig_type1
      && !types_compatible_p (orig_type0, orig_type1))
    return NULL;

  if (orig_type0)
    {
      if (!types_compatible_p (orig_type0, comp_scalar_type))
	return NULL;
      then_clause = gimple_assign_rhs1 (def_stmt0);
      itype = orig_type0;
    }

  if (orig_type1)
    {
      if (!types_compatible_p (orig_type1, comp_scalar_type))
	return NULL;
      else_clause = gimple_assign_rhs1 (def_stmt1);
      itype = orig_type1;
    }

  cmpmode = GET_MODE_INNER (TYPE_MODE (comp_vectype));

  if (GET_MODE_BITSIZE (TYPE_MODE (type)) == GET_MODE_BITSIZE (cmpmode))
    return NULL;

  vectype = get_vectype_for_scalar_type (type);
  if (vectype == NULL_TREE)
    return NULL;

  if (expand_vec_cond_expr_p (vectype, comp_vectype))
    return NULL;

  if (itype == NULL_TREE)
    itype = build_nonstandard_integer_type (GET_MODE_BITSIZE (cmpmode),
					    TYPE_UNSIGNED (type));

  if (itype == NULL_TREE
      || GET_MODE_BITSIZE (TYPE_MODE (itype)) != GET_MODE_BITSIZE (cmpmode))
    return NULL;

  vecitype = get_vectype_for_scalar_type (itype);
  if (vecitype == NULL_TREE)
    return NULL;

  if (!expand_vec_cond_expr_p (vecitype, comp_vectype))
    return NULL;

  if (GET_MODE_BITSIZE (TYPE_MODE (type)) > GET_MODE_BITSIZE (cmpmode))
    {
      if ((TREE_CODE (then_clause) == INTEGER_CST
	   && !int_fits_type_p (then_clause, itype))
	  || (TREE_CODE (else_clause) == INTEGER_CST
	      && !int_fits_type_p (else_clause, itype)))
	return NULL;
    }

  def_stmt
    = gimple_build_assign_with_ops (COND_EXPR,
				    vect_recog_temp_ssa_var (itype, NULL),
				    unshare_expr (cond_expr),
				    fold_convert (itype, then_clause),
				    fold_convert (itype, else_clause));
  pattern_stmt
    = gimple_build_assign_with_ops (NOP_EXPR,
				    vect_recog_temp_ssa_var (type, NULL),
				    gimple_assign_lhs (def_stmt), NULL_TREE);

  new_pattern_def_seq (stmt_vinfo, def_stmt);
  def_stmt_info = new_stmt_vec_info (def_stmt, loop_vinfo, bb_vinfo);
  set_vinfo_for_stmt (def_stmt, def_stmt_info);
  STMT_VINFO_VECTYPE (def_stmt_info) = vecitype;
  *type_in = vecitype;
  *type_out = vectype;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_recog_mixed_size_cond_pattern: detected: ");

  return pattern_stmt;
}
/* Helper function of vect_recog_bool_pattern.  Called recursively, return
   true if bool VAR can be optimized that way.  */

static bool
check_bool_pattern (tree var, loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  gimple def_stmt;
  enum vect_def_type dt;
  tree def, rhs1;
  enum tree_code rhs_code;

  if (!vect_is_simple_use (var, NULL, loop_vinfo, bb_vinfo, &def_stmt, &def,
			   &dt))
    return false;

  if (dt != vect_internal_def)
    return false;

  if (!is_gimple_assign (def_stmt))
    return false;

  if (!has_single_use (def))
    return false;

  rhs1 = gimple_assign_rhs1 (def_stmt);
  rhs_code = gimple_assign_rhs_code (def_stmt);
  switch (rhs_code)
    {
    case SSA_NAME:
      return check_bool_pattern (rhs1, loop_vinfo, bb_vinfo);

    CASE_CONVERT:
      if ((TYPE_PRECISION (TREE_TYPE (rhs1)) != 1
	   || !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
	  && TREE_CODE (TREE_TYPE (rhs1)) != BOOLEAN_TYPE)
	return false;
      return check_bool_pattern (rhs1, loop_vinfo, bb_vinfo);

    case BIT_NOT_EXPR:
      return check_bool_pattern (rhs1, loop_vinfo, bb_vinfo);

    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      if (!check_bool_pattern (rhs1, loop_vinfo, bb_vinfo))
	return false;
      return check_bool_pattern (gimple_assign_rhs2 (def_stmt), loop_vinfo,
				 bb_vinfo);

    default:
      if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
	{
	  tree vecitype, comp_vectype;

	  /* If the comparison can throw, then is_gimple_condexpr will be
	     false and we can't make a COND_EXPR/VEC_COND_EXPR out of it.  */
	  if (stmt_could_throw_p (def_stmt))
	    return false;

	  comp_vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
	  if (comp_vectype == NULL_TREE)
	    return false;

	  if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE)
	    {
	      enum machine_mode mode = TYPE_MODE (TREE_TYPE (rhs1));
	      tree itype
		= build_nonstandard_integer_type (GET_MODE_BITSIZE (mode), 1);
	      vecitype = get_vectype_for_scalar_type (itype);
	      if (vecitype == NULL_TREE)
		return false;
	    }
	  else
	    vecitype = comp_vectype;
	  return expand_vec_cond_expr_p (vecitype, comp_vectype);
	}
      return false;
    }
}
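/* Illustrative note, not taken from the original sources: the walk above
   accepts e.g.

     bool a_b = x < y;
     bool b_b = z != 0;
     bool c_b = a_b & b_b;

   because every leaf of the bool expression is a single-use vectorizable
   comparison, while it rejects a bool loaded from memory or defined by a
   PHI, since such defs cannot be rewritten by adjust_bool_pattern.  */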
/* Helper function of adjust_bool_pattern.  Add a cast to TYPE to a previous
   stmt (SSA_NAME_DEF_STMT of VAR) by moving the COND_EXPR from RELATED_STMT
   to PATTERN_DEF_SEQ and adding a cast as RELATED_STMT.  */

static tree
adjust_bool_pattern_cast (tree type, tree var)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (SSA_NAME_DEF_STMT (var));
  gimple cast_stmt, pattern_stmt;

  gcc_assert (!STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo));
  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
  new_pattern_def_seq (stmt_vinfo, pattern_stmt);
  cast_stmt
    = gimple_build_assign_with_ops (NOP_EXPR,
				    vect_recog_temp_ssa_var (type, NULL),
				    gimple_assign_lhs (pattern_stmt),
				    NULL_TREE);
  STMT_VINFO_RELATED_STMT (stmt_vinfo) = cast_stmt;
  return gimple_assign_lhs (cast_stmt);
}
/* Helper function of vect_recog_bool_pattern.  Do the actual transformations,
   recursively.  VAR is an SSA_NAME that should be transformed from bool
   to a wider integer type, OUT_TYPE is the desired final integer type of
   the whole pattern, TRUEVAL should be NULL unless optimizing
   BIT_AND_EXPR into a COND_EXPR with one integer from one of the operands
   in the then_clause, STMTS is where statements with added pattern stmts
   should be pushed to.  */
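/* Illustrative example, not taken from the original sources: for

     bool a_b = x < y;
     int  d_i = (int) a_b;

   with OUT_TYPE == int, the recursion below replaces the comparison by

     a_T = x < y ? 1 : 0;

   in an unsigned integer type of the same width as the comparison operands,
   so that it can later be expanded as a VEC_COND_EXPR.  */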
static tree
adjust_bool_pattern (tree var, tree out_type, tree trueval,
		     vec<gimple> *stmts)
{
  gimple stmt = SSA_NAME_DEF_STMT (var);
  enum tree_code rhs_code, def_rhs_code;
  tree itype, cond_expr, rhs1, rhs2, irhs1, irhs2;
  location_t loc;
  gimple pattern_stmt, def_stmt;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);
  rhs_code = gimple_assign_rhs_code (stmt);
  loc = gimple_location (stmt);
  switch (rhs_code)
    {
    case SSA_NAME:
    CASE_CONVERT:
      irhs1 = adjust_bool_pattern (rhs1, out_type, NULL_TREE, stmts);
      itype = TREE_TYPE (irhs1);
      pattern_stmt
	= gimple_build_assign_with_ops (SSA_NAME,
					vect_recog_temp_ssa_var (itype, NULL),
					irhs1, NULL_TREE);
      break;

    case BIT_NOT_EXPR:
      irhs1 = adjust_bool_pattern (rhs1, out_type, NULL_TREE, stmts);
      itype = TREE_TYPE (irhs1);
      pattern_stmt
	= gimple_build_assign_with_ops (BIT_XOR_EXPR,
					vect_recog_temp_ssa_var (itype, NULL),
					irhs1, build_int_cst (itype, 1));
      break;

    case BIT_AND_EXPR:
      /* Try to optimize x = y & (a < b ? 1 : 0); into
	 x = (a < b ? y : 0);

	 E.g. for:
	   bool a_b, b_b, c_b;
	   TYPE d_T;

	   S1  a_b = x1 CMP1 y1;
	   S2  b_b = x2 CMP2 y2;
	   S3  c_b = a_b & b_b;
	   S4  d_T = (TYPE) c_b;

	 we would normally emit:

	   S1'  a_T = x1 CMP1 y1 ? 1 : 0;
	   S2'  b_T = x2 CMP2 y2 ? 1 : 0;
	   S3'  c_T = a_T & b_T;
	   S4'  d_T = c_T;

	 but we can save one stmt by using the
	 result of one of the COND_EXPRs in the other COND_EXPR and leave
	 BIT_AND_EXPR stmt out:

	   S1'  a_T = x1 CMP1 y1 ? 1 : 0;
	   S3'  c_T = x2 CMP2 y2 ? a_T : 0;
	   S4'  d_T = c_T;

	 At least when VEC_COND_EXPR is implemented using masks
	 cond ? 1 : 0 is as expensive as cond ? var : 0, in both cases it
	 computes the comparison masks and ands it, in one case with
	 all ones vector, in the other case with a vector register.
	 Don't do this for BIT_IOR_EXPR, because cond ? 1 : var; is
	 often more expensive.  */
      def_stmt = SSA_NAME_DEF_STMT (rhs2);
      def_rhs_code = gimple_assign_rhs_code (def_stmt);
      if (TREE_CODE_CLASS (def_rhs_code) == tcc_comparison)
	{
	  tree def_rhs1 = gimple_assign_rhs1 (def_stmt);
	  irhs1 = adjust_bool_pattern (rhs1, out_type, NULL_TREE, stmts);
	  if (TYPE_PRECISION (TREE_TYPE (irhs1))
	      == GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (def_rhs1))))
	    {
	      gimple tstmt;
	      stmt_vec_info stmt_def_vinfo = vinfo_for_stmt (def_stmt);
	      irhs2 = adjust_bool_pattern (rhs2, out_type, irhs1, stmts);
	      tstmt = stmts->pop ();
	      gcc_assert (tstmt == def_stmt);
	      stmts->quick_push (stmt);
	      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt))
		= STMT_VINFO_RELATED_STMT (stmt_def_vinfo);
	      gcc_assert (!STMT_VINFO_PATTERN_DEF_SEQ (stmt_def_vinfo));
	      STMT_VINFO_RELATED_STMT (stmt_def_vinfo) = NULL;
	      return irhs2;
	    }
	  else
	    irhs2 = adjust_bool_pattern (rhs2, out_type, NULL_TREE, stmts);
	  goto and_ior_xor;
	}
      def_stmt = SSA_NAME_DEF_STMT (rhs1);
      def_rhs_code = gimple_assign_rhs_code (def_stmt);
      if (TREE_CODE_CLASS (def_rhs_code) == tcc_comparison)
	{
	  tree def_rhs1 = gimple_assign_rhs1 (def_stmt);
	  irhs2 = adjust_bool_pattern (rhs2, out_type, NULL_TREE, stmts);
	  if (TYPE_PRECISION (TREE_TYPE (irhs2))
	      == GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (def_rhs1))))
	    {
	      gimple tstmt;
	      stmt_vec_info stmt_def_vinfo = vinfo_for_stmt (def_stmt);
	      irhs1 = adjust_bool_pattern (rhs1, out_type, irhs2, stmts);
	      tstmt = stmts->pop ();
	      gcc_assert (tstmt == def_stmt);
	      stmts->quick_push (stmt);
	      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt))
		= STMT_VINFO_RELATED_STMT (stmt_def_vinfo);
	      gcc_assert (!STMT_VINFO_PATTERN_DEF_SEQ (stmt_def_vinfo));
	      STMT_VINFO_RELATED_STMT (stmt_def_vinfo) = NULL;
	      return irhs1;
	    }
	  else
	    irhs1 = adjust_bool_pattern (rhs1, out_type, NULL_TREE, stmts);
	  goto and_ior_xor;
	}
      /* FALLTHRU */
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      irhs1 = adjust_bool_pattern (rhs1, out_type, NULL_TREE, stmts);
      irhs2 = adjust_bool_pattern (rhs2, out_type, NULL_TREE, stmts);
    and_ior_xor:
      if (TYPE_PRECISION (TREE_TYPE (irhs1))
	  != TYPE_PRECISION (TREE_TYPE (irhs2)))
	{
	  int prec1 = TYPE_PRECISION (TREE_TYPE (irhs1));
	  int prec2 = TYPE_PRECISION (TREE_TYPE (irhs2));
	  int out_prec = TYPE_PRECISION (out_type);
	  if (absu_hwi (out_prec - prec1) < absu_hwi (out_prec - prec2))
	    irhs2 = adjust_bool_pattern_cast (TREE_TYPE (irhs1), rhs2);
	  else if (absu_hwi (out_prec - prec1) > absu_hwi (out_prec - prec2))
	    irhs1 = adjust_bool_pattern_cast (TREE_TYPE (irhs2), rhs1);
	  else
	    {
	      irhs1 = adjust_bool_pattern_cast (out_type, rhs1);
	      irhs2 = adjust_bool_pattern_cast (out_type, rhs2);
	    }
	}
      itype = TREE_TYPE (irhs1);
      pattern_stmt
	= gimple_build_assign_with_ops (rhs_code,
					vect_recog_temp_ssa_var (itype, NULL),
					irhs1, irhs2);
      break;

    default:
      gcc_assert (TREE_CODE_CLASS (rhs_code) == tcc_comparison);
      if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE
	  || !TYPE_UNSIGNED (TREE_TYPE (rhs1))
	  || (TYPE_PRECISION (TREE_TYPE (rhs1))
	      != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1)))))
	{
	  enum machine_mode mode = TYPE_MODE (TREE_TYPE (rhs1));
	  itype
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (mode), 1);
	}
      else
	itype = TREE_TYPE (rhs1);
      cond_expr = build2_loc (loc, rhs_code, itype, rhs1, rhs2);
      if (trueval == NULL_TREE)
	trueval = build_int_cst (itype, 1);
      else
	gcc_checking_assert (useless_type_conversion_p (itype,
							TREE_TYPE (trueval)));
      pattern_stmt
	= gimple_build_assign_with_ops (COND_EXPR,
					vect_recog_temp_ssa_var (itype, NULL),
					cond_expr, trueval,
					build_int_cst (itype, 0));
      break;
    }

  stmts->safe_push (stmt);
  gimple_set_location (pattern_stmt, loc);
  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)) = pattern_stmt;
  return gimple_assign_lhs (pattern_stmt);
}
/* Function vect_recog_bool_pattern

   Try to find pattern like following:

     bool a_b, b_b, c_b, d_b, e_b;
     TYPE f_T;
   loop:
     S1  a_b = x1 CMP1 y1;
     S2  b_b = x2 CMP2 y2;
     S3  c_b = a_b & b_b;
     S4  d_b = x3 CMP3 y3;
     S5  e_b = c_b | d_b;
     S6  f_T = (TYPE) e_b;

   where type 'TYPE' is an integral type.

   Input:

   * LAST_STMT: A stmt at the end from which the pattern
		search begins, i.e. cast of a bool to
		an integer type.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the pattern.

	Assuming size of TYPE is the same as size of all comparisons
	(otherwise some casts would be added where needed), the above
	sequence we create related pattern stmts:
	S1'  a_T = x1 CMP1 y1 ? 1 : 0;
	S3'  c_T = x2 CMP2 y2 ? a_T : 0;
	S4'  d_T = x3 CMP3 y3 ? 1 : 0;
	S5'  e_T = c_T | d_T;
	S6'  f_T = e_T;

	Instead of the above S3' we could emit:
	S2'  b_T = x2 CMP2 y2 ? 1 : 0;
	S3'  c_T = a_T | b_T;
	but the above is more efficient.  */
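/* Illustrative example, not taken from the original sources: the simplest
   instance of the pattern above is

     bool a_b = x1 < y1;
     int  f_T = (int) a_b;

   which becomes the single related pattern stmt

     S1'  f_T = x1 < y1 ? 1 : 0;

   handled by the CONVERT_EXPR_CODE_P branch of the function below.  */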
static gimple
vect_recog_bool_pattern (vec<gimple> *stmts, tree *type_in,
			 tree *type_out)
{
  gimple last_stmt = stmts->pop ();
  enum tree_code rhs_code;
  tree var, lhs, rhs, vectype;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
  gimple pattern_stmt;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  var = gimple_assign_rhs1 (last_stmt);
  lhs = gimple_assign_lhs (last_stmt);

  if ((TYPE_PRECISION (TREE_TYPE (var)) != 1
       || !TYPE_UNSIGNED (TREE_TYPE (var)))
      && TREE_CODE (TREE_TYPE (var)) != BOOLEAN_TYPE)
    return NULL;

  rhs_code = gimple_assign_rhs_code (last_stmt);
  if (CONVERT_EXPR_CODE_P (rhs_code))
    {
      if (TREE_CODE (TREE_TYPE (lhs)) != INTEGER_TYPE
	  || TYPE_PRECISION (TREE_TYPE (lhs)) == 1)
	return NULL;
      vectype = get_vectype_for_scalar_type (TREE_TYPE (lhs));
      if (vectype == NULL_TREE)
	return NULL;

      if (!check_bool_pattern (var, loop_vinfo, bb_vinfo))
	return NULL;

      rhs = adjust_bool_pattern (var, TREE_TYPE (lhs), NULL_TREE, stmts);
      lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
      if (useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	pattern_stmt
	  = gimple_build_assign_with_ops (SSA_NAME, lhs, rhs, NULL_TREE);
      else
	pattern_stmt
	  = gimple_build_assign_with_ops (NOP_EXPR, lhs, rhs, NULL_TREE);
      *type_out = vectype;
      *type_in = vectype;
      stmts->safe_push (last_stmt);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_recog_bool_pattern: detected: ");

      return pattern_stmt;
    }
  else if (rhs_code == SSA_NAME
	   && STMT_VINFO_DATA_REF (stmt_vinfo))
    {
      stmt_vec_info pattern_stmt_info;
      vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      gcc_assert (vectype != NULL_TREE);
      if (!VECTOR_MODE_P (TYPE_MODE (vectype)))
	return NULL;
      if (!check_bool_pattern (var, loop_vinfo, bb_vinfo))
	return NULL;

      rhs = adjust_bool_pattern (var, TREE_TYPE (vectype), NULL_TREE, stmts);
      lhs = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vectype), lhs);
      if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	{
	  tree rhs2 = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
	  gimple cast_stmt
	    = gimple_build_assign_with_ops (NOP_EXPR, rhs2, rhs, NULL_TREE);
	  new_pattern_def_seq (stmt_vinfo, cast_stmt);
	  rhs = rhs2;
	}
      pattern_stmt
	= gimple_build_assign_with_ops (SSA_NAME, lhs, rhs, NULL_TREE);
      pattern_stmt_info = new_stmt_vec_info (pattern_stmt, loop_vinfo,
					     bb_vinfo);
      set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
      STMT_VINFO_DATA_REF (pattern_stmt_info)
	= STMT_VINFO_DATA_REF (stmt_vinfo);
      STMT_VINFO_DR_BASE_ADDRESS (pattern_stmt_info)
	= STMT_VINFO_DR_BASE_ADDRESS (stmt_vinfo);
      STMT_VINFO_DR_INIT (pattern_stmt_info) = STMT_VINFO_DR_INIT (stmt_vinfo);
      STMT_VINFO_DR_OFFSET (pattern_stmt_info)
	= STMT_VINFO_DR_OFFSET (stmt_vinfo);
      STMT_VINFO_DR_STEP (pattern_stmt_info) = STMT_VINFO_DR_STEP (stmt_vinfo);
      STMT_VINFO_DR_ALIGNED_TO (pattern_stmt_info)
	= STMT_VINFO_DR_ALIGNED_TO (stmt_vinfo);
      DR_STMT (STMT_VINFO_DATA_REF (stmt_vinfo)) = pattern_stmt;
      *type_out = vectype;
      *type_in = vectype;
      stmts->safe_push (last_stmt);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_recog_bool_pattern: detected: ");
      return pattern_stmt;
    }
  else
    return NULL;
}
/* Mark statements that are involved in a pattern.  */

static inline void
vect_mark_pattern_stmts (gimple orig_stmt, gimple pattern_stmt,
			 tree pattern_vectype)
{
  stmt_vec_info pattern_stmt_info, def_stmt_info;
  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (orig_stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (orig_stmt_info);
  gimple def_stmt;

  pattern_stmt_info = vinfo_for_stmt (pattern_stmt);
  if (pattern_stmt_info == NULL)
    {
      pattern_stmt_info = new_stmt_vec_info (pattern_stmt, loop_vinfo,
					     bb_vinfo);
      set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
    }
  gimple_set_bb (pattern_stmt, gimple_bb (orig_stmt));

  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = orig_stmt;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info)
    = STMT_VINFO_DEF_TYPE (orig_stmt_info);
  STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
  STMT_VINFO_IN_PATTERN_P (orig_stmt_info) = true;
  STMT_VINFO_RELATED_STMT (orig_stmt_info) = pattern_stmt;
  STMT_VINFO_PATTERN_DEF_SEQ (pattern_stmt_info)
    = STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt_info);
  if (STMT_VINFO_PATTERN_DEF_SEQ (pattern_stmt_info))
    {
      gimple_stmt_iterator si;
      for (si = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (pattern_stmt_info));
	   !gsi_end_p (si); gsi_next (&si))
	{
	  def_stmt = gsi_stmt (si);
	  def_stmt_info = vinfo_for_stmt (def_stmt);
	  if (def_stmt_info == NULL)
	    {
	      def_stmt_info = new_stmt_vec_info (def_stmt, loop_vinfo,
						 bb_vinfo);
	      set_vinfo_for_stmt (def_stmt, def_stmt_info);
	    }
	  gimple_set_bb (def_stmt, gimple_bb (orig_stmt));
	  STMT_VINFO_RELATED_STMT (def_stmt_info) = orig_stmt;
	  STMT_VINFO_DEF_TYPE (def_stmt_info)
	    = STMT_VINFO_DEF_TYPE (orig_stmt_info);
	  if (STMT_VINFO_VECTYPE (def_stmt_info) == NULL_TREE)
	    STMT_VINFO_VECTYPE (def_stmt_info) = pattern_vectype;
	}
    }
}
/* Function vect_pattern_recog_1

   Input:
   PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
	computation pattern.
   STMT: A stmt from which the pattern search should start.

   If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
   expression that computes the same functionality and can be used to
   replace the sequence of stmts that are involved in the pattern.

   Output:
   This function checks if the expression returned by PATTERN_RECOG_FUNC is
   supported in vector form by the target.  We use 'TYPE_IN' to obtain the
   relevant vector type.  If 'TYPE_IN' is already a vector type, then this
   indicates that target support had already been checked by PATTERN_RECOG_FUNC.
   If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC, we check that it fits
   to the available target pattern.

   This function also does some bookkeeping, as explained in the documentation
   for vect_recog_pattern.  */
static void
vect_pattern_recog_1 (vect_recog_func_ptr vect_recog_func,
		      gimple_stmt_iterator si,
		      vec<gimple> *stmts_to_replace)
{
  gimple stmt = gsi_stmt (si), pattern_stmt;
  stmt_vec_info stmt_info;
  loop_vec_info loop_vinfo;
  tree pattern_vectype;
  tree type_in, type_out;
  enum tree_code code;
  int i;
  gimple next;

  stmts_to_replace->truncate (0);
  stmts_to_replace->quick_push (stmt);
  pattern_stmt = (* vect_recog_func) (stmts_to_replace, &type_in, &type_out);
  if (!pattern_stmt)
    return;

  stmt = stmts_to_replace->last ();
  stmt_info = vinfo_for_stmt (stmt);
  loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);

  if (VECTOR_MODE_P (TYPE_MODE (type_in)))
    {
      /* No need to check target support (already checked by the pattern
	 recognition function).  */
      pattern_vectype = type_out ? type_out : type_in;
    }
  else
    {
      enum machine_mode vec_mode;
      enum insn_code icode;
      optab optab;

      /* Check target support  */
      type_in = get_vectype_for_scalar_type (type_in);
      if (!type_in)
	return;
      if (type_out)
	type_out = get_vectype_for_scalar_type (type_out);
      else
	type_out = type_in;
      if (!type_out)
	return;
      pattern_vectype = type_out;

      if (is_gimple_assign (pattern_stmt))
	code = gimple_assign_rhs_code (pattern_stmt);
      else
	{
	  gcc_assert (is_gimple_call (pattern_stmt));
	  code = CALL_EXPR;
	}

      optab = optab_for_tree_code (code, type_in, optab_default);
      vec_mode = TYPE_MODE (type_in);
      if (!optab
	  || (icode = optab_handler (optab, vec_mode)) == CODE_FOR_nothing
	  || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out)))
	return;
    }

  /* Found a vectorizable pattern.  */
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "pattern recognized: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
    }

  /* Mark the stmts that are involved in the pattern. */
  vect_mark_pattern_stmts (stmt, pattern_stmt, pattern_vectype);

  /* Patterns cannot be vectorized using SLP, because they change the order of
     computation.  */
  if (loop_vinfo)
    FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTIONS (loop_vinfo), i, next)
      if (next == stmt)
	LOOP_VINFO_REDUCTIONS (loop_vinfo).ordered_remove (i);

  /* It is possible that additional pattern stmts are created and inserted in
     STMTS_TO_REPLACE.  We create a stmt_info for each of them, and mark the
     relevant statements.  */
  for (i = 0; stmts_to_replace->iterate (i, &stmt)
	      && (unsigned) i < (stmts_to_replace->length () - 1);
       i++)
    {
      stmt_info = vinfo_for_stmt (stmt);
      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "additional pattern stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
	}

      vect_mark_pattern_stmts (stmt, pattern_stmt, NULL_TREE);
    }
}
/* Function vect_pattern_recog

   Input:
   LOOP_VINFO - a struct_loop_info of a loop in which we want to look for
	computation idioms.

   Output - for each computation idiom that is detected we create a new stmt
	that provides the same functionality and that can be vectorized.  We
	also record some information in the struct_stmt_info of the relevant
	stmts, as explained below:

   At the entry to this function we have the following stmts, with the
   following initial value in the STMT_VINFO fields:

	 stmt                     in_pattern_p  related_stmt    vec_stmt
	 S1: a_i = ....                 -       -               -
	 S2: a_2 = ..use(a_i)..         -       -               -
	 S3: a_1 = ..use(a_2)..         -       -               -
	 S4: a_0 = ..use(a_1)..         -       -               -
	 S5: ... = ..use(a_0)..         -       -               -

   Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
   represented by a single stmt.  We then:
   - create a new stmt S6 equivalent to the pattern (the stmt is not
     inserted into the code)
   - fill in the STMT_VINFO fields as follows:

				  in_pattern_p  related_stmt    vec_stmt
	 S1: a_i = ....                 -       -               -
	 S2: a_2 = ..use(a_i)..         -       -               -
	 S3: a_1 = ..use(a_2)..         -       -               -
	 S4: a_0 = ..use(a_1)..         true    S6              -
	  '---> S6: a_new = ....        -       S4              -
	 S5: ... = ..use(a_0)..         -       -               -

   (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
   to each other through the RELATED_STMT field).

   S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
   of S4 because it will replace all its uses.  Stmts {S1,S2,S3} will
   remain irrelevant unless used by stmts other than S4.

   If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
   (because they are marked as irrelevant).  It will vectorize S6, and record
   a pointer to the new vector stmt VS6 from S6 (as usual).
   S4 will be skipped, and S5 will be vectorized as usual:

				  in_pattern_p  related_stmt    vec_stmt
	 S1: a_i = ....                 -       -               -
	 S2: a_2 = ..use(a_i)..         -       -               -
	 S3: a_1 = ..use(a_2)..         -       -               -
       > VS6: va_new = ....             -       -               -
	 S4: a_0 = ..use(a_1)..         true    S6              VS6
	  '---> S6: a_new = ....        -       S4              VS6
       > VS5: ... = ..vuse(va_new)..    -       -               -
	 S5: ... = ..use(a_0)..         -       -               -

   DCE could then get rid of {S1,S2,S3,S4,S5} (if their defs are not used
   elsewhere), and we'll end up with:

	 VS6: va_new = ....
	 VS5: ... = ..vuse(va_new)..

   In case of more than one pattern statements, e.g., widen-mult with
   intermediate type:

     S1  a_t = ;
     S2  a_T = (TYPE) a_t;
	   '--> S3: a_it = (interm_type) a_t;
     S4  prod_T = a_T * CONST;
	   '--> S5: prod_T' = a_it w* CONST;

   there may be other users of a_T outside the pattern.  In that case S2 will
   be marked as relevant (as well as S3), and both S2 and S3 will be analyzed
   and vectorized.  The vector stmt VS2 will be recorded in S2, and VS3 will
   be recorded in S3.  */
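/* Illustrative note, not taken from the original sources: the driver below
   simply walks every stmt of every basic block under consideration and tries
   each recognizer from vect_vect_recog_func_ptrs on it, roughly

     for each bb
       for each stmt S in bb
	 for each recognizer R
	   vect_pattern_recog_1 (R, S, &stmts_to_replace);

   the bookkeeping described above is done by vect_pattern_recog_1 and
   vect_mark_pattern_stmts.  */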
void
vect_pattern_recog (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  struct loop *loop;
  basic_block *bbs;
  unsigned int nbbs;
  gimple_stmt_iterator si;
  unsigned int i, j;
  vect_recog_func_ptr vect_recog_func;
  vec<gimple> stmts_to_replace;
  stmts_to_replace.create (1);
  gimple stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_pattern_recog ===");

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      bbs = LOOP_VINFO_BBS (loop_vinfo);
      nbbs = loop->num_nodes;
    }
  else
    {
      bbs = &BB_VINFO_BB (bb_vinfo);
      nbbs = 1;
    }

  /* Scan through the loop stmts, applying the pattern recognition
     functions starting at each stmt visited:  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  if (bb_vinfo && (stmt = gsi_stmt (si))
	      && vinfo_for_stmt (stmt)
	      && !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
	    continue;

	  /* Scan over all generic vect_recog_xxx_pattern functions.  */
	  for (j = 0; j < NUM_PATTERNS; j++)
	    {
	      vect_recog_func = vect_vect_recog_func_ptrs[j];
	      vect_pattern_recog_1 (vect_recog_func, si,
				    &stmts_to_replace);
	    }
	}
    }

  stmts_to_replace.release ();
}