/* Analysis Utilities for Loop Vectorization.
   Copyright (C) 2006-2018 Free Software Foundation, Inc.
   Contributed by Dorit Nuzman <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "expmed.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
#include "builtins.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
#include "fold-const-call.h"
#include "attribs.h"
#include "cgraph.h"
#include "omp-simd-clone.h"
#include "predict.h"
/* Return true if we have a useful VR_RANGE range for VAR, storing it
   in *MIN_VALUE and *MAX_VALUE if so.  Note the range in the dump files.  */

static bool
vect_get_range_info (tree var, wide_int *min_value, wide_int *max_value)
{
  value_range_type vr_type = get_range_info (var, min_value, max_value);
  wide_int nonzero = get_nonzero_bits (var);
  signop sgn = TYPE_SIGN (TREE_TYPE (var));
  if (intersect_range_with_nonzero_bits (vr_type, min_value, max_value,
					 nonzero, sgn) == VR_RANGE)
    {
      if (dump_enabled_p ())
	{
	  dump_generic_expr_loc (MSG_NOTE, vect_location, TDF_SLIM, var);
	  dump_printf (MSG_NOTE, " has range [");
	  dump_hex (MSG_NOTE, *min_value);
	  dump_printf (MSG_NOTE, ", ");
	  dump_hex (MSG_NOTE, *max_value);
	  dump_printf (MSG_NOTE, "]\n");
	}
      return true;
    }
  else
    {
      if (dump_enabled_p ())
	{
	  dump_generic_expr_loc (MSG_NOTE, vect_location, TDF_SLIM, var);
	  dump_printf (MSG_NOTE, " has no range info\n");
	}
      return false;
    }
}
/* Report that we've found an instance of pattern PATTERN in
   statement STMT.  */

static void
vect_pattern_detected (const char *name, gimple *stmt)
{
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "%s: detected: ", name);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }
}
/* Associate pattern statement PATTERN_STMT with ORIG_STMT_INFO and
   return the pattern statement's stmt_vec_info.  Set its vector type to
   VECTYPE if it doesn't have one already.  */

static stmt_vec_info
vect_init_pattern_stmt (gimple *pattern_stmt, stmt_vec_info orig_stmt_info,
			tree vectype)
{
  vec_info *vinfo = orig_stmt_info->vinfo;
  stmt_vec_info pattern_stmt_info = vinfo->lookup_stmt (pattern_stmt);
  if (pattern_stmt_info == NULL_STMT_VEC_INFO)
    pattern_stmt_info = orig_stmt_info->vinfo->add_stmt (pattern_stmt);
  gimple_set_bb (pattern_stmt, gimple_bb (orig_stmt_info->stmt));

  pattern_stmt_info->pattern_stmt_p = true;
  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = orig_stmt_info;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info)
    = STMT_VINFO_DEF_TYPE (orig_stmt_info);
  if (!STMT_VINFO_VECTYPE (pattern_stmt_info))
    STMT_VINFO_VECTYPE (pattern_stmt_info) = vectype;
  return pattern_stmt_info;
}
/* Set the pattern statement of ORIG_STMT_INFO to PATTERN_STMT.
   Also set the vector type of PATTERN_STMT to VECTYPE, if it doesn't
   have one already.  */

static void
vect_set_pattern_stmt (gimple *pattern_stmt, stmt_vec_info orig_stmt_info,
		       tree vectype)
{
  STMT_VINFO_IN_PATTERN_P (orig_stmt_info) = true;
  STMT_VINFO_RELATED_STMT (orig_stmt_info)
    = vect_init_pattern_stmt (pattern_stmt, orig_stmt_info, vectype);
}
/* Add NEW_STMT to STMT_INFO's pattern definition statements.  If VECTYPE
   is nonnull, record that NEW_STMT's vector type is VECTYPE, which might
   be different from the vector type of the final pattern statement.  */

static void
append_pattern_def_seq (stmt_vec_info stmt_info, gimple *new_stmt,
			tree vectype = NULL_TREE)
{
  vec_info *vinfo = stmt_info->vinfo;
  if (vectype)
    {
      stmt_vec_info new_stmt_info = vinfo->add_stmt (new_stmt);
      STMT_VINFO_VECTYPE (new_stmt_info) = vectype;
    }
  gimple_seq_add_stmt_without_update (&STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
				      new_stmt);
}
/* The caller wants to perform new operations on vect_external variable
   VAR, so that the result of the operations would also be vect_external.
   Return the edge on which the operations can be performed, if one exists.
   Return null if the operations should instead be treated as part of
   the pattern that needs them.  */

static edge
vect_get_external_def_edge (vec_info *vinfo, tree var)
{
  edge e = NULL;
  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
    {
      e = loop_preheader_edge (loop_vinfo->loop);
      if (!SSA_NAME_IS_DEFAULT_DEF (var))
	{
	  basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (var));
	  if (bb == NULL
	      || !dominated_by_p (CDI_DOMINATORS, e->dest, bb))
	    e = NULL;
	}
    }
  return e;
}
/* Return true if the target supports a vector version of CODE,
   where CODE is known to map to a direct optab.  ITYPE specifies
   the type of (some of) the scalar inputs and OTYPE specifies the
   type of the scalar result.

   If CODE allows the inputs and outputs to have different types
   (such as for WIDEN_SUM_EXPR), it is the input mode rather
   than the output mode that determines the appropriate target pattern.
   Operand 0 of the target pattern then specifies the mode that the output
   must have.

   When returning true, set *VECOTYPE_OUT to the vector version of OTYPE.
   Also set *VECITYPE_OUT to the vector version of ITYPE if VECITYPE_OUT
   is nonnull.  */

static bool
vect_supportable_direct_optab_p (tree otype, tree_code code,
				 tree itype, tree *vecotype_out,
				 tree *vecitype_out = NULL)
{
  tree vecitype = get_vectype_for_scalar_type (itype);
  if (!vecitype)
    return false;

  tree vecotype = get_vectype_for_scalar_type (otype);
  if (!vecotype)
    return false;

  optab optab = optab_for_tree_code (code, vecitype, optab_default);
  if (!optab)
    return false;

  insn_code icode = optab_handler (optab, TYPE_MODE (vecitype));
  if (icode == CODE_FOR_nothing
      || insn_data[icode].operand[0].mode != TYPE_MODE (vecotype))
    return false;

  *vecotype_out = vecotype;
  if (vecitype_out)
    *vecitype_out = vecitype;
  return true;
}
/* Round bit precision PRECISION up to a full element.  */

static unsigned int
vect_element_precision (unsigned int precision)
{
  precision = 1 << ceil_log2 (precision);
  return MAX (precision, BITS_PER_UNIT);
}
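
/* Illustrative only, not part of the original file: the rounding above
   behaves like this minimal stand-alone sketch, assuming 8-bit storage
   units (BITS_PER_UNIT == 8).  The helper name is made up for exposition.

     static unsigned int
     element_precision_example (unsigned int precision)
     {
       unsigned int p = 1;
       while (p < precision)	// round up to a power of two
	 p <<= 1;
       return p < 8 ? 8 : p;	// but never below one storage unit
     }

   so 3 rounds up to 8, 9 rounds up to 16 and 17 rounds up to 32.  */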
/* If OP is defined by a statement that's being considered for vectorization,
   return information about that statement, otherwise return NULL.  */

static stmt_vec_info
vect_get_internal_def (vec_info *vinfo, tree op)
{
  stmt_vec_info def_stmt_info = vinfo->lookup_def (op);
  if (def_stmt_info
      && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def)
    return def_stmt_info;
  return NULL;
}
/* Check whether NAME, an ssa-name used in STMT_VINFO,
   is a result of a type promotion, such that:
     DEF_STMT: NAME = NOP (name0)
   If CHECK_SIGN is TRUE, check that either both types are signed or both are
   unsigned.  */

static bool
type_conversion_p (tree name, stmt_vec_info stmt_vinfo, bool check_sign,
		   tree *orig_type, gimple **def_stmt, bool *promotion)
{
  tree type = TREE_TYPE (name);
  tree oprnd0;
  enum vect_def_type dt;

  stmt_vec_info def_stmt_info;
  if (!vect_is_simple_use (name, stmt_vinfo->vinfo, &dt, &def_stmt_info,
			   def_stmt))
    return false;

  if (dt != vect_internal_def
      && dt != vect_external_def && dt != vect_constant_def)
    return false;

  if (!is_gimple_assign (*def_stmt))
    return false;

  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (*def_stmt)))
    return false;

  oprnd0 = gimple_assign_rhs1 (*def_stmt);

  *orig_type = TREE_TYPE (oprnd0);
  if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*orig_type)
      || ((TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*orig_type)) && check_sign))
    return false;

  if (TYPE_PRECISION (type) >= (TYPE_PRECISION (*orig_type) * 2))
    *promotion = true;
  else
    *promotion = false;

  if (!vect_is_simple_use (oprnd0, stmt_vinfo->vinfo, &dt))
    return false;

  return true;
}
/* Holds information about an input operand after some sign changes
   and type promotions have been peeled away.  */
struct vect_unpromoted_value {
  vect_unpromoted_value ();

  void set_op (tree, vect_def_type, stmt_vec_info = NULL);

  /* The value obtained after peeling away zero or more casts.  */
  tree op;

  /* The type of OP.  */
  tree type;

  /* The definition type of OP.  */
  vect_def_type dt;

  /* If OP is the result of peeling at least one cast, and if the cast
     of OP itself is a vectorizable statement, CASTER identifies that
     statement, otherwise it is null.  */
  stmt_vec_info caster;
};

inline vect_unpromoted_value::vect_unpromoted_value ()
  : op (NULL_TREE),
    type (NULL_TREE),
    dt (vect_uninitialized_def),
    caster (NULL)
{
}
/* Set the operand to OP_IN, its definition type to DT_IN, and the
   statement that casts it to CASTER_IN.  */

inline void
vect_unpromoted_value::set_op (tree op_in, vect_def_type dt_in,
			       stmt_vec_info caster_in)
{
  op = op_in;
  type = TREE_TYPE (op);
  dt = dt_in;
  caster = caster_in;
}
/* If OP is a vectorizable SSA name, strip a sequence of integer conversions
   to reach some vectorizable inner operand OP', continuing as long as it
   is possible to convert OP' back to OP using a possible sign change
   followed by a possible promotion P.  Return this OP', or null if OP is
   not a vectorizable SSA name.  If there is a promotion P, describe its
   input in UNPROM, otherwise describe OP' in UNPROM.  If SINGLE_USE_P
   is nonnull, set *SINGLE_USE_P to false if any of the SSA names involved
   have more than one user.

   A successful return means that it is possible to go from OP' to OP
   via UNPROM.  The cast from OP' to UNPROM is at most a sign change,
   whereas the cast from UNPROM to OP might be a promotion, a sign
   change, or a nop.

   E.g. say we have:

       signed short *ptr = ...;
       signed short C = *ptr;
       unsigned short B = (unsigned short) C;    // sign change
       signed int A = (signed int) B;            // unsigned promotion
       ...possible other uses of A...
       unsigned int OP = (unsigned int) A;       // sign change

   In this case it's possible to go directly from C to OP using:

       OP = (unsigned int) (unsigned short) C;
	    +------------+ +--------------+
	       promotion      sign change

   so OP' would be C.  The input to the promotion is B, so UNPROM
   would describe B.  */

static tree
vect_look_through_possible_promotion (vec_info *vinfo, tree op,
				      vect_unpromoted_value *unprom,
				      bool *single_use_p = NULL)
{
  tree res = NULL_TREE;
  tree op_type = TREE_TYPE (op);
  unsigned int orig_precision = TYPE_PRECISION (op_type);
  stmt_vec_info caster = NULL;
  while (TREE_CODE (op) == SSA_NAME && INTEGRAL_TYPE_P (op_type))
    {
      /* See whether OP is simple enough to vectorize.  */
      stmt_vec_info def_stmt_info;
      gimple *def_stmt;
      vect_def_type dt;
      if (!vect_is_simple_use (op, vinfo, &dt, &def_stmt_info, &def_stmt))
	break;

      /* If OP is the input of a demotion, skip over it to see whether
	 OP is itself the result of a promotion.  If so, the combined
	 effect of the promotion and the demotion might fit the required
	 pattern, otherwise neither operation fits.

	 This copes with cases such as the result of an arithmetic
	 operation being truncated before being stored, and where that
	 arithmetic operation has been recognized as an over-widened one.  */
      if (TYPE_PRECISION (op_type) <= orig_precision)
	{
	  /* Use OP as the UNPROM described above if we haven't yet
	     found a promotion, or if using the new input preserves the
	     sign of the previous promotion.  */
	  if (!res
	      || TYPE_PRECISION (unprom->type) == orig_precision
	      || TYPE_SIGN (unprom->type) == TYPE_SIGN (op_type))
	    unprom->set_op (op, dt, caster);
	  /* Stop if we've already seen a promotion and if this
	     conversion does more than change the sign.  */
	  else if (TYPE_PRECISION (op_type)
		   != TYPE_PRECISION (unprom->type))
	    break;
	}

      /* The sequence now extends to OP.  */
      res = op;

      /* See whether OP is defined by a cast.  Record it as CASTER if
	 the cast is potentially vectorizable.  */
      if (!def_stmt)
	break;
      caster = def_stmt_info;

      /* Ignore pattern statements, since we don't link uses for them.  */
      if (caster
	  && single_use_p
	  && !STMT_VINFO_RELATED_STMT (caster)
	  && !has_single_use (res))
	*single_use_p = false;

      gassign *assign = dyn_cast <gassign *> (def_stmt);
      if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
	break;

      /* Continue with the input to the cast.  */
      op = gimple_assign_rhs1 (def_stmt);
      op_type = TREE_TYPE (op);
    }
  return res;
}
/* OP is an integer operand to an operation that returns TYPE, and we
   want to treat the operation as a widening one.  So far we can treat
   it as widening from *COMMON_TYPE.

   Return true if OP is suitable for such a widening operation,
   either widening from *COMMON_TYPE or from some supertype of it.
   Update *COMMON_TYPE to the supertype in the latter case.

   SHIFT_P is true if OP is a shift amount.  */

static bool
vect_joust_widened_integer (tree type, bool shift_p, tree op,
			    tree *common_type)
{
  /* Calculate the minimum precision required by OP, without changing
     the sign of either operand.  */
  unsigned int precision;
  if (shift_p)
    {
      if (!wi::leu_p (wi::to_widest (op), TYPE_PRECISION (type) / 2))
	return false;
      precision = TREE_INT_CST_LOW (op);
    }
  else
    {
      precision = wi::min_precision (wi::to_widest (op),
				     TYPE_SIGN (*common_type));
      if (precision * 2 > TYPE_PRECISION (type))
	return false;
    }

  /* If OP requires a wider type, switch to that type.  The checks
     above ensure that this is still narrower than the result.  */
  precision = vect_element_precision (precision);
  if (TYPE_PRECISION (*common_type) < precision)
    *common_type = build_nonstandard_integer_type
      (precision, TYPE_UNSIGNED (*common_type));
  return true;
}
/* Return true if the common supertype of NEW_TYPE and *COMMON_TYPE
   is narrower than type, storing the supertype in *COMMON_TYPE if so.  */

static bool
vect_joust_widened_type (tree type, tree new_type, tree *common_type)
{
  if (types_compatible_p (*common_type, new_type))
    return true;

  /* See if *COMMON_TYPE can hold all values of NEW_TYPE.  */
  if ((TYPE_PRECISION (new_type) < TYPE_PRECISION (*common_type))
      && (TYPE_UNSIGNED (new_type) || !TYPE_UNSIGNED (*common_type)))
    return true;

  /* See if NEW_TYPE can hold all values of *COMMON_TYPE.  */
  if (TYPE_PRECISION (*common_type) < TYPE_PRECISION (new_type)
      && (TYPE_UNSIGNED (*common_type) || !TYPE_UNSIGNED (new_type)))
    {
      *common_type = new_type;
      return true;
    }

  /* We have mismatched signs, with the signed type being
     no wider than the unsigned type.  In this case we need
     a wider signed type.  */
  unsigned int precision = MAX (TYPE_PRECISION (*common_type),
				TYPE_PRECISION (new_type));
  precision *= 2;
  if (precision * 2 > TYPE_PRECISION (type))
    return false;

  *common_type = build_nonstandard_integer_type (precision, false);
  return true;
}
/* Check whether STMT_INFO can be viewed as a tree of integer operations
   in which each node either performs CODE or WIDENED_CODE, and where
   each leaf operand is narrower than the result of STMT_INFO.  MAX_NOPS
   specifies the maximum number of leaf operands.  SHIFT_P says whether
   CODE and WIDENED_CODE are some sort of shift.

   If STMT_INFO is such a tree, return the number of leaf operands
   and describe them in UNPROM[0] onwards.  Also set *COMMON_TYPE
   to a type that (a) is narrower than the result of STMT_INFO and
   (b) can hold all leaf operand values.

   Return 0 if STMT_INFO isn't such a tree, or if no such COMMON_TYPE
   exists.  */

static unsigned int
vect_widened_op_tree (stmt_vec_info stmt_info, tree_code code,
		      tree_code widened_code, bool shift_p,
		      unsigned int max_nops,
		      vect_unpromoted_value *unprom, tree *common_type)
{
  /* Check for an integer operation with the right code.  */
  vec_info *vinfo = stmt_info->vinfo;
  gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
  if (!assign)
    return 0;

  tree_code rhs_code = gimple_assign_rhs_code (assign);
  if (rhs_code != code && rhs_code != widened_code)
    return 0;

  tree type = gimple_expr_type (assign);
  if (!INTEGRAL_TYPE_P (type))
    return 0;

  /* Assume that both operands will be leaf operands.  */
  max_nops -= 2;

  /* Check the operands.  */
  unsigned int next_op = 0;
  for (unsigned int i = 0; i < 2; ++i)
    {
      vect_unpromoted_value *this_unprom = &unprom[next_op];
      unsigned int nops = 1;
      tree op = gimple_op (assign, i + 1);
      if (i == 1 && TREE_CODE (op) == INTEGER_CST)
	{
	  /* We already have a common type from earlier operands.
	     Update it to account for OP.  */
	  this_unprom->set_op (op, vect_constant_def);
	  if (!vect_joust_widened_integer (type, shift_p, op, common_type))
	    return 0;
	}
      else
	{
	  /* Only allow shifts by constants.  */
	  if (shift_p && i == 1)
	    return 0;

	  if (!vect_look_through_possible_promotion (stmt_info->vinfo, op,
						     this_unprom))
	    return 0;

	  if (TYPE_PRECISION (this_unprom->type) == TYPE_PRECISION (type))
	    {
	      /* The operand isn't widened.  If STMT_INFO has the code
		 for an unwidened operation, recursively check whether
		 this operand is a node of the tree.  */
	      if (rhs_code != code
		  || max_nops == 0
		  || this_unprom->dt != vect_internal_def)
		return 0;

	      /* Give back the leaf slot allocated above now that we're
		 not treating this as a leaf operand.  */
	      max_nops += 1;

	      /* Recursively process the definition of the operand.  */
	      stmt_vec_info def_stmt_info
		= vinfo->lookup_def (this_unprom->op);
	      nops = vect_widened_op_tree (def_stmt_info, code, widened_code,
					   shift_p, max_nops, this_unprom,
					   common_type);
	      if (nops == 0)
		return 0;

	      max_nops -= nops;
	    }
	  else
	    {
	      /* Make sure that the operand is narrower than the result.  */
	      if (TYPE_PRECISION (this_unprom->type) * 2
		  > TYPE_PRECISION (type))
		return 0;

	      /* Update COMMON_TYPE for the new operand.  */
	      if (i == 0)
		*common_type = this_unprom->type;
	      else if (!vect_joust_widened_type (type, this_unprom->type,
						 common_type))
		return 0;
	    }
	}
      next_op += nops;
    }
  return next_op;
}
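
/* Illustrative only, not part of the original file: when called with
   PLUS_EXPR for both CODE and WIDENED_CODE and MAX_NOPS == 3 (as the
   average pattern below does), the addition tree produced for the source
   sketch here has three leaf operands (a[i], b[i] and c[i]), all widened
   from unsigned char, so *COMMON_TYPE would be unsigned char.  The
   function name is made up for exposition.

     static unsigned int
     widened_add_example (const unsigned char *a, const unsigned char *b,
			  const unsigned char *c, int i)
     {
       return (unsigned int) a[i] + (unsigned int) b[i] + (unsigned int) c[i];
     }
*/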
/* Helper to return a new temporary for pattern of TYPE for STMT.  If STMT
   is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var.  */

static tree
vect_recog_temp_ssa_var (tree type, gimple *stmt)
{
  return make_temp_ssa_name (type, stmt, "patt");
}
/* STMT2_INFO describes a type conversion that could be split into STMT1
   followed by a version of STMT2_INFO that takes NEW_RHS as its first
   input.  Try to do this using pattern statements, returning true on
   success.  */

static bool
vect_split_statement (stmt_vec_info stmt2_info, tree new_rhs,
		      gimple *stmt1, tree vectype)
{
  if (is_pattern_stmt_p (stmt2_info))
    {
      /* STMT2_INFO is part of a pattern.  Get the statement to which
	 the pattern is attached.  */
      stmt_vec_info orig_stmt2_info = STMT_VINFO_RELATED_STMT (stmt2_info);
      vect_init_pattern_stmt (stmt1, orig_stmt2_info, vectype);

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Splitting pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt2_info->stmt, 0);
	}

      /* Since STMT2_INFO is a pattern statement, we can change it
	 in-situ without worrying about changing the code for the
	 containing block.  */
      gimple_assign_set_rhs1 (stmt2_info->stmt, new_rhs);

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "into: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
	  dump_printf_loc (MSG_NOTE, vect_location, "and: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt2_info->stmt, 0);
	}

      gimple_seq *def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt2_info);
      if (STMT_VINFO_RELATED_STMT (orig_stmt2_info) == stmt2_info)
	/* STMT2_INFO is the actual pattern statement.  Add STMT1
	   to the end of the definition sequence.  */
	gimple_seq_add_stmt_without_update (def_seq, stmt1);
      else
	{
	  /* STMT2_INFO belongs to the definition sequence.  Insert STMT1
	     before it.  */
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt2_info->stmt, def_seq);
	  gsi_insert_before_without_update (&gsi, stmt1, GSI_SAME_STMT);
	}
      return true;
    }
  else
    {
      /* STMT2_INFO doesn't yet have a pattern.  Try to create a
	 two-statement pattern now.  */
      gcc_assert (!STMT_VINFO_RELATED_STMT (stmt2_info));
      tree lhs_type = TREE_TYPE (gimple_get_lhs (stmt2_info->stmt));
      tree lhs_vectype = get_vectype_for_scalar_type (lhs_type);
      if (!lhs_vectype)
	return false;

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Splitting statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt2_info->stmt, 0);
	}

      /* Add STMT1 as a singleton pattern definition sequence.  */
      gimple_seq *def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (stmt2_info);
      vect_init_pattern_stmt (stmt1, stmt2_info, vectype);
      gimple_seq_add_stmt_without_update (def_seq, stmt1);

      /* Build the second of the two pattern statements.  */
      tree new_lhs = vect_recog_temp_ssa_var (lhs_type, NULL);
      gassign *new_stmt2 = gimple_build_assign (new_lhs, NOP_EXPR, new_rhs);
      vect_set_pattern_stmt (new_stmt2, stmt2_info, lhs_vectype);

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "into pattern statements: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
	  dump_printf_loc (MSG_NOTE, vect_location, "and: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt2, 0);
	}

      return true;
    }
}
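
/* Illustrative only, not part of the original file: splitting the direct
   conversion

     unsigned int x = (unsigned int) c;		// c is unsigned char

   at an intermediate unsigned short mid-point yields

     unsigned short tmp = (unsigned short) c;	// STMT1
     unsigned int x = (unsigned int) tmp;	// rewritten STMT2

   which is the shape vect_convert_input below relies on when it needs a
   narrower intermediate result than the existing cast provides.  */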
/* Convert UNPROM to TYPE and return the result, adding new statements
   to STMT_INFO's pattern definition statements if no better way is
   available.  VECTYPE is the vector form of TYPE.  */

static tree
vect_convert_input (stmt_vec_info stmt_info, tree type,
		    vect_unpromoted_value *unprom, tree vectype)
{
  /* Check for a no-op conversion.  */
  if (types_compatible_p (type, TREE_TYPE (unprom->op)))
    return unprom->op;

  /* Allow the caller to create constant vect_unpromoted_values.  */
  if (TREE_CODE (unprom->op) == INTEGER_CST)
    return wide_int_to_tree (type, wi::to_widest (unprom->op));

  /* See if we can reuse an existing result.  */
  if (unprom->caster)
    {
      tree lhs = gimple_get_lhs (unprom->caster->stmt);
      if (types_compatible_p (TREE_TYPE (lhs), type))
	return lhs;
    }

  /* We need a new conversion statement.  */
  tree new_op = vect_recog_temp_ssa_var (type, NULL);
  gassign *new_stmt = gimple_build_assign (new_op, NOP_EXPR, unprom->op);

  /* If the operation is the input to a vectorizable cast, try splitting
     that cast into two, taking the required result as a mid-way point.  */
  if (unprom->caster)
    {
      tree lhs = gimple_get_lhs (unprom->caster->stmt);
      if (TYPE_PRECISION (TREE_TYPE (lhs)) > TYPE_PRECISION (type)
	  && TYPE_PRECISION (type) > TYPE_PRECISION (unprom->type)
	  && (TYPE_UNSIGNED (unprom->type) || !TYPE_UNSIGNED (type))
	  && vect_split_statement (unprom->caster, new_op, new_stmt, vectype))
	return new_op;
    }

  /* If OP is an external value, see if we can insert the new statement
     on an incoming edge.  */
  if (unprom->dt == vect_external_def)
    if (edge e = vect_get_external_def_edge (stmt_info->vinfo, unprom->op))
      {
	basic_block new_bb = gsi_insert_on_edge_immediate (e, new_stmt);
	gcc_assert (!new_bb);
	return new_op;
      }

  /* As a (common) last resort, add the statement to the pattern itself.  */
  append_pattern_def_seq (stmt_info, new_stmt, vectype);
  return new_op;
}
/* Invoke vect_convert_input for N elements of UNPROM and store the
   result in the corresponding elements of RESULT.  */

static void
vect_convert_inputs (stmt_vec_info stmt_info, unsigned int n,
		     tree *result, tree type, vect_unpromoted_value *unprom,
		     tree vectype)
{
  for (unsigned int i = 0; i < n; ++i)
    {
      unsigned int j;
      for (j = 0; j < i; ++j)
	if (unprom[j].op == unprom[i].op)
	  break;
      if (j < i)
	result[i] = result[j];
      else
	result[i] = vect_convert_input (stmt_info, type, &unprom[i], vectype);
    }
}
/* The caller has created a (possibly empty) sequence of pattern definition
   statements followed by a single statement PATTERN_STMT.  Cast the result
   of this final statement to TYPE.  If a new statement is needed, add
   PATTERN_STMT to the end of STMT_INFO's pattern definition statements
   and return the new statement, otherwise return PATTERN_STMT as-is.
   VECITYPE is the vector form of PATTERN_STMT's result type.  */

static gimple *
vect_convert_output (stmt_vec_info stmt_info, tree type, gimple *pattern_stmt,
		     tree vecitype)
{
  tree lhs = gimple_get_lhs (pattern_stmt);
  if (!types_compatible_p (type, TREE_TYPE (lhs)))
    {
      append_pattern_def_seq (stmt_info, pattern_stmt, vecitype);
      tree cast_var = vect_recog_temp_ssa_var (type, NULL);
      pattern_stmt = gimple_build_assign (cast_var, NOP_EXPR, lhs);
    }
  return pattern_stmt;
}
/* Return true if STMT_VINFO describes a reduction for which reassociation
   is allowed.  If STMT_INFO is part of a group, assume that it's part of
   a reduction chain and optimistically assume that all statements
   except the last allow reassociation.  */

static bool
vect_reassociating_reduction_p (stmt_vec_info stmt_vinfo)
{
  return (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	  ? STMT_VINFO_REDUC_TYPE (stmt_vinfo) != FOLD_LEFT_REDUCTION
	  : REDUC_GROUP_FIRST_ELEMENT (stmt_vinfo) != NULL_STMT_VEC_INFO);
}
/* As above, but also require it to have code CODE and to be a reduction
   in the outermost loop.  When returning true, store the operands in
   *OP0_OUT and *OP1_OUT.  */

static bool
vect_reassociating_reduction_p (stmt_vec_info stmt_info, tree_code code,
				tree *op0_out, tree *op1_out)
{
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  if (!loop_info)
    return false;

  gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
  if (!assign || gimple_assign_rhs_code (assign) != code)
    return false;

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  if (loop && nested_in_vect_loop_p (loop, stmt_info))
    return false;

  if (!vect_reassociating_reduction_p (stmt_info))
    return false;

  *op0_out = gimple_assign_rhs1 (assign);
  *op1_out = gimple_assign_rhs2 (assign);
  return true;
}
/* Function vect_recog_dot_prod_pattern

   Try to find the following pattern:

     type x_t, y_t;
     TYPE1 prod;
     TYPE2 sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = ...
     S2  y_t = ...
     S3  x_T = (TYPE1) x_t;
     S4  y_T = (TYPE1) y_t;
     S5  prod = x_T * y_T;
     [S6  prod = (TYPE2) prod;  #optional]
     S7  sum_1 = prod + sum_0;

   where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
   same size as 'TYPE1' or bigger.  This is a special case of a reduction
   computation.

   Input:

   * STMT_VINFO: The stmt from which the pattern search begins.  In the
   example, when this function is called with S7, the pattern {S3,S4,S5,S6,S7}
   will be detected.

   Output:

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
	WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>

   Note: The dot-prod idiom is a widening reduction pattern that is
	 vectorized without preserving all the intermediate results.  It
	 produces only N/2 (widened) results (by summing up pairs of
	 intermediate results) rather than all N results.  Therefore, we
	 cannot allow this pattern when we want to get all the results and in
	 the correct order (as is the case when this computation is in an
	 inner-loop nested in an outer-loop that is being vectorized).  */

static gimple *
vect_recog_dot_prod_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
{
  tree oprnd0, oprnd1;
  gimple *last_stmt = stmt_vinfo->stmt;
  vec_info *vinfo = stmt_vinfo->vinfo;
  tree type, half_type;
  gimple *pattern_stmt;
  tree var;

  /* Look for the following pattern
	  DX = (TYPE1) X;
	  DY = (TYPE1) Y;
	  DPROD = DX * DY;
	  DDPROD = (TYPE2) DPROD;
	  sum_1 = DDPROD + sum_0;
     In which
     - DX is double the size of X
     - DY is double the size of Y
     - DX, DY, DPROD all have the same type
     - sum is the same size of DPROD or bigger
     - sum has been recognized as a reduction variable.

     This is equivalent to:
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD w+ sum_0;  #widen summation
     or
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD + sum_0;   #summation
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (!vect_reassociating_reduction_p (stmt_vinfo, PLUS_EXPR,
				       &oprnd0, &oprnd1))
    return NULL;

  type = gimple_expr_type (last_stmt);

  vect_unpromoted_value unprom_mult;
  oprnd0 = vect_look_through_possible_promotion (vinfo, oprnd0, &unprom_mult);

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a (widen_)mult_expr.  */
  if (!oprnd0)
    return NULL;

  stmt_vec_info mult_vinfo = vect_get_internal_def (vinfo, oprnd0);
  if (!mult_vinfo)
    return NULL;

  /* FORNOW.  Can continue analyzing the def-use chain when this stmt in a phi
     inside the loop (in case we are analyzing an outer-loop).  */
  vect_unpromoted_value unprom0[2];
  if (!vect_widened_op_tree (mult_vinfo, MULT_EXPR, WIDEN_MULT_EXPR,
			     false, 2, unprom0, &half_type))
    return NULL;

  /* If there are two widening operations, make sure they agree on
     the sign of the extension.  */
  if (TYPE_PRECISION (unprom_mult.type) != TYPE_PRECISION (type)
      && TYPE_SIGN (unprom_mult.type) != TYPE_SIGN (half_type))
    return NULL;

  vect_pattern_detected ("vect_recog_dot_prod_pattern", last_stmt);

  tree half_vectype;
  if (!vect_supportable_direct_optab_p (type, DOT_PROD_EXPR, half_type,
					type_out, &half_vectype))
    return NULL;

  /* Get the inputs in the appropriate types.  */
  tree mult_oprnd[2];
  vect_convert_inputs (stmt_vinfo, 2, mult_oprnd, half_type,
		       unprom0, half_vectype);

  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign (var, DOT_PROD_EXPR,
				      mult_oprnd[0], mult_oprnd[1], oprnd1);

  return pattern_stmt;
}
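
/* Illustrative only, not part of the original file: a scalar source loop of
   the shape the dot-product recognizer above targets, assuming 16-bit
   inputs and a 32-bit accumulator.  The function name is made up.

     static int
     dot_prod_example (const short *x, const short *y, int n)
     {
       int sum = 0;
       for (int i = 0; i < n; i++)
	 sum += x[i] * y[i];	// S3-S7 above: widen, multiply, accumulate
       return sum;
     }

   On targets that provide the DOT_PROD_EXPR optab, the widening multiply
   and the accumulation collapse into DOT_PROD_EXPR <x_t, y_t, sum_0>.  */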
/* Function vect_recog_sad_pattern

   Try to find the following Sum of Absolute Difference (SAD) pattern:

     unsigned type x_t, y_t;
     signed TYPE1 diff, abs_diff;
     TYPE2 sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = ...
     S2  y_t = ...
     S3  x_T = (TYPE1) x_t;
     S4  y_T = (TYPE1) y_t;
     S5  diff = x_T - y_T;
     S6  abs_diff = ABS_EXPR <diff>;
     [S7  abs_diff = (TYPE2) abs_diff;  #optional]
     S8  sum_1 = abs_diff + sum_0;

   where 'TYPE1' is at least double the size of type 'type', and 'TYPE2' is the
   same size as 'TYPE1' or bigger.  This is a special case of a reduction
   computation.

   Input:

   * STMT_VINFO: The stmt from which the pattern search begins.  In the
   example, when this function is called with S8, the pattern
   {S3,S4,S5,S6,S7,S8} will be detected.

   Output:

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
	SAD_EXPR <x_t, y_t, sum_0>
  */

static gimple *
vect_recog_sad_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
{
  gimple *last_stmt = stmt_vinfo->stmt;
  vec_info *vinfo = stmt_vinfo->vinfo;
  tree half_type;

  /* Look for the following pattern
	  DX = (TYPE1) X;
	  DY = (TYPE1) Y;
	  DDIFF = DX - DY;
	  DAD = ABS_EXPR <DDIFF>;
	  DDAD = (TYPE2) DAD;
	  sum_1 = DDAD + sum_0;
     In which
     - DX is at least double the size of X
     - DY is at least double the size of Y
     - DX, DY, DDIFF, DAD all have the same type
     - sum is the same size of DAD or bigger
     - sum has been recognized as a reduction variable.

     This is equivalent to:
       DDIFF = X w- Y;          #widen sub
       DAD = ABS_EXPR <DDIFF>;
       sum_1 = DAD w+ sum_0;    #widen summation
     or
       DDIFF = X w- Y;          #widen sub
       DAD = ABS_EXPR <DDIFF>;
       sum_1 = DAD + sum_0;     #summation
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  tree plus_oprnd0, plus_oprnd1;
  if (!vect_reassociating_reduction_p (stmt_vinfo, PLUS_EXPR,
				       &plus_oprnd0, &plus_oprnd1))
    return NULL;

  tree sum_type = gimple_expr_type (last_stmt);

  /* Any non-truncating sequence of conversions is OK here, since
     with a successful match, the result of the ABS(U) is known to fit
     within the nonnegative range of the result type.  (It cannot be the
     negative of the minimum signed value due to the range of the widening
     MINUS_EXPR.)  */
  vect_unpromoted_value unprom_abs;
  plus_oprnd0 = vect_look_through_possible_promotion (vinfo, plus_oprnd0,
						      &unprom_abs);

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that plus_oprnd1 is the reduction variable (defined by a loop-header
     phi), and plus_oprnd0 is an ssa-name defined by a stmt in the loop body.
     Then check that plus_oprnd0 is defined by an abs_expr.  */
  if (!plus_oprnd0)
    return NULL;

  stmt_vec_info abs_stmt_vinfo = vect_get_internal_def (vinfo, plus_oprnd0);
  if (!abs_stmt_vinfo)
    return NULL;

  /* FORNOW.  Can continue analyzing the def-use chain when this stmt in a phi
     inside the loop (in case we are analyzing an outer-loop).  */
  gassign *abs_stmt = dyn_cast <gassign *> (abs_stmt_vinfo->stmt);
  if (!abs_stmt
      || (gimple_assign_rhs_code (abs_stmt) != ABS_EXPR
	  && gimple_assign_rhs_code (abs_stmt) != ABSU_EXPR))
    return NULL;

  tree abs_oprnd = gimple_assign_rhs1 (abs_stmt);
  tree abs_type = TREE_TYPE (abs_oprnd);
  if (TYPE_UNSIGNED (abs_type))
    return NULL;

  /* Peel off conversions from the ABS input.  This can involve sign
     changes (e.g. from an unsigned subtraction to a signed ABS input)
     or signed promotion, but it can't include unsigned promotion.
     (Note that ABS of an unsigned promotion should have been folded
     away before now anyway.)  */
  vect_unpromoted_value unprom_diff;
  abs_oprnd = vect_look_through_possible_promotion (vinfo, abs_oprnd,
						    &unprom_diff);
  if (!abs_oprnd)
    return NULL;
  if (TYPE_PRECISION (unprom_diff.type) != TYPE_PRECISION (abs_type)
      && TYPE_UNSIGNED (unprom_diff.type))
    return NULL;

  /* We then detect if the operand of abs_expr is defined by a minus_expr.  */
  stmt_vec_info diff_stmt_vinfo = vect_get_internal_def (vinfo, abs_oprnd);
  if (!diff_stmt_vinfo)
    return NULL;

  /* FORNOW.  Can continue analyzing the def-use chain when this stmt in a phi
     inside the loop (in case we are analyzing an outer-loop).  */
  vect_unpromoted_value unprom[2];
  if (!vect_widened_op_tree (diff_stmt_vinfo, MINUS_EXPR, MINUS_EXPR,
			     false, 2, unprom, &half_type))
    return NULL;

  vect_pattern_detected ("vect_recog_sad_pattern", last_stmt);

  tree half_vectype;
  if (!vect_supportable_direct_optab_p (sum_type, SAD_EXPR, half_type,
					type_out, &half_vectype))
    return NULL;

  /* Get the inputs to the SAD_EXPR in the appropriate types.  */
  tree sad_oprnd[2];
  vect_convert_inputs (stmt_vinfo, 2, sad_oprnd, half_type,
		       unprom, half_vectype);

  tree var = vect_recog_temp_ssa_var (sum_type, NULL);
  gimple *pattern_stmt = gimple_build_assign (var, SAD_EXPR, sad_oprnd[0],
					      sad_oprnd[1], plus_oprnd1);

  return pattern_stmt;
}
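
/* Illustrative only, not part of the original file: a scalar loop of the
   shape the SAD recognizer above targets, assuming unsigned char inputs
   and an int accumulator (the classic sum-of-absolute-differences kernel).

     static int
     sad_example (const unsigned char *x, const unsigned char *y, int n)
     {
       int sum = 0;
       for (int i = 0; i < n; i++)
	 {
	   int diff = x[i] - y[i];		// S5: widening subtraction
	   sum += diff < 0 ? -diff : diff;	// S6/S8: ABS plus summation
	 }
       return sum;
     }

   On targets with a SAD_EXPR optab the loop body becomes
   SAD_EXPR <x_t, y_t, sum_0>.  */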
/* Recognize an operation that performs ORIG_CODE on widened inputs,
   so that it can be treated as though it had the form:

      A_TYPE a;
      B_TYPE b;
      HALF_TYPE a_cast = (HALF_TYPE) a;  // possible no-op
      HALF_TYPE b_cast = (HALF_TYPE) b;  // possible no-op
    | RES_TYPE a_extend = (RES_TYPE) a_cast;  // promotion from HALF_TYPE
    | RES_TYPE b_extend = (RES_TYPE) b_cast;  // promotion from HALF_TYPE
    | RES_TYPE res = a_extend ORIG_CODE b_extend;

   Try to replace the pattern with:

      A_TYPE a;
      B_TYPE b;
      HALF_TYPE a_cast = (HALF_TYPE) a;  // possible no-op
      HALF_TYPE b_cast = (HALF_TYPE) b;  // possible no-op
    | EXT_TYPE ext = a_cast WIDE_CODE b_cast;
    | RES_TYPE res = (EXT_TYPE) ext;  // possible no-op

   where EXT_TYPE is wider than HALF_TYPE but has the same signedness.

   SHIFT_P is true if ORIG_CODE and WIDE_CODE are shifts.  NAME is the
   name of the pattern being matched, for dump purposes.  */

static gimple *
vect_recog_widen_op_pattern (stmt_vec_info last_stmt_info, tree *type_out,
			     tree_code orig_code, tree_code wide_code,
			     bool shift_p, const char *name)
{
  gimple *last_stmt = last_stmt_info->stmt;

  vect_unpromoted_value unprom[2];
  tree half_type;
  if (!vect_widened_op_tree (last_stmt_info, orig_code, orig_code,
			     shift_p, 2, unprom, &half_type))
    return NULL;

  /* Pattern detected.  */
  vect_pattern_detected (name, last_stmt);

  tree type = gimple_expr_type (last_stmt);
  tree itype = type;
  if (TYPE_PRECISION (type) != TYPE_PRECISION (half_type) * 2
      || TYPE_UNSIGNED (type) != TYPE_UNSIGNED (half_type))
    itype = build_nonstandard_integer_type (TYPE_PRECISION (half_type) * 2,
					    TYPE_UNSIGNED (half_type));

  /* Check target support.  */
  tree vectype = get_vectype_for_scalar_type (half_type);
  tree vecitype = get_vectype_for_scalar_type (itype);
  enum tree_code dummy_code;
  int dummy_int;
  auto_vec<tree> dummy_vec;
  if (!vectype
      || !vecitype
      || !supportable_widening_operation (wide_code, last_stmt_info,
					  vecitype, vectype,
					  &dummy_code, &dummy_code,
					  &dummy_int, &dummy_vec))
    return NULL;

  *type_out = get_vectype_for_scalar_type (type);
  if (!*type_out)
    return NULL;

  tree oprnd[2];
  vect_convert_inputs (last_stmt_info, 2, oprnd, half_type, unprom, vectype);

  tree var = vect_recog_temp_ssa_var (itype, NULL);
  gimple *pattern_stmt = gimple_build_assign (var, wide_code,
					      oprnd[0], oprnd[1]);

  return vect_convert_output (last_stmt_info, type, pattern_stmt, vecitype);
}
/* Try to detect multiplication on widened inputs, converting MULT_EXPR
   to WIDEN_MULT_EXPR.  See vect_recog_widen_op_pattern for details.  */

static gimple *
vect_recog_widen_mult_pattern (stmt_vec_info last_stmt_info, tree *type_out)
{
  return vect_recog_widen_op_pattern (last_stmt_info, type_out, MULT_EXPR,
				      WIDEN_MULT_EXPR, false,
				      "vect_recog_widen_mult_pattern");
}
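
/* Illustrative only, not part of the original file: a loop whose multiply
   the recognizer above converts to WIDEN_MULT_EXPR, assuming 16-bit inputs
   and a 32-bit result.

     static void
     widen_mult_example (int *out, const short *a, const short *b, int n)
     {
       for (int i = 0; i < n; i++)
	 out[i] = (int) a[i] * (int) b[i];	// a_extend ORIG_CODE b_extend
     }
*/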
/* Function vect_recog_pow_pattern

   Try to find the following pattern:

     x = POW (y, N);

   with POW being one of pow, powf, powi, powif and N being
   either 2 or 0.5.

   Input:

   * STMT_VINFO: The stmt from which the pattern search begins.

   Output:

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
	x = x * x
   or
	x = sqrt (x)
*/

static gimple *
vect_recog_pow_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
{
  gimple *last_stmt = stmt_vinfo->stmt;
  tree base, exp;
  gimple *stmt;
  tree var;

  if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
    return NULL;

  switch (gimple_call_combined_fn (last_stmt))
    {
    CASE_CFN_POW:
    CASE_CFN_POWI:
      break;

    default:
      return NULL;
    }

  base = gimple_call_arg (last_stmt, 0);
  exp = gimple_call_arg (last_stmt, 1);
  if (TREE_CODE (exp) != REAL_CST
      && TREE_CODE (exp) != INTEGER_CST)
    {
      if (flag_unsafe_math_optimizations
	  && TREE_CODE (base) == REAL_CST
	  && !gimple_call_internal_p (last_stmt))
	{
	  combined_fn log_cfn;
	  built_in_function exp_bfn;
	  switch (DECL_FUNCTION_CODE (gimple_call_fndecl (last_stmt)))
	    {
	    case BUILT_IN_POW:
	      log_cfn = CFN_BUILT_IN_LOG;
	      exp_bfn = BUILT_IN_EXP;
	      break;
	    case BUILT_IN_POWF:
	      log_cfn = CFN_BUILT_IN_LOGF;
	      exp_bfn = BUILT_IN_EXPF;
	      break;
	    case BUILT_IN_POWL:
	      log_cfn = CFN_BUILT_IN_LOGL;
	      exp_bfn = BUILT_IN_EXPL;
	      break;
	    default:
	      return NULL;
	    }
	  tree logc = fold_const_call (log_cfn, TREE_TYPE (base), base);
	  tree exp_decl = builtin_decl_implicit (exp_bfn);
	  /* Optimize pow (C, x) as exp (log (C) * x).  Normally match.pd
	     does that, but if C is a power of 2, we want to use
	     exp2 (log2 (C) * x) in the non-vectorized version, but for
	     vectorization we don't have vectorized exp2.  */
	  if (logc
	      && TREE_CODE (logc) == REAL_CST
	      && exp_decl
	      && lookup_attribute ("omp declare simd",
				   DECL_ATTRIBUTES (exp_decl)))
	    {
	      cgraph_node *node = cgraph_node::get_create (exp_decl);
	      if (node->simd_clones == NULL)
		{
		  if (targetm.simd_clone.compute_vecsize_and_simdlen == NULL
		      || node->definition)
		    return NULL;
		  expand_simd_clones (node);
		  if (node->simd_clones == NULL)
		    return NULL;
		}
	      *type_out = get_vectype_for_scalar_type (TREE_TYPE (base));
	      if (!*type_out)
		return NULL;
	      tree def = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
	      gimple *g = gimple_build_assign (def, MULT_EXPR, exp, logc);
	      append_pattern_def_seq (stmt_vinfo, g);
	      tree res = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
	      g = gimple_build_call (exp_decl, 1, def);
	      gimple_call_set_lhs (g, res);
	      return g;
	    }
	}
      return NULL;
    }

  /* We now have a pow or powi builtin function call with a constant
     exponent.  */

  /* Catch squaring.  */
  if ((tree_fits_shwi_p (exp)
       && tree_to_shwi (exp) == 2)
      || (TREE_CODE (exp) == REAL_CST
	  && real_equal (&TREE_REAL_CST (exp), &dconst2)))
    {
      if (!vect_supportable_direct_optab_p (TREE_TYPE (base), MULT_EXPR,
					    TREE_TYPE (base), type_out))
	return NULL;

      var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
      stmt = gimple_build_assign (var, MULT_EXPR, base, base);
      return stmt;
    }

  /* Catch square root.  */
  if (TREE_CODE (exp) == REAL_CST
      && real_equal (&TREE_REAL_CST (exp), &dconsthalf))
    {
      *type_out = get_vectype_for_scalar_type (TREE_TYPE (base));
      if (*type_out
	  && direct_internal_fn_supported_p (IFN_SQRT, *type_out,
					     OPTIMIZE_FOR_SPEED))
	{
	  gcall *stmt = gimple_build_call_internal (IFN_SQRT, 1, base);
	  var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
	  gimple_call_set_lhs (stmt, var);
	  gimple_call_set_nothrow (stmt, true);
	  return stmt;
	}
    }

  return NULL;
}
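
/* Illustrative only, not part of the original file: calls of the shape the
   two constant-exponent checks above rewrite.

     #include <math.h>

     static void
     pow_example (double *x, int n)
     {
       for (int i = 0; i < n; i++)
	 {
	   x[i] = pow (x[i], 2.0);	// squaring: becomes x[i] * x[i]
	   x[i] = pow (x[i], 0.5);	// square root: becomes IFN_SQRT
	 }
     }
*/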
/* Function vect_recog_widen_sum_pattern

   Try to find the following pattern:

     type x_t;
     TYPE x_T, sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = *p;
     S2  x_T = (TYPE) x_t;
     S3  sum_1 = x_T + sum_0;

   where type 'TYPE' is at least double the size of type 'type', i.e - we're
   summing elements of type 'type' into an accumulator of type 'TYPE'.  This is
   a special case of a reduction computation.

   Input:

   * STMT_VINFO: The stmt from which the pattern search begins.  In the example,
   when this function is called with S3, the pattern {S2,S3} will be detected.

   Output:

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
	WIDEN_SUM <x_t, sum_0>

   Note: The widening-sum idiom is a widening reduction pattern that is
	 vectorized without preserving all the intermediate results.  It
	 produces only N/2 (widened) results (by summing up pairs of
	 intermediate results) rather than all N results.  Therefore, we
	 cannot allow this pattern when we want to get all the results and in
	 the correct order (as is the case when this computation is in an
	 inner-loop nested in an outer-loop that is being vectorized).  */

static gimple *
vect_recog_widen_sum_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
{
  gimple *last_stmt = stmt_vinfo->stmt;
  tree oprnd0, oprnd1;
  vec_info *vinfo = stmt_vinfo->vinfo;
  tree type;
  gimple *pattern_stmt;
  tree var;

  /* Look for the following pattern
	  DX = (TYPE) X;
	  sum_1 = DX + sum_0;
     In which DX is at least double the size of X, and sum_1 has been
     recognized as a reduction variable.
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (!vect_reassociating_reduction_p (stmt_vinfo, PLUS_EXPR,
				       &oprnd0, &oprnd1))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a cast from type 'type' to type
     'TYPE'.  */

  vect_unpromoted_value unprom0;
  if (!vect_look_through_possible_promotion (vinfo, oprnd0, &unprom0)
      || TYPE_PRECISION (unprom0.type) * 2 > TYPE_PRECISION (type))
    return NULL;

  vect_pattern_detected ("vect_recog_widen_sum_pattern", last_stmt);

  if (!vect_supportable_direct_optab_p (type, WIDEN_SUM_EXPR, unprom0.type,
					type_out))
    return NULL;

  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign (var, WIDEN_SUM_EXPR, unprom0.op, oprnd1);

  return pattern_stmt;
}
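
/* Illustrative only, not part of the original file: a scalar loop of the
   shape the widening-sum recognizer above targets, assuming unsigned short
   elements summed into an unsigned int accumulator.

     static unsigned int
     widen_sum_example (const unsigned short *x, int n)
     {
       unsigned int sum = 0;
       for (int i = 0; i < n; i++)
	 sum += x[i];		// S2: widen, S3: accumulate
       return sum;
     }
*/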
/* Recognize cases in which an operation is performed in one type WTYPE
   but could be done more efficiently in a narrower type NTYPE.  For example,
   if we have:

     ATYPE a;  // narrower than NTYPE
     BTYPE b;  // narrower than NTYPE
     WTYPE aw = (WTYPE) a;
     WTYPE bw = (WTYPE) b;
     WTYPE res = aw + bw;  // only uses of aw and bw

   then it would be more efficient to do:

     NTYPE an = (NTYPE) a;
     NTYPE bn = (NTYPE) b;
     NTYPE resn = an + bn;
     WTYPE res = (WTYPE) resn;

   Other situations include things like:

     ATYPE a;  // NTYPE or narrower
     WTYPE aw = (WTYPE) a;
     WTYPE res = aw + b;

   when only "(NTYPE) res" is significant.  In that case it's more efficient
   to truncate "b" and do the operation on NTYPE instead:

     NTYPE an = (NTYPE) a;
     NTYPE bn = (NTYPE) b;  // truncation
     NTYPE resn = an + bn;
     WTYPE res = (WTYPE) resn;

   All users of "res" should then use "resn" instead, making the final
   statement dead (not marked as relevant).  The final statement is still
   needed to maintain the type correctness of the IR.

   vect_determine_precisions has already determined the minimum
   precision of the operation and the minimum precision required
   by users of the result.  */

static gimple *
vect_recog_over_widening_pattern (stmt_vec_info last_stmt_info, tree *type_out)
{
  gassign *last_stmt = dyn_cast <gassign *> (last_stmt_info->stmt);
  if (!last_stmt)
    return NULL;

  /* See whether we have found that this operation can be done on a
     narrower type without changing its semantics.  */
  unsigned int new_precision = last_stmt_info->operation_precision;
  if (!new_precision)
    return NULL;

  vec_info *vinfo = last_stmt_info->vinfo;
  tree lhs = gimple_assign_lhs (last_stmt);
  tree type = TREE_TYPE (lhs);
  tree_code code = gimple_assign_rhs_code (last_stmt);

  /* Keep the first operand of a COND_EXPR as-is: only the other two
     operands are interesting.  */
  unsigned int first_op = (code == COND_EXPR ? 2 : 1);

  /* Check the operands.  */
  unsigned int nops = gimple_num_ops (last_stmt) - first_op;
  auto_vec <vect_unpromoted_value, 3> unprom (nops);
  unprom.quick_grow (nops);
  unsigned int min_precision = 0;
  bool single_use_p = false;
  for (unsigned int i = 0; i < nops; ++i)
    {
      tree op = gimple_op (last_stmt, first_op + i);
      if (TREE_CODE (op) == INTEGER_CST)
	unprom[i].set_op (op, vect_constant_def);
      else if (TREE_CODE (op) == SSA_NAME)
	{
	  bool op_single_use_p = true;
	  if (!vect_look_through_possible_promotion (vinfo, op, &unprom[i],
						     &op_single_use_p))
	    return NULL;

	  /* If:

	     (1) N bits of the result are needed;
	     (2) all inputs are widened from M<N bits; and
	     (3) one operand OP is a single-use SSA name

	     we can shift the M->N widening from OP to the output
	     without changing the number or type of extensions involved.
	     This then reduces the number of copies of STMT_INFO.

	     If instead of (3) more than one operand is a single-use SSA name,
	     shifting the extension to the output is even more of a win.

	     If instead:

	     (1) N bits of the result are needed;
	     (2) one operand OP2 is widened from M2<N bits;
	     (3) another operand OP1 is widened from M1<M2 bits; and
	     (4) both OP1 and OP2 are single-use

	     the choice is between:

	     (a) truncating OP2 to M1, doing the operation on M1,
		 and then widening the result to N

	     (b) widening OP1 to M2, doing the operation on M2, and then
		 widening the result to N

	     Both shift the M2->N widening of the inputs to the output.
	     (a) additionally shifts the M1->M2 widening to the output;
	     it requires fewer copies of STMT_INFO but requires an extra
	     M1->M2 truncation.

	     Which is better will depend on the complexity and cost of
	     STMT_INFO, which is hard to predict at this stage.  However,
	     a clear tie-breaker in favor of (b) is the fact that the
	     truncation in (a) increases the length of the operation chain.

	     If instead of (4) only one of OP1 or OP2 is single-use,
	     (b) is still a win over doing the operation in N bits:
	     it still shifts the M2->N widening on the single-use operand
	     to the output and reduces the number of STMT_INFO copies.

	     If neither operand is single-use then operating on fewer than
	     N bits might lead to more extensions overall.  Whether it does
	     or not depends on global information about the vectorization
	     region, and whether that's a good trade-off would again
	     depend on the complexity and cost of the statements involved,
	     as well as things like register pressure that are not normally
	     modelled at this stage.  We therefore ignore these cases
	     and just optimize the clear single-use wins above.

	     Thus we take the maximum precision of the unpromoted operands
	     and record whether any operand is single-use.  */
	  if (unprom[i].dt == vect_internal_def)
	    {
	      min_precision = MAX (min_precision,
				   TYPE_PRECISION (unprom[i].type));
	      single_use_p |= op_single_use_p;
	    }
	}
    }

  /* Although the operation could be done in operation_precision, we have
     to balance that against introducing extra truncations or extensions.
     Calculate the minimum precision that can be handled efficiently.

     The loop above determined that the operation could be handled
     efficiently in MIN_PRECISION if SINGLE_USE_P; this would shift an
     extension from the inputs to the output without introducing more
     instructions, and would reduce the number of instructions required
     for STMT_INFO itself.

     vect_determine_precisions has also determined that the result only
     needs min_output_precision bits.  Truncating by a factor of N times
     requires a tree of N - 1 instructions, so if TYPE is N times wider
     than min_output_precision, doing the operation in TYPE and truncating
     the result requires N + (N - 1) = 2N - 1 instructions per output vector.
     In contrast:

     - truncating the input to a unary operation and doing the operation
       in the new type requires at most N - 1 + 1 = N instructions per
       output vector

     - doing the same for a binary operation requires at most
       (N - 1) * 2 + 1 = 2N - 1 instructions per output vector

     Both unary and binary operations require fewer instructions than
     this if the operands were extended from a suitable truncated form.
     Thus there is usually nothing to lose by doing operations in
     min_output_precision bits, but there can be something to gain.  */
  if (!single_use_p)
    min_precision = last_stmt_info->min_output_precision;
  else
    min_precision = MIN (min_precision, last_stmt_info->min_output_precision);

  /* Apply the minimum efficient precision we just calculated.  */
  if (new_precision < min_precision)
    new_precision = min_precision;
  if (new_precision >= TYPE_PRECISION (type))
    return NULL;

  vect_pattern_detected ("vect_recog_over_widening_pattern", last_stmt);

  *type_out = get_vectype_for_scalar_type (type);
  if (!*type_out)
    return NULL;

  /* We've found a viable pattern.  Get the new type of the operation.  */
  bool unsigned_p = (last_stmt_info->operation_sign == UNSIGNED);
  tree new_type = build_nonstandard_integer_type (new_precision, unsigned_p);

  /* We specifically don't check here whether the target supports the
     new operation, since it might be something that a later pattern
     wants to rewrite anyway.  If targets have a minimum element size
     for some optabs, we should pattern-match smaller ops to larger ops
     where beneficial.  */
  tree new_vectype = get_vectype_for_scalar_type (new_type);
  if (!new_vectype)
    return NULL;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "demoting ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
      dump_printf (MSG_NOTE, " to ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, new_type);
      dump_printf (MSG_NOTE, "\n");
    }

  /* Calculate the rhs operands for an operation on NEW_TYPE.  */
  tree ops[3] = {};
  for (unsigned int i = 1; i < first_op; ++i)
    ops[i - 1] = gimple_op (last_stmt, i);
  vect_convert_inputs (last_stmt_info, nops, &ops[first_op - 1],
		       new_type, &unprom[0], new_vectype);

  /* Use the operation to produce a result of type NEW_TYPE.  */
  tree new_var = vect_recog_temp_ssa_var (new_type, NULL);
  gimple *pattern_stmt = gimple_build_assign (new_var, code,
					      ops[0], ops[1], ops[2]);
  gimple_set_location (pattern_stmt, gimple_location (last_stmt));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created pattern stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
    }

  pattern_stmt = vect_convert_output (last_stmt_info, type,
				      pattern_stmt, new_vectype);

  return pattern_stmt;
}
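
/* Illustrative only, not part of the original file: a case in which the
   over-widening recognizer above narrows the arithmetic, assuming 8-bit
   elements.  The addition is performed in int only because of C's integer
   promotions; since just the low 8 bits are stored, the operation can be
   done in a narrower type.

     static void
     over_widening_example (unsigned char *out, const unsigned char *a,
			    const unsigned char *b, int n)
     {
       for (int i = 0; i < n; i++)
	 out[i] = (unsigned char) (a[i] + b[i]);	// int add, char result
     }
*/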
/* Recognize the patterns:

	    ATYPE a;  // narrower than TYPE
	    BTYPE b;  // narrower than TYPE
	(1) TYPE avg = ((TYPE) a + (TYPE) b) >> 1;
   or (2) TYPE avg = ((TYPE) a + (TYPE) b + 1) >> 1;

   where only the bottom half of avg is used.  Try to transform them into:

	(1) NTYPE avg' = .AVG_FLOOR ((NTYPE) a, (NTYPE) b);
   or (2) NTYPE avg' = .AVG_CEIL ((NTYPE) a, (NTYPE) b);

   followed by:

	    TYPE avg = (TYPE) avg';

   where NTYPE is no wider than half of TYPE.  Since only the bottom half
   of avg is used, all or part of the cast of avg' should become redundant.  */

static gimple *
vect_recog_average_pattern (stmt_vec_info last_stmt_info, tree *type_out)
{
  /* Check for a shift right by one bit.  */
  gassign *last_stmt = dyn_cast <gassign *> (last_stmt_info->stmt);
  vec_info *vinfo = last_stmt_info->vinfo;
  if (!last_stmt
      || gimple_assign_rhs_code (last_stmt) != RSHIFT_EXPR
      || !integer_onep (gimple_assign_rhs2 (last_stmt)))
    return NULL;

  /* Check that the shift result is wider than the users of the
     result need (i.e. that narrowing would be a natural choice).  */
  tree lhs = gimple_assign_lhs (last_stmt);
  tree type = TREE_TYPE (lhs);
  unsigned int target_precision
    = vect_element_precision (last_stmt_info->min_output_precision);
  if (!INTEGRAL_TYPE_P (type) || target_precision >= TYPE_PRECISION (type))
    return NULL;

  /* Get the definition of the shift input.  */
  tree rshift_rhs = gimple_assign_rhs1 (last_stmt);
  stmt_vec_info plus_stmt_info = vect_get_internal_def (vinfo, rshift_rhs);
  if (!plus_stmt_info)
    return NULL;

  /* Check whether the shift input can be seen as a tree of additions on
     2 or 3 widened inputs.

     Note that the pattern should be a win even if the result of one or
     more additions is reused elsewhere: if the pattern matches, we'd be
     replacing 2N RSHIFT_EXPRs and N VEC_PACK_*s with N IFN_AVG_*s.  */
  internal_fn ifn = IFN_AVG_FLOOR;
  vect_unpromoted_value unprom[3];
  tree new_type;
  unsigned int nops = vect_widened_op_tree (plus_stmt_info, PLUS_EXPR,
					    PLUS_EXPR, false, 3,
					    unprom, &new_type);
  if (nops == 0)
    return NULL;
  if (nops == 3)
    {
      /* Check that one operand is 1.  */
      unsigned int i;
      for (i = 0; i < 3; ++i)
	if (integer_onep (unprom[i].op))
	  break;
      if (i == 3)
	return NULL;
      /* Throw away the 1 operand and keep the other two.  */
      if (i < 2)
	unprom[i] = unprom[2];
      ifn = IFN_AVG_CEIL;
    }

  vect_pattern_detected ("vect_recog_average_pattern", last_stmt);

  /* We know that:

     (a) the operation can be viewed as:

	   TYPE widened0 = (TYPE) UNPROM[0];
	   TYPE widened1 = (TYPE) UNPROM[1];
	   TYPE tmp1 = widened0 + widened1 {+ 1};
	   TYPE tmp2 = tmp1 >> 1;   // LAST_STMT_INFO

     (b) the first two statements are equivalent to:

	   TYPE widened0 = (TYPE) (NEW_TYPE) UNPROM[0];
	   TYPE widened1 = (TYPE) (NEW_TYPE) UNPROM[1];

     (c) vect_recog_over_widening_pattern has already tried to narrow TYPE
	 where possible;

     (d) all the operations can be performed correctly at twice the width of
	 NEW_TYPE, due to the nature of the average operation; and

     (e) users of the result of the right shift need only TARGET_PRECISION
	 bits, where TARGET_PRECISION is no more than half of TYPE's
	 precision.

     Under these circumstances, the only situation in which NEW_TYPE
     could be narrower than TARGET_PRECISION is if widened0, widened1
     and an addition result are all used more than once.  Thus we can
     treat any widening of UNPROM[0] and UNPROM[1] to TARGET_PRECISION
     as "free", whereas widening the result of the average instruction
     from NEW_TYPE to TARGET_PRECISION would be a new operation.  It's
     therefore better not to go narrower than TARGET_PRECISION.  */
  if (TYPE_PRECISION (new_type) < target_precision)
    new_type = build_nonstandard_integer_type (target_precision,
					       TYPE_UNSIGNED (new_type));

  /* Check for target support.  */
  tree new_vectype = get_vectype_for_scalar_type (new_type);
  if (!new_vectype
      || !direct_internal_fn_supported_p (ifn, new_vectype,
					  OPTIMIZE_FOR_SPEED))
    return NULL;

  /* The IR requires a valid vector type for the cast result, even though
     it's likely to be discarded.  */
  *type_out = get_vectype_for_scalar_type (type);
  if (!*type_out)
    return NULL;

  /* Generate the IFN_AVG* call.  */
  tree new_var = vect_recog_temp_ssa_var (new_type, NULL);
  tree new_ops[2];
  vect_convert_inputs (last_stmt_info, 2, new_ops, new_type,
		       unprom, new_vectype);
  gcall *average_stmt = gimple_build_call_internal (ifn, 2, new_ops[0],
						    new_ops[1]);
  gimple_call_set_lhs (average_stmt, new_var);
  gimple_set_location (average_stmt, gimple_location (last_stmt));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created pattern stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, average_stmt, 0);
    }

  return vect_convert_output (last_stmt_info, type, average_stmt, new_vectype);
}
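
/* Illustrative only, not part of the original file: the rounding-average
   kernel that the recognizer above converts to IFN_AVG_CEIL, assuming
   8-bit elements (the "+ 1" variant; dropping the "+ 1" gives
   IFN_AVG_FLOOR instead).

     static void
     average_example (unsigned char *out, const unsigned char *a,
		      const unsigned char *b, int n)
     {
       for (int i = 0; i < n; i++)
	 out[i] = (a[i] + b[i] + 1) >> 1;	// form (2) above
     }
*/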
/* Recognize cases in which the input to a cast is wider than its
   output, and the input is fed by a widening operation.  Fold this
   by removing the unnecessary intermediate widening.  E.g.:

     unsigned char a;
     unsigned int b = (unsigned int) a;
     unsigned short c = (unsigned short) b;

   -->

     unsigned short c = (unsigned short) a;

   Although this is rare in input IR, it is an expected side-effect
   of the over-widening pattern above.

   This is beneficial also for integer-to-float conversions, if the
   widened integer has more bits than the float, and if the unwidened
   input doesn't.  */

static gimple *
vect_recog_cast_forwprop_pattern (stmt_vec_info last_stmt_info, tree *type_out)
{
  /* Check for a cast, including an integer-to-float conversion.  */
  gassign *last_stmt = dyn_cast <gassign *> (last_stmt_info->stmt);
  if (!last_stmt)
    return NULL;
  tree_code code = gimple_assign_rhs_code (last_stmt);
  if (!CONVERT_EXPR_CODE_P (code) && code != FLOAT_EXPR)
    return NULL;

  /* Make sure that the rhs is a scalar with a natural bitsize.  */
  tree lhs = gimple_assign_lhs (last_stmt);
  tree lhs_type = TREE_TYPE (lhs);
  scalar_mode lhs_mode;
  if (VECT_SCALAR_BOOLEAN_TYPE_P (lhs_type)
      || !is_a <scalar_mode> (TYPE_MODE (lhs_type), &lhs_mode))
    return NULL;

  /* Check for a narrowing operation (from a vector point of view).  */
  tree rhs = gimple_assign_rhs1 (last_stmt);
  tree rhs_type = TREE_TYPE (rhs);
  if (!INTEGRAL_TYPE_P (rhs_type)
      || VECT_SCALAR_BOOLEAN_TYPE_P (rhs_type)
      || TYPE_PRECISION (rhs_type) <= GET_MODE_BITSIZE (lhs_mode))
    return NULL;

  /* Try to find an unpromoted input.  */
  vec_info *vinfo = last_stmt_info->vinfo;
  vect_unpromoted_value unprom;
  if (!vect_look_through_possible_promotion (vinfo, rhs, &unprom)
      || TYPE_PRECISION (unprom.type) >= TYPE_PRECISION (rhs_type))
    return NULL;

  /* If the bits above RHS_TYPE matter, make sure that they're the
     same when extending from UNPROM as they are when extending from RHS.  */
  if (!INTEGRAL_TYPE_P (lhs_type)
      && TYPE_SIGN (rhs_type) != TYPE_SIGN (unprom.type))
    return NULL;

  /* We can get the same result by casting UNPROM directly, to avoid
     the unnecessary widening and narrowing.  */
  vect_pattern_detected ("vect_recog_cast_forwprop_pattern", last_stmt);

  *type_out = get_vectype_for_scalar_type (lhs_type);
  if (!*type_out)
    return NULL;

  tree new_var = vect_recog_temp_ssa_var (lhs_type, NULL);
  gimple *pattern_stmt = gimple_build_assign (new_var, code, unprom.op);
  gimple_set_location (pattern_stmt, gimple_location (last_stmt));

  return pattern_stmt;
}
/* Try to detect a shift left of a widened input, converting LSHIFT_EXPR
   to WIDEN_LSHIFT_EXPR.  See vect_recog_widen_op_pattern for details.  */

static gimple *
vect_recog_widen_shift_pattern (stmt_vec_info last_stmt_info, tree *type_out)
{
  return vect_recog_widen_op_pattern (last_stmt_info, type_out, LSHIFT_EXPR,
                                      WIDEN_LSHIFT_EXPR, true,
                                      "vect_recog_widen_shift_pattern");
}
/* Detect a rotate pattern that wouldn't otherwise be vectorized:

   type a_t, b_t, c_t;

   S0 a_t = b_t r<< c_t;

  Input/Output:

  * STMT_VINFO: The stmt from which the pattern search begins,
    i.e. the shift/rotate stmt.  The original stmt (S0) is replaced
    with a sequence:

    S1 d_t = -c_t;
    S2 e_t = d_t & (B - 1);
    S3 f_t = b_t << c_t;
    S4 g_t = b_t >> e_t;
    S0 a_t = f_t | g_t;

  where B is element bitsize of type.

  Output:

  * TYPE_OUT: The type of the output of this pattern.

  * Return value: A new stmt that will be used to replace the rotate
    S0 stmt.  */
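
/* Illustrative scalar sketch of the replacement sequence (not from this
   file), assuming 8-bit unsigned elements (B == 8) and a rotate count
   already reduced to the range [0, B - 1]:

     static inline unsigned char
     rotate_left_u8 (unsigned char b, unsigned char c)
     {
       unsigned char e = (unsigned char) (-c & 7);   // S1 + S2
       unsigned char f = (unsigned char) (b << c);   // S3
       unsigned char g = (unsigned char) (b >> e);   // S4
       return (unsigned char) (f | g);               // S0
     }

   For c == 0 the mask makes e == 0, so the OR of the two shifts still
   yields b and no shift count ever reaches B.  */
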
1958 vect_recog_rotate_pattern (stmt_vec_info stmt_vinfo
, tree
*type_out
)
1960 gimple
*last_stmt
= stmt_vinfo
->stmt
;
1961 tree oprnd0
, oprnd1
, lhs
, var
, var1
, var2
, vectype
, type
, stype
, def
, def2
;
1962 gimple
*pattern_stmt
, *def_stmt
;
1963 enum tree_code rhs_code
;
1964 vec_info
*vinfo
= stmt_vinfo
->vinfo
;
1965 enum vect_def_type dt
;
1966 optab optab1
, optab2
;
1967 edge ext_def
= NULL
;
1969 if (!is_gimple_assign (last_stmt
))
1972 rhs_code
= gimple_assign_rhs_code (last_stmt
);
1982 lhs
= gimple_assign_lhs (last_stmt
);
1983 oprnd0
= gimple_assign_rhs1 (last_stmt
);
1984 type
= TREE_TYPE (oprnd0
);
1985 oprnd1
= gimple_assign_rhs2 (last_stmt
);
1986 if (TREE_CODE (oprnd0
) != SSA_NAME
1987 || TYPE_PRECISION (TREE_TYPE (lhs
)) != TYPE_PRECISION (type
)
1988 || !INTEGRAL_TYPE_P (type
)
1989 || !TYPE_UNSIGNED (type
))
1992 stmt_vec_info def_stmt_info
;
1993 if (!vect_is_simple_use (oprnd1
, vinfo
, &dt
, &def_stmt_info
, &def_stmt
))
1996 if (dt
!= vect_internal_def
1997 && dt
!= vect_constant_def
1998 && dt
!= vect_external_def
)
2001 vectype
= get_vectype_for_scalar_type (type
);
2002 if (vectype
== NULL_TREE
)
2005 /* If vector/vector or vector/scalar rotate is supported by the target,
2006 don't do anything here. */
2007 optab1
= optab_for_tree_code (rhs_code
, vectype
, optab_vector
);
2009 && optab_handler (optab1
, TYPE_MODE (vectype
)) != CODE_FOR_nothing
)
2012 if (is_a
<bb_vec_info
> (vinfo
) || dt
!= vect_internal_def
)
2014 optab2
= optab_for_tree_code (rhs_code
, vectype
, optab_scalar
);
2016 && optab_handler (optab2
, TYPE_MODE (vectype
)) != CODE_FOR_nothing
)
2020 /* If vector/vector or vector/scalar shifts aren't supported by the target,
2021 don't do anything here either. */
2022 optab1
= optab_for_tree_code (LSHIFT_EXPR
, vectype
, optab_vector
);
2023 optab2
= optab_for_tree_code (RSHIFT_EXPR
, vectype
, optab_vector
);
2025 || optab_handler (optab1
, TYPE_MODE (vectype
)) == CODE_FOR_nothing
2027 || optab_handler (optab2
, TYPE_MODE (vectype
)) == CODE_FOR_nothing
)
2029 if (! is_a
<bb_vec_info
> (vinfo
) && dt
== vect_internal_def
)
2031 optab1
= optab_for_tree_code (LSHIFT_EXPR
, vectype
, optab_scalar
);
2032 optab2
= optab_for_tree_code (RSHIFT_EXPR
, vectype
, optab_scalar
);
2034 || optab_handler (optab1
, TYPE_MODE (vectype
)) == CODE_FOR_nothing
2036 || optab_handler (optab2
, TYPE_MODE (vectype
)) == CODE_FOR_nothing
)
2040 *type_out
= vectype
;
2042 if (dt
== vect_external_def
2043 && TREE_CODE (oprnd1
) == SSA_NAME
)
2044 ext_def
= vect_get_external_def_edge (vinfo
, oprnd1
);
2047 scalar_int_mode mode
= SCALAR_INT_TYPE_MODE (type
);
2048 if (TREE_CODE (oprnd1
) == INTEGER_CST
2049 || TYPE_MODE (TREE_TYPE (oprnd1
)) == mode
)
2051 else if (def_stmt
&& gimple_assign_cast_p (def_stmt
))
2053 tree rhs1
= gimple_assign_rhs1 (def_stmt
);
2054 if (TYPE_MODE (TREE_TYPE (rhs1
)) == mode
2055 && TYPE_PRECISION (TREE_TYPE (rhs1
))
2056 == TYPE_PRECISION (type
))
2060 if (def
== NULL_TREE
)
2062 def
= vect_recog_temp_ssa_var (type
, NULL
);
2063 def_stmt
= gimple_build_assign (def
, NOP_EXPR
, oprnd1
);
2067 = gsi_insert_on_edge_immediate (ext_def
, def_stmt
);
2068 gcc_assert (!new_bb
);
2071 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2073 stype
= TREE_TYPE (def
);
2074 scalar_int_mode smode
= SCALAR_INT_TYPE_MODE (stype
);
2076 if (TREE_CODE (def
) == INTEGER_CST
)
2078 if (!tree_fits_uhwi_p (def
)
2079 || tree_to_uhwi (def
) >= GET_MODE_PRECISION (mode
)
2080 || integer_zerop (def
))
2082 def2
= build_int_cst (stype
,
2083 GET_MODE_PRECISION (mode
) - tree_to_uhwi (def
));
2087 tree vecstype
= get_vectype_for_scalar_type (stype
);
2089 if (vecstype
== NULL_TREE
)
2091 def2
= vect_recog_temp_ssa_var (stype
, NULL
);
2092 def_stmt
= gimple_build_assign (def2
, NEGATE_EXPR
, def
);
2096 = gsi_insert_on_edge_immediate (ext_def
, def_stmt
);
2097 gcc_assert (!new_bb
);
2100 append_pattern_def_seq (stmt_vinfo
, def_stmt
, vecstype
);
2102 def2
= vect_recog_temp_ssa_var (stype
, NULL
);
2103 tree mask
= build_int_cst (stype
, GET_MODE_PRECISION (smode
) - 1);
2104 def_stmt
= gimple_build_assign (def2
, BIT_AND_EXPR
,
2105 gimple_assign_lhs (def_stmt
), mask
);
2109 = gsi_insert_on_edge_immediate (ext_def
, def_stmt
);
2110 gcc_assert (!new_bb
);
2113 append_pattern_def_seq (stmt_vinfo
, def_stmt
, vecstype
);
2116 var1
= vect_recog_temp_ssa_var (type
, NULL
);
2117 def_stmt
= gimple_build_assign (var1
, rhs_code
== LROTATE_EXPR
2118 ? LSHIFT_EXPR
: RSHIFT_EXPR
,
2120 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2122 var2
= vect_recog_temp_ssa_var (type
, NULL
);
2123 def_stmt
= gimple_build_assign (var2
, rhs_code
== LROTATE_EXPR
2124 ? RSHIFT_EXPR
: LSHIFT_EXPR
,
2126 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2128 /* Pattern detected. */
2129 vect_pattern_detected ("vect_recog_rotate_pattern", last_stmt
);
2131 /* Pattern supported. Create a stmt to be used to replace the pattern. */
2132 var
= vect_recog_temp_ssa_var (type
, NULL
);
2133 pattern_stmt
= gimple_build_assign (var
, BIT_IOR_EXPR
, var1
, var2
);
2135 return pattern_stmt
;
/* Detect a vector by vector shift pattern that wouldn't be otherwise
   vectorized:

   type a_t;
   TYPE b_T, res_T;

   S3 res_T = b_T op a_t;

  where type 'TYPE' is a type with different size than 'type',
  and op is <<, >> or rotate.

  Also detect cases where the shift amount is itself the result of a cast:

   type a_t;
   TYPE b_T, c_T, res_T;

   S1 a_t = (type) c_T;
   S3 res_T = b_T op a_t;

  Input/Output:

  * STMT_VINFO: The stmt from which the pattern search begins,
    i.e. the shift/rotate stmt.  The original stmt (S3) is replaced
    with a shift/rotate which has same type on both operands, in the
    second case just b_T op c_T, in the first case with added cast
    from a_t to c_T in STMT_VINFO_PATTERN_DEF_SEQ.

  Output:

  * TYPE_OUT: The type of the output of this pattern.

  * Return value: A new stmt that will be used to replace the shift/rotate
    S3 stmt.  */
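
/* Illustrative scalar sketch of the first case (not from this file),
   assuming 32-bit elements shifted by amounts stored as 8-bit values
   (each amount assumed to be less than 32):

     void
     shift_by_bytes (unsigned int *restrict r, const unsigned int *b,
                     const unsigned char *amt, int n)
     {
       for (int i = 0; i < n; ++i)
         r[i] = b[i] << amt[i];   // S3: b_T op a_t
     }

   The pattern casts the 8-bit amount to the 32-bit element type in the
   pattern def sequence, so both shift operands end up with the same
   vector element size.  */
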
2177 vect_recog_vector_vector_shift_pattern (stmt_vec_info stmt_vinfo
,
2180 gimple
*last_stmt
= stmt_vinfo
->stmt
;
2181 tree oprnd0
, oprnd1
, lhs
, var
;
2182 gimple
*pattern_stmt
;
2183 enum tree_code rhs_code
;
2184 vec_info
*vinfo
= stmt_vinfo
->vinfo
;
2186 if (!is_gimple_assign (last_stmt
))
2189 rhs_code
= gimple_assign_rhs_code (last_stmt
);
2201 lhs
= gimple_assign_lhs (last_stmt
);
2202 oprnd0
= gimple_assign_rhs1 (last_stmt
);
2203 oprnd1
= gimple_assign_rhs2 (last_stmt
);
2204 if (TREE_CODE (oprnd0
) != SSA_NAME
2205 || TREE_CODE (oprnd1
) != SSA_NAME
2206 || TYPE_MODE (TREE_TYPE (oprnd0
)) == TYPE_MODE (TREE_TYPE (oprnd1
))
2207 || !type_has_mode_precision_p (TREE_TYPE (oprnd1
))
2208 || TYPE_PRECISION (TREE_TYPE (lhs
))
2209 != TYPE_PRECISION (TREE_TYPE (oprnd0
)))
2212 stmt_vec_info def_vinfo
= vect_get_internal_def (vinfo
, oprnd1
);
2216 *type_out
= get_vectype_for_scalar_type (TREE_TYPE (oprnd0
));
2217 if (*type_out
== NULL_TREE
)
2220 tree def
= NULL_TREE
;
2221 gassign
*def_stmt
= dyn_cast
<gassign
*> (def_vinfo
->stmt
);
2222 if (def_stmt
&& gimple_assign_cast_p (def_stmt
))
2224 tree rhs1
= gimple_assign_rhs1 (def_stmt
);
2225 if (TYPE_MODE (TREE_TYPE (rhs1
)) == TYPE_MODE (TREE_TYPE (oprnd0
))
2226 && TYPE_PRECISION (TREE_TYPE (rhs1
))
2227 == TYPE_PRECISION (TREE_TYPE (oprnd0
)))
2229 if (TYPE_PRECISION (TREE_TYPE (oprnd1
))
2230 >= TYPE_PRECISION (TREE_TYPE (rhs1
)))
2235 = build_low_bits_mask (TREE_TYPE (rhs1
),
2236 TYPE_PRECISION (TREE_TYPE (oprnd1
)));
2237 def
= vect_recog_temp_ssa_var (TREE_TYPE (rhs1
), NULL
);
2238 def_stmt
= gimple_build_assign (def
, BIT_AND_EXPR
, rhs1
, mask
);
2239 tree vecstype
= get_vectype_for_scalar_type (TREE_TYPE (rhs1
));
2240 append_pattern_def_seq (stmt_vinfo
, def_stmt
, vecstype
);
2245 if (def
== NULL_TREE
)
2247 def
= vect_recog_temp_ssa_var (TREE_TYPE (oprnd0
), NULL
);
2248 def_stmt
= gimple_build_assign (def
, NOP_EXPR
, oprnd1
);
2249 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2252 /* Pattern detected. */
2253 vect_pattern_detected ("vect_recog_vector_vector_shift_pattern", last_stmt
);
2255 /* Pattern supported. Create a stmt to be used to replace the pattern. */
2256 var
= vect_recog_temp_ssa_var (TREE_TYPE (oprnd0
), NULL
);
2257 pattern_stmt
= gimple_build_assign (var
, rhs_code
, oprnd0
, def
);
2259 return pattern_stmt
;
/* Return true iff the target has a vector optab implementing the operation
   CODE on type VECTYPE.  */

static bool
target_has_vecop_for_code (tree_code code, tree vectype)
{
  optab voptab = optab_for_tree_code (code, vectype, optab_vector);
  return voptab
         && optab_handler (voptab, TYPE_MODE (vectype)) != CODE_FOR_nothing;
}
2273 /* Verify that the target has optabs of VECTYPE to perform all the steps
2274 needed by the multiplication-by-immediate synthesis algorithm described by
2275 ALG and VAR. If SYNTH_SHIFT_P is true ensure that vector addition is
2276 present. Return true iff the target supports all the steps. */
2279 target_supports_mult_synth_alg (struct algorithm
*alg
, mult_variant var
,
2280 tree vectype
, bool synth_shift_p
)
2282 if (alg
->op
[0] != alg_zero
&& alg
->op
[0] != alg_m
)
2285 bool supports_vminus
= target_has_vecop_for_code (MINUS_EXPR
, vectype
);
2286 bool supports_vplus
= target_has_vecop_for_code (PLUS_EXPR
, vectype
);
2288 if (var
== negate_variant
2289 && !target_has_vecop_for_code (NEGATE_EXPR
, vectype
))
2292 /* If we must synthesize shifts with additions make sure that vector
2293 addition is available. */
2294 if ((var
== add_variant
|| synth_shift_p
) && !supports_vplus
)
2297 for (int i
= 1; i
< alg
->ops
; i
++)
2305 case alg_add_factor
:
2306 if (!supports_vplus
)
2311 case alg_sub_factor
:
2312 if (!supports_vminus
)
2318 case alg_impossible
:
2328 /* Synthesize a left shift of OP by AMNT bits using a series of additions and
2329 putting the final result in DEST. Append all statements but the last into
2330 VINFO. Return the last statement. */
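
/* Illustrative scalar sketch of the idea (not from this file): a left shift
   by a constant can be synthesized purely with additions by doubling AMNT
   times, which is what the loop below does for scalars:

     static inline unsigned int
     lshift_by_additions (unsigned int op, unsigned int amnt)
     {
       unsigned int res = op;
       for (unsigned int i = 0; i < amnt; ++i)
         res = res + res;   // each addition doubles, i.e. shifts by 1
       return res;
     }

   The function below emits the same chain of PLUS_EXPR statements into the
   pattern def sequence.  */
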
2333 synth_lshift_by_additions (tree dest
, tree op
, HOST_WIDE_INT amnt
,
2334 stmt_vec_info vinfo
)
2337 tree itype
= TREE_TYPE (op
);
2339 gcc_assert (amnt
>= 0);
2340 for (i
= 0; i
< amnt
; i
++)
2342 tree tmp_var
= (i
< amnt
- 1) ? vect_recog_temp_ssa_var (itype
, NULL
)
2345 = gimple_build_assign (tmp_var
, PLUS_EXPR
, prev_res
, prev_res
);
2348 append_pattern_def_seq (vinfo
, stmt
);
2356 /* Helper for vect_synth_mult_by_constant. Apply a binary operation
2357 CODE to operands OP1 and OP2, creating a new temporary SSA var in
2358 the process if necessary. Append the resulting assignment statements
2359 to the sequence in STMT_VINFO. Return the SSA variable that holds the
2360 result of the binary operation. If SYNTH_SHIFT_P is true synthesize
2361 left shifts using additions. */
2364 apply_binop_and_append_stmt (tree_code code
, tree op1
, tree op2
,
2365 stmt_vec_info stmt_vinfo
, bool synth_shift_p
)
2367 if (integer_zerop (op2
)
2368 && (code
== LSHIFT_EXPR
2369 || code
== PLUS_EXPR
))
2371 gcc_assert (TREE_CODE (op1
) == SSA_NAME
);
2376 tree itype
= TREE_TYPE (op1
);
2377 tree tmp_var
= vect_recog_temp_ssa_var (itype
, NULL
);
2379 if (code
== LSHIFT_EXPR
2382 stmt
= synth_lshift_by_additions (tmp_var
, op1
, TREE_INT_CST_LOW (op2
),
2384 append_pattern_def_seq (stmt_vinfo
, stmt
);
2388 stmt
= gimple_build_assign (tmp_var
, code
, op1
, op2
);
2389 append_pattern_def_seq (stmt_vinfo
, stmt
);
2393 /* Synthesize a multiplication of OP by an INTEGER_CST VAL using shifts
2394 and simple arithmetic operations to be vectorized. Record the statements
2395 produced in STMT_VINFO and return the last statement in the sequence or
2396 NULL if it's not possible to synthesize such a multiplication.
2397 This function mirrors the behavior of expand_mult_const in expmed.c but
2398 works on tree-ssa form. */
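
/* Illustrative scalar sketch (not from this file): multiplying by the
   constant 9 can be decomposed into a shift and an addition,

     static inline unsigned int
     mult_by_9 (unsigned int x)
     {
       unsigned int t = x << 3;   // x * 8
       return t + x;              // x * 8 + x == x * 9
     }

   choose_mult_variant computes such shift/add/sub recipes; the code below
   replays the chosen recipe as gimple statements, falling back to
   synthesizing the shifts with additions when SYNTH_SHIFT_P.  */
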
2401 vect_synth_mult_by_constant (tree op
, tree val
,
2402 stmt_vec_info stmt_vinfo
)
2404 tree itype
= TREE_TYPE (op
);
2405 machine_mode mode
= TYPE_MODE (itype
);
2406 struct algorithm alg
;
2407 mult_variant variant
;
2408 if (!tree_fits_shwi_p (val
))
2411 /* Multiplication synthesis by shifts, adds and subs can introduce
2412 signed overflow where the original operation didn't. Perform the
2413 operations on an unsigned type and cast back to avoid this.
2414 In the future we may want to relax this for synthesis algorithms
2415 that we can prove do not cause unexpected overflow. */
2416 bool cast_to_unsigned_p
= !TYPE_OVERFLOW_WRAPS (itype
);
2418 tree multtype
= cast_to_unsigned_p
? unsigned_type_for (itype
) : itype
;
2420 /* Targets that don't support vector shifts but support vector additions
2421 can synthesize shifts that way. */
2422 bool synth_shift_p
= !vect_supportable_shift (LSHIFT_EXPR
, multtype
);
2424 HOST_WIDE_INT hwval
= tree_to_shwi (val
);
2425 /* Use MAX_COST here as we don't want to limit the sequence on rtx costs.
2426 The vectorizer's benefit analysis will decide whether it's beneficial
2428 bool possible
= choose_mult_variant (mode
, hwval
, &alg
,
2429 &variant
, MAX_COST
);
2433 tree vectype
= get_vectype_for_scalar_type (multtype
);
2436 || !target_supports_mult_synth_alg (&alg
, variant
,
2437 vectype
, synth_shift_p
))
2442 /* Clear out the sequence of statements so we can populate it below. */
2443 gimple
*stmt
= NULL
;
2445 if (cast_to_unsigned_p
)
2447 tree tmp_op
= vect_recog_temp_ssa_var (multtype
, NULL
);
2448 stmt
= gimple_build_assign (tmp_op
, CONVERT_EXPR
, op
);
2449 append_pattern_def_seq (stmt_vinfo
, stmt
);
2453 if (alg
.op
[0] == alg_zero
)
2454 accumulator
= build_int_cst (multtype
, 0);
2458 bool needs_fixup
= (variant
== negate_variant
)
2459 || (variant
== add_variant
);
2461 for (int i
= 1; i
< alg
.ops
; i
++)
2463 tree shft_log
= build_int_cst (multtype
, alg
.log
[i
]);
2464 tree accum_tmp
= vect_recog_temp_ssa_var (multtype
, NULL
);
2465 tree tmp_var
= NULL_TREE
;
2472 = synth_lshift_by_additions (accum_tmp
, accumulator
, alg
.log
[i
],
2475 stmt
= gimple_build_assign (accum_tmp
, LSHIFT_EXPR
, accumulator
,
2480 = apply_binop_and_append_stmt (LSHIFT_EXPR
, op
, shft_log
,
2481 stmt_vinfo
, synth_shift_p
);
2482 stmt
= gimple_build_assign (accum_tmp
, PLUS_EXPR
, accumulator
,
2486 tmp_var
= apply_binop_and_append_stmt (LSHIFT_EXPR
, op
,
2487 shft_log
, stmt_vinfo
,
2489 /* In some algorithms the first step involves zeroing the
2490 accumulator. If subtracting from such an accumulator
2491 just emit the negation directly. */
2492 if (integer_zerop (accumulator
))
2493 stmt
= gimple_build_assign (accum_tmp
, NEGATE_EXPR
, tmp_var
);
2495 stmt
= gimple_build_assign (accum_tmp
, MINUS_EXPR
, accumulator
,
2500 = apply_binop_and_append_stmt (LSHIFT_EXPR
, accumulator
, shft_log
,
2501 stmt_vinfo
, synth_shift_p
);
2502 stmt
= gimple_build_assign (accum_tmp
, PLUS_EXPR
, tmp_var
, op
);
2506 = apply_binop_and_append_stmt (LSHIFT_EXPR
, accumulator
, shft_log
,
2507 stmt_vinfo
, synth_shift_p
);
2508 stmt
= gimple_build_assign (accum_tmp
, MINUS_EXPR
, tmp_var
, op
);
2510 case alg_add_factor
:
2512 = apply_binop_and_append_stmt (LSHIFT_EXPR
, accumulator
, shft_log
,
2513 stmt_vinfo
, synth_shift_p
);
2514 stmt
= gimple_build_assign (accum_tmp
, PLUS_EXPR
, accumulator
,
2517 case alg_sub_factor
:
2519 = apply_binop_and_append_stmt (LSHIFT_EXPR
, accumulator
, shft_log
,
2520 stmt_vinfo
, synth_shift_p
);
2521 stmt
= gimple_build_assign (accum_tmp
, MINUS_EXPR
, tmp_var
,
2527 /* We don't want to append the last stmt in the sequence to stmt_vinfo
2528 but rather return it directly. */
2530 if ((i
< alg
.ops
- 1) || needs_fixup
|| cast_to_unsigned_p
)
2531 append_pattern_def_seq (stmt_vinfo
, stmt
);
2532 accumulator
= accum_tmp
;
2534 if (variant
== negate_variant
)
2536 tree accum_tmp
= vect_recog_temp_ssa_var (multtype
, NULL
);
2537 stmt
= gimple_build_assign (accum_tmp
, NEGATE_EXPR
, accumulator
);
2538 accumulator
= accum_tmp
;
2539 if (cast_to_unsigned_p
)
2540 append_pattern_def_seq (stmt_vinfo
, stmt
);
2542 else if (variant
== add_variant
)
2544 tree accum_tmp
= vect_recog_temp_ssa_var (multtype
, NULL
);
2545 stmt
= gimple_build_assign (accum_tmp
, PLUS_EXPR
, accumulator
, op
);
2546 accumulator
= accum_tmp
;
2547 if (cast_to_unsigned_p
)
2548 append_pattern_def_seq (stmt_vinfo
, stmt
);
2550 /* Move back to a signed if needed. */
2551 if (cast_to_unsigned_p
)
2553 tree accum_tmp
= vect_recog_temp_ssa_var (itype
, NULL
);
2554 stmt
= gimple_build_assign (accum_tmp
, CONVERT_EXPR
, accumulator
);
2560 /* Detect multiplication by constant and convert it into a sequence of
2561 shifts and additions, subtractions, negations. We reuse the
2562 choose_mult_variant algorithms from expmed.c
2566 STMT_VINFO: The stmt from which the pattern search begins,
2571 * TYPE_OUT: The type of the output of this pattern.
2573 * Return value: A new stmt that will be used to replace
2574 the multiplication. */
2577 vect_recog_mult_pattern (stmt_vec_info stmt_vinfo
, tree
*type_out
)
2579 gimple
*last_stmt
= stmt_vinfo
->stmt
;
2580 tree oprnd0
, oprnd1
, vectype
, itype
;
2581 gimple
*pattern_stmt
;
2583 if (!is_gimple_assign (last_stmt
))
2586 if (gimple_assign_rhs_code (last_stmt
) != MULT_EXPR
)
2589 oprnd0
= gimple_assign_rhs1 (last_stmt
);
2590 oprnd1
= gimple_assign_rhs2 (last_stmt
);
2591 itype
= TREE_TYPE (oprnd0
);
2593 if (TREE_CODE (oprnd0
) != SSA_NAME
2594 || TREE_CODE (oprnd1
) != INTEGER_CST
2595 || !INTEGRAL_TYPE_P (itype
)
2596 || !type_has_mode_precision_p (itype
))
2599 vectype
= get_vectype_for_scalar_type (itype
);
2600 if (vectype
== NULL_TREE
)
2603 /* If the target can handle vectorized multiplication natively,
2604 don't attempt to optimize this. */
2605 optab mul_optab
= optab_for_tree_code (MULT_EXPR
, vectype
, optab_default
);
2606 if (mul_optab
!= unknown_optab
)
2608 machine_mode vec_mode
= TYPE_MODE (vectype
);
2609 int icode
= (int) optab_handler (mul_optab
, vec_mode
);
2610 if (icode
!= CODE_FOR_nothing
)
2614 pattern_stmt
= vect_synth_mult_by_constant (oprnd0
, oprnd1
, stmt_vinfo
);
2618 /* Pattern detected. */
2619 vect_pattern_detected ("vect_recog_mult_pattern", last_stmt
);
2621 *type_out
= vectype
;
2623 return pattern_stmt
;
/* Detect a signed division by a constant that wouldn't be
   otherwise vectorized:

   type a_t, b_t;

   S1 a_t = b_t / N;

  where type 'type' is an integral type and N is a constant.

  Similarly handle modulo by a constant:

   S4 a_t = b_t % N;

  Input/Output:

  * STMT_VINFO: The stmt from which the pattern search begins,
    i.e. the division stmt.  S1 is replaced by if N is a power
    of two constant and type is signed:
    S3  y_t = b_t < 0 ? N - 1 : 0;
    S2  x_t = b_t + y_t;
    S1' a_t = x_t >> log2 (N);

    S4 is replaced if N is a power of two constant and
    type is signed by (where *_T temporaries have unsigned type):
    S9  y_T = b_t < 0 ? -1U : 0U;
    S8  z_T = y_T >> (sizeof (type_t) * CHAR_BIT - log2 (N));
    S7  z_t = (type) z_T;
    S6  w_t = b_t + z_t;
    S5  x_t = w_t & (N - 1);
    S4' a_t = x_t - z_t;

  Output:

  * TYPE_OUT: The type of the output of this pattern.

  * Return value: A new stmt that will be used to replace the division
    S1 or modulo S4 stmt.  */
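
/* Illustrative scalar sketch of the S3/S2/S1' replacement above (not from
   this file), for N == 8 and assuming the usual arithmetic right shift of
   signed values:

     static inline int
     sdiv_by_8 (int b)
     {
       int y = b < 0 ? 7 : 0;   // S3: add N - 1 when negative
       int x = b + y;           // S2: bias towards zero
       return x >> 3;           // S1': shift by log2 (N)
     }

   The bias makes the arithmetic shift round towards zero, matching C's
   truncating division for negative values of b.  */
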
2665 vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo
, tree
*type_out
)
2667 gimple
*last_stmt
= stmt_vinfo
->stmt
;
2668 tree oprnd0
, oprnd1
, vectype
, itype
, cond
;
2669 gimple
*pattern_stmt
, *def_stmt
;
2670 enum tree_code rhs_code
;
2673 int dummy_int
, prec
;
2675 if (!is_gimple_assign (last_stmt
))
2678 rhs_code
= gimple_assign_rhs_code (last_stmt
);
2681 case TRUNC_DIV_EXPR
:
2682 case EXACT_DIV_EXPR
:
2683 case TRUNC_MOD_EXPR
:
2689 oprnd0
= gimple_assign_rhs1 (last_stmt
);
2690 oprnd1
= gimple_assign_rhs2 (last_stmt
);
2691 itype
= TREE_TYPE (oprnd0
);
2692 if (TREE_CODE (oprnd0
) != SSA_NAME
2693 || TREE_CODE (oprnd1
) != INTEGER_CST
2694 || TREE_CODE (itype
) != INTEGER_TYPE
2695 || !type_has_mode_precision_p (itype
))
2698 scalar_int_mode itype_mode
= SCALAR_INT_TYPE_MODE (itype
);
2699 vectype
= get_vectype_for_scalar_type (itype
);
2700 if (vectype
== NULL_TREE
)
2703 if (optimize_bb_for_size_p (gimple_bb (last_stmt
)))
2705 /* If the target can handle vectorized division or modulo natively,
2706 don't attempt to optimize this, since native division is likely
2707 to give smaller code. */
2708 optab
= optab_for_tree_code (rhs_code
, vectype
, optab_default
);
2709 if (optab
!= unknown_optab
)
2711 machine_mode vec_mode
= TYPE_MODE (vectype
);
2712 int icode
= (int) optab_handler (optab
, vec_mode
);
2713 if (icode
!= CODE_FOR_nothing
)
2718 prec
= TYPE_PRECISION (itype
);
2719 if (integer_pow2p (oprnd1
))
2721 if (TYPE_UNSIGNED (itype
) || tree_int_cst_sgn (oprnd1
) != 1)
2724 /* Pattern detected. */
2725 vect_pattern_detected ("vect_recog_divmod_pattern", last_stmt
);
2727 cond
= build2 (LT_EXPR
, boolean_type_node
, oprnd0
,
2728 build_int_cst (itype
, 0));
2729 if (rhs_code
== TRUNC_DIV_EXPR
2730 || rhs_code
== EXACT_DIV_EXPR
)
2732 tree var
= vect_recog_temp_ssa_var (itype
, NULL
);
2735 = gimple_build_assign (var
, COND_EXPR
, cond
,
2736 fold_build2 (MINUS_EXPR
, itype
, oprnd1
,
2737 build_int_cst (itype
, 1)),
2738 build_int_cst (itype
, 0));
2739 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2740 var
= vect_recog_temp_ssa_var (itype
, NULL
);
2742 = gimple_build_assign (var
, PLUS_EXPR
, oprnd0
,
2743 gimple_assign_lhs (def_stmt
));
2744 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2746 shift
= build_int_cst (itype
, tree_log2 (oprnd1
));
2748 = gimple_build_assign (vect_recog_temp_ssa_var (itype
, NULL
),
2749 RSHIFT_EXPR
, var
, shift
);
2754 if (compare_tree_int (oprnd1
, 2) == 0)
2756 signmask
= vect_recog_temp_ssa_var (itype
, NULL
);
2757 def_stmt
= gimple_build_assign (signmask
, COND_EXPR
, cond
,
2758 build_int_cst (itype
, 1),
2759 build_int_cst (itype
, 0));
2760 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2765 = build_nonstandard_integer_type (prec
, 1);
2766 tree vecutype
= get_vectype_for_scalar_type (utype
);
2768 = build_int_cst (utype
, GET_MODE_BITSIZE (itype_mode
)
2769 - tree_log2 (oprnd1
));
2770 tree var
= vect_recog_temp_ssa_var (utype
, NULL
);
2772 def_stmt
= gimple_build_assign (var
, COND_EXPR
, cond
,
2773 build_int_cst (utype
, -1),
2774 build_int_cst (utype
, 0));
2775 append_pattern_def_seq (stmt_vinfo
, def_stmt
, vecutype
);
2776 var
= vect_recog_temp_ssa_var (utype
, NULL
);
2777 def_stmt
= gimple_build_assign (var
, RSHIFT_EXPR
,
2778 gimple_assign_lhs (def_stmt
),
2780 append_pattern_def_seq (stmt_vinfo
, def_stmt
, vecutype
);
2781 signmask
= vect_recog_temp_ssa_var (itype
, NULL
);
2783 = gimple_build_assign (signmask
, NOP_EXPR
, var
);
2784 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2787 = gimple_build_assign (vect_recog_temp_ssa_var (itype
, NULL
),
2788 PLUS_EXPR
, oprnd0
, signmask
);
2789 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2791 = gimple_build_assign (vect_recog_temp_ssa_var (itype
, NULL
),
2792 BIT_AND_EXPR
, gimple_assign_lhs (def_stmt
),
2793 fold_build2 (MINUS_EXPR
, itype
, oprnd1
,
2794 build_int_cst (itype
, 1)));
2795 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2798 = gimple_build_assign (vect_recog_temp_ssa_var (itype
, NULL
),
2799 MINUS_EXPR
, gimple_assign_lhs (def_stmt
),
2803 *type_out
= vectype
;
2804 return pattern_stmt
;
2807 if (prec
> HOST_BITS_PER_WIDE_INT
2808 || integer_zerop (oprnd1
))
2811 if (!can_mult_highpart_p (TYPE_MODE (vectype
), TYPE_UNSIGNED (itype
)))
2814 if (TYPE_UNSIGNED (itype
))
2816 unsigned HOST_WIDE_INT mh
, ml
;
2817 int pre_shift
, post_shift
;
2818 unsigned HOST_WIDE_INT d
= (TREE_INT_CST_LOW (oprnd1
)
2819 & GET_MODE_MASK (itype_mode
));
2820 tree t1
, t2
, t3
, t4
;
2822 if (d
>= (HOST_WIDE_INT_1U
<< (prec
- 1)))
2823 /* FIXME: Can transform this into oprnd0 >= oprnd1 ? 1 : 0. */
2826 /* Find a suitable multiplier and right shift count
2827 instead of multiplying with D. */
2828 mh
= choose_multiplier (d
, prec
, prec
, &ml
, &post_shift
, &dummy_int
);
2830 /* If the suggested multiplier is more than SIZE bits, we can do better
2831 for even divisors, using an initial right shift. */
2832 if (mh
!= 0 && (d
& 1) == 0)
2834 pre_shift
= ctz_or_zero (d
);
2835 mh
= choose_multiplier (d
>> pre_shift
, prec
, prec
- pre_shift
,
2836 &ml
, &post_shift
, &dummy_int
);
2844 if (post_shift
- 1 >= prec
)
2847 /* t1 = oprnd0 h* ml;
2851 q = t4 >> (post_shift - 1); */
2852 t1
= vect_recog_temp_ssa_var (itype
, NULL
);
2853 def_stmt
= gimple_build_assign (t1
, MULT_HIGHPART_EXPR
, oprnd0
,
2854 build_int_cst (itype
, ml
));
2855 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2857 t2
= vect_recog_temp_ssa_var (itype
, NULL
);
2859 = gimple_build_assign (t2
, MINUS_EXPR
, oprnd0
, t1
);
2860 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2862 t3
= vect_recog_temp_ssa_var (itype
, NULL
);
2864 = gimple_build_assign (t3
, RSHIFT_EXPR
, t2
, integer_one_node
);
2865 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2867 t4
= vect_recog_temp_ssa_var (itype
, NULL
);
2869 = gimple_build_assign (t4
, PLUS_EXPR
, t1
, t3
);
2871 if (post_shift
!= 1)
2873 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2875 q
= vect_recog_temp_ssa_var (itype
, NULL
);
2877 = gimple_build_assign (q
, RSHIFT_EXPR
, t4
,
2878 build_int_cst (itype
, post_shift
- 1));
2883 pattern_stmt
= def_stmt
;
2888 if (pre_shift
>= prec
|| post_shift
>= prec
)
2891 /* t1 = oprnd0 >> pre_shift;
2893 q = t2 >> post_shift; */
2896 t1
= vect_recog_temp_ssa_var (itype
, NULL
);
2898 = gimple_build_assign (t1
, RSHIFT_EXPR
, oprnd0
,
2899 build_int_cst (NULL
, pre_shift
));
2900 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2905 t2
= vect_recog_temp_ssa_var (itype
, NULL
);
2906 def_stmt
= gimple_build_assign (t2
, MULT_HIGHPART_EXPR
, t1
,
2907 build_int_cst (itype
, ml
));
2911 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2913 q
= vect_recog_temp_ssa_var (itype
, NULL
);
2915 = gimple_build_assign (q
, RSHIFT_EXPR
, t2
,
2916 build_int_cst (itype
, post_shift
));
2921 pattern_stmt
= def_stmt
;
2926 unsigned HOST_WIDE_INT ml
;
2928 HOST_WIDE_INT d
= TREE_INT_CST_LOW (oprnd1
);
2929 unsigned HOST_WIDE_INT abs_d
;
2931 tree t1
, t2
, t3
, t4
;
2933 /* Give up for -1. */
2937 /* Since d might be INT_MIN, we have to cast to
2938 unsigned HOST_WIDE_INT before negating to avoid
2939 undefined signed overflow. */
2941 ? (unsigned HOST_WIDE_INT
) d
2942 : - (unsigned HOST_WIDE_INT
) d
);
2944 /* n rem d = n rem -d */
2945 if (rhs_code
== TRUNC_MOD_EXPR
&& d
< 0)
2948 oprnd1
= build_int_cst (itype
, abs_d
);
2950 else if (HOST_BITS_PER_WIDE_INT
>= prec
2951 && abs_d
== HOST_WIDE_INT_1U
<< (prec
- 1))
2952 /* This case is not handled correctly below. */
2955 choose_multiplier (abs_d
, prec
, prec
- 1, &ml
, &post_shift
, &dummy_int
);
2956 if (ml
>= HOST_WIDE_INT_1U
<< (prec
- 1))
2959 ml
|= HOST_WIDE_INT_M1U
<< (prec
- 1);
2961 if (post_shift
>= prec
)
2964 /* t1 = oprnd0 h* ml; */
2965 t1
= vect_recog_temp_ssa_var (itype
, NULL
);
2966 def_stmt
= gimple_build_assign (t1
, MULT_HIGHPART_EXPR
, oprnd0
,
2967 build_int_cst (itype
, ml
));
2971 /* t2 = t1 + oprnd0; */
2972 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2973 t2
= vect_recog_temp_ssa_var (itype
, NULL
);
2974 def_stmt
= gimple_build_assign (t2
, PLUS_EXPR
, t1
, oprnd0
);
2981 /* t3 = t2 >> post_shift; */
2982 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
2983 t3
= vect_recog_temp_ssa_var (itype
, NULL
);
2984 def_stmt
= gimple_build_assign (t3
, RSHIFT_EXPR
, t2
,
2985 build_int_cst (itype
, post_shift
));
2990 wide_int oprnd0_min
, oprnd0_max
;
2992 if (get_range_info (oprnd0
, &oprnd0_min
, &oprnd0_max
) == VR_RANGE
)
2994 if (!wi::neg_p (oprnd0_min
, TYPE_SIGN (itype
)))
2996 else if (wi::neg_p (oprnd0_max
, TYPE_SIGN (itype
)))
3000 if (msb
== 0 && d
>= 0)
3004 pattern_stmt
= def_stmt
;
3008 /* t4 = oprnd0 >> (prec - 1);
3009 or if we know from VRP that oprnd0 >= 0
3011 or if we know from VRP that oprnd0 < 0
3013 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
3014 t4
= vect_recog_temp_ssa_var (itype
, NULL
);
3016 def_stmt
= gimple_build_assign (t4
, INTEGER_CST
,
3017 build_int_cst (itype
, msb
));
3019 def_stmt
= gimple_build_assign (t4
, RSHIFT_EXPR
, oprnd0
,
3020 build_int_cst (itype
, prec
- 1));
3021 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
3023 /* q = t3 - t4; or q = t4 - t3; */
3024 q
= vect_recog_temp_ssa_var (itype
, NULL
);
3025 pattern_stmt
= gimple_build_assign (q
, MINUS_EXPR
, d
< 0 ? t4
: t3
,
3030 if (rhs_code
== TRUNC_MOD_EXPR
)
3034 /* We divided. Now finish by:
3037 append_pattern_def_seq (stmt_vinfo
, pattern_stmt
);
3039 t1
= vect_recog_temp_ssa_var (itype
, NULL
);
3040 def_stmt
= gimple_build_assign (t1
, MULT_EXPR
, q
, oprnd1
);
3041 append_pattern_def_seq (stmt_vinfo
, def_stmt
);
3043 r
= vect_recog_temp_ssa_var (itype
, NULL
);
3044 pattern_stmt
= gimple_build_assign (r
, MINUS_EXPR
, oprnd0
, t1
);
3047 /* Pattern detected. */
3048 vect_pattern_detected ("vect_recog_divmod_pattern", last_stmt
);
3050 *type_out
= vectype
;
3051 return pattern_stmt
;
/* Function vect_recog_mixed_size_cond_pattern

   Try to find the following pattern:

     type x_t, y_t;
     TYPE a_T, b_T, c_T;

     S1  a_T = x_t CMP y_t ? b_T : c_T;

   where type 'TYPE' is an integral type which has different size
   from 'type'.  b_T and c_T are either constants (and if 'TYPE' is wider
   than 'type', the constants need to fit into an integer type
   with the same width as 'type') or results of conversion from 'type'.

   Input:

   * STMT_VINFO: The stmt from which the pattern search begins.

   Output:

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the pattern.
	Additionally a def_stmt is added.

	a_it = x_t CMP y_t ? b_it : c_it;
	a_T = (TYPE) a_it;  */
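
/* Illustrative scalar sketch (not from this file): with 32-bit comparison
   operands and an 8-bit result,

     void
     select_byte (unsigned char *restrict r, const int *x, const int *y,
                  int n)
     {
       for (int i = 0; i < n; ++i)
         r[i] = x[i] < y[i] ? 1 : 2;   // S1
     }

   the pattern performs the COND_EXPR in a 32-bit temporary (a_it above) and
   converts that temporary to the 8-bit result type afterwards, so the
   vector comparison and the vector selection use matching element sizes.  */
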
3083 vect_recog_mixed_size_cond_pattern (stmt_vec_info stmt_vinfo
, tree
*type_out
)
3085 gimple
*last_stmt
= stmt_vinfo
->stmt
;
3086 tree cond_expr
, then_clause
, else_clause
;
3087 tree type
, vectype
, comp_vectype
, itype
= NULL_TREE
, vecitype
;
3088 gimple
*pattern_stmt
, *def_stmt
;
3089 tree orig_type0
= NULL_TREE
, orig_type1
= NULL_TREE
;
3090 gimple
*def_stmt0
= NULL
, *def_stmt1
= NULL
;
3092 tree comp_scalar_type
;
3094 if (!is_gimple_assign (last_stmt
)
3095 || gimple_assign_rhs_code (last_stmt
) != COND_EXPR
3096 || STMT_VINFO_DEF_TYPE (stmt_vinfo
) != vect_internal_def
)
3099 cond_expr
= gimple_assign_rhs1 (last_stmt
);
3100 then_clause
= gimple_assign_rhs2 (last_stmt
);
3101 else_clause
= gimple_assign_rhs3 (last_stmt
);
3103 if (!COMPARISON_CLASS_P (cond_expr
))
3106 comp_scalar_type
= TREE_TYPE (TREE_OPERAND (cond_expr
, 0));
3107 comp_vectype
= get_vectype_for_scalar_type (comp_scalar_type
);
3108 if (comp_vectype
== NULL_TREE
)
3111 type
= gimple_expr_type (last_stmt
);
3112 if (types_compatible_p (type
, comp_scalar_type
)
3113 || ((TREE_CODE (then_clause
) != INTEGER_CST
3114 || TREE_CODE (else_clause
) != INTEGER_CST
)
3115 && !INTEGRAL_TYPE_P (comp_scalar_type
))
3116 || !INTEGRAL_TYPE_P (type
))
3119 if ((TREE_CODE (then_clause
) != INTEGER_CST
3120 && !type_conversion_p (then_clause
, stmt_vinfo
, false, &orig_type0
,
3121 &def_stmt0
, &promotion
))
3122 || (TREE_CODE (else_clause
) != INTEGER_CST
3123 && !type_conversion_p (else_clause
, stmt_vinfo
, false, &orig_type1
,
3124 &def_stmt1
, &promotion
)))
3127 if (orig_type0
&& orig_type1
3128 && !types_compatible_p (orig_type0
, orig_type1
))
3133 if (!types_compatible_p (orig_type0
, comp_scalar_type
))
3135 then_clause
= gimple_assign_rhs1 (def_stmt0
);
3141 if (!types_compatible_p (orig_type1
, comp_scalar_type
))
3143 else_clause
= gimple_assign_rhs1 (def_stmt1
);
3148 HOST_WIDE_INT cmp_mode_size
3149 = GET_MODE_UNIT_BITSIZE (TYPE_MODE (comp_vectype
));
3151 scalar_int_mode type_mode
= SCALAR_INT_TYPE_MODE (type
);
3152 if (GET_MODE_BITSIZE (type_mode
) == cmp_mode_size
)
3155 vectype
= get_vectype_for_scalar_type (type
);
3156 if (vectype
== NULL_TREE
)
3159 if (expand_vec_cond_expr_p (vectype
, comp_vectype
, TREE_CODE (cond_expr
)))
3162 if (itype
== NULL_TREE
)
3163 itype
= build_nonstandard_integer_type (cmp_mode_size
,
3164 TYPE_UNSIGNED (type
));
3166 if (itype
== NULL_TREE
3167 || GET_MODE_BITSIZE (SCALAR_TYPE_MODE (itype
)) != cmp_mode_size
)
3170 vecitype
= get_vectype_for_scalar_type (itype
);
3171 if (vecitype
== NULL_TREE
)
3174 if (!expand_vec_cond_expr_p (vecitype
, comp_vectype
, TREE_CODE (cond_expr
)))
3177 if (GET_MODE_BITSIZE (type_mode
) > cmp_mode_size
)
3179 if ((TREE_CODE (then_clause
) == INTEGER_CST
3180 && !int_fits_type_p (then_clause
, itype
))
3181 || (TREE_CODE (else_clause
) == INTEGER_CST
3182 && !int_fits_type_p (else_clause
, itype
)))
3186 def_stmt
= gimple_build_assign (vect_recog_temp_ssa_var (itype
, NULL
),
3187 COND_EXPR
, unshare_expr (cond_expr
),
3188 fold_convert (itype
, then_clause
),
3189 fold_convert (itype
, else_clause
));
3190 pattern_stmt
= gimple_build_assign (vect_recog_temp_ssa_var (type
, NULL
),
3191 NOP_EXPR
, gimple_assign_lhs (def_stmt
));
3193 append_pattern_def_seq (stmt_vinfo
, def_stmt
, vecitype
);
3194 *type_out
= vectype
;
3196 vect_pattern_detected ("vect_recog_mixed_size_cond_pattern", last_stmt
);
3198 return pattern_stmt
;
/* Helper function of vect_recog_bool_pattern.  Called recursively, return
   true if bool VAR can and should be optimized that way.  Assume it shouldn't
   in case it's a result of a comparison which can be directly vectorized into
   a vector comparison.  Fills in STMTS with all stmts visited during the
   search.  */

static bool
check_bool_pattern (tree var, vec_info *vinfo, hash_set<gimple *> &stmts)
{
  tree rhs1;
3212 enum tree_code rhs_code
;
3214 stmt_vec_info def_stmt_info
= vect_get_internal_def (vinfo
, var
);
3218 gassign
*def_stmt
= dyn_cast
<gassign
*> (def_stmt_info
->stmt
);
3222 if (stmts
.contains (def_stmt
))
3225 rhs1
= gimple_assign_rhs1 (def_stmt
);
3226 rhs_code
= gimple_assign_rhs_code (def_stmt
);
3230 if (! check_bool_pattern (rhs1
, vinfo
, stmts
))
3235 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1
)))
3237 if (! check_bool_pattern (rhs1
, vinfo
, stmts
))
3242 if (! check_bool_pattern (rhs1
, vinfo
, stmts
))
3249 if (! check_bool_pattern (rhs1
, vinfo
, stmts
)
3250 || ! check_bool_pattern (gimple_assign_rhs2 (def_stmt
), vinfo
, stmts
))
3255 if (TREE_CODE_CLASS (rhs_code
) == tcc_comparison
)
3257 tree vecitype
, comp_vectype
;
3259 /* If the comparison can throw, then is_gimple_condexpr will be
3260 false and we can't make a COND_EXPR/VEC_COND_EXPR out of it. */
3261 if (stmt_could_throw_p (def_stmt
))
3264 comp_vectype
= get_vectype_for_scalar_type (TREE_TYPE (rhs1
));
3265 if (comp_vectype
== NULL_TREE
)
3268 tree mask_type
= get_mask_type_for_scalar_type (TREE_TYPE (rhs1
));
3270 && expand_vec_cmp_expr_p (comp_vectype
, mask_type
, rhs_code
))
3273 if (TREE_CODE (TREE_TYPE (rhs1
)) != INTEGER_TYPE
)
3275 scalar_mode mode
= SCALAR_TYPE_MODE (TREE_TYPE (rhs1
));
3277 = build_nonstandard_integer_type (GET_MODE_BITSIZE (mode
), 1);
3278 vecitype
= get_vectype_for_scalar_type (itype
);
3279 if (vecitype
== NULL_TREE
)
3283 vecitype
= comp_vectype
;
3284 if (! expand_vec_cond_expr_p (vecitype
, comp_vectype
, rhs_code
))
3292 bool res
= stmts
.add (def_stmt
);
3293 /* We can't end up recursing when just visiting SSA defs but not PHIs. */
/* Helper function of adjust_bool_pattern.  Add a cast to TYPE to a previous
   stmt (SSA_NAME_DEF_STMT of VAR), adding the cast to STMT_INFO's
   pattern sequence.  */

static tree
adjust_bool_pattern_cast (tree type, tree var, stmt_vec_info stmt_info)
{
  gimple *cast_stmt = gimple_build_assign (vect_recog_temp_ssa_var (type, NULL),
                                           NOP_EXPR, var);
  append_pattern_def_seq (stmt_info, cast_stmt,
                          get_vectype_for_scalar_type (type));
  return gimple_assign_lhs (cast_stmt);
}
3314 /* Helper function of vect_recog_bool_pattern. Do the actual transformations.
3315 VAR is an SSA_NAME that should be transformed from bool to a wider integer
3316 type, OUT_TYPE is the desired final integer type of the whole pattern.
3317 STMT_INFO is the info of the pattern root and is where pattern stmts should
3318 be associated with. DEFS is a map of pattern defs. */
3321 adjust_bool_pattern (tree var
, tree out_type
,
3322 stmt_vec_info stmt_info
, hash_map
<tree
, tree
> &defs
)
3324 gimple
*stmt
= SSA_NAME_DEF_STMT (var
);
3325 enum tree_code rhs_code
, def_rhs_code
;
3326 tree itype
, cond_expr
, rhs1
, rhs2
, irhs1
, irhs2
;
3328 gimple
*pattern_stmt
, *def_stmt
;
3329 tree trueval
= NULL_TREE
;
3331 rhs1
= gimple_assign_rhs1 (stmt
);
3332 rhs2
= gimple_assign_rhs2 (stmt
);
3333 rhs_code
= gimple_assign_rhs_code (stmt
);
3334 loc
= gimple_location (stmt
);
3339 irhs1
= *defs
.get (rhs1
);
3340 itype
= TREE_TYPE (irhs1
);
3342 = gimple_build_assign (vect_recog_temp_ssa_var (itype
, NULL
),
3347 irhs1
= *defs
.get (rhs1
);
3348 itype
= TREE_TYPE (irhs1
);
3350 = gimple_build_assign (vect_recog_temp_ssa_var (itype
, NULL
),
3351 BIT_XOR_EXPR
, irhs1
, build_int_cst (itype
, 1));
      /* Try to optimize x = y & (a < b ? 1 : 0); into
	 x = (a < b ? y : 0);

	 E.g. for:
	   bool a_b, b_b, c_b;
	   TYPE d_T;

	   S1  a_b = x1 CMP1 y1;
	   S2  b_b = x2 CMP2 y2;
	   S3  c_b = a_b & b_b;
	   S4  d_T = (TYPE) c_b;

	 we would normally emit:

	   S1'  a_T = x1 CMP1 y1 ? 1 : 0;
	   S2'  b_T = x2 CMP2 y2 ? 1 : 0;
	   S3'  c_T = a_T & b_T;
	   S4'  d_T = c_T;

	 but we can save one stmt by using the
	 result of one of the COND_EXPRs in the other COND_EXPR and leave
	 BIT_AND_EXPR stmt out:

	   S1'  a_T = x1 CMP1 y1 ? 1 : 0;
	   S3'  c_T = x2 CMP2 y2 ? a_T : 0;
	   S4'  d_T = c_T;

	 At least when VEC_COND_EXPR is implemented using masks,
	 cond ? 1 : 0 is as expensive as cond ? var : 0; in both cases it
	 computes the comparison mask and ANDs it, in one case with an
	 all-ones vector, in the other case with a vector register.
	 Don't do this for BIT_IOR_EXPR, because cond ? 1 : var; is
	 often more expensive.  */
3388 def_stmt
= SSA_NAME_DEF_STMT (rhs2
);
3389 def_rhs_code
= gimple_assign_rhs_code (def_stmt
);
3390 if (TREE_CODE_CLASS (def_rhs_code
) == tcc_comparison
)
3392 irhs1
= *defs
.get (rhs1
);
3393 tree def_rhs1
= gimple_assign_rhs1 (def_stmt
);
3394 if (TYPE_PRECISION (TREE_TYPE (irhs1
))
3395 == GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (def_rhs1
))))
3397 rhs_code
= def_rhs_code
;
3399 rhs2
= gimple_assign_rhs2 (def_stmt
);
3404 irhs2
= *defs
.get (rhs2
);
3407 def_stmt
= SSA_NAME_DEF_STMT (rhs1
);
3408 def_rhs_code
= gimple_assign_rhs_code (def_stmt
);
3409 if (TREE_CODE_CLASS (def_rhs_code
) == tcc_comparison
)
3411 irhs2
= *defs
.get (rhs2
);
3412 tree def_rhs1
= gimple_assign_rhs1 (def_stmt
);
3413 if (TYPE_PRECISION (TREE_TYPE (irhs2
))
3414 == GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (def_rhs1
))))
3416 rhs_code
= def_rhs_code
;
3418 rhs2
= gimple_assign_rhs2 (def_stmt
);
3423 irhs1
= *defs
.get (rhs1
);
3429 irhs1
= *defs
.get (rhs1
);
3430 irhs2
= *defs
.get (rhs2
);
3432 if (TYPE_PRECISION (TREE_TYPE (irhs1
))
3433 != TYPE_PRECISION (TREE_TYPE (irhs2
)))
3435 int prec1
= TYPE_PRECISION (TREE_TYPE (irhs1
));
3436 int prec2
= TYPE_PRECISION (TREE_TYPE (irhs2
));
3437 int out_prec
= TYPE_PRECISION (out_type
);
3438 if (absu_hwi (out_prec
- prec1
) < absu_hwi (out_prec
- prec2
))
3439 irhs2
= adjust_bool_pattern_cast (TREE_TYPE (irhs1
), irhs2
,
3441 else if (absu_hwi (out_prec
- prec1
) > absu_hwi (out_prec
- prec2
))
3442 irhs1
= adjust_bool_pattern_cast (TREE_TYPE (irhs2
), irhs1
,
3446 irhs1
= adjust_bool_pattern_cast (out_type
, irhs1
, stmt_info
);
3447 irhs2
= adjust_bool_pattern_cast (out_type
, irhs2
, stmt_info
);
3450 itype
= TREE_TYPE (irhs1
);
3452 = gimple_build_assign (vect_recog_temp_ssa_var (itype
, NULL
),
3453 rhs_code
, irhs1
, irhs2
);
3458 gcc_assert (TREE_CODE_CLASS (rhs_code
) == tcc_comparison
);
3459 if (TREE_CODE (TREE_TYPE (rhs1
)) != INTEGER_TYPE
3460 || !TYPE_UNSIGNED (TREE_TYPE (rhs1
))
3461 || maybe_ne (TYPE_PRECISION (TREE_TYPE (rhs1
)),
3462 GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1
)))))
3464 scalar_mode mode
= SCALAR_TYPE_MODE (TREE_TYPE (rhs1
));
3466 = build_nonstandard_integer_type (GET_MODE_BITSIZE (mode
), 1);
3469 itype
= TREE_TYPE (rhs1
);
3470 cond_expr
= build2_loc (loc
, rhs_code
, itype
, rhs1
, rhs2
);
3471 if (trueval
== NULL_TREE
)
3472 trueval
= build_int_cst (itype
, 1);
3474 gcc_checking_assert (useless_type_conversion_p (itype
,
3475 TREE_TYPE (trueval
)));
3477 = gimple_build_assign (vect_recog_temp_ssa_var (itype
, NULL
),
3478 COND_EXPR
, cond_expr
, trueval
,
3479 build_int_cst (itype
, 0));
3483 gimple_set_location (pattern_stmt
, loc
);
3484 append_pattern_def_seq (stmt_info
, pattern_stmt
,
3485 get_vectype_for_scalar_type (itype
));
3486 defs
.put (var
, gimple_assign_lhs (pattern_stmt
));
/* Comparison function to qsort a vector of gimple stmts after UID.  */

static int
sort_after_uid (const void *p1, const void *p2)
{
  const gimple *stmt1 = *(const gimple * const *)p1;
  const gimple *stmt2 = *(const gimple * const *)p2;
  return gimple_uid (stmt1) - gimple_uid (stmt2);
}
3499 /* Create pattern stmts for all stmts participating in the bool pattern
3500 specified by BOOL_STMT_SET and its root STMT_INFO with the desired type
3501 OUT_TYPE. Return the def of the pattern root. */
3504 adjust_bool_stmts (hash_set
<gimple
*> &bool_stmt_set
,
3505 tree out_type
, stmt_vec_info stmt_info
)
3507 /* Gather original stmts in the bool pattern in their order of appearance
3509 auto_vec
<gimple
*> bool_stmts (bool_stmt_set
.elements ());
3510 for (hash_set
<gimple
*>::iterator i
= bool_stmt_set
.begin ();
3511 i
!= bool_stmt_set
.end (); ++i
)
3512 bool_stmts
.quick_push (*i
);
3513 bool_stmts
.qsort (sort_after_uid
);
3515 /* Now process them in that order, producing pattern stmts. */
3516 hash_map
<tree
, tree
> defs
;
3517 for (unsigned i
= 0; i
< bool_stmts
.length (); ++i
)
3518 adjust_bool_pattern (gimple_assign_lhs (bool_stmts
[i
]),
3519 out_type
, stmt_info
, defs
);
3521 /* Pop the last pattern seq stmt and install it as pattern root for STMT. */
3522 gimple
*pattern_stmt
3523 = gimple_seq_last_stmt (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info
));
3524 return gimple_assign_lhs (pattern_stmt
);
3527 /* Helper for search_type_for_mask. */
3530 search_type_for_mask_1 (tree var
, vec_info
*vinfo
,
3531 hash_map
<gimple
*, tree
> &cache
)
3534 enum tree_code rhs_code
;
3535 tree res
= NULL_TREE
, res2
;
3537 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (var
)))
3540 stmt_vec_info def_stmt_info
= vect_get_internal_def (vinfo
, var
);
3544 gassign
*def_stmt
= dyn_cast
<gassign
*> (def_stmt_info
->stmt
);
3548 tree
*c
= cache
.get (def_stmt
);
3552 rhs_code
= gimple_assign_rhs_code (def_stmt
);
3553 rhs1
= gimple_assign_rhs1 (def_stmt
);
3560 res
= search_type_for_mask_1 (rhs1
, vinfo
, cache
);
3566 res
= search_type_for_mask_1 (rhs1
, vinfo
, cache
);
3567 res2
= search_type_for_mask_1 (gimple_assign_rhs2 (def_stmt
), vinfo
,
3569 if (!res
|| (res2
&& TYPE_PRECISION (res
) > TYPE_PRECISION (res2
)))
3574 if (TREE_CODE_CLASS (rhs_code
) == tcc_comparison
)
3576 tree comp_vectype
, mask_type
;
3578 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1
)))
3580 res
= search_type_for_mask_1 (rhs1
, vinfo
, cache
);
3581 res2
= search_type_for_mask_1 (gimple_assign_rhs2 (def_stmt
),
3583 if (!res
|| (res2
&& TYPE_PRECISION (res
) > TYPE_PRECISION (res2
)))
3588 comp_vectype
= get_vectype_for_scalar_type (TREE_TYPE (rhs1
));
3589 if (comp_vectype
== NULL_TREE
)
3595 mask_type
= get_mask_type_for_scalar_type (TREE_TYPE (rhs1
));
3597 || !expand_vec_cmp_expr_p (comp_vectype
, mask_type
, rhs_code
))
3603 if (TREE_CODE (TREE_TYPE (rhs1
)) != INTEGER_TYPE
3604 || !TYPE_UNSIGNED (TREE_TYPE (rhs1
)))
3606 scalar_mode mode
= SCALAR_TYPE_MODE (TREE_TYPE (rhs1
));
3607 res
= build_nonstandard_integer_type (GET_MODE_BITSIZE (mode
), 1);
3610 res
= TREE_TYPE (rhs1
);
3614 cache
.put (def_stmt
, res
);
/* Return the proper type for converting bool VAR into
   an integer value or NULL_TREE if no such type exists.
   The type is chosen so that the converted value has the
   same number of elements as VAR's vector type.  */

static tree
search_type_for_mask (tree var, vec_info *vinfo)
{
  hash_map<gimple *, tree> cache;
  return search_type_for_mask_1 (var, vinfo, cache);
}
/* Function vect_recog_bool_pattern

   Try to find pattern like following:

     bool a_b, b_b, c_b, d_b, e_b;
     TYPE f_T;

     S1  a_b = x1 CMP1 y1;
     S2  b_b = x2 CMP2 y2;
     S3  c_b = a_b & b_b;
     S4  d_b = x3 CMP3 y3;
     S5  e_b = c_b | d_b;
     S6  f_T = (TYPE) e_b;

   where type 'TYPE' is an integral type.  Or a similar pattern
   ending in

     S6  f_Y = e_b ? r_Y : s_Y;

   as results from if-conversion of a complex condition.

   Input:

   * STMT_VINFO: The stmt at the end from which the pattern
		 search begins, i.e. cast of a bool to
		 an integer type.

   Output:

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the pattern.

	Assuming size of TYPE is the same as size of all comparisons
	(otherwise some casts would be added where needed), for the above
	sequence we create related pattern stmts:
	S1'  a_T = x1 CMP1 y1 ? 1 : 0;
	S3'  c_T = x2 CMP2 y2 ? a_T : 0;
	S4'  d_T = x3 CMP3 y3 ? 1 : 0;
	S5'  e_T = c_T | d_T;
	S6'  f_T = e_T;

	Instead of the above S3' we could emit:
	S2'  b_T = x2 CMP2 y2 ? 1 : 0;
	S3'  c_T = a_T | b_T;
	but the above is more efficient.  */
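
/* Illustrative scalar sketch (not from this file): the kind of source this
   pattern helps with is

     void
     and_of_compares (int *restrict out, const int *x1, const int *y1,
                      const int *x2, const int *y2, int n)
     {
       for (int i = 0; i < n; ++i)
         {
           _Bool a_b = x1[i] < y1[i];      // S1
           _Bool b_b = x2[i] < y2[i];      // S2
           out[i] = (int) (a_b & b_b);     // S3 + S6
         }
     }

   Each bool is rewritten as an integer COND_EXPR (cond ? 1 : 0) of the
   result width, so the whole chain vectorizes with int-sized elements and
   no bool packing.  */
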
3678 vect_recog_bool_pattern (stmt_vec_info stmt_vinfo
, tree
*type_out
)
3680 gimple
*last_stmt
= stmt_vinfo
->stmt
;
3681 enum tree_code rhs_code
;
3682 tree var
, lhs
, rhs
, vectype
;
3683 vec_info
*vinfo
= stmt_vinfo
->vinfo
;
3684 gimple
*pattern_stmt
;
3686 if (!is_gimple_assign (last_stmt
))
3689 var
= gimple_assign_rhs1 (last_stmt
);
3690 lhs
= gimple_assign_lhs (last_stmt
);
3692 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (var
)))
3695 hash_set
<gimple
*> bool_stmts
;
3697 rhs_code
= gimple_assign_rhs_code (last_stmt
);
3698 if (CONVERT_EXPR_CODE_P (rhs_code
))
3700 if (! INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
3701 || TYPE_PRECISION (TREE_TYPE (lhs
)) == 1)
3703 vectype
= get_vectype_for_scalar_type (TREE_TYPE (lhs
));
3704 if (vectype
== NULL_TREE
)
3707 if (check_bool_pattern (var
, vinfo
, bool_stmts
))
3709 rhs
= adjust_bool_stmts (bool_stmts
, TREE_TYPE (lhs
), stmt_vinfo
);
3710 lhs
= vect_recog_temp_ssa_var (TREE_TYPE (lhs
), NULL
);
3711 if (useless_type_conversion_p (TREE_TYPE (lhs
), TREE_TYPE (rhs
)))
3712 pattern_stmt
= gimple_build_assign (lhs
, SSA_NAME
, rhs
);
3715 = gimple_build_assign (lhs
, NOP_EXPR
, rhs
);
3719 tree type
= search_type_for_mask (var
, vinfo
);
3720 tree cst0
, cst1
, tmp
;
3725 /* We may directly use cond with narrowed type to avoid
3726 multiple cond exprs with following result packing and
3727 perform single cond with packed mask instead. In case
3728 of widening we better make cond first and then extract
3730 if (TYPE_MODE (type
) == TYPE_MODE (TREE_TYPE (lhs
)))
3731 type
= TREE_TYPE (lhs
);
3733 cst0
= build_int_cst (type
, 0);
3734 cst1
= build_int_cst (type
, 1);
3735 tmp
= vect_recog_temp_ssa_var (type
, NULL
);
3736 pattern_stmt
= gimple_build_assign (tmp
, COND_EXPR
, var
, cst1
, cst0
);
3738 if (!useless_type_conversion_p (type
, TREE_TYPE (lhs
)))
3740 tree new_vectype
= get_vectype_for_scalar_type (type
);
3741 append_pattern_def_seq (stmt_vinfo
, pattern_stmt
, new_vectype
);
3743 lhs
= vect_recog_temp_ssa_var (TREE_TYPE (lhs
), NULL
);
3744 pattern_stmt
= gimple_build_assign (lhs
, CONVERT_EXPR
, tmp
);
3748 *type_out
= vectype
;
3749 vect_pattern_detected ("vect_recog_bool_pattern", last_stmt
);
3751 return pattern_stmt
;
3753 else if (rhs_code
== COND_EXPR
3754 && TREE_CODE (var
) == SSA_NAME
)
3756 vectype
= get_vectype_for_scalar_type (TREE_TYPE (lhs
));
3757 if (vectype
== NULL_TREE
)
3760 /* Build a scalar type for the boolean result that when
3761 vectorized matches the vector type of the result in
3762 size and number of elements. */
3764 = vector_element_size (tree_to_poly_uint64 (TYPE_SIZE (vectype
)),
3765 TYPE_VECTOR_SUBPARTS (vectype
));
3768 = build_nonstandard_integer_type (prec
,
3769 TYPE_UNSIGNED (TREE_TYPE (var
)));
3770 if (get_vectype_for_scalar_type (type
) == NULL_TREE
)
3773 if (!check_bool_pattern (var
, vinfo
, bool_stmts
))
3776 rhs
= adjust_bool_stmts (bool_stmts
, type
, stmt_vinfo
);
3778 lhs
= vect_recog_temp_ssa_var (TREE_TYPE (lhs
), NULL
);
3780 = gimple_build_assign (lhs
, COND_EXPR
,
3781 build2 (NE_EXPR
, boolean_type_node
,
3782 rhs
, build_int_cst (type
, 0)),
3783 gimple_assign_rhs2 (last_stmt
),
3784 gimple_assign_rhs3 (last_stmt
));
3785 *type_out
= vectype
;
3786 vect_pattern_detected ("vect_recog_bool_pattern", last_stmt
);
3788 return pattern_stmt
;
3790 else if (rhs_code
== SSA_NAME
3791 && STMT_VINFO_DATA_REF (stmt_vinfo
))
3793 stmt_vec_info pattern_stmt_info
;
3794 vectype
= STMT_VINFO_VECTYPE (stmt_vinfo
);
3795 gcc_assert (vectype
!= NULL_TREE
);
3796 if (!VECTOR_MODE_P (TYPE_MODE (vectype
)))
3799 if (check_bool_pattern (var
, vinfo
, bool_stmts
))
3800 rhs
= adjust_bool_stmts (bool_stmts
, TREE_TYPE (vectype
), stmt_vinfo
);
3803 tree type
= search_type_for_mask (var
, vinfo
);
3804 tree cst0
, cst1
, new_vectype
;
3809 if (TYPE_MODE (type
) == TYPE_MODE (TREE_TYPE (vectype
)))
3810 type
= TREE_TYPE (vectype
);
3812 cst0
= build_int_cst (type
, 0);
3813 cst1
= build_int_cst (type
, 1);
3814 new_vectype
= get_vectype_for_scalar_type (type
);
3816 rhs
= vect_recog_temp_ssa_var (type
, NULL
);
3817 pattern_stmt
= gimple_build_assign (rhs
, COND_EXPR
, var
, cst1
, cst0
);
3818 append_pattern_def_seq (stmt_vinfo
, pattern_stmt
, new_vectype
);
3821 lhs
= build1 (VIEW_CONVERT_EXPR
, TREE_TYPE (vectype
), lhs
);
3822 if (!useless_type_conversion_p (TREE_TYPE (lhs
), TREE_TYPE (rhs
)))
3824 tree rhs2
= vect_recog_temp_ssa_var (TREE_TYPE (lhs
), NULL
);
3825 gimple
*cast_stmt
= gimple_build_assign (rhs2
, NOP_EXPR
, rhs
);
3826 append_pattern_def_seq (stmt_vinfo
, cast_stmt
);
3829 pattern_stmt
= gimple_build_assign (lhs
, SSA_NAME
, rhs
);
3830 pattern_stmt_info
= vinfo
->add_stmt (pattern_stmt
);
3831 STMT_VINFO_DATA_REF (pattern_stmt_info
)
3832 = STMT_VINFO_DATA_REF (stmt_vinfo
);
3833 STMT_VINFO_DR_WRT_VEC_LOOP (pattern_stmt_info
)
3834 = STMT_VINFO_DR_WRT_VEC_LOOP (stmt_vinfo
);
3835 *type_out
= vectype
;
3836 vect_pattern_detected ("vect_recog_bool_pattern", last_stmt
);
3838 return pattern_stmt
;
/* A helper for vect_recog_mask_conversion_pattern.  Build
   conversion of MASK to a type suitable for masking VECTYPE.
   Built statement gets required vectype and is appended to
   a pattern sequence of STMT_VINFO.

   Return converted mask.  */

static tree
build_mask_conversion (tree mask, tree vectype, stmt_vec_info stmt_vinfo)
{
  gimple *stmt;
  tree masktype, tmp;

  masktype = build_same_sized_truth_vector_type (vectype);
  tmp = vect_recog_temp_ssa_var (TREE_TYPE (masktype), NULL);
  stmt = gimple_build_assign (tmp, CONVERT_EXPR, mask);
  append_pattern_def_seq (stmt_vinfo, stmt, masktype);

  return tmp;
}
/* Function vect_recog_mask_conversion_pattern

   Try to find statements which require boolean type
   conversion.  Additional conversion statements are
   added to handle such cases.  For example:

   bool m_1, m_2, m_3;
   int i_4, i_5;
   double d_6, d_7;
   char c_1, c_2, c_3;

   S1   m_1 = i_4 > i_5;
   S2   m_2 = d_6 < d_7;
   S3   m_3 = m_1 & m_2;
   S4   c_1 = m_3 ? c_2 : c_3;

   Will be transformed into:

   S1   m_1 = i_4 > i_5;
   S2   m_2 = d_6 < d_7;
   S3'' m_2' = (_Bool[bitsize=32])m_2
   S3'  m_3' = m_1 & m_2';
   S4'' m_3'' = (_Bool[bitsize=8])m_3'
   S4'  c_1' = m_3'' ? c_2 : c_3;  */
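
/* Illustrative scalar sketch (not from this file) of why the conversions
   are needed:

     void
     mixed_masks (char *restrict c_1, const int *i_4, const int *i_5,
                  const double *d_6, const double *d_7,
                  const char *c_2, const char *c_3, int n)
     {
       for (int i = 0; i < n; ++i)
         {
           _Bool m_1 = i_4[i] > i_5[i];   // mask with 32-bit elements
           _Bool m_2 = d_6[i] < d_7[i];   // mask with 64-bit elements
           c_1[i] = (m_1 & m_2) ? c_2[i] : c_3[i];
         }
     }

   The two comparisons and the final selection naturally produce masks with
   different element widths, so explicit mask conversions (S3'' and S4''
   above) are inserted to make the operands agree.  */
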
3893 vect_recog_mask_conversion_pattern (stmt_vec_info stmt_vinfo
, tree
*type_out
)
3895 gimple
*last_stmt
= stmt_vinfo
->stmt
;
3896 enum tree_code rhs_code
;
3897 tree lhs
= NULL_TREE
, rhs1
, rhs2
, tmp
, rhs1_type
, rhs2_type
;
3898 tree vectype1
, vectype2
;
3899 stmt_vec_info pattern_stmt_info
;
3900 vec_info
*vinfo
= stmt_vinfo
->vinfo
;
3902 /* Check for MASK_LOAD ans MASK_STORE calls requiring mask conversion. */
3903 if (is_gimple_call (last_stmt
)
3904 && gimple_call_internal_p (last_stmt
))
3906 gcall
*pattern_stmt
;
3908 internal_fn ifn
= gimple_call_internal_fn (last_stmt
);
3909 int mask_argno
= internal_fn_mask_index (ifn
);
3913 bool store_p
= internal_store_fn_p (ifn
);
3916 int rhs_index
= internal_fn_stored_value_index (ifn
);
3917 tree rhs
= gimple_call_arg (last_stmt
, rhs_index
);
3918 vectype1
= get_vectype_for_scalar_type (TREE_TYPE (rhs
));
3922 lhs
= gimple_call_lhs (last_stmt
);
3923 vectype1
= get_vectype_for_scalar_type (TREE_TYPE (lhs
));
3926 tree mask_arg
= gimple_call_arg (last_stmt
, mask_argno
);
3927 tree mask_arg_type
= search_type_for_mask (mask_arg
, vinfo
);
3930 vectype2
= get_mask_type_for_scalar_type (mask_arg_type
);
3932 if (!vectype1
|| !vectype2
3933 || known_eq (TYPE_VECTOR_SUBPARTS (vectype1
),
3934 TYPE_VECTOR_SUBPARTS (vectype2
)))
3937 tmp
= build_mask_conversion (mask_arg
, vectype1
, stmt_vinfo
);
3939 auto_vec
<tree
, 8> args
;
3940 unsigned int nargs
= gimple_call_num_args (last_stmt
);
3941 args
.safe_grow (nargs
);
3942 for (unsigned int i
= 0; i
< nargs
; ++i
)
3943 args
[i
] = ((int) i
== mask_argno
3945 : gimple_call_arg (last_stmt
, i
));
3946 pattern_stmt
= gimple_build_call_internal_vec (ifn
, args
);
3950 lhs
= vect_recog_temp_ssa_var (TREE_TYPE (lhs
), NULL
);
3951 gimple_call_set_lhs (pattern_stmt
, lhs
);
3953 gimple_call_set_nothrow (pattern_stmt
, true);
3955 pattern_stmt_info
= vinfo
->add_stmt (pattern_stmt
);
3956 if (STMT_VINFO_DATA_REF (stmt_vinfo
))
3958 STMT_VINFO_DATA_REF (pattern_stmt_info
)
3959 = STMT_VINFO_DATA_REF (stmt_vinfo
);
3960 STMT_VINFO_DR_WRT_VEC_LOOP (pattern_stmt_info
)
3961 = STMT_VINFO_DR_WRT_VEC_LOOP (stmt_vinfo
);
3962 STMT_VINFO_GATHER_SCATTER_P (pattern_stmt_info
)
3963 = STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo
);
3966 *type_out
= vectype1
;
3967 vect_pattern_detected ("vect_recog_mask_conversion_pattern", last_stmt
);
3969 return pattern_stmt
;
  if (!is_gimple_assign (last_stmt))
    return NULL;

  gimple *pattern_stmt;
  lhs = gimple_assign_lhs (last_stmt);
  rhs1 = gimple_assign_rhs1 (last_stmt);
  rhs_code = gimple_assign_rhs_code (last_stmt);

  /* Check for cond expression requiring mask conversion.  */
  if (rhs_code == COND_EXPR)
    {
      vectype1 = get_vectype_for_scalar_type (TREE_TYPE (lhs));

      if (TREE_CODE (rhs1) == SSA_NAME)
        {
          rhs1_type = search_type_for_mask (rhs1, vinfo);
          if (!rhs1_type)
            return NULL;
        }
      else if (COMPARISON_CLASS_P (rhs1))
        {
          /* Check whether we're comparing scalar booleans and (if so)
             whether a better mask type exists than the mask associated
             with boolean-sized elements.  This avoids unnecessary packs
             and unpacks if the booleans are set from comparisons of
             wider types.  E.g. in:

               int x1, x2, x3, x4, y1, y2;
               bool b1 = (x1 == x2);
               bool b2 = (x3 == x4);
               ... = b1 == b2 ? y1 : y2;

             it is better for b1 and b2 to use the mask type associated
             with int elements rather than bool (byte) elements.  */
          rhs1_type = search_type_for_mask (TREE_OPERAND (rhs1, 0), vinfo);
          if (!rhs1_type)
            rhs1_type = TREE_TYPE (TREE_OPERAND (rhs1, 0));
        }
      else
        return NULL;

      vectype2 = get_mask_type_for_scalar_type (rhs1_type);

      if (!vectype1 || !vectype2)
        return NULL;

      /* Continue if a conversion is needed.  Also continue if we have
         a comparison whose vector type would normally be different from
         VECTYPE2 when considered in isolation.  In that case we'll
         replace the comparison with an SSA name (so that we can record
         its vector type) and behave as though the comparison was an SSA
         name from the outset.  */
      if (known_eq (TYPE_VECTOR_SUBPARTS (vectype1),
                    TYPE_VECTOR_SUBPARTS (vectype2))
          && (TREE_CODE (rhs1) == SSA_NAME
              || rhs1_type == TREE_TYPE (TREE_OPERAND (rhs1, 0))))
        return NULL;

      /* If rhs1 is invariant and we can promote it, leave the COND_EXPR
         in place; we can handle it in vectorizable_condition.  This avoids
         unnecessary promotion stmts and increased vectorization factor.  */
      if (COMPARISON_CLASS_P (rhs1)
          && INTEGRAL_TYPE_P (rhs1_type)
          && known_le (TYPE_VECTOR_SUBPARTS (vectype1),
                       TYPE_VECTOR_SUBPARTS (vectype2)))
        {
          enum vect_def_type dt;
          if (vect_is_simple_use (TREE_OPERAND (rhs1, 0), vinfo, &dt)
              && dt == vect_external_def
              && vect_is_simple_use (TREE_OPERAND (rhs1, 1), vinfo, &dt)
              && (dt == vect_external_def
                  || dt == vect_constant_def))
            {
              tree wide_scalar_type = build_nonstandard_integer_type
                (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype1))),
                 TYPE_UNSIGNED (rhs1_type));
              tree vectype3 = get_vectype_for_scalar_type (wide_scalar_type);
              if (expand_vec_cond_expr_p (vectype1, vectype3,
                                          TREE_CODE (rhs1)))
                return NULL;
            }
        }

      /* If rhs1 is a comparison we need to move it into a
         separate statement.  */
      if (TREE_CODE (rhs1) != SSA_NAME)
        {
          tmp = vect_recog_temp_ssa_var (TREE_TYPE (rhs1), NULL);
          pattern_stmt = gimple_build_assign (tmp, rhs1);
          rhs1 = tmp;
          append_pattern_def_seq (stmt_vinfo, pattern_stmt, vectype2);
        }

      if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
                    TYPE_VECTOR_SUBPARTS (vectype2)))
        tmp = build_mask_conversion (rhs1, vectype1, stmt_vinfo);
      else
        tmp = rhs1;

      lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
      pattern_stmt = gimple_build_assign (lhs, COND_EXPR, tmp,
                                          gimple_assign_rhs2 (last_stmt),
                                          gimple_assign_rhs3 (last_stmt));

      *type_out = vectype1;
      vect_pattern_detected ("vect_recog_mask_conversion_pattern", last_stmt);

      return pattern_stmt;
    }

  /* Now check for binary boolean operations requiring conversion for
     one of the operands.  */
  if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (lhs)))
    return NULL;

  if (rhs_code != BIT_IOR_EXPR
      && rhs_code != BIT_XOR_EXPR
      && rhs_code != BIT_AND_EXPR
      && TREE_CODE_CLASS (rhs_code) != tcc_comparison)
    return NULL;

  rhs2 = gimple_assign_rhs2 (last_stmt);

  rhs1_type = search_type_for_mask (rhs1, vinfo);
  rhs2_type = search_type_for_mask (rhs2, vinfo);

  if (!rhs1_type || !rhs2_type
      || TYPE_PRECISION (rhs1_type) == TYPE_PRECISION (rhs2_type))
    return NULL;

  if (TYPE_PRECISION (rhs1_type) < TYPE_PRECISION (rhs2_type))
    {
      vectype1 = get_mask_type_for_scalar_type (rhs1_type);
      if (!vectype1)
        return NULL;
      rhs2 = build_mask_conversion (rhs2, vectype1, stmt_vinfo);
    }
  else
    {
      vectype1 = get_mask_type_for_scalar_type (rhs2_type);
      if (!vectype1)
        return NULL;
      rhs1 = build_mask_conversion (rhs1, vectype1, stmt_vinfo);
    }

  lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
  pattern_stmt = gimple_build_assign (lhs, rhs_code, rhs1, rhs2);

  *type_out = vectype1;
  vect_pattern_detected ("vect_recog_mask_conversion_pattern", last_stmt);

  return pattern_stmt;
}

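/* As a purely illustrative example (not taken from the testsuite), the
   boolean-operation case above would fire on source like:

     int *a;
     short *b;
     ...
     res[i] = (a[i] != 0) & (b[i] != 0) ? x : y;

   where the first comparison produces a mask associated with int-sized
   elements and the second a mask associated with short-sized elements;
   one of the two masks has to be converted before the AND and the
   COND_EXPR can use a single mask type.  */
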
/* STMT_INFO is a load or store.  If the load or store is conditional, return
   the boolean condition under which it occurs, otherwise return null.  */

static tree
vect_get_load_store_mask (stmt_vec_info stmt_info)
{
  if (gassign *def_assign = dyn_cast <gassign *> (stmt_info->stmt))
    {
      gcc_assert (gimple_assign_single_p (def_assign));
      return NULL_TREE;
    }

  if (gcall *def_call = dyn_cast <gcall *> (stmt_info->stmt))
    {
      internal_fn ifn = gimple_call_internal_fn (def_call);
      int mask_index = internal_fn_mask_index (ifn);
      return gimple_call_arg (def_call, mask_index);
    }

  gcc_unreachable ();
}

/* Return the scalar offset type that an internal gather/scatter function
   should use.  GS_INFO describes the gather/scatter operation.  */

static tree
vect_get_gather_scatter_offset_type (gather_scatter_info *gs_info)
{
  tree offset_type = TREE_TYPE (gs_info->offset);
  unsigned int element_bits = tree_to_uhwi (TYPE_SIZE (gs_info->element_type));

  /* Enforced by vect_check_gather_scatter.  */
  unsigned int offset_bits = TYPE_PRECISION (offset_type);
  gcc_assert (element_bits >= offset_bits);

  /* If the offset is narrower than the elements, extend it according
     to its sign.  */
  if (element_bits > offset_bits)
    return build_nonstandard_integer_type (element_bits,
                                           TYPE_UNSIGNED (offset_type));

  return offset_type;
}

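/* Purely as an example: for a gather of "double" elements indexed by
   "int" offsets, ELEMENT_BITS is 64 and OFFSET_BITS is 32, so the
   function returns a signed 64-bit integer type and the offsets are
   later sign-extended to match the element width.  */
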
/* Return MASK if MASK is suitable for masking an operation on vectors
   of type VECTYPE, otherwise convert it into such a form and return
   the result.  Associate any conversion statements with STMT_INFO's
   pattern.  */

static tree
vect_convert_mask_for_vectype (tree mask, tree vectype,
                               stmt_vec_info stmt_info, vec_info *vinfo)
{
  tree mask_type = search_type_for_mask (mask, vinfo);
  if (mask_type)
    {
      tree mask_vectype = get_mask_type_for_scalar_type (mask_type);
      if (mask_vectype
          && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype),
                       TYPE_VECTOR_SUBPARTS (mask_vectype)))
        mask = build_mask_conversion (mask, vectype, stmt_info);
    }
  return mask;
}

/* Return the equivalent of:

     fold_convert (TYPE, VALUE)

   with the expectation that the operation will be vectorized.
   If new statements are needed, add them as pattern statements
   to STMT_INFO.  */

static tree
vect_add_conversion_to_pattern (tree type, tree value, stmt_vec_info stmt_info)
{
  if (useless_type_conversion_p (type, TREE_TYPE (value)))
    return value;

  tree new_value = vect_recog_temp_ssa_var (type, NULL);
  gassign *conversion = gimple_build_assign (new_value, CONVERT_EXPR, value);
  append_pattern_def_seq (stmt_info, conversion,
                          get_vectype_for_scalar_type (type));
  return new_value;
}

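/* Illustrative only (the names are hypothetical): converting a 32-bit
   offset off_1 to a 64-bit TYPE here adds a pattern definition statement
   along the lines of

     patt_off = (long int) off_1;

   and returns patt_off, so the gather/scatter pattern below can use the
   widened offset directly.  */
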
/* Try to convert STMT_INFO into a call to a gather load or scatter store
   internal function.  Return the final statement on success and set
   *TYPE_OUT to the vector type being loaded or stored.

   This function only handles gathers and scatters that were recognized
   as such from the outset (indicated by STMT_VINFO_GATHER_SCATTER_P).  */

static gimple *
vect_recog_gather_scatter_pattern (stmt_vec_info stmt_info, tree *type_out)
{
  /* Currently we only support this for loop vectorization.  */
  loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_info->vinfo);
  if (!loop_vinfo)
    return NULL;

  /* Make sure that we're looking at a gather load or scatter store.  */
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  if (!dr || !STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    return NULL;

  /* Get the boolean that controls whether the load or store happens.
     This is null if the operation is unconditional.  */
  tree mask = vect_get_load_store_mask (stmt_info);

  /* Make sure that the target supports an appropriate internal
     function for the gather/scatter operation.  */
  gather_scatter_info gs_info;
  if (!vect_check_gather_scatter (stmt_info, loop_vinfo, &gs_info)
      || gs_info.decl)
    return NULL;

  /* Convert the mask to the right form.  */
  tree gs_vectype = get_vectype_for_scalar_type (gs_info.element_type);
  if (mask)
    mask = vect_convert_mask_for_vectype (mask, gs_vectype, stmt_info,
                                          loop_vinfo);

  /* Get the invariant base and non-invariant offset, converting the
     latter to the same width as the vector elements.  */
  tree base = gs_info.base;
  tree offset_type = vect_get_gather_scatter_offset_type (&gs_info);
  tree offset = vect_add_conversion_to_pattern (offset_type, gs_info.offset,
                                                stmt_info);

  /* Build the new pattern statement.  */
  tree scale = size_int (gs_info.scale);
  gcall *pattern_stmt;
  if (DR_IS_READ (dr))
    {
      if (mask != NULL)
        pattern_stmt = gimple_build_call_internal (gs_info.ifn, 4, base,
                                                   offset, scale, mask);
      else
        pattern_stmt = gimple_build_call_internal (gs_info.ifn, 3, base,
                                                   offset, scale);
      tree load_lhs = vect_recog_temp_ssa_var (gs_info.element_type, NULL);
      gimple_call_set_lhs (pattern_stmt, load_lhs);
    }
  else
    {
      tree rhs = vect_get_store_rhs (stmt_info);
      if (mask != NULL)
        pattern_stmt = gimple_build_call_internal (IFN_MASK_SCATTER_STORE, 5,
                                                   base, offset, scale, rhs,
                                                   mask);
      else
        pattern_stmt = gimple_build_call_internal (IFN_SCATTER_STORE, 4,
                                                   base, offset, scale, rhs);
    }
  gimple_call_set_nothrow (pattern_stmt, true);

  /* Copy across relevant vectorization info and associate DR with the
     new pattern statement instead of the original statement.  */
  stmt_vec_info pattern_stmt_info = loop_vinfo->add_stmt (pattern_stmt);
  STMT_VINFO_DATA_REF (pattern_stmt_info) = dr;
  STMT_VINFO_DR_WRT_VEC_LOOP (pattern_stmt_info)
    = STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
  STMT_VINFO_GATHER_SCATTER_P (pattern_stmt_info)
    = STMT_VINFO_GATHER_SCATTER_P (stmt_info);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  *type_out = vectype;
  vect_pattern_detected ("gather/scatter pattern", stmt_info->stmt);

  return pattern_stmt;
}

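/* A sketch of the transformation (names hypothetical, not lifted from a
   dump file): a conditional gather such as

     for (int i = 0; i < n; ++i)
       if (c[i])
         dest[i] = src[idx[i]];

   has its masked load of SRC replaced by an internal call of the form

     patt_l = .MASK_GATHER_LOAD (base, offset, scale, mask);

   with the offset widened and the mask converted by the helpers above.  */
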
/* Return true if TYPE is a non-boolean integer type.  These are the types
   that we want to consider for narrowing.  */

static bool
vect_narrowable_type_p (tree type)
{
  return INTEGRAL_TYPE_P (type) && !VECT_SCALAR_BOOLEAN_TYPE_P (type);
}

/* Return true if the operation given by CODE can be truncated to N bits
   when only N bits of the output are needed.  This is only true if bit N+1
   of the inputs has no effect on the low N bits of the result.  */

static bool
vect_truncatable_operation_p (tree_code code)
{
  switch (code)
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case COND_EXPR:
      return true;

    default:
      return false;
    }
}

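/* For example, in (a + b) & 0xff only the low 8 bits of A and B affect
   the result, so PLUS_EXPR is truncatable; in (a >> 1) & 0xff bit 8 of A
   does affect the low 8 bits, so shifts are instead handled specially in
   vect_determine_precisions_from_users below.  */
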
/* Record that STMT_INFO could be changed from operating on TYPE to
   operating on a type with the precision and sign given by PRECISION
   and SIGN respectively.  PRECISION is an arbitrary bit precision;
   it might not be a whole number of bytes.  */

static void
vect_set_operation_type (stmt_vec_info stmt_info, tree type,
                         unsigned int precision, signop sign)
{
  /* Round the precision up to a whole number of bytes.  */
  precision = vect_element_precision (precision);
  if (precision < TYPE_PRECISION (type)
      && (!stmt_info->operation_precision
          || stmt_info->operation_precision > precision))
    {
      stmt_info->operation_precision = precision;
      stmt_info->operation_sign = sign;
    }
}

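/* E.g. (numbers chosen purely for illustration): a request for 12 bits is
   rounded up to 16 by vect_element_precision, so a 32-bit operation whose
   result is known to need only 12 bits is recorded as a 16-bit operation,
   unless an even narrower precision was recorded earlier.  */
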
/* Record that STMT_INFO only requires MIN_INPUT_PRECISION from its
   non-boolean inputs, all of which have type TYPE.  MIN_INPUT_PRECISION
   is an arbitrary bit precision; it might not be a whole number of bytes.  */

static void
vect_set_min_input_precision (stmt_vec_info stmt_info, tree type,
                              unsigned int min_input_precision)
{
  /* This operation in isolation only requires the inputs to have
     MIN_INPUT_PRECISION of precision.  However, that doesn't mean
     that MIN_INPUT_PRECISION is a natural precision for the chain
     as a whole.  E.g. consider something like:

       unsigned short *x, *y;
       *y = ((*x & 0xf0) >> 4) | (*y << 4);

     The right shift can be done on unsigned chars, and only requires the
     result of "*x & 0xf0" to be done on unsigned chars.  But taking that
     approach would mean turning a natural chain of single-vector unsigned
     short operations into one that truncates "*x" and then extends
     "(*x & 0xf0) >> 4", with two vectors for each unsigned short
     operation and one vector for each unsigned char operation.
     This would be a significant pessimization.

     Instead only propagate the maximum of this precision and the precision
     required by the users of the result.  This means that we don't pessimize
     the case above but continue to optimize things like:

       unsigned char *y;
       unsigned short *x;
       *y = ((*x & 0xf0) >> 4) | (*y << 4);

     Here we would truncate two vectors of *x to a single vector of
     unsigned chars and use single-vector unsigned char operations for
     everything else, rather than doing two unsigned short copies of
     "(*x & 0xf0) >> 4" and then truncating the result.  */
  min_input_precision = MAX (min_input_precision,
                             stmt_info->min_output_precision);

  if (min_input_precision < TYPE_PRECISION (type)
      && (!stmt_info->min_input_precision
          || stmt_info->min_input_precision > min_input_precision))
    stmt_info->min_input_precision = min_input_precision;
}

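/* Continuing the example in the comment above: if this statement only
   needs 8 bits of its inputs but the users of its own result have already
   asked for 16 bits, the MAX keeps min_input_precision at 16, so the
   chain stays a single-vector unsigned short computation.  */
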
/* Subroutine of vect_determine_min_output_precision.  Return true if
   we can calculate a reduced number of output bits for STMT_INFO,
   whose result is LHS.  */

static bool
vect_determine_min_output_precision_1 (stmt_vec_info stmt_info, tree lhs)
{
  /* Take the maximum precision required by users of the result.  */
  vec_info *vinfo = stmt_info->vinfo;
  unsigned int precision = 0;
  imm_use_iterator iter;
  use_operand_p use;
  FOR_EACH_IMM_USE_FAST (use, iter, lhs)
    {
      gimple *use_stmt = USE_STMT (use);
      if (is_gimple_debug (use_stmt))
        continue;
      stmt_vec_info use_stmt_info = vinfo->lookup_stmt (use_stmt);
      if (!use_stmt_info || !use_stmt_info->min_input_precision)
        return false;
      precision = MAX (precision, use_stmt_info->min_input_precision);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "only the low %d bits of ",
                       precision);
      dump_generic_expr (MSG_NOTE, TDF_SLIM, lhs);
      dump_printf (MSG_NOTE, " are significant\n");
    }
  stmt_info->min_output_precision = precision;
  return true;
}

/* Calculate min_output_precision for STMT_INFO.  */

static void
vect_determine_min_output_precision (stmt_vec_info stmt_info)
{
  /* We're only interested in statements with a narrowable result.  */
  tree lhs = gimple_get_lhs (stmt_info->stmt);
  if (!lhs
      || TREE_CODE (lhs) != SSA_NAME
      || !vect_narrowable_type_p (TREE_TYPE (lhs)))
    return;

  if (!vect_determine_min_output_precision_1 (stmt_info, lhs))
    stmt_info->min_output_precision = TYPE_PRECISION (TREE_TYPE (lhs));
}

/* Use range information to decide whether STMT (described by STMT_INFO)
   could be done in a narrower type.  This is effectively a forward
   propagation, since it uses context-independent information that applies
   to all users of an SSA name.  */

static void
vect_determine_precisions_from_range (stmt_vec_info stmt_info, gassign *stmt)
{
  tree lhs = gimple_assign_lhs (stmt);
  if (!lhs || TREE_CODE (lhs) != SSA_NAME)
    return;

  tree type = TREE_TYPE (lhs);
  if (!vect_narrowable_type_p (type))
    return;

  /* First see whether we have any useful range information for the result.  */
  unsigned int precision = TYPE_PRECISION (type);
  signop sign = TYPE_SIGN (type);
  wide_int min_value, max_value;
  if (!vect_get_range_info (lhs, &min_value, &max_value))
    return;

  tree_code code = gimple_assign_rhs_code (stmt);
  unsigned int nops = gimple_num_ops (stmt);

  if (!vect_truncatable_operation_p (code))
    /* Check that all relevant input operands are compatible, and update
       [MIN_VALUE, MAX_VALUE] to include their ranges.  */
    for (unsigned int i = 1; i < nops; ++i)
      {
        tree op = gimple_op (stmt, i);
        if (TREE_CODE (op) == INTEGER_CST)
          {
            /* Don't require the integer to have RHS_TYPE (which it might
               not for things like shift amounts, etc.), but do require it
               to fit the type.  */
            if (!int_fits_type_p (op, type))
              return;

            min_value = wi::min (min_value, wi::to_wide (op, precision), sign);
            max_value = wi::max (max_value, wi::to_wide (op, precision), sign);
          }
        else if (TREE_CODE (op) == SSA_NAME)
          {
            /* Ignore codes that don't take uniform arguments.  */
            if (!types_compatible_p (TREE_TYPE (op), type))
              return;

            wide_int op_min_value, op_max_value;
            if (!vect_get_range_info (op, &op_min_value, &op_max_value))
              return;

            min_value = wi::min (min_value, op_min_value, sign);
            max_value = wi::max (max_value, op_max_value, sign);
          }
        else
          return;
      }

  /* Try to switch signed types for unsigned types if we can.
     This is better for two reasons.  First, unsigned ops tend
     to be cheaper than signed ops.  Second, it means that we can
     handle things like:

       signed char c;
       int res = (int) c & 0xff00; // range [0x0000, 0xff00]

     as:

       signed char c;
       unsigned short res_1 = (unsigned short) c & 0xff00;
       int res = (int) res_1;

     where the intermediate result res_1 has unsigned rather than
     signed type.  */
  if (sign == SIGNED && !wi::neg_p (min_value))
    sign = UNSIGNED;

  /* See what precision is required for MIN_VALUE and MAX_VALUE.  */
  unsigned int precision1 = wi::min_precision (min_value, sign);
  unsigned int precision2 = wi::min_precision (max_value, sign);
  unsigned int value_precision = MAX (precision1, precision2);
  if (value_precision >= precision)
    return;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "can narrow to %s:%d"
                       " without loss of precision: ",
                       sign == SIGNED ? "signed" : "unsigned",
                       value_precision);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  vect_set_operation_type (stmt_info, type, value_precision, sign);
  vect_set_min_input_precision (stmt_info, type, value_precision);
}

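/* A hypothetical example of this forward propagation: if range
   information shows that

     tmp_1 = x_2 & 4095;

   always lies in [0, 0xfff], then VALUE_PRECISION is 12 and the
   statement is recorded as computable as a 16-bit unsigned operation,
   even though tmp_1 is declared as a 32-bit int.  */
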
/* Use information about the users of STMT's result to decide whether
   STMT (described by STMT_INFO) could be done in a narrower type.
   This is effectively a backward propagation.  */

static void
vect_determine_precisions_from_users (stmt_vec_info stmt_info, gassign *stmt)
{
  tree_code code = gimple_assign_rhs_code (stmt);
  unsigned int opno = (code == COND_EXPR ? 2 : 1);
  tree type = TREE_TYPE (gimple_op (stmt, opno));
  if (!vect_narrowable_type_p (type))
    return;

  unsigned int precision = TYPE_PRECISION (type);
  unsigned int operation_precision, min_input_precision;
  switch (code)
    {
    CASE_CONVERT:
      /* Only the bits that contribute to the output matter.  Don't change
         the precision of the operation itself.  */
      operation_precision = precision;
      min_input_precision = stmt_info->min_output_precision;
      break;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      {
        tree shift = gimple_assign_rhs2 (stmt);
        if (TREE_CODE (shift) != INTEGER_CST
            || !wi::ltu_p (wi::to_widest (shift), precision))
          return;
        unsigned int const_shift = TREE_INT_CST_LOW (shift);
        if (code == LSHIFT_EXPR)
          {
            /* We need CONST_SHIFT fewer bits of the input.  */
            operation_precision = stmt_info->min_output_precision;
            min_input_precision = (MAX (operation_precision, const_shift)
                                   - const_shift);
          }
        else
          {
            /* We need CONST_SHIFT extra bits to do the operation.  */
            operation_precision = (stmt_info->min_output_precision
                                   + const_shift);
            min_input_precision = operation_precision;
          }
        break;
      }

    default:
      if (vect_truncatable_operation_p (code))
        {
          /* Input bit N has no effect on output bits N-1 and lower.  */
          operation_precision = stmt_info->min_output_precision;
          min_input_precision = operation_precision;
          break;
        }
      return;
    }

  if (operation_precision < precision)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "can narrow to %s:%d"
                           " without affecting users: ",
                           TYPE_UNSIGNED (type) ? "unsigned" : "signed",
                           operation_precision);
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }
      vect_set_operation_type (stmt_info, type, operation_precision,
                               TYPE_SIGN (type));
    }
  vect_set_min_input_precision (stmt_info, type, min_input_precision);
}

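/* Another illustrative chain for this backward propagation: in

     unsigned short x, y;
     unsigned char res = (x + y) >> 1;

   only 8 bits of the shift result are needed, so the shift needs
   8 + 1 = 9 bits of X + Y, and the addition itself can then be done in
   16 bits rather than the 32-bit promoted type; this is the kind of
   narrowing that the over-widening and average patterns rely on.  */
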
/* Handle vect_determine_precisions for STMT_INFO, given that we
   have already done so for the users of its result.  */

static void
vect_determine_stmt_precisions (stmt_vec_info stmt_info)
{
  vect_determine_min_output_precision (stmt_info);
  if (gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt))
    {
      vect_determine_precisions_from_range (stmt_info, stmt);
      vect_determine_precisions_from_users (stmt_info, stmt);
    }
}

/* Walk backwards through the vectorizable region to determine the
   values of these fields:

   - min_output_precision
   - min_input_precision
   - operation_precision
   - operation_sign.  */

void
vect_determine_precisions (vec_info *vinfo)
{
  DUMP_VECT_SCOPE ("vect_determine_precisions");

  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
    {
      struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
      basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
      unsigned int nbbs = loop->num_nodes;

      for (unsigned int i = 0; i < nbbs; i++)
        {
          basic_block bb = bbs[nbbs - i - 1];
          for (gimple_stmt_iterator si = gsi_last_bb (bb);
               !gsi_end_p (si); gsi_prev (&si))
            vect_determine_stmt_precisions
              (vinfo->lookup_stmt (gsi_stmt (si)));
        }
    }
  else
    {
      bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
      gimple_stmt_iterator si = bb_vinfo->region_end;
      gimple *stmt;
      do
        {
          if (!gsi_stmt (si))
            si = gsi_last_bb (bb_vinfo->bb);
          else
            gsi_prev (&si);
          stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo->lookup_stmt (stmt);
          if (stmt_info && STMT_VINFO_VECTORIZABLE (stmt_info))
            vect_determine_stmt_precisions (stmt_info);
        }
      while (stmt != gsi_stmt (bb_vinfo->region_begin));
    }
}

typedef gimple *(*vect_recog_func_ptr) (stmt_vec_info, tree *);

struct vect_recog_func
{
  vect_recog_func_ptr fn;
  const char *name;
};

/* Note that ordering matters - the first pattern matching on a stmt is
   taken, which means that the more complex patterns usually need to precede
   the less complex ones (widen_sum only after dot_prod or sad for
   example).  */
static vect_recog_func vect_vect_recog_func_ptrs[] = {
  { vect_recog_over_widening_pattern, "over_widening" },
  /* Must come after over_widening, which narrows the shift as much as
     possible beforehand.  */
  { vect_recog_average_pattern, "average" },
  { vect_recog_cast_forwprop_pattern, "cast_forwprop" },
  { vect_recog_widen_mult_pattern, "widen_mult" },
  { vect_recog_dot_prod_pattern, "dot_prod" },
  { vect_recog_sad_pattern, "sad" },
  { vect_recog_widen_sum_pattern, "widen_sum" },
  { vect_recog_pow_pattern, "pow" },
  { vect_recog_widen_shift_pattern, "widen_shift" },
  { vect_recog_rotate_pattern, "rotate" },
  { vect_recog_vector_vector_shift_pattern, "vector_vector_shift" },
  { vect_recog_divmod_pattern, "divmod" },
  { vect_recog_mult_pattern, "mult" },
  { vect_recog_mixed_size_cond_pattern, "mixed_size_cond" },
  { vect_recog_bool_pattern, "bool" },
  /* This must come before mask conversion, and includes the parts
     of mask conversion that are needed for gather and scatter
     internal functions.  */
  { vect_recog_gather_scatter_pattern, "gather_scatter" },
  { vect_recog_mask_conversion_pattern, "mask_conversion" }
};

const unsigned int NUM_PATTERNS = ARRAY_SIZE (vect_vect_recog_func_ptrs);

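/* The ordering constraint above matters because the first function in this
   table that matches a statement wins; listing widen_sum after dot_prod and
   sad, for example, stops the simpler widen-sum match from claiming
   statements that the more profitable dot-product or SAD patterns could
   otherwise handle.  */
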
/* Mark statements that are involved in a pattern.  */

static inline void
vect_mark_pattern_stmts (stmt_vec_info orig_stmt_info, gimple *pattern_stmt,
                         tree pattern_vectype)
{
  gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt_info);

  gimple *orig_pattern_stmt = NULL;
  if (is_pattern_stmt_p (orig_stmt_info))
    {
      /* We're replacing a statement in an existing pattern definition
         sequence.  */
      orig_pattern_stmt = orig_stmt_info->stmt;
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "replacing earlier pattern ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, orig_pattern_stmt, 0);
        }

      /* To keep the book-keeping simple, just swap the lhs of the
         old and new statements, so that the old one has a valid but
         unused lhs.  */
      tree old_lhs = gimple_get_lhs (orig_pattern_stmt);
      gimple_set_lhs (orig_pattern_stmt, gimple_get_lhs (pattern_stmt));
      gimple_set_lhs (pattern_stmt, old_lhs);

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "with ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
        }

      /* Switch to the statement that ORIG replaces.  */
      orig_stmt_info = STMT_VINFO_RELATED_STMT (orig_stmt_info);

      /* We shouldn't be replacing the main pattern statement.  */
      gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info)->stmt
                  != orig_pattern_stmt);
    }

  if (def_seq)
    for (gimple_stmt_iterator si = gsi_start (def_seq);
         !gsi_end_p (si); gsi_next (&si))
      vect_init_pattern_stmt (gsi_stmt (si), orig_stmt_info, pattern_vectype);

  if (orig_pattern_stmt)
    {
      vect_init_pattern_stmt (pattern_stmt, orig_stmt_info, pattern_vectype);

      /* Insert all the new pattern statements before the original one.  */
      gimple_seq *orig_def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt_info);
      gimple_stmt_iterator gsi = gsi_for_stmt (orig_pattern_stmt,
                                               orig_def_seq);
      gsi_insert_seq_before_without_update (&gsi, def_seq, GSI_SAME_STMT);
      gsi_insert_before_without_update (&gsi, pattern_stmt, GSI_SAME_STMT);

      /* Remove the pattern statement that this new pattern replaces.  */
      gsi_remove (&gsi, false);
    }
  else
    vect_set_pattern_stmt (pattern_stmt, orig_stmt_info, pattern_vectype);
}

/* Function vect_pattern_recog_1

   Input:
   PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
        computation pattern.
   STMT_INFO: A stmt from which the pattern search should start.

   If PATTERN_RECOG_FUNC successfully detected the pattern, it creates
   a sequence of statements that has the same functionality and can be
   used to replace STMT_INFO.  It returns the last statement in the sequence
   and adds any earlier statements to STMT_INFO's STMT_VINFO_PATTERN_DEF_SEQ.
   PATTERN_RECOG_FUNC also sets *TYPE_OUT to the vector type of the final
   statement, having first checked that the target supports the new operation
   in that type.

   This function also does some bookkeeping, as explained in the documentation
   for vect_recog_pattern.  */

static void
vect_pattern_recog_1 (vect_recog_func *recog_func, stmt_vec_info stmt_info)
{
  vec_info *vinfo = stmt_info->vinfo;
  gimple *pattern_stmt;
  loop_vec_info loop_vinfo;
  tree pattern_vectype;

  /* If this statement has already been replaced with pattern statements,
     leave the original statement alone, since the first match wins.
     Instead try to match against the definition statements that feed
     the main pattern statement.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
           !gsi_end_p (gsi); gsi_next (&gsi))
        vect_pattern_recog_1 (recog_func, vinfo->lookup_stmt (gsi_stmt (gsi)));
      return;
    }

  gcc_assert (!STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
  pattern_stmt = recog_func->fn (stmt_info, &pattern_vectype);
  if (!pattern_stmt)
    {
      /* Clear any half-formed pattern definition sequence.  */
      STMT_VINFO_PATTERN_DEF_SEQ (stmt_info) = NULL;
      return;
    }

  loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  gcc_assert (pattern_vectype);

  /* Found a vectorizable pattern.  */
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "%s pattern recognized: ", recog_func->name);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
    }

  /* Mark the stmts that are involved in the pattern.  */
  vect_mark_pattern_stmts (stmt_info, pattern_stmt, pattern_vectype);

  /* Patterns cannot be vectorized using SLP, because they change the order of
     computation.  */
  if (loop_vinfo)
    {
      unsigned int ix, ix2;
      stmt_vec_info *elem_ptr;
      VEC_ORDERED_REMOVE_IF (LOOP_VINFO_REDUCTIONS (loop_vinfo), ix, ix2,
                             elem_ptr, *elem_ptr == stmt_info);
    }
}

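/* With -fdump-tree-vect-details a successful match is reported roughly as

     vect_recog_average_pattern: detected: ...
     average pattern recognized: patt_28 = .AVG_FLOOR (x_1, y_2);

   (paraphrased; the SSA names and the pattern involved depend entirely on
   the code being vectorized).  */
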
/* Function vect_pattern_recog

   Input:
   LOOP_VINFO - a struct_loop_info of a loop in which we want to look for
        computation idioms.

   Output - for each computation idiom that is detected we create a new stmt
        that provides the same functionality and that can be vectorized.  We
        also record some information in the struct_stmt_info of the relevant
        stmts, as explained below:

   At the entry to this function we have the following stmts, with the
   following initial value in the STMT_VINFO fields:

         stmt                     in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
         S4: a_0 = ..use(a_1)..         -       -               -
         S5: ... = ..use(a_0)..         -       -               -

   Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
   represented by a single stmt.  We then:
   - create a new stmt S6 equivalent to the pattern (the stmt is not
     inserted into the code)
   - fill in the STMT_VINFO fields as follows:

                                  in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
         S4: a_0 = ..use(a_1)..         true    S6              -
          '---> S6: a_new = ....        -       S4              -
         S5: ... = ..use(a_0)..         -       -               -

   (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
   to each other through the RELATED_STMT field).

   S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
   of S4 because it will replace all its uses.  Stmts {S1,S2,S3} will
   remain irrelevant unless used by stmts other than S4.

   If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
   (because they are marked as irrelevant).  It will vectorize S6, and record
   a pointer to the new vector stmt VS6 from S6 (as usual).
   S4 will be skipped, and S5 will be vectorized as usual:

                                  in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
       > VS6: va_new = ....             -       -               -
         S4: a_0 = ..use(a_1)..         true    S6              VS6
          '---> S6: a_new = ....        -       S4              VS6
       > VS5: ... = ..vuse(va_new)..    -       -               -
         S5: ... = ..use(a_0)..         -       -               -

   DCE could then get rid of {S1,S2,S3,S4,S5} (if their defs are not used
   elsewhere), and we'll end up with:

        VS6: va_new = ....
        VS5: ... = ..vuse(va_new)..

   In the case of more than one pattern statement, e.g., widen-mult with
   intermediate type:

     S1  a_t = ;
     S2  a_T = (TYPE) a_t;
           '--> S3: a_it = (interm_type) a_t;
     S4  prod_T = a_T * CONST;
           '--> S5: prod_T' = a_it w* CONST;

   there may be other users of a_T outside the pattern.  In that case S2 will
   be marked as relevant (as well as S3), and both S2 and S3 will be analyzed
   and vectorized.  The vector stmt VS2 will be recorded in S2, and VS3 will
   be recorded in S3.  */

void
vect_pattern_recog (vec_info *vinfo)
{
  struct loop *loop;
  basic_block *bbs;
  unsigned int nbbs;
  gimple_stmt_iterator si;
  unsigned int i, j;

  vect_determine_precisions (vinfo);

  DUMP_VECT_SCOPE ("vect_pattern_recog");

  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      bbs = LOOP_VINFO_BBS (loop_vinfo);
      nbbs = loop->num_nodes;

      /* Scan through the loop stmts, applying the pattern recognition
         functions starting at each stmt visited:  */
      for (i = 0; i < nbbs; i++)
        {
          basic_block bb = bbs[i];
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              stmt_vec_info stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
              /* Scan over all generic vect_recog_xxx_pattern functions.  */
              for (j = 0; j < NUM_PATTERNS; j++)
                vect_pattern_recog_1 (&vect_vect_recog_func_ptrs[j],
                                      stmt_info);
            }
        }
    }
  else
    {
      bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
      for (si = bb_vinfo->region_begin;
           gsi_stmt (si) != gsi_stmt (bb_vinfo->region_end); gsi_next (&si))
        {
          gimple *stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = bb_vinfo->lookup_stmt (stmt);
          if (stmt_info && !STMT_VINFO_VECTORIZABLE (stmt_info))
            continue;

          /* Scan over all generic vect_recog_xxx_pattern functions.  */
          for (j = 0; j < NUM_PATTERNS; j++)
            vect_pattern_recog_1 (&vect_vect_recog_func_ptrs[j], stmt_info);
        }
    }
}