[official-gcc.git] gcc/tree-vect-patterns.c (blob 4c22afd2b5fa1885d9163a6e752e991dec0206f9)
1 /* Analysis Utilities for Loop Vectorization.
2 Copyright (C) 2006-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Nuzman <dorit@il.ibm.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "ssa.h"
29 #include "expmed.h"
30 #include "optabs-tree.h"
31 #include "insn-config.h"
32 #include "recog.h" /* FIXME: for insn_data */
33 #include "fold-const.h"
34 #include "stor-layout.h"
35 #include "tree-eh.h"
36 #include "gimplify.h"
37 #include "gimple-iterator.h"
38 #include "cfgloop.h"
39 #include "tree-vectorizer.h"
40 #include "dumpfile.h"
41 #include "builtins.h"
42 #include "internal-fn.h"
43 #include "case-cfn-macros.h"
44 #include "fold-const-call.h"
45 #include "attribs.h"
46 #include "cgraph.h"
47 #include "omp-simd-clone.h"
48 #include "predict.h"
50 /* Return true if we have a useful VR_RANGE range for VAR, storing it
51 in *MIN_VALUE and *MAX_VALUE if so. Note the range in the dump files. */
53 static bool
54 vect_get_range_info (tree var, wide_int *min_value, wide_int *max_value)
56 value_range_type vr_type = get_range_info (var, min_value, max_value);
57 wide_int nonzero = get_nonzero_bits (var);
58 signop sgn = TYPE_SIGN (TREE_TYPE (var));
59 if (intersect_range_with_nonzero_bits (vr_type, min_value, max_value,
60 nonzero, sgn) == VR_RANGE)
62 if (dump_enabled_p ())
64 dump_generic_expr_loc (MSG_NOTE, vect_location, TDF_SLIM, var);
65 dump_printf (MSG_NOTE, " has range [");
66 dump_hex (MSG_NOTE, *min_value);
67 dump_printf (MSG_NOTE, ", ");
68 dump_hex (MSG_NOTE, *max_value);
69 dump_printf (MSG_NOTE, "]\n");
71 return true;
73 else
75 if (dump_enabled_p ())
77 dump_generic_expr_loc (MSG_NOTE, vect_location, TDF_SLIM, var);
78 dump_printf (MSG_NOTE, " has no range info\n");
80 return false;
84 /* Report that we've found an instance of pattern PATTERN in
85 statement STMT. */
87 static void
88 vect_pattern_detected (const char *name, gimple *stmt)
90 if (dump_enabled_p ())
92 dump_printf_loc (MSG_NOTE, vect_location, "%s: detected: ", name);
93 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
97 /* Associate pattern statement PATTERN_STMT with ORIG_STMT_INFO.
98 Set its vector type to VECTYPE if it doesn't have one already. */
100 static void
101 vect_init_pattern_stmt (gimple *pattern_stmt, stmt_vec_info orig_stmt_info,
102 tree vectype)
104 stmt_vec_info pattern_stmt_info = vinfo_for_stmt (pattern_stmt);
105 if (pattern_stmt_info == NULL)
107 pattern_stmt_info = new_stmt_vec_info (pattern_stmt,
108 orig_stmt_info->vinfo);
109 set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
111 gimple_set_bb (pattern_stmt, gimple_bb (orig_stmt_info->stmt));
113 STMT_VINFO_RELATED_STMT (pattern_stmt_info) = orig_stmt_info->stmt;
114 STMT_VINFO_DEF_TYPE (pattern_stmt_info)
115 = STMT_VINFO_DEF_TYPE (orig_stmt_info);
116 if (!STMT_VINFO_VECTYPE (pattern_stmt_info))
117 STMT_VINFO_VECTYPE (pattern_stmt_info) = vectype;
120 /* Set the pattern statement of ORIG_STMT_INFO to PATTERN_STMT.
121 Also set the vector type of PATTERN_STMT to VECTYPE, if it doesn't
122 have one already. */
124 static void
125 vect_set_pattern_stmt (gimple *pattern_stmt, stmt_vec_info orig_stmt_info,
126 tree vectype)
128 STMT_VINFO_IN_PATTERN_P (orig_stmt_info) = true;
129 STMT_VINFO_RELATED_STMT (orig_stmt_info) = pattern_stmt;
130 vect_init_pattern_stmt (pattern_stmt, orig_stmt_info, vectype);
133 /* Add NEW_STMT to STMT_INFO's pattern definition statements. If VECTYPE
134 is nonnull, record that NEW_STMT's vector type is VECTYPE, which might
135 be different from the vector type of the final pattern statement. */
137 static inline void
138 append_pattern_def_seq (stmt_vec_info stmt_info, gimple *new_stmt,
139 tree vectype = NULL_TREE)
141 vec_info *vinfo = stmt_info->vinfo;
142 if (vectype)
144 gcc_assert (!vinfo_for_stmt (new_stmt));
145 stmt_vec_info new_stmt_info = new_stmt_vec_info (new_stmt, vinfo);
146 set_vinfo_for_stmt (new_stmt, new_stmt_info);
147 STMT_VINFO_VECTYPE (new_stmt_info) = vectype;
149 gimple_seq_add_stmt_without_update (&STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
150 new_stmt);
153 /* The caller wants to perform new operations on vect_external variable
154 VAR, so that the result of the operations would also be vect_external.
155 Return the edge on which the operations can be performed, if one exists.
156 Return null if the operations should instead be treated as part of
157 the pattern that needs them. */
159 static edge
160 vect_get_external_def_edge (vec_info *vinfo, tree var)
162 edge e = NULL;
163 if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
165 e = loop_preheader_edge (loop_vinfo->loop);
166 if (!SSA_NAME_IS_DEFAULT_DEF (var))
168 basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (var));
169 if (bb == NULL
170 || !dominated_by_p (CDI_DOMINATORS, e->dest, bb))
171 e = NULL;
174 return e;
177 /* Return true if the target supports a vector version of CODE,
178 where CODE is known to map to a direct optab. ITYPE specifies
179 the type of (some of) the scalar inputs and OTYPE specifies the
180 type of the scalar result.
182 If CODE allows the inputs and outputs to have different types
183 (such as for WIDEN_SUM_EXPR), it is the input mode rather
184 than the output mode that determines the appropriate target pattern.
185 Operand 0 of the target pattern then specifies the mode that the output
186 must have.
188 When returning true, set *VECOTYPE_OUT to the vector version of OTYPE.
189 Also set *VECITYPE_OUT to the vector version of ITYPE if VECITYPE_OUT
190 is nonnull. */
192 static bool
193 vect_supportable_direct_optab_p (tree otype, tree_code code,
194 tree itype, tree *vecotype_out,
195 tree *vecitype_out = NULL)
197 tree vecitype = get_vectype_for_scalar_type (itype);
198 if (!vecitype)
199 return false;
201 tree vecotype = get_vectype_for_scalar_type (otype);
202 if (!vecotype)
203 return false;
205 optab optab = optab_for_tree_code (code, vecitype, optab_default);
206 if (!optab)
207 return false;
209 insn_code icode = optab_handler (optab, TYPE_MODE (vecitype));
210 if (icode == CODE_FOR_nothing
211 || insn_data[icode].operand[0].mode != TYPE_MODE (vecotype))
212 return false;
214 *vecotype_out = vecotype;
215 if (vecitype_out)
216 *vecitype_out = vecitype;
217 return true;
220 /* Round bit precision PRECISION up to a full element. */
222 static unsigned int
223 vect_element_precision (unsigned int precision)
225 precision = 1 << ceil_log2 (precision);
226 return MAX (precision, BITS_PER_UNIT);
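/* For illustration, assuming BITS_PER_UNIT == 8 the rounding above gives:

     vect_element_precision (1)  == 8    (1 << ceil_log2 (1) == 1, raised
                                          to BITS_PER_UNIT)
     vect_element_precision (7)  == 8
     vect_element_precision (12) == 16   (1 << ceil_log2 (12) == 16)
     vect_element_precision (17) == 32  */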
229 /* If OP is defined by a statement that's being considered for vectorization,
230 return information about that statement, otherwise return NULL. */
232 static stmt_vec_info
233 vect_get_internal_def (vec_info *vinfo, tree op)
235 vect_def_type dt;
236 gimple *def_stmt;
237 if (TREE_CODE (op) != SSA_NAME
238 || !vect_is_simple_use (op, vinfo, &dt, &def_stmt)
239 || dt != vect_internal_def)
240 return NULL;
242 return vinfo_for_stmt (def_stmt);
245 /* Check whether NAME, an ssa-name used in USE_STMT,
246 is a result of a type promotion, such that:
247 DEF_STMT: NAME = NOP (name0)
248 If CHECK_SIGN is TRUE, check that either both types are signed or both are
249 unsigned. */
251 static bool
252 type_conversion_p (tree name, gimple *use_stmt, bool check_sign,
253 tree *orig_type, gimple **def_stmt, bool *promotion)
255 stmt_vec_info stmt_vinfo;
256 tree type = TREE_TYPE (name);
257 tree oprnd0;
258 enum vect_def_type dt;
260 stmt_vinfo = vinfo_for_stmt (use_stmt);
261 if (!vect_is_simple_use (name, stmt_vinfo->vinfo, &dt, def_stmt))
262 return false;
264 if (dt != vect_internal_def
265 && dt != vect_external_def && dt != vect_constant_def)
266 return false;
268 if (!*def_stmt)
269 return false;
271 if (!is_gimple_assign (*def_stmt))
272 return false;
274 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (*def_stmt)))
275 return false;
277 oprnd0 = gimple_assign_rhs1 (*def_stmt);
279 *orig_type = TREE_TYPE (oprnd0);
280 if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*orig_type)
281 || ((TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*orig_type)) && check_sign))
282 return false;
284 if (TYPE_PRECISION (type) >= (TYPE_PRECISION (*orig_type) * 2))
285 *promotion = true;
286 else
287 *promotion = false;
289 if (!vect_is_simple_use (oprnd0, stmt_vinfo->vinfo, &dt))
290 return false;
292 return true;
295 /* Holds information about an input operand after some sign changes
296 and type promotions have been peeled away. */
297 struct vect_unpromoted_value {
298 vect_unpromoted_value ();
300 void set_op (tree, vect_def_type, stmt_vec_info = NULL);
302 /* The value obtained after peeling away zero or more casts. */
303 tree op;
305 /* The type of OP. */
306 tree type;
308 /* The definition type of OP. */
309 vect_def_type dt;
311 /* If OP is the result of peeling at least one cast, and if the cast
312 of OP itself is a vectorizable statement, CASTER identifies that
313 statement, otherwise it is null. */
314 stmt_vec_info caster;
317 inline vect_unpromoted_value::vect_unpromoted_value ()
318 : op (NULL_TREE),
319 type (NULL_TREE),
320 dt (vect_uninitialized_def),
321 caster (NULL)
325 /* Set the operand to OP_IN, its definition type to DT_IN, and the
326 statement that casts it to CASTER_IN. */
328 inline void
329 vect_unpromoted_value::set_op (tree op_in, vect_def_type dt_in,
330 stmt_vec_info caster_in)
332 op = op_in;
333 type = TREE_TYPE (op);
334 dt = dt_in;
335 caster = caster_in;
338 /* If OP is a vectorizable SSA name, strip a sequence of integer conversions
339 to reach some vectorizable inner operand OP', continuing as long as it
340 is possible to convert OP' back to OP using a possible sign change
341 followed by a possible promotion P. Return this OP', or null if OP is
342 not a vectorizable SSA name. If there is a promotion P, describe its
343 input in UNPROM, otherwise describe OP' in UNPROM. If SINGLE_USE_P
344 is nonnull, set *SINGLE_USE_P to false if any of the SSA names involved
345 have more than one user.
347 A successful return means that it is possible to go from OP' to OP
348 via UNPROM. The cast from OP' to UNPROM is at most a sign change,
349 whereas the cast from UNPROM to OP might be a promotion, a sign
350 change, or a nop.
352 E.g. say we have:
354 signed short *ptr = ...;
355 signed short C = *ptr;
356 unsigned short B = (unsigned short) C; // sign change
357 signed int A = (signed int) B; // unsigned promotion
358 ...possible other uses of A...
359 unsigned int OP = (unsigned int) A; // sign change
361 In this case it's possible to go directly from C to OP using:
363 OP = (unsigned int) (unsigned short) C;
364 +------------+ +--------------+
365 promotion sign change
367 so OP' would be C. The input to the promotion is B, so UNPROM
368 would describe B. */
370 static tree
371 vect_look_through_possible_promotion (vec_info *vinfo, tree op,
372 vect_unpromoted_value *unprom,
373 bool *single_use_p = NULL)
375 tree res = NULL_TREE;
376 tree op_type = TREE_TYPE (op);
377 unsigned int orig_precision = TYPE_PRECISION (op_type);
378 stmt_vec_info caster = NULL;
379 while (TREE_CODE (op) == SSA_NAME && INTEGRAL_TYPE_P (op_type))
381 /* See whether OP is simple enough to vectorize. */
382 gimple *def_stmt;
383 vect_def_type dt;
384 if (!vect_is_simple_use (op, vinfo, &dt, &def_stmt))
385 break;
387 /* If OP is the input of a demotion, skip over it to see whether
388 OP is itself the result of a promotion. If so, the combined
389 effect of the promotion and the demotion might fit the required
390 pattern, otherwise neither operation fits.
392 This copes with cases such as the result of an arithmetic
393 operation being truncated before being stored, and where that
394 arithmetic operation has been recognized as an over-widened one. */
395 if (TYPE_PRECISION (op_type) <= orig_precision)
397 /* Use OP as the UNPROM described above if we haven't yet
398 found a promotion, or if using the new input preserves the
399 sign of the previous promotion. */
400 if (!res
401 || TYPE_PRECISION (unprom->type) == orig_precision
402 || TYPE_SIGN (unprom->type) == TYPE_SIGN (op_type))
403 unprom->set_op (op, dt, caster);
404 /* Stop if we've already seen a promotion and if this
405 conversion does more than change the sign. */
406 else if (TYPE_PRECISION (op_type)
407 != TYPE_PRECISION (unprom->type))
408 break;
410 /* The sequence now extends to OP. */
411 res = op;
414 /* See whether OP is defined by a cast. Record it as CASTER if
415 the cast is potentially vectorizable. */
416 if (!def_stmt)
417 break;
418 if (dt == vect_internal_def)
420 caster = vinfo_for_stmt (def_stmt);
421 /* Ignore pattern statements, since we don't link uses for them. */
422 if (single_use_p
423 && !STMT_VINFO_RELATED_STMT (caster)
424 && !has_single_use (res))
425 *single_use_p = false;
427 else
428 caster = NULL;
429 gassign *assign = dyn_cast <gassign *> (def_stmt);
430 if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
431 break;
433 /* Continue with the input to the cast. */
434 op = gimple_assign_rhs1 (def_stmt);
435 op_type = TREE_TYPE (op);
437 return res;
440 /* OP is an integer operand to an operation that returns TYPE, and we
441 want to treat the operation as a widening one. So far we can treat
442 it as widening from *COMMON_TYPE.
444 Return true if OP is suitable for such a widening operation,
445 either widening from *COMMON_TYPE or from some supertype of it.
446 Update *COMMON_TYPE to the supertype in the latter case.
448 SHIFT_P is true if OP is a shift amount. */
450 static bool
451 vect_joust_widened_integer (tree type, bool shift_p, tree op,
452 tree *common_type)
454 /* Calculate the minimum precision required by OP, without changing
455 the sign of either operand. */
456 unsigned int precision;
457 if (shift_p)
459 if (!wi::leu_p (wi::to_widest (op), TYPE_PRECISION (type) / 2))
460 return false;
461 precision = TREE_INT_CST_LOW (op);
463 else
465 precision = wi::min_precision (wi::to_widest (op),
466 TYPE_SIGN (*common_type));
467 if (precision * 2 > TYPE_PRECISION (type))
468 return false;
471 /* If OP requires a wider type, switch to that type. The checks
472 above ensure that this is still narrower than the result. */
473 precision = vect_element_precision (precision);
474 if (TYPE_PRECISION (*common_type) < precision)
475 *common_type = build_nonstandard_integer_type
476 (precision, TYPE_UNSIGNED (*common_type));
477 return true;
480 /* Return true if the common supertype of NEW_TYPE and *COMMON_TYPE
481 is narrower than type, storing the supertype in *COMMON_TYPE if so. */
483 static bool
484 vect_joust_widened_type (tree type, tree new_type, tree *common_type)
486 if (types_compatible_p (*common_type, new_type))
487 return true;
489 /* See if *COMMON_TYPE can hold all values of NEW_TYPE. */
490 if ((TYPE_PRECISION (new_type) < TYPE_PRECISION (*common_type))
491 && (TYPE_UNSIGNED (new_type) || !TYPE_UNSIGNED (*common_type)))
492 return true;
494 /* See if NEW_TYPE can hold all values of *COMMON_TYPE. */
495 if (TYPE_PRECISION (*common_type) < TYPE_PRECISION (new_type)
496 && (TYPE_UNSIGNED (*common_type) || !TYPE_UNSIGNED (new_type)))
498 *common_type = new_type;
499 return true;
502 /* We have mismatched signs, with the signed type being
503 no wider than the unsigned type. In this case we need
504 a wider signed type. */
505 unsigned int precision = MAX (TYPE_PRECISION (*common_type),
506 TYPE_PRECISION (new_type));
507 precision *= 2;
508 if (precision * 2 > TYPE_PRECISION (type))
509 return false;
511 *common_type = build_nonstandard_integer_type (precision, false);
512 return true;
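/* For illustration: with a 32-bit result TYPE, jousting an existing
   *COMMON_TYPE of unsigned char against a NEW_TYPE of signed char takes
   the mismatched-sign path above and yields a 16-bit signed
   *COMMON_TYPE (MAX (8, 8) * 2 == 16, and 16 * 2 <= 32 still holds).
   Jousting unsigned char against unsigned short simply widens
   *COMMON_TYPE to unsigned short.  */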
515 /* Check whether STMT_INFO can be viewed as a tree of integer operations
516 in which each node either performs CODE or WIDENED_CODE, and where
517 each leaf operand is narrower than the result of STMT_INFO. MAX_NOPS
518 specifies the maximum number of leaf operands. SHIFT_P says whether
519 CODE and WIDENED_CODE are some sort of shift.
521 If STMT_INFO is such a tree, return the number of leaf operands
522 and describe them in UNPROM[0] onwards. Also set *COMMON_TYPE
523 to a type that (a) is narrower than the result of STMT_INFO and
524 (b) can hold all leaf operand values.
526 Return 0 if STMT_INFO isn't such a tree, or if no such COMMON_TYPE
527 exists. */
529 static unsigned int
530 vect_widened_op_tree (stmt_vec_info stmt_info, tree_code code,
531 tree_code widened_code, bool shift_p,
532 unsigned int max_nops,
533 vect_unpromoted_value *unprom, tree *common_type)
535 /* Check for an integer operation with the right code. */
536 gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
537 if (!assign)
538 return 0;
540 tree_code rhs_code = gimple_assign_rhs_code (assign);
541 if (rhs_code != code && rhs_code != widened_code)
542 return 0;
544 tree type = gimple_expr_type (assign);
545 if (!INTEGRAL_TYPE_P (type))
546 return 0;
548 /* Assume that both operands will be leaf operands. */
549 max_nops -= 2;
551 /* Check the operands. */
552 unsigned int next_op = 0;
553 for (unsigned int i = 0; i < 2; ++i)
555 vect_unpromoted_value *this_unprom = &unprom[next_op];
556 unsigned int nops = 1;
557 tree op = gimple_op (assign, i + 1);
558 if (i == 1 && TREE_CODE (op) == INTEGER_CST)
560 /* We already have a common type from earlier operands.
561 Update it to account for OP. */
562 this_unprom->set_op (op, vect_constant_def);
563 if (!vect_joust_widened_integer (type, shift_p, op, common_type))
564 return 0;
566 else
568 /* Only allow shifts by constants. */
569 if (shift_p && i == 1)
570 return 0;
572 if (!vect_look_through_possible_promotion (stmt_info->vinfo, op,
573 this_unprom))
574 return 0;
576 if (TYPE_PRECISION (this_unprom->type) == TYPE_PRECISION (type))
578 /* The operand isn't widened. If STMT_INFO has the code
579 for an unwidened operation, recursively check whether
580 this operand is a node of the tree. */
581 if (rhs_code != code
582 || max_nops == 0
583 || this_unprom->dt != vect_internal_def)
584 return 0;
586 /* Give back the leaf slot allocated above now that we're
587 not treating this as a leaf operand. */
588 max_nops += 1;
590 /* Recursively process the definition of the operand. */
591 stmt_vec_info def_stmt_info
592 = vinfo_for_stmt (SSA_NAME_DEF_STMT (this_unprom->op));
593 nops = vect_widened_op_tree (def_stmt_info, code, widened_code,
594 shift_p, max_nops, this_unprom,
595 common_type);
596 if (nops == 0)
597 return 0;
599 max_nops -= nops;
601 else
603 /* Make sure that the operand is narrower than the result. */
604 if (TYPE_PRECISION (this_unprom->type) * 2
605 > TYPE_PRECISION (type))
606 return 0;
608 /* Update COMMON_TYPE for the new operand. */
609 if (i == 0)
610 *common_type = this_unprom->type;
611 else if (!vect_joust_widened_type (type, this_unprom->type,
612 common_type))
613 return 0;
616 next_op += nops;
618 return next_op;
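/* For illustration: given source code along the lines of

     int t   = (int) a + (int) b;     // a, b, c of type signed char
     int sum = t + (int) c;

   calling this function on the second statement with CODE == PLUS_EXPR,
   WIDENED_CODE == PLUS_EXPR and MAX_NOPS == 3 (as vect_recog_average_pattern
   below does) recurses through T, returns 3, describes a, b and c in
   UNPROM[0..2], and sets *COMMON_TYPE to signed char.  */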
621 /* Helper to return a new temporary for pattern of TYPE for STMT. If STMT
622 is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var. */
624 static tree
625 vect_recog_temp_ssa_var (tree type, gimple *stmt)
627 return make_temp_ssa_name (type, stmt, "patt");
630 /* STMT2_INFO describes a type conversion that could be split into STMT1
631 followed by a version of STMT2_INFO that takes NEW_RHS as its first
632 input. Try to do this using pattern statements, returning true on
633 success. */
635 static bool
636 vect_split_statement (stmt_vec_info stmt2_info, tree new_rhs,
637 gimple *stmt1, tree vectype)
639 if (is_pattern_stmt_p (stmt2_info))
641 /* STMT2_INFO is part of a pattern. Get the statement to which
642 the pattern is attached. */
643 stmt_vec_info orig_stmt2_info
644 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt2_info));
645 vect_init_pattern_stmt (stmt1, orig_stmt2_info, vectype);
647 if (dump_enabled_p ())
649 dump_printf_loc (MSG_NOTE, vect_location,
650 "Splitting pattern statement: ");
651 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt2_info->stmt, 0);
654 /* Since STMT2_INFO is a pattern statement, we can change it
655 in-situ without worrying about changing the code for the
656 containing block. */
657 gimple_assign_set_rhs1 (stmt2_info->stmt, new_rhs);
659 if (dump_enabled_p ())
661 dump_printf_loc (MSG_NOTE, vect_location, "into: ");
662 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
663 dump_printf_loc (MSG_NOTE, vect_location, "and: ");
664 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt2_info->stmt, 0);
667 gimple_seq *def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt2_info);
668 if (STMT_VINFO_RELATED_STMT (orig_stmt2_info) == stmt2_info->stmt)
669 /* STMT2_INFO is the actual pattern statement. Add STMT1
670 to the end of the definition sequence. */
671 gimple_seq_add_stmt_without_update (def_seq, stmt1);
672 else
674 /* STMT2_INFO belongs to the definition sequence. Insert STMT1
675 before it. */
676 gimple_stmt_iterator gsi = gsi_for_stmt (stmt2_info->stmt, def_seq);
677 gsi_insert_before_without_update (&gsi, stmt1, GSI_SAME_STMT);
679 return true;
681 else
683 /* STMT2_INFO doesn't yet have a pattern. Try to create a
684 two-statement pattern now. */
685 gcc_assert (!STMT_VINFO_RELATED_STMT (stmt2_info));
686 tree lhs_type = TREE_TYPE (gimple_get_lhs (stmt2_info->stmt));
687 tree lhs_vectype = get_vectype_for_scalar_type (lhs_type);
688 if (!lhs_vectype)
689 return false;
691 if (dump_enabled_p ())
693 dump_printf_loc (MSG_NOTE, vect_location,
694 "Splitting statement: ");
695 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt2_info->stmt, 0);
698 /* Add STMT1 as a singleton pattern definition sequence. */
699 gimple_seq *def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (stmt2_info);
700 vect_init_pattern_stmt (stmt1, stmt2_info, vectype);
701 gimple_seq_add_stmt_without_update (def_seq, stmt1);
703 /* Build the second of the two pattern statements. */
704 tree new_lhs = vect_recog_temp_ssa_var (lhs_type, NULL);
705 gassign *new_stmt2 = gimple_build_assign (new_lhs, NOP_EXPR, new_rhs);
706 vect_set_pattern_stmt (new_stmt2, stmt2_info, lhs_vectype);
708 if (dump_enabled_p ())
710 dump_printf_loc (MSG_NOTE, vect_location,
711 "into pattern statements: ");
712 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
713 dump_printf_loc (MSG_NOTE, vect_location, "and: ");
714 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt2, 0);
717 return true;
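/* For illustration: if STMT2_INFO is "int x = (int) c" with c of type
   signed char and the caller wants a signed short mid-way point, it
   builds STMT1 as "short tmp = (short) c" and passes NEW_RHS == tmp
   (tmp being an illustrative name for a "patt" temporary).  On success
   the conversion is represented by the pattern pair

     short tmp = (short) c;
     int   x'  = (int) tmp;

   with x' standing in for x as far as the vectorizer is concerned.  */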
721 /* Convert UNPROM to TYPE and return the result, adding new statements
722 to STMT_INFO's pattern definition statements if no better way is
723 available. VECTYPE is the vector form of TYPE. */
725 static tree
726 vect_convert_input (stmt_vec_info stmt_info, tree type,
727 vect_unpromoted_value *unprom, tree vectype)
729 /* Check for a no-op conversion. */
730 if (types_compatible_p (type, TREE_TYPE (unprom->op)))
731 return unprom->op;
733 /* Allow the caller to create constant vect_unpromoted_values. */
734 if (TREE_CODE (unprom->op) == INTEGER_CST)
735 return wide_int_to_tree (type, wi::to_widest (unprom->op));
737 /* See if we can reuse an existing result. */
738 if (unprom->caster)
740 tree lhs = gimple_get_lhs (unprom->caster->stmt);
741 if (types_compatible_p (TREE_TYPE (lhs), type))
742 return lhs;
745 /* We need a new conversion statement. */
746 tree new_op = vect_recog_temp_ssa_var (type, NULL);
747 gassign *new_stmt = gimple_build_assign (new_op, NOP_EXPR, unprom->op);
749 /* If the operation is the input to a vectorizable cast, try splitting
750 that cast into two, taking the required result as a mid-way point. */
751 if (unprom->caster)
753 tree lhs = gimple_get_lhs (unprom->caster->stmt);
754 if (TYPE_PRECISION (TREE_TYPE (lhs)) > TYPE_PRECISION (type)
755 && TYPE_PRECISION (type) > TYPE_PRECISION (unprom->type)
756 && (TYPE_UNSIGNED (unprom->type) || !TYPE_UNSIGNED (type))
757 && vect_split_statement (unprom->caster, new_op, new_stmt, vectype))
758 return new_op;
761 /* If OP is an external value, see if we can insert the new statement
762 on an incoming edge. */
763 if (unprom->dt == vect_external_def)
764 if (edge e = vect_get_external_def_edge (stmt_info->vinfo, unprom->op))
766 basic_block new_bb = gsi_insert_on_edge_immediate (e, new_stmt);
767 gcc_assert (!new_bb);
768 return new_op;
771 /* As a (common) last resort, add the statement to the pattern itself. */
772 append_pattern_def_seq (stmt_info, new_stmt, vectype);
773 return new_op;
776 /* Invoke vect_convert_input for N elements of UNPROM and store the
777 result in the corresponding elements of RESULT. */
779 static void
780 vect_convert_inputs (stmt_vec_info stmt_info, unsigned int n,
781 tree *result, tree type, vect_unpromoted_value *unprom,
782 tree vectype)
784 for (unsigned int i = 0; i < n; ++i)
786 unsigned int j;
787 for (j = 0; j < i; ++j)
788 if (unprom[j].op == unprom[i].op)
789 break;
790 if (j < i)
791 result[i] = result[j];
792 else
793 result[i] = vect_convert_input (stmt_info, type, &unprom[i], vectype);
797 /* The caller has created a (possibly empty) sequence of pattern definition
798 statements followed by a single statement PATTERN_STMT. Cast the result
799 of this final statement to TYPE. If a new statement is needed, add
800 PATTERN_STMT to the end of STMT_INFO's pattern definition statements
801 and return the new statement, otherwise return PATTERN_STMT as-is.
802 VECITYPE is the vector form of PATTERN_STMT's result type. */
804 static gimple *
805 vect_convert_output (stmt_vec_info stmt_info, tree type, gimple *pattern_stmt,
806 tree vecitype)
808 tree lhs = gimple_get_lhs (pattern_stmt);
809 if (!types_compatible_p (type, TREE_TYPE (lhs)))
811 append_pattern_def_seq (stmt_info, pattern_stmt, vecitype);
812 tree cast_var = vect_recog_temp_ssa_var (type, NULL);
813 pattern_stmt = gimple_build_assign (cast_var, NOP_EXPR, lhs);
815 return pattern_stmt;
818 /* Return true if STMT_VINFO describes a reduction for which reassociation
819 is allowed. If STMT_VINFO is part of a group, assume that it's part of
820 a reduction chain and optimistically assume that all statements
821 except the last allow reassociation. */
823 static bool
824 vect_reassociating_reduction_p (stmt_vec_info stmt_vinfo)
826 return (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
827 ? STMT_VINFO_REDUC_TYPE (stmt_vinfo) != FOLD_LEFT_REDUCTION
828 : REDUC_GROUP_FIRST_ELEMENT (stmt_vinfo) != NULL);
831 /* As above, but also require it to have code CODE and to be a reduction
832 in the outermost loop. When returning true, store the operands in
833 *OP0_OUT and *OP1_OUT. */
835 static bool
836 vect_reassociating_reduction_p (stmt_vec_info stmt_info, tree_code code,
837 tree *op0_out, tree *op1_out)
839 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
840 if (!loop_info)
841 return false;
843 gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
844 if (!assign || gimple_assign_rhs_code (assign) != code)
845 return false;
847 /* We don't allow changing the order of the computation in the inner-loop
848 when doing outer-loop vectorization. */
849 struct loop *loop = LOOP_VINFO_LOOP (loop_info);
850 if (loop && nested_in_vect_loop_p (loop, assign))
851 return false;
853 if (!vect_reassociating_reduction_p (stmt_info))
854 return false;
856 *op0_out = gimple_assign_rhs1 (assign);
857 *op1_out = gimple_assign_rhs2 (assign);
858 return true;
861 /* Function vect_recog_dot_prod_pattern
863 Try to find the following pattern:
865 type x_t, y_t;
866 TYPE1 prod;
867 TYPE2 sum = init;
868 loop:
869 sum_0 = phi <init, sum_1>
870 S1 x_t = ...
871 S2 y_t = ...
872 S3 x_T = (TYPE1) x_t;
873 S4 y_T = (TYPE1) y_t;
874 S5 prod = x_T * y_T;
875 [S6 prod = (TYPE2) prod; #optional]
876 S7 sum_1 = prod + sum_0;
878 where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
879 same size as 'TYPE1' or bigger. This is a special case of a reduction
880 computation.
882 Input:
884 * STMT_VINFO: The stmt from which the pattern search begins. In the
885 example, when this function is called with S7, the pattern {S3,S4,S5,S6,S7}
886 will be detected.
888 Output:
890 * TYPE_OUT: The type of the output of this pattern.
892 * Return value: A new stmt that will be used to replace the sequence of
893 stmts that constitute the pattern. In this case it will be:
894 WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>
896 Note: The dot-prod idiom is a widening reduction pattern that is
897 vectorized without preserving all the intermediate results. It
898 produces only N/2 (widened) results (by summing up pairs of
899 intermediate results) rather than all N results. Therefore, we
900 cannot allow this pattern when we want to get all the results and in
901 the correct order (as is the case when this computation is in an
902 inner-loop nested in an outer-loop that is being vectorized). */
904 static gimple *
905 vect_recog_dot_prod_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
907 tree oprnd0, oprnd1;
908 gimple *last_stmt = stmt_vinfo->stmt;
909 vec_info *vinfo = stmt_vinfo->vinfo;
910 tree type, half_type;
911 gimple *pattern_stmt;
912 tree var;
914 /* Look for the following pattern
915 DX = (TYPE1) X;
916 DY = (TYPE1) Y;
917 DPROD = DX * DY;
918 DDPROD = (TYPE2) DPROD;
919 sum_1 = DDPROD + sum_0;
920 In which
921 - DX is double the size of X
922 - DY is double the size of Y
923 - DX, DY, DPROD all have the same type
924 - sum is the same size as DPROD or bigger
925 - sum has been recognized as a reduction variable.
927 This is equivalent to:
928 DPROD = X w* Y; #widen mult
929 sum_1 = DPROD w+ sum_0; #widen summation
930 or
931 DPROD = X w* Y; #widen mult
932 sum_1 = DPROD + sum_0; #summation
935 /* Starting from LAST_STMT, follow the defs of its uses in search
936 of the above pattern. */
938 if (!vect_reassociating_reduction_p (stmt_vinfo, PLUS_EXPR,
939 &oprnd0, &oprnd1))
940 return NULL;
942 type = gimple_expr_type (last_stmt);
944 vect_unpromoted_value unprom_mult;
945 oprnd0 = vect_look_through_possible_promotion (vinfo, oprnd0, &unprom_mult);
947 /* So far so good. Since last_stmt was detected as a (summation) reduction,
948 we know that oprnd1 is the reduction variable (defined by a loop-header
949 phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
950 Left to check that oprnd0 is defined by a (widen_)mult_expr */
951 if (!oprnd0)
952 return NULL;
954 stmt_vec_info mult_vinfo = vect_get_internal_def (vinfo, oprnd0);
955 if (!mult_vinfo)
956 return NULL;
958 /* FORNOW. Can continue analyzing the def-use chain when this stmt is a phi
959 inside the loop (in case we are analyzing an outer-loop). */
960 vect_unpromoted_value unprom0[2];
961 if (!vect_widened_op_tree (mult_vinfo, MULT_EXPR, WIDEN_MULT_EXPR,
962 false, 2, unprom0, &half_type))
963 return NULL;
965 /* If there are two widening operations, make sure they agree on
966 the sign of the extension. */
967 if (TYPE_PRECISION (unprom_mult.type) != TYPE_PRECISION (type)
968 && TYPE_SIGN (unprom_mult.type) != TYPE_SIGN (half_type))
969 return NULL;
971 vect_pattern_detected ("vect_recog_dot_prod_pattern", last_stmt);
973 tree half_vectype;
974 if (!vect_supportable_direct_optab_p (type, DOT_PROD_EXPR, half_type,
975 type_out, &half_vectype))
976 return NULL;
978 /* Get the inputs in the appropriate types. */
979 tree mult_oprnd[2];
980 vect_convert_inputs (stmt_vinfo, 2, mult_oprnd, half_type,
981 unprom0, half_vectype);
983 var = vect_recog_temp_ssa_var (type, NULL);
984 pattern_stmt = gimple_build_assign (var, DOT_PROD_EXPR,
985 mult_oprnd[0], mult_oprnd[1], oprnd1);
987 return pattern_stmt;
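/* For illustration: a source loop of the shape the dot-product pattern
   targets, assuming signed short elements and an int accumulator:

     int
     dot (signed short *x, signed short *y, int n)
     {
       int sum = 0;
       for (int i = 0; i < n; i++)
         sum += x[i] * y[i];    // S3-S7: promote, multiply, accumulate
       return sum;
     }

   Here 'type' is short and TYPE1/TYPE2 are int; on targets that provide
   the DOT_PROD_EXPR optab for these modes, the loop body is replaced by
   the single DOT_PROD_EXPR pattern statement built above.  */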
991 /* Function vect_recog_sad_pattern
993 Try to find the following Sum of Absolute Difference (SAD) pattern:
995 type x_t, y_t;
996 signed TYPE1 diff, abs_diff;
997 TYPE2 sum = init;
998 loop:
999 sum_0 = phi <init, sum_1>
1000 S1 x_t = ...
1001 S2 y_t = ...
1002 S3 x_T = (TYPE1) x_t;
1003 S4 y_T = (TYPE1) y_t;
1004 S5 diff = x_T - y_T;
1005 S6 abs_diff = ABS_EXPR <diff>;
1006 [S7 abs_diff = (TYPE2) abs_diff; #optional]
1007 S8 sum_1 = abs_diff + sum_0;
1009 where 'TYPE1' is at least double the size of type 'type', and 'TYPE2' is the
1010 same size as 'TYPE1' or bigger. This is a special case of a reduction
1011 computation.
1013 Input:
1015 * STMT_VINFO: The stmt from which the pattern search begins. In the
1016 example, when this function is called with S8, the pattern
1017 {S3,S4,S5,S6,S7,S8} will be detected.
1019 Output:
1021 * TYPE_OUT: The type of the output of this pattern.
1023 * Return value: A new stmt that will be used to replace the sequence of
1024 stmts that constitute the pattern. In this case it will be:
1025 SAD_EXPR <x_t, y_t, sum_0>
1028 static gimple *
1029 vect_recog_sad_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
1031 gimple *last_stmt = stmt_vinfo->stmt;
1032 vec_info *vinfo = stmt_vinfo->vinfo;
1033 tree half_type;
1035 /* Look for the following pattern
1036 DX = (TYPE1) X;
1037 DY = (TYPE1) Y;
1038 DDIFF = DX - DY;
1039 DAD = ABS_EXPR <DDIFF>;
1040 DAD = (TYPE2) DAD; #optional
1041 sum_1 = DAD + sum_0;
1042 In which
1043 - DX is at least double the size of X
1044 - DY is at least double the size of Y
1045 - DX, DY, DDIFF, DAD all have the same type
1046 - sum is the same size as DAD or bigger
1047 - sum has been recognized as a reduction variable.
1049 This is equivalent to:
1050 DDIFF = X w- Y; #widen sub
1051 DAD = ABS_EXPR <DDIFF>;
1052 sum_1 = DAD w+ sum_0; #widen summation
1053 or
1054 DDIFF = X w- Y; #widen sub
1055 DAD = ABS_EXPR <DDIFF>;
1056 sum_1 = DAD + sum_0; #summation
1059 /* Starting from LAST_STMT, follow the defs of its uses in search
1060 of the above pattern. */
1062 tree plus_oprnd0, plus_oprnd1;
1063 if (!vect_reassociating_reduction_p (stmt_vinfo, PLUS_EXPR,
1064 &plus_oprnd0, &plus_oprnd1))
1065 return NULL;
1067 tree sum_type = gimple_expr_type (last_stmt);
1069 /* Any non-truncating sequence of conversions is OK here, since
1070 with a successful match, the result of the ABS(U) is known to fit
1071 within the nonnegative range of the result type. (It cannot be the
1072 negative of the minimum signed value due to the range of the widening
1073 MINUS_EXPR.) */
1074 vect_unpromoted_value unprom_abs;
1075 plus_oprnd0 = vect_look_through_possible_promotion (vinfo, plus_oprnd0,
1076 &unprom_abs);
1078 /* So far so good. Since last_stmt was detected as a (summation) reduction,
1079 we know that plus_oprnd1 is the reduction variable (defined by a loop-header
1080 phi), and plus_oprnd0 is an ssa-name defined by a stmt in the loop body.
1081 Then check that plus_oprnd0 is defined by an abs_expr. */
1083 if (!plus_oprnd0)
1084 return NULL;
1086 stmt_vec_info abs_stmt_vinfo = vect_get_internal_def (vinfo, plus_oprnd0);
1087 if (!abs_stmt_vinfo)
1088 return NULL;
1090 /* FORNOW. Can continue analyzing the def-use chain when this stmt is a phi
1091 inside the loop (in case we are analyzing an outer-loop). */
1092 gassign *abs_stmt = dyn_cast <gassign *> (abs_stmt_vinfo->stmt);
1093 if (!abs_stmt
1094 || (gimple_assign_rhs_code (abs_stmt) != ABS_EXPR
1095 && gimple_assign_rhs_code (abs_stmt) != ABSU_EXPR))
1096 return NULL;
1098 tree abs_oprnd = gimple_assign_rhs1 (abs_stmt);
1099 tree abs_type = TREE_TYPE (abs_oprnd);
1100 if (TYPE_UNSIGNED (abs_type))
1101 return NULL;
1103 /* Peel off conversions from the ABS input. This can involve sign
1104 changes (e.g. from an unsigned subtraction to a signed ABS input)
1105 or signed promotion, but it can't include unsigned promotion.
1106 (Note that ABS of an unsigned promotion should have been folded
1107 away before now anyway.) */
1108 vect_unpromoted_value unprom_diff;
1109 abs_oprnd = vect_look_through_possible_promotion (vinfo, abs_oprnd,
1110 &unprom_diff);
1111 if (!abs_oprnd)
1112 return NULL;
1113 if (TYPE_PRECISION (unprom_diff.type) != TYPE_PRECISION (abs_type)
1114 && TYPE_UNSIGNED (unprom_diff.type))
1115 return NULL;
1117 /* We then detect if the operand of abs_expr is defined by a minus_expr. */
1118 stmt_vec_info diff_stmt_vinfo = vect_get_internal_def (vinfo, abs_oprnd);
1119 if (!diff_stmt_vinfo)
1120 return NULL;
1122 /* FORNOW. Can continue analyzing the def-use chain when this stmt is a phi
1123 inside the loop (in case we are analyzing an outer-loop). */
1124 vect_unpromoted_value unprom[2];
1125 if (!vect_widened_op_tree (diff_stmt_vinfo, MINUS_EXPR, MINUS_EXPR,
1126 false, 2, unprom, &half_type))
1127 return NULL;
1129 vect_pattern_detected ("vect_recog_sad_pattern", last_stmt);
1131 tree half_vectype;
1132 if (!vect_supportable_direct_optab_p (sum_type, SAD_EXPR, half_type,
1133 type_out, &half_vectype))
1134 return NULL;
1136 /* Get the inputs to the SAD_EXPR in the appropriate types. */
1137 tree sad_oprnd[2];
1138 vect_convert_inputs (stmt_vinfo, 2, sad_oprnd, half_type,
1139 unprom, half_vectype);
1141 tree var = vect_recog_temp_ssa_var (sum_type, NULL);
1142 gimple *pattern_stmt = gimple_build_assign (var, SAD_EXPR, sad_oprnd[0],
1143 sad_oprnd[1], plus_oprnd1);
1145 return pattern_stmt;
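/* For illustration: a typical sum-of-absolute-differences loop that this
   pattern targets, assuming unsigned char pixels and an int accumulator:

     #include <stdlib.h>

     int
     sad (unsigned char *x, unsigned char *y, int n)
     {
       int sum = 0;
       for (int i = 0; i < n; i++)
         sum += abs (x[i] - y[i]);    // S3-S8: promote, subtract, ABS, add
       return sum;
     }

   The subtraction happens in int after the usual promotions, so 'type'
   is unsigned char and TYPE1/TYPE2 are int; targets providing the
   SAD_EXPR optab collapse the body into a single SAD_EXPR statement.  */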
1148 /* Recognize an operation that performs ORIG_CODE on widened inputs,
1149 so that it can be treated as though it had the form:
1151 A_TYPE a;
1152 B_TYPE b;
1153 HALF_TYPE a_cast = (HALF_TYPE) a; // possible no-op
1154 HALF_TYPE b_cast = (HALF_TYPE) b; // possible no-op
1155 | RES_TYPE a_extend = (RES_TYPE) a_cast; // promotion from HALF_TYPE
1156 | RES_TYPE b_extend = (RES_TYPE) b_cast; // promotion from HALF_TYPE
1157 | RES_TYPE res = a_extend ORIG_CODE b_extend;
1159 Try to replace the pattern with:
1161 A_TYPE a;
1162 B_TYPE b;
1163 HALF_TYPE a_cast = (HALF_TYPE) a; // possible no-op
1164 HALF_TYPE b_cast = (HALF_TYPE) b; // possible no-op
1165 | EXT_TYPE ext = a_cast WIDE_CODE b_cast;
1166 | RES_TYPE res = (EXT_TYPE) ext; // possible no-op
1168 where EXT_TYPE is wider than HALF_TYPE but has the same signedness.
1170 SHIFT_P is true if ORIG_CODE and WIDE_CODE are shifts. NAME is the
1171 name of the pattern being matched, for dump purposes. */
1173 static gimple *
1174 vect_recog_widen_op_pattern (stmt_vec_info last_stmt_info, tree *type_out,
1175 tree_code orig_code, tree_code wide_code,
1176 bool shift_p, const char *name)
1178 gimple *last_stmt = last_stmt_info->stmt;
1180 vect_unpromoted_value unprom[2];
1181 tree half_type;
1182 if (!vect_widened_op_tree (last_stmt_info, orig_code, orig_code,
1183 shift_p, 2, unprom, &half_type))
1184 return NULL;
1186 /* Pattern detected. */
1187 vect_pattern_detected (name, last_stmt);
1189 tree type = gimple_expr_type (last_stmt);
1190 tree itype = type;
1191 if (TYPE_PRECISION (type) != TYPE_PRECISION (half_type) * 2
1192 || TYPE_UNSIGNED (type) != TYPE_UNSIGNED (half_type))
1193 itype = build_nonstandard_integer_type (TYPE_PRECISION (half_type) * 2,
1194 TYPE_UNSIGNED (half_type));
1196 /* Check target support */
1197 tree vectype = get_vectype_for_scalar_type (half_type);
1198 tree vecitype = get_vectype_for_scalar_type (itype);
1199 enum tree_code dummy_code;
1200 int dummy_int;
1201 auto_vec<tree> dummy_vec;
1202 if (!vectype
1203 || !vecitype
1204 || !supportable_widening_operation (wide_code, last_stmt,
1205 vecitype, vectype,
1206 &dummy_code, &dummy_code,
1207 &dummy_int, &dummy_vec))
1208 return NULL;
1210 *type_out = get_vectype_for_scalar_type (type);
1211 if (!*type_out)
1212 return NULL;
1214 tree oprnd[2];
1215 vect_convert_inputs (last_stmt_info, 2, oprnd, half_type, unprom, vectype);
1217 tree var = vect_recog_temp_ssa_var (itype, NULL);
1218 gimple *pattern_stmt = gimple_build_assign (var, wide_code,
1219 oprnd[0], oprnd[1]);
1221 return vect_convert_output (last_stmt_info, type, pattern_stmt, vecitype);
1224 /* Try to detect multiplication on widened inputs, converting MULT_EXPR
1225 to WIDEN_MULT_EXPR. See vect_recog_widen_op_pattern for details. */
1227 static gimple *
1228 vect_recog_widen_mult_pattern (stmt_vec_info last_stmt_info, tree *type_out)
1230 return vect_recog_widen_op_pattern (last_stmt_info, type_out, MULT_EXPR,
1231 WIDEN_MULT_EXPR, false,
1232 "vect_recog_widen_mult_pattern");
1235 /* Function vect_recog_pow_pattern
1237 Try to find the following pattern:
1239 x = POW (y, N);
1241 with POW being one of pow, powf, powi, powif and N being
1242 either 2 or 0.5.
1244 Input:
1246 * STMT_VINFO: The stmt from which the pattern search begins.
1248 Output:
1250 * TYPE_OUT: The type of the output of this pattern.
1252 * Return value: A new stmt that will be used to replace the sequence of
1253 stmts that constitute the pattern. In this case it will be:
1254 x = x * x
1255 or
1256 x = sqrt (x)
1259 static gimple *
1260 vect_recog_pow_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
1262 gimple *last_stmt = stmt_vinfo->stmt;
1263 tree base, exp;
1264 gimple *stmt;
1265 tree var;
1267 if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
1268 return NULL;
1270 switch (gimple_call_combined_fn (last_stmt))
1272 CASE_CFN_POW:
1273 CASE_CFN_POWI:
1274 break;
1276 default:
1277 return NULL;
1280 base = gimple_call_arg (last_stmt, 0);
1281 exp = gimple_call_arg (last_stmt, 1);
1282 if (TREE_CODE (exp) != REAL_CST
1283 && TREE_CODE (exp) != INTEGER_CST)
1285 if (flag_unsafe_math_optimizations
1286 && TREE_CODE (base) == REAL_CST
1287 && !gimple_call_internal_p (last_stmt))
1289 combined_fn log_cfn;
1290 built_in_function exp_bfn;
1291 switch (DECL_FUNCTION_CODE (gimple_call_fndecl (last_stmt)))
1293 case BUILT_IN_POW:
1294 log_cfn = CFN_BUILT_IN_LOG;
1295 exp_bfn = BUILT_IN_EXP;
1296 break;
1297 case BUILT_IN_POWF:
1298 log_cfn = CFN_BUILT_IN_LOGF;
1299 exp_bfn = BUILT_IN_EXPF;
1300 break;
1301 case BUILT_IN_POWL:
1302 log_cfn = CFN_BUILT_IN_LOGL;
1303 exp_bfn = BUILT_IN_EXPL;
1304 break;
1305 default:
1306 return NULL;
1308 tree logc = fold_const_call (log_cfn, TREE_TYPE (base), base);
1309 tree exp_decl = builtin_decl_implicit (exp_bfn);
1310 /* Optimize pow (C, x) as exp (log (C) * x). Normally match.pd
1311 does that, but if C is a power of 2, we want to use
1312 exp2 (log2 (C) * x) in the non-vectorized version, but for
1313 vectorization we don't have vectorized exp2. */
1314 if (logc
1315 && TREE_CODE (logc) == REAL_CST
1316 && exp_decl
1317 && lookup_attribute ("omp declare simd",
1318 DECL_ATTRIBUTES (exp_decl)))
1320 cgraph_node *node = cgraph_node::get_create (exp_decl);
1321 if (node->simd_clones == NULL)
1323 if (targetm.simd_clone.compute_vecsize_and_simdlen == NULL
1324 || node->definition)
1325 return NULL;
1326 expand_simd_clones (node);
1327 if (node->simd_clones == NULL)
1328 return NULL;
1330 *type_out = get_vectype_for_scalar_type (TREE_TYPE (base));
1331 if (!*type_out)
1332 return NULL;
1333 tree def = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
1334 gimple *g = gimple_build_assign (def, MULT_EXPR, exp, logc);
1335 append_pattern_def_seq (stmt_vinfo, g);
1336 tree res = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
1337 g = gimple_build_call (exp_decl, 1, def);
1338 gimple_call_set_lhs (g, res);
1339 return g;
1343 return NULL;
1346 /* We now have a pow or powi builtin function call with a constant
1347 exponent. */
1349 /* Catch squaring. */
1350 if ((tree_fits_shwi_p (exp)
1351 && tree_to_shwi (exp) == 2)
1352 || (TREE_CODE (exp) == REAL_CST
1353 && real_equal (&TREE_REAL_CST (exp), &dconst2)))
1355 if (!vect_supportable_direct_optab_p (TREE_TYPE (base), MULT_EXPR,
1356 TREE_TYPE (base), type_out))
1357 return NULL;
1359 var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
1360 stmt = gimple_build_assign (var, MULT_EXPR, base, base);
1361 return stmt;
1364 /* Catch square root. */
1365 if (TREE_CODE (exp) == REAL_CST
1366 && real_equal (&TREE_REAL_CST (exp), &dconsthalf))
1368 *type_out = get_vectype_for_scalar_type (TREE_TYPE (base));
1369 if (*type_out
1370 && direct_internal_fn_supported_p (IFN_SQRT, *type_out,
1371 OPTIMIZE_FOR_SPEED))
1373 gcall *stmt = gimple_build_call_internal (IFN_SQRT, 1, base);
1374 var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
1375 gimple_call_set_lhs (stmt, var);
1376 gimple_call_set_nothrow (stmt, true);
1377 return stmt;
1381 return NULL;
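/* For illustration, the rewrites performed above are:

     pow (x, 2.0)   ->  x * x          (likewise powi (x, 2))
     pow (x, 0.5)   ->  .SQRT (x)      (when IFN_SQRT is supported)
     pow (C, x)     ->  exp (log (C) * x)
                        (C a real constant, -funsafe-math-optimizations,
                         relying on exp's "omp declare simd" clone)  */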
1385 /* Function vect_recog_widen_sum_pattern
1387 Try to find the following pattern:
1389 type x_t;
1390 TYPE x_T, sum = init;
1391 loop:
1392 sum_0 = phi <init, sum_1>
1393 S1 x_t = *p;
1394 S2 x_T = (TYPE) x_t;
1395 S3 sum_1 = x_T + sum_0;
1397 where type 'TYPE' is at least double the size of type 'type', i.e. we're
1398 summing elements of type 'type' into an accumulator of type 'TYPE'. This is
1399 a special case of a reduction computation.
1401 Input:
1403 * STMT_VINFO: The stmt from which the pattern search begins. In the example,
1404 when this function is called with S3, the pattern {S2,S3} will be detected.
1406 Output:
1408 * TYPE_OUT: The type of the output of this pattern.
1410 * Return value: A new stmt that will be used to replace the sequence of
1411 stmts that constitute the pattern. In this case it will be:
1412 WIDEN_SUM <x_t, sum_0>
1414 Note: The widening-sum idiom is a widening reduction pattern that is
1415 vectorized without preserving all the intermediate results. It
1416 produces only N/2 (widened) results (by summing up pairs of
1417 intermediate results) rather than all N results. Therefore, we
1418 cannot allow this pattern when we want to get all the results and in
1419 the correct order (as is the case when this computation is in an
1420 inner-loop nested in an outer-loop that is being vectorized). */
1422 static gimple *
1423 vect_recog_widen_sum_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
1425 gimple *last_stmt = stmt_vinfo->stmt;
1426 tree oprnd0, oprnd1;
1427 vec_info *vinfo = stmt_vinfo->vinfo;
1428 tree type;
1429 gimple *pattern_stmt;
1430 tree var;
1432 /* Look for the following pattern
1433 DX = (TYPE) X;
1434 sum_1 = DX + sum_0;
1435 In which DX is at least double the size of X, and sum_1 has been
1436 recognized as a reduction variable.
1439 /* Starting from LAST_STMT, follow the defs of its uses in search
1440 of the above pattern. */
1442 if (!vect_reassociating_reduction_p (stmt_vinfo, PLUS_EXPR,
1443 &oprnd0, &oprnd1))
1444 return NULL;
1446 type = gimple_expr_type (last_stmt);
1448 /* So far so good. Since last_stmt was detected as a (summation) reduction,
1449 we know that oprnd1 is the reduction variable (defined by a loop-header
1450 phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
1451 Left to check that oprnd0 is defined by a cast from type 'type' to type
1452 'TYPE'. */
1454 vect_unpromoted_value unprom0;
1455 if (!vect_look_through_possible_promotion (vinfo, oprnd0, &unprom0)
1456 || TYPE_PRECISION (unprom0.type) * 2 > TYPE_PRECISION (type))
1457 return NULL;
1459 vect_pattern_detected ("vect_recog_widen_sum_pattern", last_stmt);
1461 if (!vect_supportable_direct_optab_p (type, WIDEN_SUM_EXPR, unprom0.type,
1462 type_out))
1463 return NULL;
1465 var = vect_recog_temp_ssa_var (type, NULL);
1466 pattern_stmt = gimple_build_assign (var, WIDEN_SUM_EXPR, unprom0.op, oprnd1);
1468 return pattern_stmt;
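/* For illustration: a reduction of narrow elements into a wider
   accumulator, assuming unsigned short elements and an unsigned int sum:

     unsigned int
     total (unsigned short *p, int n)
     {
       unsigned int sum = 0;
       for (int i = 0; i < n; i++)
         sum += p[i];          // S2/S3: widen p[i], then accumulate
       return sum;
     }

   matches the pattern and becomes WIDEN_SUM_EXPR <p[i], sum_0> when the
   target provides the corresponding optab.  */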
1471 /* Recognize cases in which an operation is performed in one type WTYPE
1472 but could be done more efficiently in a narrower type NTYPE. For example,
1473 if we have:
1475 ATYPE a; // narrower than NTYPE
1476 BTYPE b; // narrower than NTYPE
1477 WTYPE aw = (WTYPE) a;
1478 WTYPE bw = (WTYPE) b;
1479 WTYPE res = aw + bw; // only uses of aw and bw
1481 then it would be more efficient to do:
1483 NTYPE an = (NTYPE) a;
1484 NTYPE bn = (NTYPE) b;
1485 NTYPE resn = an + bn;
1486 WTYPE res = (WTYPE) resn;
1488 Other situations include things like:
1490 ATYPE a; // NTYPE or narrower
1491 WTYPE aw = (WTYPE) a;
1492 WTYPE res = aw + b;
1494 when only "(NTYPE) res" is significant. In that case it's more efficient
1495 to truncate "b" and do the operation on NTYPE instead:
1497 NTYPE an = (NTYPE) a;
1498 NTYPE bn = (NTYPE) b; // truncation
1499 NTYPE resn = an + bn;
1500 WTYPE res = (WTYPE) resn;
1502 All users of "res" should then use "resn" instead, making the final
1503 statement dead (not marked as relevant). The final statement is still
1504 needed to maintain the type correctness of the IR.
1506 vect_determine_precisions has already determined the minimum
1507 precision of the operation and the minimum precision required
1508 by users of the result. */
1510 static gimple *
1511 vect_recog_over_widening_pattern (stmt_vec_info last_stmt_info, tree *type_out)
1513 gassign *last_stmt = dyn_cast <gassign *> (last_stmt_info->stmt);
1514 if (!last_stmt)
1515 return NULL;
1517 /* See whether we have found that this operation can be done on a
1518 narrower type without changing its semantics. */
1519 unsigned int new_precision = last_stmt_info->operation_precision;
1520 if (!new_precision)
1521 return NULL;
1523 vec_info *vinfo = last_stmt_info->vinfo;
1524 tree lhs = gimple_assign_lhs (last_stmt);
1525 tree type = TREE_TYPE (lhs);
1526 tree_code code = gimple_assign_rhs_code (last_stmt);
1528 /* Keep the first operand of a COND_EXPR as-is: only the other two
1529 operands are interesting. */
1530 unsigned int first_op = (code == COND_EXPR ? 2 : 1);
1532 /* Check the operands. */
1533 unsigned int nops = gimple_num_ops (last_stmt) - first_op;
1534 auto_vec <vect_unpromoted_value, 3> unprom (nops);
1535 unprom.quick_grow (nops);
1536 unsigned int min_precision = 0;
1537 bool single_use_p = false;
1538 for (unsigned int i = 0; i < nops; ++i)
1540 tree op = gimple_op (last_stmt, first_op + i);
1541 if (TREE_CODE (op) == INTEGER_CST)
1542 unprom[i].set_op (op, vect_constant_def);
1543 else if (TREE_CODE (op) == SSA_NAME)
1545 bool op_single_use_p = true;
1546 if (!vect_look_through_possible_promotion (vinfo, op, &unprom[i],
1547 &op_single_use_p))
1548 return NULL;
1549 /* If:
1551 (1) N bits of the result are needed;
1552 (2) all inputs are widened from M<N bits; and
1553 (3) one operand OP is a single-use SSA name
1555 we can shift the M->N widening from OP to the output
1556 without changing the number or type of extensions involved.
1557 This then reduces the number of copies of STMT_INFO.
1559 If instead of (3) more than one operand is a single-use SSA name,
1560 shifting the extension to the output is even more of a win.
1562 If instead:
1564 (1) N bits of the result are needed;
1565 (2) one operand OP2 is widened from M2<N bits;
1566 (3) another operand OP1 is widened from M1<M2 bits; and
1567 (4) both OP1 and OP2 are single-use
1569 the choice is between:
1571 (a) truncating OP2 to M1, doing the operation on M1,
1572 and then widening the result to N
1574 (b) widening OP1 to M2, doing the operation on M2, and then
1575 widening the result to N
1577 Both shift the M2->N widening of the inputs to the output.
1578 (a) additionally shifts the M1->M2 widening to the output;
1579 it requires fewer copies of STMT_INFO but requires an extra
1580 M2->M1 truncation.
1582 Which is better will depend on the complexity and cost of
1583 STMT_INFO, which is hard to predict at this stage. However,
1584 a clear tie-breaker in favor of (b) is the fact that the
1585 truncation in (a) increases the length of the operation chain.
1587 If instead of (4) only one of OP1 or OP2 is single-use,
1588 (b) is still a win over doing the operation in N bits:
1589 it still shifts the M2->N widening on the single-use operand
1590 to the output and reduces the number of STMT_INFO copies.
1592 If neither operand is single-use then operating on fewer than
1593 N bits might lead to more extensions overall. Whether it does
1594 or not depends on global information about the vectorization
1595 region, and whether that's a good trade-off would again
1596 depend on the complexity and cost of the statements involved,
1597 as well as things like register pressure that are not normally
1598 modelled at this stage. We therefore ignore these cases
1599 and just optimize the clear single-use wins above.
1601 Thus we take the maximum precision of the unpromoted operands
1602 and record whether any operand is single-use. */
1603 if (unprom[i].dt == vect_internal_def)
1605 min_precision = MAX (min_precision,
1606 TYPE_PRECISION (unprom[i].type));
1607 single_use_p |= op_single_use_p;
1612 /* Although the operation could be done in operation_precision, we have
1613 to balance that against introducing extra truncations or extensions.
1614 Calculate the minimum precision that can be handled efficiently.
1616 The loop above determined that the operation could be handled
1617 efficiently in MIN_PRECISION if SINGLE_USE_P; this would shift an
1618 extension from the inputs to the output without introducing more
1619 instructions, and would reduce the number of instructions required
1620 for STMT_INFO itself.
1622 vect_determine_precisions has also determined that the result only
1623 needs min_output_precision bits. Truncating by a factor of N times
1624 requires a tree of N - 1 instructions, so if TYPE is N times wider
1625 than min_output_precision, doing the operation in TYPE and truncating
1626 the result requires N + (N - 1) = 2N - 1 instructions per output vector.
1627 In contrast:
1629 - truncating the input to a unary operation and doing the operation
1630 in the new type requires at most N - 1 + 1 = N instructions per
1631 output vector
1633 - doing the same for a binary operation requires at most
1634 (N - 1) * 2 + 1 = 2N - 1 instructions per output vector
1636 Both unary and binary operations require fewer instructions than
1637 this if the operands were extended from a suitable truncated form.
1638 Thus there is usually nothing to lose by doing operations in
1639 min_output_precision bits, but there can be something to gain. */
1640 if (!single_use_p)
1641 min_precision = last_stmt_info->min_output_precision;
1642 else
1643 min_precision = MIN (min_precision, last_stmt_info->min_output_precision);
1645 /* Apply the minimum efficient precision we just calculated. */
1646 if (new_precision < min_precision)
1647 new_precision = min_precision;
1648 if (new_precision >= TYPE_PRECISION (type))
1649 return NULL;
1651 vect_pattern_detected ("vect_recog_over_widening_pattern", last_stmt);
1653 *type_out = get_vectype_for_scalar_type (type);
1654 if (!*type_out)
1655 return NULL;
1657 /* We've found a viable pattern. Get the new type of the operation. */
1658 bool unsigned_p = (last_stmt_info->operation_sign == UNSIGNED);
1659 tree new_type = build_nonstandard_integer_type (new_precision, unsigned_p);
1661 /* We specifically don't check here whether the target supports the
1662 new operation, since it might be something that a later pattern
1663 wants to rewrite anyway. If targets have a minimum element size
1664 for some optabs, we should pattern-match smaller ops to larger ops
1665 where beneficial. */
1666 tree new_vectype = get_vectype_for_scalar_type (new_type);
1667 if (!new_vectype)
1668 return NULL;
1670 if (dump_enabled_p ())
1672 dump_printf_loc (MSG_NOTE, vect_location, "demoting ");
1673 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
1674 dump_printf (MSG_NOTE, " to ");
1675 dump_generic_expr (MSG_NOTE, TDF_SLIM, new_type);
1676 dump_printf (MSG_NOTE, "\n");
1679 /* Calculate the rhs operands for an operation on NEW_TYPE. */
1680 tree ops[3] = {};
1681 for (unsigned int i = 1; i < first_op; ++i)
1682 ops[i - 1] = gimple_op (last_stmt, i);
1683 vect_convert_inputs (last_stmt_info, nops, &ops[first_op - 1],
1684 new_type, &unprom[0], new_vectype);
1686 /* Use the operation to produce a result of type NEW_TYPE. */
1687 tree new_var = vect_recog_temp_ssa_var (new_type, NULL);
1688 gimple *pattern_stmt = gimple_build_assign (new_var, code,
1689 ops[0], ops[1], ops[2]);
1690 gimple_set_location (pattern_stmt, gimple_location (last_stmt));
1692 if (dump_enabled_p ())
1694 dump_printf_loc (MSG_NOTE, vect_location,
1695 "created pattern stmt: ");
1696 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
1699 pattern_stmt = vect_convert_output (last_stmt_info, type,
1700 pattern_stmt, new_vectype);
1702 return pattern_stmt;
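/* For illustration: with unsigned char inputs and outputs,

     void
     f (unsigned char *restrict out, unsigned char *restrict a,
        unsigned char *restrict b, int n)
     {
       for (int i = 0; i < n; i++)
         out[i] = a[i] + b[i];   // C performs the addition in int
     }

   only 8 bits of each result are needed, so the addition that the source
   performs in 32-bit int can be narrowed to a type chosen as described
   above (how far depends on what vect_determine_precisions recorded),
   avoiding the unpack/pack operations that full-width int vectors would
   require.  */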
1705 /* Recognize the patterns:
1707 ATYPE a; // narrower than TYPE
1708 BTYPE b; // narrower than TYPE
1709 (1) TYPE avg = ((TYPE) a + (TYPE) b) >> 1;
1710 or (2) TYPE avg = ((TYPE) a + (TYPE) b + 1) >> 1;
1712 where only the bottom half of avg is used. Try to transform them into:
1714 (1) NTYPE avg' = .AVG_FLOOR ((NTYPE) a, (NTYPE) b);
1715 or (2) NTYPE avg' = .AVG_CEIL ((NTYPE) a, (NTYPE) b);
1717 followed by:
1719 TYPE avg = (TYPE) avg';
1721 where NTYPE is no wider than half of TYPE. Since only the bottom half
1722 of avg is used, all or part of the cast of avg' should become redundant. */
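/* As a concrete illustration (a sketch only; the function name and the use
   of unsigned char elements are not taken from this file), the kind of
   scalar source that matches form (2) above is:

     void
     avg_ceil (unsigned char *restrict r, const unsigned char *restrict a,
               const unsigned char *restrict b, int n)
     {
       for (int i = 0; i < n; ++i)
         r[i] = (a[i] + b[i] + 1) >> 1;
     }

   where the additions happen in int after the usual promotions but only the
   low 8 bits of each result are stored, so the shift can become .AVG_CEIL
   on unsigned char elements.  */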
1724 static gimple *
1725 vect_recog_average_pattern (stmt_vec_info last_stmt_info, tree *type_out)
1727 /* Check for a shift right by one bit. */
1728 gassign *last_stmt = dyn_cast <gassign *> (last_stmt_info->stmt);
1729 vec_info *vinfo = last_stmt_info->vinfo;
1730 if (!last_stmt
1731 || gimple_assign_rhs_code (last_stmt) != RSHIFT_EXPR
1732 || !integer_onep (gimple_assign_rhs2 (last_stmt)))
1733 return NULL;
1735 /* Check that the shift result is wider than the users of the
1736 result need (i.e. that narrowing would be a natural choice). */
1737 tree lhs = gimple_assign_lhs (last_stmt);
1738 tree type = TREE_TYPE (lhs);
1739 unsigned int target_precision
1740 = vect_element_precision (last_stmt_info->min_output_precision);
1741 if (!INTEGRAL_TYPE_P (type) || target_precision >= TYPE_PRECISION (type))
1742 return NULL;
1744 /* Get the definition of the shift input. */
1745 tree rshift_rhs = gimple_assign_rhs1 (last_stmt);
1746 stmt_vec_info plus_stmt_info = vect_get_internal_def (vinfo, rshift_rhs);
1747 if (!plus_stmt_info)
1748 return NULL;
1750 /* Check whether the shift input can be seen as a tree of additions on
1751 2 or 3 widened inputs.
1753 Note that the pattern should be a win even if the result of one or
1754 more additions is reused elsewhere: if the pattern matches, we'd be
1755 replacing 2N RSHIFT_EXPRs and N VEC_PACK_*s with N IFN_AVG_*s. */
1756 internal_fn ifn = IFN_AVG_FLOOR;
1757 vect_unpromoted_value unprom[3];
1758 tree new_type;
1759 unsigned int nops = vect_widened_op_tree (plus_stmt_info, PLUS_EXPR,
1760 PLUS_EXPR, false, 3,
1761 unprom, &new_type);
1762 if (nops == 0)
1763 return NULL;
1764 if (nops == 3)
1766 /* Check that one operand is 1. */
1767 unsigned int i;
1768 for (i = 0; i < 3; ++i)
1769 if (integer_onep (unprom[i].op))
1770 break;
1771 if (i == 3)
1772 return NULL;
1773 /* Throw away the 1 operand and keep the other two. */
1774 if (i < 2)
1775 unprom[i] = unprom[2];
1776 ifn = IFN_AVG_CEIL;
1779 vect_pattern_detected ("vect_recog_average_pattern", last_stmt);
1781 /* We know that:
1783 (a) the operation can be viewed as:
1785 TYPE widened0 = (TYPE) UNPROM[0];
1786 TYPE widened1 = (TYPE) UNPROM[1];
1787 TYPE tmp1 = widened0 + widened1 {+ 1};
1788 TYPE tmp2 = tmp1 >> 1; // LAST_STMT_INFO
1790 (b) the first two statements are equivalent to:
1792 TYPE widened0 = (TYPE) (NEW_TYPE) UNPROM[0];
1793 TYPE widened1 = (TYPE) (NEW_TYPE) UNPROM[1];
1795 (c) vect_recog_over_widening_pattern has already tried to narrow TYPE
1796 where sensible;
1798 (d) all the operations can be performed correctly at twice the width of
1799 NEW_TYPE, due to the nature of the average operation; and
1801 (e) users of the result of the right shift need only TARGET_PRECISION
1802 bits, where TARGET_PRECISION is no more than half of TYPE's
1803 precision.
1805 Under these circumstances, the only situation in which NEW_TYPE
1806 could be narrower than TARGET_PRECISION is if widened0, widened1
1807 and an addition result are all used more than once. Thus we can
1808 treat any widening of UNPROM[0] and UNPROM[1] to TARGET_PRECISION
1809 as "free", whereas widening the result of the average instruction
1810 from NEW_TYPE to TARGET_PRECISION would be a new operation. It's
1811 therefore better not to go narrower than TARGET_PRECISION. */
1812 if (TYPE_PRECISION (new_type) < target_precision)
1813 new_type = build_nonstandard_integer_type (target_precision,
1814 TYPE_UNSIGNED (new_type));
1816 /* Check for target support. */
1817 tree new_vectype = get_vectype_for_scalar_type (new_type);
1818 if (!new_vectype
1819 || !direct_internal_fn_supported_p (ifn, new_vectype,
1820 OPTIMIZE_FOR_SPEED))
1821 return NULL;
1823 /* The IR requires a valid vector type for the cast result, even though
1824 it's likely to be discarded. */
1825 *type_out = get_vectype_for_scalar_type (type);
1826 if (!*type_out)
1827 return NULL;
1829 /* Generate the IFN_AVG* call. */
1830 tree new_var = vect_recog_temp_ssa_var (new_type, NULL);
1831 tree new_ops[2];
1832 vect_convert_inputs (last_stmt_info, 2, new_ops, new_type,
1833 unprom, new_vectype);
1834 gcall *average_stmt = gimple_build_call_internal (ifn, 2, new_ops[0],
1835 new_ops[1]);
1836 gimple_call_set_lhs (average_stmt, new_var);
1837 gimple_set_location (average_stmt, gimple_location (last_stmt));
1839 if (dump_enabled_p ())
1841 dump_printf_loc (MSG_NOTE, vect_location,
1842 "created pattern stmt: ");
1843 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, average_stmt, 0);
1846 return vect_convert_output (last_stmt_info, type, average_stmt, new_vectype);
1849 /* Recognize cases in which the input to a cast is wider than its
1850 output, and the input is fed by a widening operation. Fold this
1851 by removing the unnecessary intermediate widening. E.g.:
1853 unsigned char a;
1854 unsigned int b = (unsigned int) a;
1855 unsigned short c = (unsigned short) b;
1857 -->
1859 unsigned short c = (unsigned short) a;
1861 Although this is rare in input IR, it is an expected side-effect
1862 of the over-widening pattern above.
1864 This is beneficial also for integer-to-float conversions, if the
1865 widened integer has more bits than the float, and if the unwidened
1866 input doesn't. */
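/* A sketch of the integer-to-float case mentioned above (illustrative only;
   it assumes a 32-bit float):

     unsigned char a;
     unsigned long long b = a;
     float f = b;

   The unwidened input has only 8 value bits, so converting a directly gives
   the same float as converting the 64-bit intermediate, and the pattern can
   emit f = (float) a.  */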
1868 static gimple *
1869 vect_recog_cast_forwprop_pattern (stmt_vec_info last_stmt_info, tree *type_out)
1871 /* Check for a cast, including an integer-to-float conversion. */
1872 gassign *last_stmt = dyn_cast <gassign *> (last_stmt_info->stmt);
1873 if (!last_stmt)
1874 return NULL;
1875 tree_code code = gimple_assign_rhs_code (last_stmt);
1876 if (!CONVERT_EXPR_CODE_P (code) && code != FLOAT_EXPR)
1877 return NULL;
1879 /* Make sure that the rhs is a scalar with a natural bitsize. */
1880 tree lhs = gimple_assign_lhs (last_stmt);
1881 if (!lhs)
1882 return NULL;
1883 tree lhs_type = TREE_TYPE (lhs);
1884 scalar_mode lhs_mode;
1885 if (VECT_SCALAR_BOOLEAN_TYPE_P (lhs_type)
1886 || !is_a <scalar_mode> (TYPE_MODE (lhs_type), &lhs_mode))
1887 return NULL;
1889 /* Check for a narrowing operation (from a vector point of view). */
1890 tree rhs = gimple_assign_rhs1 (last_stmt);
1891 tree rhs_type = TREE_TYPE (rhs);
1892 if (!INTEGRAL_TYPE_P (rhs_type)
1893 || VECT_SCALAR_BOOLEAN_TYPE_P (rhs_type)
1894 || TYPE_PRECISION (rhs_type) <= GET_MODE_BITSIZE (lhs_mode))
1895 return NULL;
1897 /* Try to find an unpromoted input. */
1898 vec_info *vinfo = last_stmt_info->vinfo;
1899 vect_unpromoted_value unprom;
1900 if (!vect_look_through_possible_promotion (vinfo, rhs, &unprom)
1901 || TYPE_PRECISION (unprom.type) >= TYPE_PRECISION (rhs_type))
1902 return NULL;
1904 /* If the bits above RHS_TYPE matter, make sure that they're the
1905 same when extending from UNPROM as they are when extending from RHS. */
1906 if (!INTEGRAL_TYPE_P (lhs_type)
1907 && TYPE_SIGN (rhs_type) != TYPE_SIGN (unprom.type))
1908 return NULL;
1910 /* We can get the same result by casting UNPROM directly, to avoid
1911 the unnecessary widening and narrowing. */
1912 vect_pattern_detected ("vect_recog_cast_forwprop_pattern", last_stmt);
1914 *type_out = get_vectype_for_scalar_type (lhs_type);
1915 if (!*type_out)
1916 return NULL;
1918 tree new_var = vect_recog_temp_ssa_var (lhs_type, NULL);
1919 gimple *pattern_stmt = gimple_build_assign (new_var, code, unprom.op);
1920 gimple_set_location (pattern_stmt, gimple_location (last_stmt));
1922 return pattern_stmt;
1925 /* Try to detect a shift left of a widened input, converting LSHIFT_EXPR
1926 to WIDEN_LSHIFT_EXPR. See vect_recog_widen_op_pattern for details. */
1928 static gimple *
1929 vect_recog_widen_shift_pattern (stmt_vec_info last_stmt_info, tree *type_out)
1931 return vect_recog_widen_op_pattern (last_stmt_info, type_out, LSHIFT_EXPR,
1932 WIDEN_LSHIFT_EXPR, true,
1933 "vect_recog_widen_shift_pattern");
1936 /* Detect a rotate pattern that wouldn't be otherwise vectorized:
1938 type a_t, b_t, c_t;
1940 S0 a_t = b_t r<< c_t;
1942 Input/Output:
1944 * STMT_VINFO: The stmt from which the pattern search begins,
1945 i.e. the shift/rotate stmt. The original stmt (S0) is replaced
1946 with a sequence:
1948 S1 d_t = -c_t;
1949 S2 e_t = d_t & (B - 1);
1950 S3 f_t = b_t << c_t;
1951 S4 g_t = b_t >> e_t;
1952 S0 a_t = f_t | g_t;
1954 where B is element bitsize of type.
1956 Output:
1958 * TYPE_OUT: The type of the output of this pattern.
1960 * Return value: A new stmt that will be used to replace the rotate
1961 S0 stmt. */
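/* A scalar sketch of the replacement sequence above (illustrative only, for
   a left rotate of a 32-bit unsigned value, so B == 32):

     unsigned int
     rotl32 (unsigned int b_t, unsigned int c_t)
     {
       unsigned int d_t = -c_t;           // S1
       unsigned int e_t = d_t & 31;       // S2: d_t & (B - 1)
       unsigned int f_t = b_t << c_t;     // S3
       unsigned int g_t = b_t >> e_t;     // S4
       return f_t | g_t;                  // S0
     }

   For c_t == 0 both shift counts are 0, so no shift by the full element
   width is ever performed.  */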
1963 static gimple *
1964 vect_recog_rotate_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
1966 gimple *last_stmt = stmt_vinfo->stmt;
1967 tree oprnd0, oprnd1, lhs, var, var1, var2, vectype, type, stype, def, def2;
1968 gimple *pattern_stmt, *def_stmt;
1969 enum tree_code rhs_code;
1970 vec_info *vinfo = stmt_vinfo->vinfo;
1971 enum vect_def_type dt;
1972 optab optab1, optab2;
1973 edge ext_def = NULL;
1975 if (!is_gimple_assign (last_stmt))
1976 return NULL;
1978 rhs_code = gimple_assign_rhs_code (last_stmt);
1979 switch (rhs_code)
1981 case LROTATE_EXPR:
1982 case RROTATE_EXPR:
1983 break;
1984 default:
1985 return NULL;
1988 lhs = gimple_assign_lhs (last_stmt);
1989 oprnd0 = gimple_assign_rhs1 (last_stmt);
1990 type = TREE_TYPE (oprnd0);
1991 oprnd1 = gimple_assign_rhs2 (last_stmt);
1992 if (TREE_CODE (oprnd0) != SSA_NAME
1993 || TYPE_PRECISION (TREE_TYPE (lhs)) != TYPE_PRECISION (type)
1994 || !INTEGRAL_TYPE_P (type)
1995 || !TYPE_UNSIGNED (type))
1996 return NULL;
1998 if (!vect_is_simple_use (oprnd1, vinfo, &dt, &def_stmt))
1999 return NULL;
2001 if (dt != vect_internal_def
2002 && dt != vect_constant_def
2003 && dt != vect_external_def)
2004 return NULL;
2006 vectype = get_vectype_for_scalar_type (type);
2007 if (vectype == NULL_TREE)
2008 return NULL;
2010 /* If vector/vector or vector/scalar rotate is supported by the target,
2011 don't do anything here. */
2012 optab1 = optab_for_tree_code (rhs_code, vectype, optab_vector);
2013 if (optab1
2014 && optab_handler (optab1, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2015 return NULL;
2017 if (is_a <bb_vec_info> (vinfo) || dt != vect_internal_def)
2019 optab2 = optab_for_tree_code (rhs_code, vectype, optab_scalar);
2020 if (optab2
2021 && optab_handler (optab2, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2022 return NULL;
2025 /* If vector/vector or vector/scalar shifts aren't supported by the target,
2026 don't do anything here either. */
2027 optab1 = optab_for_tree_code (LSHIFT_EXPR, vectype, optab_vector);
2028 optab2 = optab_for_tree_code (RSHIFT_EXPR, vectype, optab_vector);
2029 if (!optab1
2030 || optab_handler (optab1, TYPE_MODE (vectype)) == CODE_FOR_nothing
2031 || !optab2
2032 || optab_handler (optab2, TYPE_MODE (vectype)) == CODE_FOR_nothing)
2034 if (! is_a <bb_vec_info> (vinfo) && dt == vect_internal_def)
2035 return NULL;
2036 optab1 = optab_for_tree_code (LSHIFT_EXPR, vectype, optab_scalar);
2037 optab2 = optab_for_tree_code (RSHIFT_EXPR, vectype, optab_scalar);
2038 if (!optab1
2039 || optab_handler (optab1, TYPE_MODE (vectype)) == CODE_FOR_nothing
2040 || !optab2
2041 || optab_handler (optab2, TYPE_MODE (vectype)) == CODE_FOR_nothing)
2042 return NULL;
2045 *type_out = vectype;
2047 if (dt == vect_external_def
2048 && TREE_CODE (oprnd1) == SSA_NAME)
2049 ext_def = vect_get_external_def_edge (vinfo, oprnd1);
2051 def = NULL_TREE;
2052 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
2053 if (TREE_CODE (oprnd1) == INTEGER_CST
2054 || TYPE_MODE (TREE_TYPE (oprnd1)) == mode)
2055 def = oprnd1;
2056 else if (def_stmt && gimple_assign_cast_p (def_stmt))
2058 tree rhs1 = gimple_assign_rhs1 (def_stmt);
2059 if (TYPE_MODE (TREE_TYPE (rhs1)) == mode
2060 && TYPE_PRECISION (TREE_TYPE (rhs1))
2061 == TYPE_PRECISION (type))
2062 def = rhs1;
2065 if (def == NULL_TREE)
2067 def = vect_recog_temp_ssa_var (type, NULL);
2068 def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd1);
2069 if (ext_def)
2071 basic_block new_bb
2072 = gsi_insert_on_edge_immediate (ext_def, def_stmt);
2073 gcc_assert (!new_bb);
2075 else
2076 append_pattern_def_seq (stmt_vinfo, def_stmt);
2078 stype = TREE_TYPE (def);
2079 scalar_int_mode smode = SCALAR_INT_TYPE_MODE (stype);
2081 if (TREE_CODE (def) == INTEGER_CST)
2083 if (!tree_fits_uhwi_p (def)
2084 || tree_to_uhwi (def) >= GET_MODE_PRECISION (mode)
2085 || integer_zerop (def))
2086 return NULL;
2087 def2 = build_int_cst (stype,
2088 GET_MODE_PRECISION (mode) - tree_to_uhwi (def));
2090 else
2092 tree vecstype = get_vectype_for_scalar_type (stype);
2094 if (vecstype == NULL_TREE)
2095 return NULL;
2096 def2 = vect_recog_temp_ssa_var (stype, NULL);
2097 def_stmt = gimple_build_assign (def2, NEGATE_EXPR, def);
2098 if (ext_def)
2100 basic_block new_bb
2101 = gsi_insert_on_edge_immediate (ext_def, def_stmt);
2102 gcc_assert (!new_bb);
2104 else
2105 append_pattern_def_seq (stmt_vinfo, def_stmt, vecstype);
2107 def2 = vect_recog_temp_ssa_var (stype, NULL);
2108 tree mask = build_int_cst (stype, GET_MODE_PRECISION (smode) - 1);
2109 def_stmt = gimple_build_assign (def2, BIT_AND_EXPR,
2110 gimple_assign_lhs (def_stmt), mask);
2111 if (ext_def)
2113 basic_block new_bb
2114 = gsi_insert_on_edge_immediate (ext_def, def_stmt);
2115 gcc_assert (!new_bb);
2117 else
2118 append_pattern_def_seq (stmt_vinfo, def_stmt, vecstype);
2121 var1 = vect_recog_temp_ssa_var (type, NULL);
2122 def_stmt = gimple_build_assign (var1, rhs_code == LROTATE_EXPR
2123 ? LSHIFT_EXPR : RSHIFT_EXPR,
2124 oprnd0, def);
2125 append_pattern_def_seq (stmt_vinfo, def_stmt);
2127 var2 = vect_recog_temp_ssa_var (type, NULL);
2128 def_stmt = gimple_build_assign (var2, rhs_code == LROTATE_EXPR
2129 ? RSHIFT_EXPR : LSHIFT_EXPR,
2130 oprnd0, def2);
2131 append_pattern_def_seq (stmt_vinfo, def_stmt);
2133 /* Pattern detected. */
2134 vect_pattern_detected ("vect_recog_rotate_pattern", last_stmt);
2136 /* Pattern supported. Create a stmt to be used to replace the pattern. */
2137 var = vect_recog_temp_ssa_var (type, NULL);
2138 pattern_stmt = gimple_build_assign (var, BIT_IOR_EXPR, var1, var2);
2140 return pattern_stmt;
2143 /* Detect a vector by vector shift pattern that wouldn't be otherwise
2144 vectorized:
2146 type a_t;
2147 TYPE b_T, res_T;
2149 S1 a_t = ;
2150 S2 b_T = ;
2151 S3 res_T = b_T op a_t;
2153 where type 'TYPE' is a type with a different size from 'type',
2154 and op is <<, >> or rotate.
2156 Also detect cases:
2158 type a_t;
2159 TYPE b_T, c_T, res_T;
2161 S0 c_T = ;
2162 S1 a_t = (type) c_T;
2163 S2 b_T = ;
2164 S3 res_T = b_T op a_t;
2166 Input/Output:
2168 * STMT_VINFO: The stmt from which the pattern search begins,
2169 i.e. the shift/rotate stmt. The original stmt (S3) is replaced
2170 with a shift/rotate which has same type on both operands, in the
2171 second case just b_T op c_T, in the first case with added cast
2172 from a_t to c_T in STMT_VINFO_PATTERN_DEF_SEQ.
2174 Output:
2176 * TYPE_OUT: The type of the output of this pattern.
2178 * Return value: A new stmt that will be used to replace the shift/rotate
2179 S3 stmt. */
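/* A concrete instance of the first form above (illustrative only, shown at
   the GIMPLE level with 'type' == unsigned char and 'TYPE' == unsigned int):

     unsigned char a_t;
     unsigned int b_T, res_T;

     S1 a_t = ...;
     S2 b_T = ...;
     S3 res_T = b_T >> a_t;

   The pattern widens a_t to unsigned int in the pattern def sequence so
   that the shift amount and the shifted value share one vector type.  */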
2181 static gimple *
2182 vect_recog_vector_vector_shift_pattern (stmt_vec_info stmt_vinfo,
2183 tree *type_out)
2185 gimple *last_stmt = stmt_vinfo->stmt;
2186 tree oprnd0, oprnd1, lhs, var;
2187 gimple *pattern_stmt;
2188 enum tree_code rhs_code;
2189 vec_info *vinfo = stmt_vinfo->vinfo;
2191 if (!is_gimple_assign (last_stmt))
2192 return NULL;
2194 rhs_code = gimple_assign_rhs_code (last_stmt);
2195 switch (rhs_code)
2197 case LSHIFT_EXPR:
2198 case RSHIFT_EXPR:
2199 case LROTATE_EXPR:
2200 case RROTATE_EXPR:
2201 break;
2202 default:
2203 return NULL;
2206 lhs = gimple_assign_lhs (last_stmt);
2207 oprnd0 = gimple_assign_rhs1 (last_stmt);
2208 oprnd1 = gimple_assign_rhs2 (last_stmt);
2209 if (TREE_CODE (oprnd0) != SSA_NAME
2210 || TREE_CODE (oprnd1) != SSA_NAME
2211 || TYPE_MODE (TREE_TYPE (oprnd0)) == TYPE_MODE (TREE_TYPE (oprnd1))
2212 || !type_has_mode_precision_p (TREE_TYPE (oprnd1))
2213 || TYPE_PRECISION (TREE_TYPE (lhs))
2214 != TYPE_PRECISION (TREE_TYPE (oprnd0)))
2215 return NULL;
2217 stmt_vec_info def_vinfo = vect_get_internal_def (vinfo, oprnd1);
2218 if (!def_vinfo)
2219 return NULL;
2221 *type_out = get_vectype_for_scalar_type (TREE_TYPE (oprnd0));
2222 if (*type_out == NULL_TREE)
2223 return NULL;
2225 tree def = NULL_TREE;
2226 gassign *def_stmt = dyn_cast <gassign *> (def_vinfo->stmt);
2227 if (def_stmt && gimple_assign_cast_p (def_stmt))
2229 tree rhs1 = gimple_assign_rhs1 (def_stmt);
2230 if (TYPE_MODE (TREE_TYPE (rhs1)) == TYPE_MODE (TREE_TYPE (oprnd0))
2231 && TYPE_PRECISION (TREE_TYPE (rhs1))
2232 == TYPE_PRECISION (TREE_TYPE (oprnd0)))
2234 if (TYPE_PRECISION (TREE_TYPE (oprnd1))
2235 >= TYPE_PRECISION (TREE_TYPE (rhs1)))
2236 def = rhs1;
2237 else
2239 tree mask
2240 = build_low_bits_mask (TREE_TYPE (rhs1),
2241 TYPE_PRECISION (TREE_TYPE (oprnd1)));
2242 def = vect_recog_temp_ssa_var (TREE_TYPE (rhs1), NULL);
2243 def_stmt = gimple_build_assign (def, BIT_AND_EXPR, rhs1, mask);
2244 tree vecstype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
2245 append_pattern_def_seq (stmt_vinfo, def_stmt, vecstype);
2250 if (def == NULL_TREE)
2252 def = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL);
2253 def_stmt = gimple_build_assign (def, NOP_EXPR, oprnd1);
2254 append_pattern_def_seq (stmt_vinfo, def_stmt);
2257 /* Pattern detected. */
2258 vect_pattern_detected ("vect_recog_vector_vector_shift_pattern", last_stmt);
2260 /* Pattern supported. Create a stmt to be used to replace the pattern. */
2261 var = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL);
2262 pattern_stmt = gimple_build_assign (var, rhs_code, oprnd0, def);
2264 return pattern_stmt;
2267 /* Return true iff the target has a vector optab implementing the operation
2268 CODE on type VECTYPE. */
2270 static bool
2271 target_has_vecop_for_code (tree_code code, tree vectype)
2273 optab voptab = optab_for_tree_code (code, vectype, optab_vector);
2274 return voptab
2275 && optab_handler (voptab, TYPE_MODE (vectype)) != CODE_FOR_nothing;
2278 /* Verify that the target has optabs of VECTYPE to perform all the steps
2279 needed by the multiplication-by-immediate synthesis algorithm described by
2280 ALG and VAR.  If SYNTH_SHIFT_P is true, ensure that vector addition is
2281 present. Return true iff the target supports all the steps. */
2283 static bool
2284 target_supports_mult_synth_alg (struct algorithm *alg, mult_variant var,
2285 tree vectype, bool synth_shift_p)
2287 if (alg->op[0] != alg_zero && alg->op[0] != alg_m)
2288 return false;
2290 bool supports_vminus = target_has_vecop_for_code (MINUS_EXPR, vectype);
2291 bool supports_vplus = target_has_vecop_for_code (PLUS_EXPR, vectype);
2293 if (var == negate_variant
2294 && !target_has_vecop_for_code (NEGATE_EXPR, vectype))
2295 return false;
2297 /* If we must synthesize shifts with additions make sure that vector
2298 addition is available. */
2299 if ((var == add_variant || synth_shift_p) && !supports_vplus)
2300 return false;
2302 for (int i = 1; i < alg->ops; i++)
2304 switch (alg->op[i])
2306 case alg_shift:
2307 break;
2308 case alg_add_t_m2:
2309 case alg_add_t2_m:
2310 case alg_add_factor:
2311 if (!supports_vplus)
2312 return false;
2313 break;
2314 case alg_sub_t_m2:
2315 case alg_sub_t2_m:
2316 case alg_sub_factor:
2317 if (!supports_vminus)
2318 return false;
2319 break;
2320 case alg_unknown:
2321 case alg_m:
2322 case alg_zero:
2323 case alg_impossible:
2324 return false;
2325 default:
2326 gcc_unreachable ();
2330 return true;
2333 /* Synthesize a left shift of OP by AMNT bits using a series of additions and
2334 putting the final result in DEST. Append all statements but the last into
2335 VINFO. Return the last statement. */
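/* For example (a sketch), a shift by AMNT == 3 becomes three self-additions:

     t1 = OP + OP;      // OP << 1
     t2 = t1 + t1;      // OP << 2
     DEST = t2 + t2;    // OP << 3

   with the first two appended to the pattern sequence and the last one
   returned.  */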
2337 static gimple *
2338 synth_lshift_by_additions (tree dest, tree op, HOST_WIDE_INT amnt,
2339 stmt_vec_info vinfo)
2341 HOST_WIDE_INT i;
2342 tree itype = TREE_TYPE (op);
2343 tree prev_res = op;
2344 gcc_assert (amnt >= 0);
2345 for (i = 0; i < amnt; i++)
2347 tree tmp_var = (i < amnt - 1) ? vect_recog_temp_ssa_var (itype, NULL)
2348 : dest;
2349 gimple *stmt
2350 = gimple_build_assign (tmp_var, PLUS_EXPR, prev_res, prev_res);
2351 prev_res = tmp_var;
2352 if (i < amnt - 1)
2353 append_pattern_def_seq (vinfo, stmt);
2354 else
2355 return stmt;
2357 gcc_unreachable ();
2358 return NULL;
2361 /* Helper for vect_synth_mult_by_constant. Apply a binary operation
2362 CODE to operands OP1 and OP2, creating a new temporary SSA var in
2363 the process if necessary. Append the resulting assignment statements
2364 to the sequence in STMT_VINFO. Return the SSA variable that holds the
2365 result of the binary operation. If SYNTH_SHIFT_P is true synthesize
2366 left shifts using additions. */
2368 static tree
2369 apply_binop_and_append_stmt (tree_code code, tree op1, tree op2,
2370 stmt_vec_info stmt_vinfo, bool synth_shift_p)
2372 if (integer_zerop (op2)
2373 && (code == LSHIFT_EXPR
2374 || code == PLUS_EXPR))
2376 gcc_assert (TREE_CODE (op1) == SSA_NAME);
2377 return op1;
2380 gimple *stmt;
2381 tree itype = TREE_TYPE (op1);
2382 tree tmp_var = vect_recog_temp_ssa_var (itype, NULL);
2384 if (code == LSHIFT_EXPR
2385 && synth_shift_p)
2387 stmt = synth_lshift_by_additions (tmp_var, op1, TREE_INT_CST_LOW (op2),
2388 stmt_vinfo);
2389 append_pattern_def_seq (stmt_vinfo, stmt);
2390 return tmp_var;
2393 stmt = gimple_build_assign (tmp_var, code, op1, op2);
2394 append_pattern_def_seq (stmt_vinfo, stmt);
2395 return tmp_var;
2398 /* Synthesize a multiplication of OP by an INTEGER_CST VAL using shifts
2399 and simple arithmetic operations to be vectorized. Record the statements
2400 produced in STMT_VINFO and return the last statement in the sequence or
2401 NULL if it's not possible to synthesize such a multiplication.
2402 This function mirrors the behavior of expand_mult_const in expmed.c but
2403 works on tree-ssa form. */
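/* As an illustration (a sketch; the exact decomposition is whatever
   choose_mult_variant picks, so it may differ), a multiplication by 10
   could be synthesized as:

     t1 = op << 2;      // op * 4
     t2 = t1 + op;      // op * 5
     r  = t2 << 1;      // op * 10

   using only shifts and additions, which vectorize on targets without a
   suitable vector multiply.  */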
2405 static gimple *
2406 vect_synth_mult_by_constant (tree op, tree val,
2407 stmt_vec_info stmt_vinfo)
2409 tree itype = TREE_TYPE (op);
2410 machine_mode mode = TYPE_MODE (itype);
2411 struct algorithm alg;
2412 mult_variant variant;
2413 if (!tree_fits_shwi_p (val))
2414 return NULL;
2416 /* Multiplication synthesis by shifts, adds and subs can introduce
2417 signed overflow where the original operation didn't. Perform the
2418 operations on an unsigned type and cast back to avoid this.
2419 In the future we may want to relax this for synthesis algorithms
2420 that we can prove do not cause unexpected overflow. */
2421 bool cast_to_unsigned_p = !TYPE_OVERFLOW_WRAPS (itype);
2423 tree multtype = cast_to_unsigned_p ? unsigned_type_for (itype) : itype;
2425 /* Targets that don't support vector shifts but support vector additions
2426 can synthesize shifts that way. */
2427 bool synth_shift_p = !vect_supportable_shift (LSHIFT_EXPR, multtype);
2429 HOST_WIDE_INT hwval = tree_to_shwi (val);
2430 /* Use MAX_COST here as we don't want to limit the sequence on rtx costs.
2431 The vectorizer's benefit analysis will decide whether it's beneficial
2432 to do this. */
2433 bool possible = choose_mult_variant (mode, hwval, &alg,
2434 &variant, MAX_COST);
2435 if (!possible)
2436 return NULL;
2438 tree vectype = get_vectype_for_scalar_type (multtype);
2440 if (!vectype
2441 || !target_supports_mult_synth_alg (&alg, variant,
2442 vectype, synth_shift_p))
2443 return NULL;
2445 tree accumulator;
2447 /* Clear out the sequence of statements so we can populate it below. */
2448 gimple *stmt = NULL;
2450 if (cast_to_unsigned_p)
2452 tree tmp_op = vect_recog_temp_ssa_var (multtype, NULL);
2453 stmt = gimple_build_assign (tmp_op, CONVERT_EXPR, op);
2454 append_pattern_def_seq (stmt_vinfo, stmt);
2455 op = tmp_op;
2458 if (alg.op[0] == alg_zero)
2459 accumulator = build_int_cst (multtype, 0);
2460 else
2461 accumulator = op;
2463 bool needs_fixup = (variant == negate_variant)
2464 || (variant == add_variant);
2466 for (int i = 1; i < alg.ops; i++)
2468 tree shft_log = build_int_cst (multtype, alg.log[i]);
2469 tree accum_tmp = vect_recog_temp_ssa_var (multtype, NULL);
2470 tree tmp_var = NULL_TREE;
2472 switch (alg.op[i])
2474 case alg_shift:
2475 if (synth_shift_p)
2476 stmt
2477 = synth_lshift_by_additions (accum_tmp, accumulator, alg.log[i],
2478 stmt_vinfo);
2479 else
2480 stmt = gimple_build_assign (accum_tmp, LSHIFT_EXPR, accumulator,
2481 shft_log);
2482 break;
2483 case alg_add_t_m2:
2484 tmp_var
2485 = apply_binop_and_append_stmt (LSHIFT_EXPR, op, shft_log,
2486 stmt_vinfo, synth_shift_p);
2487 stmt = gimple_build_assign (accum_tmp, PLUS_EXPR, accumulator,
2488 tmp_var);
2489 break;
2490 case alg_sub_t_m2:
2491 tmp_var = apply_binop_and_append_stmt (LSHIFT_EXPR, op,
2492 shft_log, stmt_vinfo,
2493 synth_shift_p);
2494 /* In some algorithms the first step involves zeroing the
2495 accumulator.  If subtracting from such an accumulator,
2496 just emit the negation directly. */
2497 if (integer_zerop (accumulator))
2498 stmt = gimple_build_assign (accum_tmp, NEGATE_EXPR, tmp_var);
2499 else
2500 stmt = gimple_build_assign (accum_tmp, MINUS_EXPR, accumulator,
2501 tmp_var);
2502 break;
2503 case alg_add_t2_m:
2504 tmp_var
2505 = apply_binop_and_append_stmt (LSHIFT_EXPR, accumulator, shft_log,
2506 stmt_vinfo, synth_shift_p);
2507 stmt = gimple_build_assign (accum_tmp, PLUS_EXPR, tmp_var, op);
2508 break;
2509 case alg_sub_t2_m:
2510 tmp_var
2511 = apply_binop_and_append_stmt (LSHIFT_EXPR, accumulator, shft_log,
2512 stmt_vinfo, synth_shift_p);
2513 stmt = gimple_build_assign (accum_tmp, MINUS_EXPR, tmp_var, op);
2514 break;
2515 case alg_add_factor:
2516 tmp_var
2517 = apply_binop_and_append_stmt (LSHIFT_EXPR, accumulator, shft_log,
2518 stmt_vinfo, synth_shift_p);
2519 stmt = gimple_build_assign (accum_tmp, PLUS_EXPR, accumulator,
2520 tmp_var);
2521 break;
2522 case alg_sub_factor:
2523 tmp_var
2524 = apply_binop_and_append_stmt (LSHIFT_EXPR, accumulator, shft_log,
2525 stmt_vinfo, synth_shift_p);
2526 stmt = gimple_build_assign (accum_tmp, MINUS_EXPR, tmp_var,
2527 accumulator);
2528 break;
2529 default:
2530 gcc_unreachable ();
2532 /* We don't want to append the last stmt in the sequence to stmt_vinfo
2533 but rather return it directly. */
2535 if ((i < alg.ops - 1) || needs_fixup || cast_to_unsigned_p)
2536 append_pattern_def_seq (stmt_vinfo, stmt);
2537 accumulator = accum_tmp;
2539 if (variant == negate_variant)
2541 tree accum_tmp = vect_recog_temp_ssa_var (multtype, NULL);
2542 stmt = gimple_build_assign (accum_tmp, NEGATE_EXPR, accumulator);
2543 accumulator = accum_tmp;
2544 if (cast_to_unsigned_p)
2545 append_pattern_def_seq (stmt_vinfo, stmt);
2547 else if (variant == add_variant)
2549 tree accum_tmp = vect_recog_temp_ssa_var (multtype, NULL);
2550 stmt = gimple_build_assign (accum_tmp, PLUS_EXPR, accumulator, op);
2551 accumulator = accum_tmp;
2552 if (cast_to_unsigned_p)
2553 append_pattern_def_seq (stmt_vinfo, stmt);
2555 /* Move back to a signed if needed. */
2556 if (cast_to_unsigned_p)
2558 tree accum_tmp = vect_recog_temp_ssa_var (itype, NULL);
2559 stmt = gimple_build_assign (accum_tmp, CONVERT_EXPR, accumulator);
2562 return stmt;
2565 /* Detect multiplication by constant and convert it into a sequence of
2566 shifts and additions, subtractions, negations. We reuse the
2567 choose_mult_variant algorithms from expmed.c
2569 Input/Output:
2571 STMT_VINFO: The stmt from which the pattern search begins,
2572 i.e. the mult stmt.
2574 Output:
2576 * TYPE_OUT: The type of the output of this pattern.
2578 * Return value: A new stmt that will be used to replace
2579 the multiplication. */
2581 static gimple *
2582 vect_recog_mult_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
2584 gimple *last_stmt = stmt_vinfo->stmt;
2585 tree oprnd0, oprnd1, vectype, itype;
2586 gimple *pattern_stmt;
2588 if (!is_gimple_assign (last_stmt))
2589 return NULL;
2591 if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
2592 return NULL;
2594 oprnd0 = gimple_assign_rhs1 (last_stmt);
2595 oprnd1 = gimple_assign_rhs2 (last_stmt);
2596 itype = TREE_TYPE (oprnd0);
2598 if (TREE_CODE (oprnd0) != SSA_NAME
2599 || TREE_CODE (oprnd1) != INTEGER_CST
2600 || !INTEGRAL_TYPE_P (itype)
2601 || !type_has_mode_precision_p (itype))
2602 return NULL;
2604 vectype = get_vectype_for_scalar_type (itype);
2605 if (vectype == NULL_TREE)
2606 return NULL;
2608 /* If the target can handle vectorized multiplication natively,
2609 don't attempt to optimize this. */
2610 optab mul_optab = optab_for_tree_code (MULT_EXPR, vectype, optab_default);
2611 if (mul_optab != unknown_optab)
2613 machine_mode vec_mode = TYPE_MODE (vectype);
2614 int icode = (int) optab_handler (mul_optab, vec_mode);
2615 if (icode != CODE_FOR_nothing)
2616 return NULL;
2619 pattern_stmt = vect_synth_mult_by_constant (oprnd0, oprnd1, stmt_vinfo);
2620 if (!pattern_stmt)
2621 return NULL;
2623 /* Pattern detected. */
2624 vect_pattern_detected ("vect_recog_mult_pattern", last_stmt);
2626 *type_out = vectype;
2628 return pattern_stmt;
2631 /* Detect a signed division by a constant that wouldn't be
2632 otherwise vectorized:
2634 type a_t, b_t;
2636 S1 a_t = b_t / N;
2638 where type 'type' is an integral type and N is a constant.
2640 Similarly handle modulo by a constant:
2642 S4 a_t = b_t % N;
2644 Input/Output:
2646 * STMT_VINFO: The stmt from which the pattern search begins,
2647 i.e. the division stmt.  S1 is replaced, if N is a power
2648 of two constant and the type is signed, by:
2649 S3 y_t = b_t < 0 ? N - 1 : 0;
2650 S2 x_t = b_t + y_t;
2651 S1' a_t = x_t >> log2 (N);
2653 S4 is replaced, if N is a power of two constant and
2654 the type is signed, by (where *_T temporaries have unsigned type):
2655 S9 y_T = b_t < 0 ? -1U : 0U;
2656 S8 z_T = y_T >> (sizeof (type_t) * CHAR_BIT - log2 (N));
2657 S7 z_t = (type) z_T;
2658 S6 w_t = b_t + z_t;
2659 S5 x_t = w_t & (N - 1);
2660 S4' a_t = x_t - z_t;
2662 Output:
2664 * TYPE_OUT: The type of the output of this pattern.
2666 * Return value: A new stmt that will be used to replace the division
2667 S1 or modulo S4 stmt. */
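/* A scalar sketch of the signed power-of-two division above (illustrative
   only, for N == 4 on a 32-bit int, assuming an arithmetic right shift as
   RSHIFT_EXPR provides for vectors):

     int
     div4 (int b_t)
     {
       int y_t = b_t < 0 ? 3 : 0;   // S3: N - 1 when negative
       int x_t = b_t + y_t;         // S2
       return x_t >> 2;             // S1': log2 (N) == 2
     }

   which rounds towards zero as TRUNC_DIV_EXPR requires, e.g. -7 / 4 == -1.  */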
2669 static gimple *
2670 vect_recog_divmod_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
2672 gimple *last_stmt = stmt_vinfo->stmt;
2673 tree oprnd0, oprnd1, vectype, itype, cond;
2674 gimple *pattern_stmt, *def_stmt;
2675 enum tree_code rhs_code;
2676 optab optab;
2677 tree q;
2678 int dummy_int, prec;
2680 if (!is_gimple_assign (last_stmt))
2681 return NULL;
2683 rhs_code = gimple_assign_rhs_code (last_stmt);
2684 switch (rhs_code)
2686 case TRUNC_DIV_EXPR:
2687 case TRUNC_MOD_EXPR:
2688 break;
2689 default:
2690 return NULL;
2693 oprnd0 = gimple_assign_rhs1 (last_stmt);
2694 oprnd1 = gimple_assign_rhs2 (last_stmt);
2695 itype = TREE_TYPE (oprnd0);
2696 if (TREE_CODE (oprnd0) != SSA_NAME
2697 || TREE_CODE (oprnd1) != INTEGER_CST
2698 || TREE_CODE (itype) != INTEGER_TYPE
2699 || !type_has_mode_precision_p (itype))
2700 return NULL;
2702 scalar_int_mode itype_mode = SCALAR_INT_TYPE_MODE (itype);
2703 vectype = get_vectype_for_scalar_type (itype);
2704 if (vectype == NULL_TREE)
2705 return NULL;
2707 if (optimize_bb_for_size_p (gimple_bb (last_stmt)))
2709 /* If the target can handle vectorized division or modulo natively,
2710 don't attempt to optimize this, since native division is likely
2711 to give smaller code. */
2712 optab = optab_for_tree_code (rhs_code, vectype, optab_default);
2713 if (optab != unknown_optab)
2715 machine_mode vec_mode = TYPE_MODE (vectype);
2716 int icode = (int) optab_handler (optab, vec_mode);
2717 if (icode != CODE_FOR_nothing)
2718 return NULL;
2722 prec = TYPE_PRECISION (itype);
2723 if (integer_pow2p (oprnd1))
2725 if (TYPE_UNSIGNED (itype) || tree_int_cst_sgn (oprnd1) != 1)
2726 return NULL;
2728 /* Pattern detected. */
2729 vect_pattern_detected ("vect_recog_divmod_pattern", last_stmt);
2731 cond = build2 (LT_EXPR, boolean_type_node, oprnd0,
2732 build_int_cst (itype, 0));
2733 if (rhs_code == TRUNC_DIV_EXPR)
2735 tree var = vect_recog_temp_ssa_var (itype, NULL);
2736 tree shift;
2737 def_stmt
2738 = gimple_build_assign (var, COND_EXPR, cond,
2739 fold_build2 (MINUS_EXPR, itype, oprnd1,
2740 build_int_cst (itype, 1)),
2741 build_int_cst (itype, 0));
2742 append_pattern_def_seq (stmt_vinfo, def_stmt);
2743 var = vect_recog_temp_ssa_var (itype, NULL);
2744 def_stmt
2745 = gimple_build_assign (var, PLUS_EXPR, oprnd0,
2746 gimple_assign_lhs (def_stmt));
2747 append_pattern_def_seq (stmt_vinfo, def_stmt);
2749 shift = build_int_cst (itype, tree_log2 (oprnd1));
2750 pattern_stmt
2751 = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
2752 RSHIFT_EXPR, var, shift);
2754 else
2756 tree signmask;
2757 if (compare_tree_int (oprnd1, 2) == 0)
2759 signmask = vect_recog_temp_ssa_var (itype, NULL);
2760 def_stmt = gimple_build_assign (signmask, COND_EXPR, cond,
2761 build_int_cst (itype, 1),
2762 build_int_cst (itype, 0));
2763 append_pattern_def_seq (stmt_vinfo, def_stmt);
2765 else
2767 tree utype
2768 = build_nonstandard_integer_type (prec, 1);
2769 tree vecutype = get_vectype_for_scalar_type (utype);
2770 tree shift
2771 = build_int_cst (utype, GET_MODE_BITSIZE (itype_mode)
2772 - tree_log2 (oprnd1));
2773 tree var = vect_recog_temp_ssa_var (utype, NULL);
2775 def_stmt = gimple_build_assign (var, COND_EXPR, cond,
2776 build_int_cst (utype, -1),
2777 build_int_cst (utype, 0));
2778 append_pattern_def_seq (stmt_vinfo, def_stmt, vecutype);
2779 var = vect_recog_temp_ssa_var (utype, NULL);
2780 def_stmt = gimple_build_assign (var, RSHIFT_EXPR,
2781 gimple_assign_lhs (def_stmt),
2782 shift);
2783 append_pattern_def_seq (stmt_vinfo, def_stmt, vecutype);
2784 signmask = vect_recog_temp_ssa_var (itype, NULL);
2785 def_stmt
2786 = gimple_build_assign (signmask, NOP_EXPR, var);
2787 append_pattern_def_seq (stmt_vinfo, def_stmt);
2789 def_stmt
2790 = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
2791 PLUS_EXPR, oprnd0, signmask);
2792 append_pattern_def_seq (stmt_vinfo, def_stmt);
2793 def_stmt
2794 = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
2795 BIT_AND_EXPR, gimple_assign_lhs (def_stmt),
2796 fold_build2 (MINUS_EXPR, itype, oprnd1,
2797 build_int_cst (itype, 1)));
2798 append_pattern_def_seq (stmt_vinfo, def_stmt);
2800 pattern_stmt
2801 = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
2802 MINUS_EXPR, gimple_assign_lhs (def_stmt),
2803 signmask);
2806 *type_out = vectype;
2807 return pattern_stmt;
2810 if (prec > HOST_BITS_PER_WIDE_INT
2811 || integer_zerop (oprnd1))
2812 return NULL;
2814 if (!can_mult_highpart_p (TYPE_MODE (vectype), TYPE_UNSIGNED (itype)))
2815 return NULL;
2817 if (TYPE_UNSIGNED (itype))
2819 unsigned HOST_WIDE_INT mh, ml;
2820 int pre_shift, post_shift;
2821 unsigned HOST_WIDE_INT d = (TREE_INT_CST_LOW (oprnd1)
2822 & GET_MODE_MASK (itype_mode));
2823 tree t1, t2, t3, t4;
2825 if (d >= (HOST_WIDE_INT_1U << (prec - 1)))
2826 /* FIXME: Can transform this into oprnd0 >= oprnd1 ? 1 : 0. */
2827 return NULL;
2829 /* Find a suitable multiplier and right shift count
2830 instead of multiplying with D. */
2831 mh = choose_multiplier (d, prec, prec, &ml, &post_shift, &dummy_int);
2833 /* If the suggested multiplier is more than SIZE bits, we can do better
2834 for even divisors, using an initial right shift. */
2835 if (mh != 0 && (d & 1) == 0)
2837 pre_shift = ctz_or_zero (d);
2838 mh = choose_multiplier (d >> pre_shift, prec, prec - pre_shift,
2839 &ml, &post_shift, &dummy_int);
2840 gcc_assert (!mh);
2842 else
2843 pre_shift = 0;
2845 if (mh != 0)
2847 if (post_shift - 1 >= prec)
2848 return NULL;
2850 /* t1 = oprnd0 h* ml;
2851 t2 = oprnd0 - t1;
2852 t3 = t2 >> 1;
2853 t4 = t1 + t3;
2854 q = t4 >> (post_shift - 1); */
2855 t1 = vect_recog_temp_ssa_var (itype, NULL);
2856 def_stmt = gimple_build_assign (t1, MULT_HIGHPART_EXPR, oprnd0,
2857 build_int_cst (itype, ml));
2858 append_pattern_def_seq (stmt_vinfo, def_stmt);
2860 t2 = vect_recog_temp_ssa_var (itype, NULL);
2861 def_stmt
2862 = gimple_build_assign (t2, MINUS_EXPR, oprnd0, t1);
2863 append_pattern_def_seq (stmt_vinfo, def_stmt);
2865 t3 = vect_recog_temp_ssa_var (itype, NULL);
2866 def_stmt
2867 = gimple_build_assign (t3, RSHIFT_EXPR, t2, integer_one_node);
2868 append_pattern_def_seq (stmt_vinfo, def_stmt);
2870 t4 = vect_recog_temp_ssa_var (itype, NULL);
2871 def_stmt
2872 = gimple_build_assign (t4, PLUS_EXPR, t1, t3);
2874 if (post_shift != 1)
2876 append_pattern_def_seq (stmt_vinfo, def_stmt);
2878 q = vect_recog_temp_ssa_var (itype, NULL);
2879 pattern_stmt
2880 = gimple_build_assign (q, RSHIFT_EXPR, t4,
2881 build_int_cst (itype, post_shift - 1));
2883 else
2885 q = t4;
2886 pattern_stmt = def_stmt;
2889 else
2891 if (pre_shift >= prec || post_shift >= prec)
2892 return NULL;
2894 /* t1 = oprnd0 >> pre_shift;
2895 t2 = t1 h* ml;
2896 q = t2 >> post_shift; */
2897 if (pre_shift)
2899 t1 = vect_recog_temp_ssa_var (itype, NULL);
2900 def_stmt
2901 = gimple_build_assign (t1, RSHIFT_EXPR, oprnd0,
2902 build_int_cst (NULL, pre_shift));
2903 append_pattern_def_seq (stmt_vinfo, def_stmt);
2905 else
2906 t1 = oprnd0;
2908 t2 = vect_recog_temp_ssa_var (itype, NULL);
2909 def_stmt = gimple_build_assign (t2, MULT_HIGHPART_EXPR, t1,
2910 build_int_cst (itype, ml));
2912 if (post_shift)
2914 append_pattern_def_seq (stmt_vinfo, def_stmt);
2916 q = vect_recog_temp_ssa_var (itype, NULL);
2917 def_stmt
2918 = gimple_build_assign (q, RSHIFT_EXPR, t2,
2919 build_int_cst (itype, post_shift));
2921 else
2922 q = t2;
2924 pattern_stmt = def_stmt;
2927 else
2929 unsigned HOST_WIDE_INT ml;
2930 int post_shift;
2931 HOST_WIDE_INT d = TREE_INT_CST_LOW (oprnd1);
2932 unsigned HOST_WIDE_INT abs_d;
2933 bool add = false;
2934 tree t1, t2, t3, t4;
2936 /* Give up for -1. */
2937 if (d == -1)
2938 return NULL;
2940 /* Since d might be INT_MIN, we have to cast to
2941 unsigned HOST_WIDE_INT before negating to avoid
2942 undefined signed overflow. */
2943 abs_d = (d >= 0
2944 ? (unsigned HOST_WIDE_INT) d
2945 : - (unsigned HOST_WIDE_INT) d);
2947 /* n rem d = n rem -d */
2948 if (rhs_code == TRUNC_MOD_EXPR && d < 0)
2950 d = abs_d;
2951 oprnd1 = build_int_cst (itype, abs_d);
2953 else if (HOST_BITS_PER_WIDE_INT >= prec
2954 && abs_d == HOST_WIDE_INT_1U << (prec - 1))
2955 /* This case is not handled correctly below. */
2956 return NULL;
2958 choose_multiplier (abs_d, prec, prec - 1, &ml, &post_shift, &dummy_int);
2959 if (ml >= HOST_WIDE_INT_1U << (prec - 1))
2961 add = true;
2962 ml |= HOST_WIDE_INT_M1U << (prec - 1);
2964 if (post_shift >= prec)
2965 return NULL;
2967 /* t1 = oprnd0 h* ml; */
2968 t1 = vect_recog_temp_ssa_var (itype, NULL);
2969 def_stmt = gimple_build_assign (t1, MULT_HIGHPART_EXPR, oprnd0,
2970 build_int_cst (itype, ml));
2972 if (add)
2974 /* t2 = t1 + oprnd0; */
2975 append_pattern_def_seq (stmt_vinfo, def_stmt);
2976 t2 = vect_recog_temp_ssa_var (itype, NULL);
2977 def_stmt = gimple_build_assign (t2, PLUS_EXPR, t1, oprnd0);
2979 else
2980 t2 = t1;
2982 if (post_shift)
2984 /* t3 = t2 >> post_shift; */
2985 append_pattern_def_seq (stmt_vinfo, def_stmt);
2986 t3 = vect_recog_temp_ssa_var (itype, NULL);
2987 def_stmt = gimple_build_assign (t3, RSHIFT_EXPR, t2,
2988 build_int_cst (itype, post_shift));
2990 else
2991 t3 = t2;
2993 wide_int oprnd0_min, oprnd0_max;
2994 int msb = 1;
2995 if (get_range_info (oprnd0, &oprnd0_min, &oprnd0_max) == VR_RANGE)
2997 if (!wi::neg_p (oprnd0_min, TYPE_SIGN (itype)))
2998 msb = 0;
2999 else if (wi::neg_p (oprnd0_max, TYPE_SIGN (itype)))
3000 msb = -1;
3003 if (msb == 0 && d >= 0)
3005 /* q = t3; */
3006 q = t3;
3007 pattern_stmt = def_stmt;
3009 else
3011 /* t4 = oprnd0 >> (prec - 1);
3012 or if we know from VRP that oprnd0 >= 0
3013 t4 = 0;
3014 or if we know from VRP that oprnd0 < 0
3015 t4 = -1; */
3016 append_pattern_def_seq (stmt_vinfo, def_stmt);
3017 t4 = vect_recog_temp_ssa_var (itype, NULL);
3018 if (msb != 1)
3019 def_stmt = gimple_build_assign (t4, INTEGER_CST,
3020 build_int_cst (itype, msb));
3021 else
3022 def_stmt = gimple_build_assign (t4, RSHIFT_EXPR, oprnd0,
3023 build_int_cst (itype, prec - 1));
3024 append_pattern_def_seq (stmt_vinfo, def_stmt);
3026 /* q = t3 - t4; or q = t4 - t3; */
3027 q = vect_recog_temp_ssa_var (itype, NULL);
3028 pattern_stmt = gimple_build_assign (q, MINUS_EXPR, d < 0 ? t4 : t3,
3029 d < 0 ? t3 : t4);
3033 if (rhs_code == TRUNC_MOD_EXPR)
3035 tree r, t1;
3037 /* We divided. Now finish by:
3038 t1 = q * oprnd1;
3039 r = oprnd0 - t1; */
3040 append_pattern_def_seq (stmt_vinfo, pattern_stmt);
3042 t1 = vect_recog_temp_ssa_var (itype, NULL);
3043 def_stmt = gimple_build_assign (t1, MULT_EXPR, q, oprnd1);
3044 append_pattern_def_seq (stmt_vinfo, def_stmt);
3046 r = vect_recog_temp_ssa_var (itype, NULL);
3047 pattern_stmt = gimple_build_assign (r, MINUS_EXPR, oprnd0, t1);
3050 /* Pattern detected. */
3051 vect_pattern_detected ("vect_recog_divmod_pattern", last_stmt);
3053 *type_out = vectype;
3054 return pattern_stmt;
3057 /* Function vect_recog_mixed_size_cond_pattern
3059 Try to find the following pattern:
3061 type x_t, y_t;
3062 TYPE a_T, b_T, c_T;
3063 loop:
3064 S1 a_T = x_t CMP y_t ? b_T : c_T;
3066 where type 'TYPE' is an integral type which has a different size
3067 from 'type'. b_T and c_T are either constants (and if 'TYPE' is wider
3068 than 'type', the constants need to fit into an integer type
3069 with the same width as 'type') or results of conversion from 'type'.
3071 Input:
3073 * STMT_VINFO: The stmt from which the pattern search begins.
3075 Output:
3077 * TYPE_OUT: The type of the output of this pattern.
3079 * Return value: A new stmt that will be used to replace the pattern.
3080 Additionally a def_stmt is added.
3082 a_it = x_t CMP y_t ? b_it : c_it;
3083 a_T = (TYPE) a_it; */
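/* A scalar sketch of a matching statement (illustrative only, with
   'type' == short and 'TYPE' == int):

     short x_t, y_t;
     int a_T = x_t < y_t ? 5 : 7;

   Both constants fit in a 16-bit integer type, so the COND_EXPR can be
   evaluated on short-sized elements and only the final conversion needs
   the int vector type.  */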
3085 static gimple *
3086 vect_recog_mixed_size_cond_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
3088 gimple *last_stmt = stmt_vinfo->stmt;
3089 tree cond_expr, then_clause, else_clause;
3090 tree type, vectype, comp_vectype, itype = NULL_TREE, vecitype;
3091 gimple *pattern_stmt, *def_stmt;
3092 tree orig_type0 = NULL_TREE, orig_type1 = NULL_TREE;
3093 gimple *def_stmt0 = NULL, *def_stmt1 = NULL;
3094 bool promotion;
3095 tree comp_scalar_type;
3097 if (!is_gimple_assign (last_stmt)
3098 || gimple_assign_rhs_code (last_stmt) != COND_EXPR
3099 || STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
3100 return NULL;
3102 cond_expr = gimple_assign_rhs1 (last_stmt);
3103 then_clause = gimple_assign_rhs2 (last_stmt);
3104 else_clause = gimple_assign_rhs3 (last_stmt);
3106 if (!COMPARISON_CLASS_P (cond_expr))
3107 return NULL;
3109 comp_scalar_type = TREE_TYPE (TREE_OPERAND (cond_expr, 0));
3110 comp_vectype = get_vectype_for_scalar_type (comp_scalar_type);
3111 if (comp_vectype == NULL_TREE)
3112 return NULL;
3114 type = gimple_expr_type (last_stmt);
3115 if (types_compatible_p (type, comp_scalar_type)
3116 || ((TREE_CODE (then_clause) != INTEGER_CST
3117 || TREE_CODE (else_clause) != INTEGER_CST)
3118 && !INTEGRAL_TYPE_P (comp_scalar_type))
3119 || !INTEGRAL_TYPE_P (type))
3120 return NULL;
3122 if ((TREE_CODE (then_clause) != INTEGER_CST
3123 && !type_conversion_p (then_clause, last_stmt, false, &orig_type0,
3124 &def_stmt0, &promotion))
3125 || (TREE_CODE (else_clause) != INTEGER_CST
3126 && !type_conversion_p (else_clause, last_stmt, false, &orig_type1,
3127 &def_stmt1, &promotion)))
3128 return NULL;
3130 if (orig_type0 && orig_type1
3131 && !types_compatible_p (orig_type0, orig_type1))
3132 return NULL;
3134 if (orig_type0)
3136 if (!types_compatible_p (orig_type0, comp_scalar_type))
3137 return NULL;
3138 then_clause = gimple_assign_rhs1 (def_stmt0);
3139 itype = orig_type0;
3142 if (orig_type1)
3144 if (!types_compatible_p (orig_type1, comp_scalar_type))
3145 return NULL;
3146 else_clause = gimple_assign_rhs1 (def_stmt1);
3147 itype = orig_type1;
3151 HOST_WIDE_INT cmp_mode_size
3152 = GET_MODE_UNIT_BITSIZE (TYPE_MODE (comp_vectype));
3154 scalar_int_mode type_mode = SCALAR_INT_TYPE_MODE (type);
3155 if (GET_MODE_BITSIZE (type_mode) == cmp_mode_size)
3156 return NULL;
3158 vectype = get_vectype_for_scalar_type (type);
3159 if (vectype == NULL_TREE)
3160 return NULL;
3162 if (expand_vec_cond_expr_p (vectype, comp_vectype, TREE_CODE (cond_expr)))
3163 return NULL;
3165 if (itype == NULL_TREE)
3166 itype = build_nonstandard_integer_type (cmp_mode_size,
3167 TYPE_UNSIGNED (type));
3169 if (itype == NULL_TREE
3170 || GET_MODE_BITSIZE (SCALAR_TYPE_MODE (itype)) != cmp_mode_size)
3171 return NULL;
3173 vecitype = get_vectype_for_scalar_type (itype);
3174 if (vecitype == NULL_TREE)
3175 return NULL;
3177 if (!expand_vec_cond_expr_p (vecitype, comp_vectype, TREE_CODE (cond_expr)))
3178 return NULL;
3180 if (GET_MODE_BITSIZE (type_mode) > cmp_mode_size)
3182 if ((TREE_CODE (then_clause) == INTEGER_CST
3183 && !int_fits_type_p (then_clause, itype))
3184 || (TREE_CODE (else_clause) == INTEGER_CST
3185 && !int_fits_type_p (else_clause, itype)))
3186 return NULL;
3189 def_stmt = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
3190 COND_EXPR, unshare_expr (cond_expr),
3191 fold_convert (itype, then_clause),
3192 fold_convert (itype, else_clause));
3193 pattern_stmt = gimple_build_assign (vect_recog_temp_ssa_var (type, NULL),
3194 NOP_EXPR, gimple_assign_lhs (def_stmt));
3196 append_pattern_def_seq (stmt_vinfo, def_stmt, vecitype);
3197 *type_out = vectype;
3199 vect_pattern_detected ("vect_recog_mixed_size_cond_pattern", last_stmt);
3201 return pattern_stmt;
3205 /* Helper function of vect_recog_bool_pattern. Called recursively, return
3206 true if bool VAR can and should be optimized that way. Assume it shouldn't
3207 in case it's a result of a comparison which can be directly vectorized into
3208 a vector comparison. Fills in STMTS with all stmts visited during the
3209 walk. */
3211 static bool
3212 check_bool_pattern (tree var, vec_info *vinfo, hash_set<gimple *> &stmts)
3214 tree rhs1;
3215 enum tree_code rhs_code;
3217 stmt_vec_info def_stmt_info = vect_get_internal_def (vinfo, var);
3218 if (!def_stmt_info)
3219 return false;
3221 gassign *def_stmt = dyn_cast <gassign *> (def_stmt_info->stmt);
3222 if (!def_stmt)
3223 return false;
3225 if (stmts.contains (def_stmt))
3226 return true;
3228 rhs1 = gimple_assign_rhs1 (def_stmt);
3229 rhs_code = gimple_assign_rhs_code (def_stmt);
3230 switch (rhs_code)
3232 case SSA_NAME:
3233 if (! check_bool_pattern (rhs1, vinfo, stmts))
3234 return false;
3235 break;
3237 CASE_CONVERT:
3238 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1)))
3239 return false;
3240 if (! check_bool_pattern (rhs1, vinfo, stmts))
3241 return false;
3242 break;
3244 case BIT_NOT_EXPR:
3245 if (! check_bool_pattern (rhs1, vinfo, stmts))
3246 return false;
3247 break;
3249 case BIT_AND_EXPR:
3250 case BIT_IOR_EXPR:
3251 case BIT_XOR_EXPR:
3252 if (! check_bool_pattern (rhs1, vinfo, stmts)
3253 || ! check_bool_pattern (gimple_assign_rhs2 (def_stmt), vinfo, stmts))
3254 return false;
3255 break;
3257 default:
3258 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3260 tree vecitype, comp_vectype;
3262 /* If the comparison can throw, then is_gimple_condexpr will be
3263 false and we can't make a COND_EXPR/VEC_COND_EXPR out of it. */
3264 if (stmt_could_throw_p (def_stmt))
3265 return false;
3267 comp_vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
3268 if (comp_vectype == NULL_TREE)
3269 return false;
3271 tree mask_type = get_mask_type_for_scalar_type (TREE_TYPE (rhs1));
3272 if (mask_type
3273 && expand_vec_cmp_expr_p (comp_vectype, mask_type, rhs_code))
3274 return false;
3276 if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE)
3278 scalar_mode mode = SCALAR_TYPE_MODE (TREE_TYPE (rhs1));
3279 tree itype
3280 = build_nonstandard_integer_type (GET_MODE_BITSIZE (mode), 1);
3281 vecitype = get_vectype_for_scalar_type (itype);
3282 if (vecitype == NULL_TREE)
3283 return false;
3285 else
3286 vecitype = comp_vectype;
3287 if (! expand_vec_cond_expr_p (vecitype, comp_vectype, rhs_code))
3288 return false;
3290 else
3291 return false;
3292 break;
3295 bool res = stmts.add (def_stmt);
3296 /* We can't end up recursing when just visiting SSA defs but not PHIs. */
3297 gcc_assert (!res);
3299 return true;
3303 /* Helper function of adjust_bool_pattern.  Cast VAR (defined by
3304 SSA_NAME_DEF_STMT of VAR) to TYPE, appending the cast stmt to
3305 STMT_INFO's pattern def sequence, and return the cast result.  */
3307 static tree
3308 adjust_bool_pattern_cast (tree type, tree var, stmt_vec_info stmt_info)
3310 gimple *cast_stmt = gimple_build_assign (vect_recog_temp_ssa_var (type, NULL),
3311 NOP_EXPR, var);
3312 append_pattern_def_seq (stmt_info, cast_stmt,
3313 get_vectype_for_scalar_type (type));
3314 return gimple_assign_lhs (cast_stmt);
3317 /* Helper function of vect_recog_bool_pattern. Do the actual transformations.
3318 VAR is an SSA_NAME that should be transformed from bool to a wider integer
3319 type, OUT_TYPE is the desired final integer type of the whole pattern.
3320 STMT_INFO is the info of the pattern root and is where pattern stmts should
3321 be associated with. DEFS is a map of pattern defs. */
3323 static void
3324 adjust_bool_pattern (tree var, tree out_type,
3325 stmt_vec_info stmt_info, hash_map <tree, tree> &defs)
3327 gimple *stmt = SSA_NAME_DEF_STMT (var);
3328 enum tree_code rhs_code, def_rhs_code;
3329 tree itype, cond_expr, rhs1, rhs2, irhs1, irhs2;
3330 location_t loc;
3331 gimple *pattern_stmt, *def_stmt;
3332 tree trueval = NULL_TREE;
3334 rhs1 = gimple_assign_rhs1 (stmt);
3335 rhs2 = gimple_assign_rhs2 (stmt);
3336 rhs_code = gimple_assign_rhs_code (stmt);
3337 loc = gimple_location (stmt);
3338 switch (rhs_code)
3340 case SSA_NAME:
3341 CASE_CONVERT:
3342 irhs1 = *defs.get (rhs1);
3343 itype = TREE_TYPE (irhs1);
3344 pattern_stmt
3345 = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
3346 SSA_NAME, irhs1);
3347 break;
3349 case BIT_NOT_EXPR:
3350 irhs1 = *defs.get (rhs1);
3351 itype = TREE_TYPE (irhs1);
3352 pattern_stmt
3353 = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
3354 BIT_XOR_EXPR, irhs1, build_int_cst (itype, 1));
3355 break;
3357 case BIT_AND_EXPR:
3358 /* Try to optimize x = y & (a < b ? 1 : 0); into
3359 x = (a < b ? y : 0);
3361 E.g. for:
3362 bool a_b, b_b, c_b;
3363 TYPE d_T;
3365 S1 a_b = x1 CMP1 y1;
3366 S2 b_b = x2 CMP2 y2;
3367 S3 c_b = a_b & b_b;
3368 S4 d_T = (TYPE) c_b;
3370 we would normally emit:
3372 S1' a_T = x1 CMP1 y1 ? 1 : 0;
3373 S2' b_T = x2 CMP2 y2 ? 1 : 0;
3374 S3' c_T = a_T & b_T;
3375 S4' d_T = c_T;
3377 but we can save one stmt by using the
3378 result of one of the COND_EXPRs in the other COND_EXPR and leave
3379 BIT_AND_EXPR stmt out:
3381 S1' a_T = x1 CMP1 y1 ? 1 : 0;
3382 S3' c_T = x2 CMP2 y2 ? a_T : 0;
3383 S4' f_T = c_T;
3385 At least when VEC_COND_EXPR is implemented using masks,
3386 cond ? 1 : 0 is as expensive as cond ? var : 0; in both cases it
3387 computes the comparison mask and ANDs it, in one case with
3388 an all-ones vector, in the other case with a vector register.
3389 Don't do this for BIT_IOR_EXPR, because cond ? 1 : var; is
3390 often more expensive. */
3391 def_stmt = SSA_NAME_DEF_STMT (rhs2);
3392 def_rhs_code = gimple_assign_rhs_code (def_stmt);
3393 if (TREE_CODE_CLASS (def_rhs_code) == tcc_comparison)
3395 irhs1 = *defs.get (rhs1);
3396 tree def_rhs1 = gimple_assign_rhs1 (def_stmt);
3397 if (TYPE_PRECISION (TREE_TYPE (irhs1))
3398 == GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (def_rhs1))))
3400 rhs_code = def_rhs_code;
3401 rhs1 = def_rhs1;
3402 rhs2 = gimple_assign_rhs2 (def_stmt);
3403 trueval = irhs1;
3404 goto do_compare;
3406 else
3407 irhs2 = *defs.get (rhs2);
3408 goto and_ior_xor;
3410 def_stmt = SSA_NAME_DEF_STMT (rhs1);
3411 def_rhs_code = gimple_assign_rhs_code (def_stmt);
3412 if (TREE_CODE_CLASS (def_rhs_code) == tcc_comparison)
3414 irhs2 = *defs.get (rhs2);
3415 tree def_rhs1 = gimple_assign_rhs1 (def_stmt);
3416 if (TYPE_PRECISION (TREE_TYPE (irhs2))
3417 == GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (def_rhs1))))
3419 rhs_code = def_rhs_code;
3420 rhs1 = def_rhs1;
3421 rhs2 = gimple_assign_rhs2 (def_stmt);
3422 trueval = irhs2;
3423 goto do_compare;
3425 else
3426 irhs1 = *defs.get (rhs1);
3427 goto and_ior_xor;
3429 /* FALLTHRU */
3430 case BIT_IOR_EXPR:
3431 case BIT_XOR_EXPR:
3432 irhs1 = *defs.get (rhs1);
3433 irhs2 = *defs.get (rhs2);
3434 and_ior_xor:
3435 if (TYPE_PRECISION (TREE_TYPE (irhs1))
3436 != TYPE_PRECISION (TREE_TYPE (irhs2)))
3438 int prec1 = TYPE_PRECISION (TREE_TYPE (irhs1));
3439 int prec2 = TYPE_PRECISION (TREE_TYPE (irhs2));
3440 int out_prec = TYPE_PRECISION (out_type);
3441 if (absu_hwi (out_prec - prec1) < absu_hwi (out_prec - prec2))
3442 irhs2 = adjust_bool_pattern_cast (TREE_TYPE (irhs1), irhs2,
3443 stmt_info);
3444 else if (absu_hwi (out_prec - prec1) > absu_hwi (out_prec - prec2))
3445 irhs1 = adjust_bool_pattern_cast (TREE_TYPE (irhs2), irhs1,
3446 stmt_info);
3447 else
3449 irhs1 = adjust_bool_pattern_cast (out_type, irhs1, stmt_info);
3450 irhs2 = adjust_bool_pattern_cast (out_type, irhs2, stmt_info);
3453 itype = TREE_TYPE (irhs1);
3454 pattern_stmt
3455 = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
3456 rhs_code, irhs1, irhs2);
3457 break;
3459 default:
3460 do_compare:
3461 gcc_assert (TREE_CODE_CLASS (rhs_code) == tcc_comparison);
3462 if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE
3463 || !TYPE_UNSIGNED (TREE_TYPE (rhs1))
3464 || maybe_ne (TYPE_PRECISION (TREE_TYPE (rhs1)),
3465 GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1)))))
3467 scalar_mode mode = SCALAR_TYPE_MODE (TREE_TYPE (rhs1));
3468 itype
3469 = build_nonstandard_integer_type (GET_MODE_BITSIZE (mode), 1);
3471 else
3472 itype = TREE_TYPE (rhs1);
3473 cond_expr = build2_loc (loc, rhs_code, itype, rhs1, rhs2);
3474 if (trueval == NULL_TREE)
3475 trueval = build_int_cst (itype, 1);
3476 else
3477 gcc_checking_assert (useless_type_conversion_p (itype,
3478 TREE_TYPE (trueval)));
3479 pattern_stmt
3480 = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
3481 COND_EXPR, cond_expr, trueval,
3482 build_int_cst (itype, 0));
3483 break;
3486 gimple_set_location (pattern_stmt, loc);
3487 append_pattern_def_seq (stmt_info, pattern_stmt,
3488 get_vectype_for_scalar_type (itype));
3489 defs.put (var, gimple_assign_lhs (pattern_stmt));
3492 /* Comparison function to qsort a vector of gimple stmts by UID. */
3494 static int
3495 sort_after_uid (const void *p1, const void *p2)
3497 const gimple *stmt1 = *(const gimple * const *)p1;
3498 const gimple *stmt2 = *(const gimple * const *)p2;
3499 return gimple_uid (stmt1) - gimple_uid (stmt2);
3502 /* Create pattern stmts for all stmts participating in the bool pattern
3503 specified by BOOL_STMT_SET and its root STMT with the desired type
3504 OUT_TYPE. Return the def of the pattern root. */
3506 static tree
3507 adjust_bool_stmts (hash_set <gimple *> &bool_stmt_set,
3508 tree out_type, gimple *stmt)
3510 /* Gather original stmts in the bool pattern in their order of appearance
3511 in the IL. */
3512 auto_vec<gimple *> bool_stmts (bool_stmt_set.elements ());
3513 for (hash_set <gimple *>::iterator i = bool_stmt_set.begin ();
3514 i != bool_stmt_set.end (); ++i)
3515 bool_stmts.quick_push (*i);
3516 bool_stmts.qsort (sort_after_uid);
3518 /* Now process them in that order, producing pattern stmts. */
3519 hash_map <tree, tree> defs;
3520 for (unsigned i = 0; i < bool_stmts.length (); ++i)
3521 adjust_bool_pattern (gimple_assign_lhs (bool_stmts[i]),
3522 out_type, vinfo_for_stmt (stmt), defs);
3524 /* Pop the last pattern seq stmt and install it as pattern root for STMT. */
3525 gimple *pattern_stmt
3526 = gimple_seq_last_stmt (STMT_VINFO_PATTERN_DEF_SEQ (vinfo_for_stmt (stmt)));
3527 return gimple_assign_lhs (pattern_stmt);
3530 /* Helper for search_type_for_mask. */
3532 static tree
3533 search_type_for_mask_1 (tree var, vec_info *vinfo,
3534 hash_map<gimple *, tree> &cache)
3536 tree rhs1;
3537 enum tree_code rhs_code;
3538 tree res = NULL_TREE, res2;
3540 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (var)))
3541 return NULL_TREE;
3543 stmt_vec_info def_stmt_info = vect_get_internal_def (vinfo, var);
3544 if (!def_stmt_info)
3545 return NULL_TREE;
3547 gassign *def_stmt = dyn_cast <gassign *> (def_stmt_info->stmt);
3548 if (!def_stmt)
3549 return NULL_TREE;
3551 tree *c = cache.get (def_stmt);
3552 if (c)
3553 return *c;
3555 rhs_code = gimple_assign_rhs_code (def_stmt);
3556 rhs1 = gimple_assign_rhs1 (def_stmt);
3558 switch (rhs_code)
3560 case SSA_NAME:
3561 case BIT_NOT_EXPR:
3562 CASE_CONVERT:
3563 res = search_type_for_mask_1 (rhs1, vinfo, cache);
3564 break;
3566 case BIT_AND_EXPR:
3567 case BIT_IOR_EXPR:
3568 case BIT_XOR_EXPR:
3569 res = search_type_for_mask_1 (rhs1, vinfo, cache);
3570 res2 = search_type_for_mask_1 (gimple_assign_rhs2 (def_stmt), vinfo,
3571 cache);
3572 if (!res || (res2 && TYPE_PRECISION (res) > TYPE_PRECISION (res2)))
3573 res = res2;
3574 break;
3576 default:
3577 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3579 tree comp_vectype, mask_type;
3581 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1)))
3583 res = search_type_for_mask_1 (rhs1, vinfo, cache);
3584 res2 = search_type_for_mask_1 (gimple_assign_rhs2 (def_stmt),
3585 vinfo, cache);
3586 if (!res || (res2 && TYPE_PRECISION (res) > TYPE_PRECISION (res2)))
3587 res = res2;
3588 break;
3591 comp_vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
3592 if (comp_vectype == NULL_TREE)
3594 res = NULL_TREE;
3595 break;
3598 mask_type = get_mask_type_for_scalar_type (TREE_TYPE (rhs1));
3599 if (!mask_type
3600 || !expand_vec_cmp_expr_p (comp_vectype, mask_type, rhs_code))
3602 res = NULL_TREE;
3603 break;
3606 if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE
3607 || !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
3609 scalar_mode mode = SCALAR_TYPE_MODE (TREE_TYPE (rhs1));
3610 res = build_nonstandard_integer_type (GET_MODE_BITSIZE (mode), 1);
3612 else
3613 res = TREE_TYPE (rhs1);
3617 cache.put (def_stmt, res);
3618 return res;
3621 /* Return the proper type for converting bool VAR into
3622 an integer value, or NULL_TREE if no such type exists.
3623 The type is chosen so that the converted value has the
3624 same number of elements as VAR's vector type. */
3626 static tree
3627 search_type_for_mask (tree var, vec_info *vinfo)
3629 hash_map<gimple *, tree> cache;
3630 return search_type_for_mask_1 (var, vinfo, cache);
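/* For example (an assumed case): if VAR is defined as

mask_b = x_1 == y_2;

with 32-bit integer operands, this is expected to return a 32-bit
unsigned integer type, so that the mask can be carried in vector
elements of the same width as the comparison operands. */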
3633 /* Function vect_recog_bool_pattern
3635 Try to find pattern like following:
3637 bool a_b, b_b, c_b, d_b, e_b;
3638 TYPE f_T;
3639 loop:
3640 S1 a_b = x1 CMP1 y1;
3641 S2 b_b = x2 CMP2 y2;
3642 S3 c_b = a_b & b_b;
3643 S4 d_b = x3 CMP3 y3;
3644 S5 e_b = c_b | d_b;
3645 S6 f_T = (TYPE) e_b;
3647 where type 'TYPE' is an integral type. Or a similar pattern
3648 ending in
3650 S6 f_Y = e_b ? r_Y : s_Y;
3652 as results from if-conversion of a complex condition.
3654 Input:
3656 * STMT_VINFO: The stmt at the end from which the pattern
3657 search begins, i.e. cast of a bool to
3658 an integer type.
3660 Output:
3662 * TYPE_OUT: The type of the output of this pattern.
3664 * Return value: A new stmt that will be used to replace the pattern.
3666 Assuming the size of TYPE is the same as the size of all comparisons
3667 (otherwise some casts would be added where needed), for the above
3668 sequence we create related pattern stmts:
3669 S1' a_T = x1 CMP1 y1 ? 1 : 0;
3670 S3' c_T = x2 CMP2 y2 ? a_T : 0;
3671 S4' d_T = x3 CMP3 y3 ? 1 : 0;
3672 S5' e_T = c_T | d_T;
3673 S6' f_T = e_T;
3675 Instead of the above S3' we could emit:
3676 S2' b_T = x2 CMP2 y2 ? 1 : 0;
3677 S3' c_T = a_T | b_T;
3678 but the above is more efficient. */
3680 static gimple *
3681 vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
3683 gimple *last_stmt = stmt_vinfo->stmt;
3684 enum tree_code rhs_code;
3685 tree var, lhs, rhs, vectype;
3686 vec_info *vinfo = stmt_vinfo->vinfo;
3687 gimple *pattern_stmt;
3689 if (!is_gimple_assign (last_stmt))
3690 return NULL;
3692 var = gimple_assign_rhs1 (last_stmt);
3693 lhs = gimple_assign_lhs (last_stmt);
3695 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (var)))
3696 return NULL;
3698 hash_set<gimple *> bool_stmts;
3700 rhs_code = gimple_assign_rhs_code (last_stmt);
3701 if (CONVERT_EXPR_CODE_P (rhs_code))
3703 if (! INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3704 || TYPE_PRECISION (TREE_TYPE (lhs)) == 1)
3705 return NULL;
3706 vectype = get_vectype_for_scalar_type (TREE_TYPE (lhs));
3707 if (vectype == NULL_TREE)
3708 return NULL;
3710 if (check_bool_pattern (var, vinfo, bool_stmts))
3712 rhs = adjust_bool_stmts (bool_stmts, TREE_TYPE (lhs), last_stmt);
3713 lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
3714 if (useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
3715 pattern_stmt = gimple_build_assign (lhs, SSA_NAME, rhs);
3716 else
3717 pattern_stmt
3718 = gimple_build_assign (lhs, NOP_EXPR, rhs);
3720 else
3722 tree type = search_type_for_mask (var, vinfo);
3723 tree cst0, cst1, tmp;
3725 if (!type)
3726 return NULL;
3728 /* We may directly use the condition with a narrowed type,
3729 performing a single COND_EXPR on a packed mask instead of
3730 multiple COND_EXPRs followed by result packing. In the
3731 widening case it is better to do the COND_EXPR first and
3732 then extract the results. */
3733 if (TYPE_MODE (type) == TYPE_MODE (TREE_TYPE (lhs)))
3734 type = TREE_TYPE (lhs);
3736 cst0 = build_int_cst (type, 0);
3737 cst1 = build_int_cst (type, 1);
3738 tmp = vect_recog_temp_ssa_var (type, NULL);
3739 pattern_stmt = gimple_build_assign (tmp, COND_EXPR, var, cst1, cst0);
3741 if (!useless_type_conversion_p (type, TREE_TYPE (lhs)))
3743 tree new_vectype = get_vectype_for_scalar_type (type);
3744 append_pattern_def_seq (stmt_vinfo, pattern_stmt, new_vectype);
3746 lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
3747 pattern_stmt = gimple_build_assign (lhs, CONVERT_EXPR, tmp);
3751 *type_out = vectype;
3752 vect_pattern_detected ("vect_recog_bool_pattern", last_stmt);
3754 return pattern_stmt;
3756 else if (rhs_code == COND_EXPR
3757 && TREE_CODE (var) == SSA_NAME)
3759 vectype = get_vectype_for_scalar_type (TREE_TYPE (lhs));
3760 if (vectype == NULL_TREE)
3761 return NULL;
3763 /* Build a scalar type for the boolean result that when
3764 vectorized matches the vector type of the result in
3765 size and number of elements. */
3766 unsigned prec
3767 = vector_element_size (tree_to_poly_uint64 (TYPE_SIZE (vectype)),
3768 TYPE_VECTOR_SUBPARTS (vectype));
3770 tree type
3771 = build_nonstandard_integer_type (prec,
3772 TYPE_UNSIGNED (TREE_TYPE (var)));
3773 if (get_vectype_for_scalar_type (type) == NULL_TREE)
3774 return NULL;
3776 if (!check_bool_pattern (var, vinfo, bool_stmts))
3777 return NULL;
3779 rhs = adjust_bool_stmts (bool_stmts, type, last_stmt);
3781 lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
3782 pattern_stmt
3783 = gimple_build_assign (lhs, COND_EXPR,
3784 build2 (NE_EXPR, boolean_type_node,
3785 rhs, build_int_cst (type, 0)),
3786 gimple_assign_rhs2 (last_stmt),
3787 gimple_assign_rhs3 (last_stmt));
3788 *type_out = vectype;
3789 vect_pattern_detected ("vect_recog_bool_pattern", last_stmt);
3791 return pattern_stmt;
3793 else if (rhs_code == SSA_NAME
3794 && STMT_VINFO_DATA_REF (stmt_vinfo))
3796 stmt_vec_info pattern_stmt_info;
3797 vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
3798 gcc_assert (vectype != NULL_TREE);
3799 if (!VECTOR_MODE_P (TYPE_MODE (vectype)))
3800 return NULL;
3802 if (check_bool_pattern (var, vinfo, bool_stmts))
3803 rhs = adjust_bool_stmts (bool_stmts, TREE_TYPE (vectype), last_stmt);
3804 else
3806 tree type = search_type_for_mask (var, vinfo);
3807 tree cst0, cst1, new_vectype;
3809 if (!type)
3810 return NULL;
3812 if (TYPE_MODE (type) == TYPE_MODE (TREE_TYPE (vectype)))
3813 type = TREE_TYPE (vectype);
3815 cst0 = build_int_cst (type, 0);
3816 cst1 = build_int_cst (type, 1);
3817 new_vectype = get_vectype_for_scalar_type (type);
3819 rhs = vect_recog_temp_ssa_var (type, NULL);
3820 pattern_stmt = gimple_build_assign (rhs, COND_EXPR, var, cst1, cst0);
3821 append_pattern_def_seq (stmt_vinfo, pattern_stmt, new_vectype);
3824 lhs = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vectype), lhs);
3825 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
3827 tree rhs2 = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
3828 gimple *cast_stmt = gimple_build_assign (rhs2, NOP_EXPR, rhs);
3829 append_pattern_def_seq (stmt_vinfo, cast_stmt);
3830 rhs = rhs2;
3832 pattern_stmt = gimple_build_assign (lhs, SSA_NAME, rhs);
3833 pattern_stmt_info = new_stmt_vec_info (pattern_stmt, vinfo);
3834 set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
3835 STMT_VINFO_DATA_REF (pattern_stmt_info)
3836 = STMT_VINFO_DATA_REF (stmt_vinfo);
3837 STMT_VINFO_DR_WRT_VEC_LOOP (pattern_stmt_info)
3838 = STMT_VINFO_DR_WRT_VEC_LOOP (stmt_vinfo);
3839 *type_out = vectype;
3840 vect_pattern_detected ("vect_recog_bool_pattern", last_stmt);
3842 return pattern_stmt;
3844 else
3845 return NULL;
3849 /* A helper for vect_recog_mask_conversion_pattern. Build a
3850 conversion of MASK to a type suitable for masking VECTYPE.
3851 The built statement gets the required vector type and is
3852 appended to STMT_VINFO's pattern definition sequence.
3854 Return the converted mask. */
3856 static tree
3857 build_mask_conversion (tree mask, tree vectype, stmt_vec_info stmt_vinfo)
3859 gimple *stmt;
3860 tree masktype, tmp;
3862 masktype = build_same_sized_truth_vector_type (vectype);
3863 tmp = vect_recog_temp_ssa_var (TREE_TYPE (masktype), NULL);
3864 stmt = gimple_build_assign (tmp, CONVERT_EXPR, mask);
3865 append_pattern_def_seq (stmt_vinfo, stmt, masktype);
3867 return tmp;
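/* Minimal usage sketch (hypothetical types): if VECTYPE is
vector(8) short int, MASKTYPE is the boolean vector type with eight
elements and the appended pattern stmt is simply

tmp_N = (MASKTYPE) mask;

so that TMP_N can be used to mask an operation on VECTYPE. */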
3871 /* Function vect_recog_mask_conversion_pattern
3873 Try to find statements which require boolean type
3874 conversion. Additional conversion statements are
3875 added to handle such cases. For example:
3877 bool m_1, m_2, m_3;
3878 int i_4, i_5;
3879 double d_6, d_7;
3880 char c_1, c_2, c_3;
3882 S1 m_1 = i_4 > i_5;
3883 S2 m_2 = d_6 < d_7;
3884 S3 m_3 = m_1 & m_2;
3885 S4 c_1 = m_3 ? c_2 : c_3;
3887 Will be transformed into:
3889 S1 m_1 = i_4 > i_5;
3890 S2 m_2 = d_6 < d_7;
3891 S3'' m_2' = (_Bool[bitsize=32])m_2
3892 S3' m_3' = m_1 & m_2';
3893 S4'' m_3'' = (_Bool[bitsize=8])m_3'
3894 S4' c_1' = m_3'' ? c_2 : c_3; */
3896 static gimple *
3897 vect_recog_mask_conversion_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
3899 gimple *last_stmt = stmt_vinfo->stmt;
3900 enum tree_code rhs_code;
3901 tree lhs = NULL_TREE, rhs1, rhs2, tmp, rhs1_type, rhs2_type;
3902 tree vectype1, vectype2;
3903 stmt_vec_info pattern_stmt_info;
3904 vec_info *vinfo = stmt_vinfo->vinfo;
3906 /* Check for MASK_LOAD and MASK_STORE calls requiring mask conversion. */
3907 if (is_gimple_call (last_stmt)
3908 && gimple_call_internal_p (last_stmt))
3910 gcall *pattern_stmt;
3912 internal_fn ifn = gimple_call_internal_fn (last_stmt);
3913 int mask_argno = internal_fn_mask_index (ifn);
3914 if (mask_argno < 0)
3915 return NULL;
3917 bool store_p = internal_store_fn_p (ifn);
3918 if (store_p)
3920 int rhs_index = internal_fn_stored_value_index (ifn);
3921 tree rhs = gimple_call_arg (last_stmt, rhs_index);
3922 vectype1 = get_vectype_for_scalar_type (TREE_TYPE (rhs));
3924 else
3926 lhs = gimple_call_lhs (last_stmt);
3927 vectype1 = get_vectype_for_scalar_type (TREE_TYPE (lhs));
3930 tree mask_arg = gimple_call_arg (last_stmt, mask_argno);
3931 tree mask_arg_type = search_type_for_mask (mask_arg, vinfo);
3932 if (!mask_arg_type)
3933 return NULL;
3934 vectype2 = get_mask_type_for_scalar_type (mask_arg_type);
3936 if (!vectype1 || !vectype2
3937 || known_eq (TYPE_VECTOR_SUBPARTS (vectype1),
3938 TYPE_VECTOR_SUBPARTS (vectype2)))
3939 return NULL;
3941 tmp = build_mask_conversion (mask_arg, vectype1, stmt_vinfo);
3943 auto_vec<tree, 8> args;
3944 unsigned int nargs = gimple_call_num_args (last_stmt);
3945 args.safe_grow (nargs);
3946 for (unsigned int i = 0; i < nargs; ++i)
3947 args[i] = ((int) i == mask_argno
3948 ? tmp
3949 : gimple_call_arg (last_stmt, i));
3950 pattern_stmt = gimple_build_call_internal_vec (ifn, args);
3952 if (!store_p)
3954 lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
3955 gimple_call_set_lhs (pattern_stmt, lhs);
3957 gimple_call_set_nothrow (pattern_stmt, true);
3959 pattern_stmt_info = new_stmt_vec_info (pattern_stmt, vinfo);
3960 set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
3961 if (STMT_VINFO_DATA_REF (stmt_vinfo))
3963 STMT_VINFO_DATA_REF (pattern_stmt_info)
3964 = STMT_VINFO_DATA_REF (stmt_vinfo);
3965 STMT_VINFO_DR_WRT_VEC_LOOP (pattern_stmt_info)
3966 = STMT_VINFO_DR_WRT_VEC_LOOP (stmt_vinfo);
3967 STMT_VINFO_GATHER_SCATTER_P (pattern_stmt_info)
3968 = STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo);
3971 *type_out = vectype1;
3972 vect_pattern_detected ("vect_recog_mask_conversion_pattern", last_stmt);
3974 return pattern_stmt;
3977 if (!is_gimple_assign (last_stmt))
3978 return NULL;
3980 gimple *pattern_stmt;
3981 lhs = gimple_assign_lhs (last_stmt);
3982 rhs1 = gimple_assign_rhs1 (last_stmt);
3983 rhs_code = gimple_assign_rhs_code (last_stmt);
3985 /* Check for cond expression requiring mask conversion. */
3986 if (rhs_code == COND_EXPR)
3988 vectype1 = get_vectype_for_scalar_type (TREE_TYPE (lhs));
3990 if (TREE_CODE (rhs1) == SSA_NAME)
3992 rhs1_type = search_type_for_mask (rhs1, vinfo);
3993 if (!rhs1_type)
3994 return NULL;
3996 else if (COMPARISON_CLASS_P (rhs1))
3998 /* Check whether we're comparing scalar booleans and (if so)
3999 whether a better mask type exists than the mask associated
4000 with boolean-sized elements. This avoids unnecessary packs
4001 and unpacks if the booleans are set from comparisons of
4002 wider types. E.g. in:
4004 int x1, x2, x3, x4, y1, y2;
4006 bool b1 = (x1 == x2);
4007 bool b2 = (x3 == x4);
4008 ... = b1 == b2 ? y1 : y2;
4010 it is better for b1 and b2 to use the mask type associated
4011 with int elements rather than bool (byte) elements. */
4012 rhs1_type = search_type_for_mask (TREE_OPERAND (rhs1, 0), vinfo);
4013 if (!rhs1_type)
4014 rhs1_type = TREE_TYPE (TREE_OPERAND (rhs1, 0));
4016 else
4017 return NULL;
4019 vectype2 = get_mask_type_for_scalar_type (rhs1_type);
4021 if (!vectype1 || !vectype2)
4022 return NULL;
4024 /* Continue if a conversion is needed. Also continue if we have
4025 a comparison whose vector type would normally be different from
4026 VECTYPE2 when considered in isolation. In that case we'll
4027 replace the comparison with an SSA name (so that we can record
4028 its vector type) and behave as though the comparison was an SSA
4029 name from the outset. */
4030 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype1),
4031 TYPE_VECTOR_SUBPARTS (vectype2))
4032 && (TREE_CODE (rhs1) == SSA_NAME
4033 || rhs1_type == TREE_TYPE (TREE_OPERAND (rhs1, 0))))
4034 return NULL;
4036 /* If rhs1 is invariant and we can promote it, leave the COND_EXPR
4037 in place; we can handle it in vectorizable_condition. This avoids
4038 unnecessary promotion stmts and an increased vectorization factor. */
4039 if (COMPARISON_CLASS_P (rhs1)
4040 && INTEGRAL_TYPE_P (rhs1_type)
4041 && known_le (TYPE_VECTOR_SUBPARTS (vectype1),
4042 TYPE_VECTOR_SUBPARTS (vectype2)))
4044 enum vect_def_type dt;
4045 if (vect_is_simple_use (TREE_OPERAND (rhs1, 0), vinfo, &dt)
4046 && dt == vect_external_def
4047 && vect_is_simple_use (TREE_OPERAND (rhs1, 1), vinfo, &dt)
4048 && (dt == vect_external_def
4049 || dt == vect_constant_def))
4051 tree wide_scalar_type = build_nonstandard_integer_type
4052 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype1))),
4053 TYPE_UNSIGNED (rhs1_type));
4054 tree vectype3 = get_vectype_for_scalar_type (wide_scalar_type);
4055 if (expand_vec_cond_expr_p (vectype1, vectype3, TREE_CODE (rhs1)))
4056 return NULL;
4060 /* If rhs1 is a comparison we need to move it into a
4061 separate statement. */
4062 if (TREE_CODE (rhs1) != SSA_NAME)
4064 tmp = vect_recog_temp_ssa_var (TREE_TYPE (rhs1), NULL);
4065 pattern_stmt = gimple_build_assign (tmp, rhs1);
4066 rhs1 = tmp;
4067 append_pattern_def_seq (stmt_vinfo, pattern_stmt, vectype2);
4070 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
4071 TYPE_VECTOR_SUBPARTS (vectype2)))
4072 tmp = build_mask_conversion (rhs1, vectype1, stmt_vinfo);
4073 else
4074 tmp = rhs1;
4076 lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
4077 pattern_stmt = gimple_build_assign (lhs, COND_EXPR, tmp,
4078 gimple_assign_rhs2 (last_stmt),
4079 gimple_assign_rhs3 (last_stmt));
4081 *type_out = vectype1;
4082 vect_pattern_detected ("vect_recog_mask_conversion_pattern", last_stmt);
4084 return pattern_stmt;
4087 /* Now check for binary boolean operations requiring conversion for
4088 one of the operands. */
4089 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (lhs)))
4090 return NULL;
4092 if (rhs_code != BIT_IOR_EXPR
4093 && rhs_code != BIT_XOR_EXPR
4094 && rhs_code != BIT_AND_EXPR
4095 && TREE_CODE_CLASS (rhs_code) != tcc_comparison)
4096 return NULL;
4098 rhs2 = gimple_assign_rhs2 (last_stmt);
4100 rhs1_type = search_type_for_mask (rhs1, vinfo);
4101 rhs2_type = search_type_for_mask (rhs2, vinfo);
4103 if (!rhs1_type || !rhs2_type
4104 || TYPE_PRECISION (rhs1_type) == TYPE_PRECISION (rhs2_type))
4105 return NULL;
4107 if (TYPE_PRECISION (rhs1_type) < TYPE_PRECISION (rhs2_type))
4109 vectype1 = get_mask_type_for_scalar_type (rhs1_type);
4110 if (!vectype1)
4111 return NULL;
4112 rhs2 = build_mask_conversion (rhs2, vectype1, stmt_vinfo);
4114 else
4116 vectype1 = get_mask_type_for_scalar_type (rhs2_type);
4117 if (!vectype1)
4118 return NULL;
4119 rhs1 = build_mask_conversion (rhs1, vectype1, stmt_vinfo);
4122 lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
4123 pattern_stmt = gimple_build_assign (lhs, rhs_code, rhs1, rhs2);
4125 *type_out = vectype1;
4126 vect_pattern_detected ("vect_recog_mask_conversion_pattern", last_stmt);
4128 return pattern_stmt;
4131 /* STMT is a load or store. If the load or store is conditional, return
4132 the boolean condition under which it occurs, otherwise return null. */
4134 static tree
4135 vect_get_load_store_mask (gimple *stmt)
4137 if (gassign *def_assign = dyn_cast <gassign *> (stmt))
4139 gcc_assert (gimple_assign_single_p (def_assign));
4140 return NULL_TREE;
4143 if (gcall *def_call = dyn_cast <gcall *> (stmt))
4145 internal_fn ifn = gimple_call_internal_fn (def_call);
4146 int mask_index = internal_fn_mask_index (ifn);
4147 return gimple_call_arg (def_call, mask_index);
4150 gcc_unreachable ();
4153 /* Return the scalar offset type that an internal gather/scatter function
4154 should use. GS_INFO describes the gather/scatter operation. */
4156 static tree
4157 vect_get_gather_scatter_offset_type (gather_scatter_info *gs_info)
4159 tree offset_type = TREE_TYPE (gs_info->offset);
4160 unsigned int element_bits = tree_to_uhwi (TYPE_SIZE (gs_info->element_type));
4162 /* Enforced by vect_check_gather_scatter. */
4163 unsigned int offset_bits = TYPE_PRECISION (offset_type);
4164 gcc_assert (element_bits >= offset_bits);
4166 /* If the offset is narrower than the elements, extend it according
4167 to its sign. */
4168 if (element_bits > offset_bits)
4169 return build_nonstandard_integer_type (element_bits,
4170 TYPE_UNSIGNED (offset_type));
4172 return offset_type;
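/* For example (assuming a 64-bit element type): a gather of 64-bit
elements whose recorded offset is a 32-bit int gets a 64-bit signed
offset type here, while an offset that is already as wide as the
elements is returned unchanged. */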
4175 /* Return MASK if MASK is suitable for masking an operation on vectors
4176 of type VECTYPE, otherwise convert it into such a form and return
4177 the result. Associate any conversion statements with STMT_INFO's
4178 pattern. */
4180 static tree
4181 vect_convert_mask_for_vectype (tree mask, tree vectype,
4182 stmt_vec_info stmt_info, vec_info *vinfo)
4184 tree mask_type = search_type_for_mask (mask, vinfo);
4185 if (mask_type)
4187 tree mask_vectype = get_mask_type_for_scalar_type (mask_type);
4188 if (mask_vectype
4189 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype),
4190 TYPE_VECTOR_SUBPARTS (mask_vectype)))
4191 mask = build_mask_conversion (mask, vectype, stmt_info);
4193 return mask;
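/* Illustrative case (assumed types): a MASK computed from comparing
32-bit ints naturally lives in a mask vector with, say, four
elements; if VECTYPE has eight elements the subparts differ, and
build_mask_conversion produces a mask in the boolean vector type
matching VECTYPE's element count. */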
4196 /* Return the equivalent of:
4198 fold_convert (TYPE, VALUE)
4200 with the expectation that the operation will be vectorized.
4201 If new statements are needed, add them as pattern statements
4202 to STMT_INFO. */
4204 static tree
4205 vect_add_conversion_to_pattern (tree type, tree value, stmt_vec_info stmt_info)
4207 if (useless_type_conversion_p (type, TREE_TYPE (value)))
4208 return value;
4210 tree new_value = vect_recog_temp_ssa_var (type, NULL);
4211 gassign *conversion = gimple_build_assign (new_value, CONVERT_EXPR, value);
4212 append_pattern_def_seq (stmt_info, conversion,
4213 get_vectype_for_scalar_type (type));
4214 return new_value;
4217 /* Try to convert STMT_INFO into a call to a gather load or scatter store
4218 internal function. Return the final statement on success and set
4219 *TYPE_OUT to the vector type being loaded or stored.
4221 This function only handles gathers and scatters that were recognized
4222 as such from the outset (indicated by STMT_VINFO_GATHER_SCATTER_P). */
4224 static gimple *
4225 vect_recog_gather_scatter_pattern (stmt_vec_info stmt_info, tree *type_out)
4227 /* Currently we only support this for loop vectorization. */
4228 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_info->vinfo);
4229 if (!loop_vinfo)
4230 return NULL;
4232 /* Make sure that we're looking at a gather load or scatter store. */
4233 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4234 if (!dr || !STMT_VINFO_GATHER_SCATTER_P (stmt_info))
4235 return NULL;
4237 /* Get the boolean that controls whether the load or store happens.
4238 This is null if the operation is unconditional. */
4239 gimple *stmt = stmt_info->stmt;
4240 tree mask = vect_get_load_store_mask (stmt);
4242 /* Make sure that the target supports an appropriate internal
4243 function for the gather/scatter operation. */
4244 gather_scatter_info gs_info;
4245 if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info)
4246 || gs_info.decl)
4247 return NULL;
4249 /* Convert the mask to the right form. */
4250 tree gs_vectype = get_vectype_for_scalar_type (gs_info.element_type);
4251 if (mask)
4252 mask = vect_convert_mask_for_vectype (mask, gs_vectype, stmt_info,
4253 loop_vinfo);
4255 /* Get the invariant base and non-invariant offset, converting the
4256 latter to the same width as the vector elements. */
4257 tree base = gs_info.base;
4258 tree offset_type = vect_get_gather_scatter_offset_type (&gs_info);
4259 tree offset = vect_add_conversion_to_pattern (offset_type, gs_info.offset,
4260 stmt_info);
4262 /* Build the new pattern statement. */
4263 tree scale = size_int (gs_info.scale);
4264 gcall *pattern_stmt;
4265 if (DR_IS_READ (dr))
4267 if (mask != NULL)
4268 pattern_stmt = gimple_build_call_internal (gs_info.ifn, 4, base,
4269 offset, scale, mask);
4270 else
4271 pattern_stmt = gimple_build_call_internal (gs_info.ifn, 3, base,
4272 offset, scale);
4273 tree load_lhs = vect_recog_temp_ssa_var (gs_info.element_type, NULL);
4274 gimple_call_set_lhs (pattern_stmt, load_lhs);
4276 else
4278 tree rhs = vect_get_store_rhs (stmt);
4279 if (mask != NULL)
4280 pattern_stmt = gimple_build_call_internal (IFN_MASK_SCATTER_STORE, 5,
4281 base, offset, scale, rhs,
4282 mask);
4283 else
4284 pattern_stmt = gimple_build_call_internal (IFN_SCATTER_STORE, 4,
4285 base, offset, scale, rhs);
4287 gimple_call_set_nothrow (pattern_stmt, true);
4289 /* Copy across relevant vectorization info and associate DR with the
4290 new pattern statement instead of the original statement. */
4291 stmt_vec_info pattern_stmt_info = new_stmt_vec_info (pattern_stmt,
4292 loop_vinfo);
4293 set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
4294 STMT_VINFO_DATA_REF (pattern_stmt_info) = dr;
4295 STMT_VINFO_DR_WRT_VEC_LOOP (pattern_stmt_info)
4296 = STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
4297 STMT_VINFO_GATHER_SCATTER_P (pattern_stmt_info)
4298 = STMT_VINFO_GATHER_SCATTER_P (stmt_info);
4300 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4301 *type_out = vectype;
4302 vect_pattern_detected ("gather/scatter pattern", stmt);
4304 return pattern_stmt;
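/* Rough example of the rewrite above (hypothetical SSA names): a
conditional store whose data reference was recognized as a scatter
becomes an internal call such as

MASK_SCATTER_STORE (base_1, offset_2, scale, rhs_3, mask_4);

with the DR and the gather/scatter flag copied over so that the
vectorizer treats the new stmt like the original store. */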
4307 /* Return true if TYPE is a non-boolean integer type. These are the types
4308 that we want to consider for narrowing. */
4310 static bool
4311 vect_narrowable_type_p (tree type)
4313 return INTEGRAL_TYPE_P (type) && !VECT_SCALAR_BOOLEAN_TYPE_P (type);
4316 /* Return true if the operation given by CODE can be truncated to N bits
4317 when only N bits of the output are needed. This is only true if bit N+1
4318 of the inputs has no effect on the low N bits of the result. */
4320 static bool
4321 vect_truncatable_operation_p (tree_code code)
4323 switch (code)
4325 case PLUS_EXPR:
4326 case MINUS_EXPR:
4327 case MULT_EXPR:
4328 case BIT_AND_EXPR:
4329 case BIT_IOR_EXPR:
4330 case BIT_XOR_EXPR:
4331 case COND_EXPR:
4332 return true;
4334 default:
4335 return false;
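/* For example, (a + b) mod 2^N depends only on the low N bits of A
and B, so PLUS_EXPR is truncatable, whereas a right shift or a
division is not, because higher input bits can affect the low bits
of the result. */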
4339 /* Record that STMT_INFO could be changed from operating on TYPE to
4340 operating on a type with the precision and sign given by PRECISION
4341 and SIGN respectively. PRECISION is an arbitrary bit precision;
4342 it might not be a whole number of bytes. */
4344 static void
4345 vect_set_operation_type (stmt_vec_info stmt_info, tree type,
4346 unsigned int precision, signop sign)
4348 /* Round the precision up to a whole number of bytes. */
4349 precision = vect_element_precision (precision);
4350 if (precision < TYPE_PRECISION (type)
4351 && (!stmt_info->operation_precision
4352 || stmt_info->operation_precision > precision))
4354 stmt_info->operation_precision = precision;
4355 stmt_info->operation_sign = sign;
4359 /* Record that STMT_INFO only requires MIN_INPUT_PRECISION from its
4360 non-boolean inputs, all of which have type TYPE. MIN_INPUT_PRECISION
4361 is an arbitrary bit precision; it might not be a whole number of bytes. */
4363 static void
4364 vect_set_min_input_precision (stmt_vec_info stmt_info, tree type,
4365 unsigned int min_input_precision)
4367 /* This operation in isolation only requires the inputs to have
4368 MIN_INPUT_PRECISION of precision. However, that doesn't mean
4369 that MIN_INPUT_PRECISION is a natural precision for the chain
4370 as a whole. E.g. consider something like:
4372 unsigned short *x, *y;
4373 *y = ((*x & 0xf0) >> 4) | (*y << 4);
4375 The right shift can be done on unsigned chars, and only requires the
4376 result of "*x & 0xf0" to be done on unsigned chars. But taking that
4377 approach would mean turning a natural chain of single-vector unsigned
4378 short operations into one that truncates "*x" and then extends
4379 "(*x & 0xf0) >> 4", with two vectors for each unsigned short
4380 operation and one vector for each unsigned char operation.
4381 This would be a significant pessimization.
4383 Instead only propagate the maximum of this precision and the precision
4384 required by the users of the result. This means that we don't pessimize
4385 the case above but continue to optimize things like:
4387 unsigned char *y;
4388 unsigned short *x;
4389 *y = ((*x & 0xf0) >> 4) | (*y << 4);
4391 Here we would truncate two vectors of *x to a single vector of
4392 unsigned chars and use single-vector unsigned char operations for
4393 everything else, rather than doing two unsigned short copies of
4394 "(*x & 0xf0) >> 4" and then truncating the result. */
4395 min_input_precision = MAX (min_input_precision,
4396 stmt_info->min_output_precision);
4398 if (min_input_precision < TYPE_PRECISION (type)
4399 && (!stmt_info->min_input_precision
4400 || stmt_info->min_input_precision > min_input_precision))
4401 stmt_info->min_input_precision = min_input_precision;
4404 /* Subroutine of vect_determine_min_output_precision. Return true if
4405 we can calculate a reduced number of output bits for STMT_INFO,
4406 whose result is LHS. */
4408 static bool
4409 vect_determine_min_output_precision_1 (stmt_vec_info stmt_info, tree lhs)
4411 /* Take the maximum precision required by users of the result. */
4412 unsigned int precision = 0;
4413 imm_use_iterator iter;
4414 use_operand_p use;
4415 FOR_EACH_IMM_USE_FAST (use, iter, lhs)
4417 gimple *use_stmt = USE_STMT (use);
4418 if (is_gimple_debug (use_stmt))
4419 continue;
4420 if (!vect_stmt_in_region_p (stmt_info->vinfo, use_stmt))
4421 return false;
4422 stmt_vec_info use_stmt_info = vinfo_for_stmt (use_stmt);
4423 if (!use_stmt_info->min_input_precision)
4424 return false;
4425 precision = MAX (precision, use_stmt_info->min_input_precision);
4428 if (dump_enabled_p ())
4430 dump_printf_loc (MSG_NOTE, vect_location, "only the low %d bits of ",
4431 precision);
4432 dump_generic_expr (MSG_NOTE, TDF_SLIM, lhs);
4433 dump_printf (MSG_NOTE, " are significant\n");
4435 stmt_info->min_output_precision = precision;
4436 return true;
4439 /* Calculate min_output_precision for STMT_INFO. */
4441 static void
4442 vect_determine_min_output_precision (stmt_vec_info stmt_info)
4444 /* We're only interested in statements with a narrowable result. */
4445 tree lhs = gimple_get_lhs (stmt_info->stmt);
4446 if (!lhs
4447 || TREE_CODE (lhs) != SSA_NAME
4448 || !vect_narrowable_type_p (TREE_TYPE (lhs)))
4449 return;
4451 if (!vect_determine_min_output_precision_1 (stmt_info, lhs))
4452 stmt_info->min_output_precision = TYPE_PRECISION (TREE_TYPE (lhs));
4455 /* Use range information to decide whether STMT (described by STMT_INFO)
4456 could be done in a narrower type. This is effectively a forward
4457 propagation, since it uses context-independent information that applies
4458 to all users of an SSA name. */
4460 static void
4461 vect_determine_precisions_from_range (stmt_vec_info stmt_info, gassign *stmt)
4463 tree lhs = gimple_assign_lhs (stmt);
4464 if (!lhs || TREE_CODE (lhs) != SSA_NAME)
4465 return;
4467 tree type = TREE_TYPE (lhs);
4468 if (!vect_narrowable_type_p (type))
4469 return;
4471 /* First see whether we have any useful range information for the result. */
4472 unsigned int precision = TYPE_PRECISION (type);
4473 signop sign = TYPE_SIGN (type);
4474 wide_int min_value, max_value;
4475 if (!vect_get_range_info (lhs, &min_value, &max_value))
4476 return;
4478 tree_code code = gimple_assign_rhs_code (stmt);
4479 unsigned int nops = gimple_num_ops (stmt);
4481 if (!vect_truncatable_operation_p (code))
4482 /* Check that all relevant input operands are compatible, and update
4483 [MIN_VALUE, MAX_VALUE] to include their ranges. */
4484 for (unsigned int i = 1; i < nops; ++i)
4486 tree op = gimple_op (stmt, i);
4487 if (TREE_CODE (op) == INTEGER_CST)
4489 /* Don't require the integer constant to have TYPE (which it
4490 might not for things like shift amounts, etc.), but do require
4491 it to fit the type. */
4492 if (!int_fits_type_p (op, type))
4493 return;
4495 min_value = wi::min (min_value, wi::to_wide (op, precision), sign);
4496 max_value = wi::max (max_value, wi::to_wide (op, precision), sign);
4498 else if (TREE_CODE (op) == SSA_NAME)
4500 /* Ignore codes that don't take uniform arguments. */
4501 if (!types_compatible_p (TREE_TYPE (op), type))
4502 return;
4504 wide_int op_min_value, op_max_value;
4505 if (!vect_get_range_info (op, &op_min_value, &op_max_value))
4506 return;
4508 min_value = wi::min (min_value, op_min_value, sign);
4509 max_value = wi::max (max_value, op_max_value, sign);
4511 else
4512 return;
4515 /* Try to switch signed types for unsigned types if we can.
4516 This is better for two reasons. First, unsigned ops tend
4517 to be cheaper than signed ops. Second, it means that we can
4518 handle things like:
4520 signed char c;
4521 int res = (int) c & 0xff00; // range [0x0000, 0xff00]
4523 as:
4525 signed char c;
4526 unsigned short res_1 = (unsigned short) c & 0xff00;
4527 int res = (int) res_1;
4529 where the intermediate result res_1 has unsigned rather than
4530 signed type. */
4531 if (sign == SIGNED && !wi::neg_p (min_value))
4532 sign = UNSIGNED;
4534 /* See what precision is required for MIN_VALUE and MAX_VALUE. */
4535 unsigned int precision1 = wi::min_precision (min_value, sign);
4536 unsigned int precision2 = wi::min_precision (max_value, sign);
4537 unsigned int value_precision = MAX (precision1, precision2);
4538 if (value_precision >= precision)
4539 return;
4541 if (dump_enabled_p ())
4543 dump_printf_loc (MSG_NOTE, vect_location, "can narrow to %s:%d"
4544 " without loss of precision: ",
4545 sign == SIGNED ? "signed" : "unsigned",
4546 value_precision);
4547 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
4550 vect_set_operation_type (stmt_info, type, value_precision, sign);
4551 vect_set_min_input_precision (stmt_info, type, value_precision);
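/* A worked example (assuming range info is available): if the result
of an int addition is known to be in [0, 1000], VALUE_PRECISION is
10 bits and the sign is switched to unsigned, so the stmt can be
recorded as computable in a 16-bit unsigned type after
vect_set_operation_type rounds the precision up. */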
4554 /* Use information about the users of STMT's result to decide whether
4555 STMT (described by STMT_INFO) could be done in a narrower type.
4556 This is effectively a backward propagation. */
4558 static void
4559 vect_determine_precisions_from_users (stmt_vec_info stmt_info, gassign *stmt)
4561 tree_code code = gimple_assign_rhs_code (stmt);
4562 unsigned int opno = (code == COND_EXPR ? 2 : 1);
4563 tree type = TREE_TYPE (gimple_op (stmt, opno));
4564 if (!vect_narrowable_type_p (type))
4565 return;
4567 unsigned int precision = TYPE_PRECISION (type);
4568 unsigned int operation_precision, min_input_precision;
4569 switch (code)
4571 CASE_CONVERT:
4572 /* Only the bits that contribute to the output matter. Don't change
4573 the precision of the operation itself. */
4574 operation_precision = precision;
4575 min_input_precision = stmt_info->min_output_precision;
4576 break;
4578 case LSHIFT_EXPR:
4579 case RSHIFT_EXPR:
4581 tree shift = gimple_assign_rhs2 (stmt);
4582 if (TREE_CODE (shift) != INTEGER_CST
4583 || !wi::ltu_p (wi::to_widest (shift), precision))
4584 return;
4585 unsigned int const_shift = TREE_INT_CST_LOW (shift);
4586 if (code == LSHIFT_EXPR)
4588 /* We need CONST_SHIFT fewer bits of the input. */
4589 operation_precision = stmt_info->min_output_precision;
4590 min_input_precision = (MAX (operation_precision, const_shift)
4591 - const_shift);
4593 else
4595 /* We need CONST_SHIFT extra bits to do the operation. */
4596 operation_precision = (stmt_info->min_output_precision
4597 + const_shift);
4598 min_input_precision = operation_precision;
4600 break;
4603 default:
4604 if (vect_truncatable_operation_p (code))
4606 /* Input bit N has no effect on output bits N-1 and lower. */
4607 operation_precision = stmt_info->min_output_precision;
4608 min_input_precision = operation_precision;
4609 break;
4611 return;
4614 if (operation_precision < precision)
4616 if (dump_enabled_p ())
4618 dump_printf_loc (MSG_NOTE, vect_location, "can narrow to %s:%d"
4619 " without affecting users: ",
4620 TYPE_UNSIGNED (type) ? "unsigned" : "signed",
4621 operation_precision);
4622 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
4624 vect_set_operation_type (stmt_info, type, operation_precision,
4625 TYPE_SIGN (type));
4627 vect_set_min_input_precision (stmt_info, type, min_input_precision);
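/* A short example for the shift handling above (assumed values): for
x >> 4 where the users only need the low 8 bits of the result, the
operation itself needs 8 + 4 = 12 significant bits, so it can be
done in a 16-bit type rather than the original width. */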
4630 /* Handle vect_determine_precisions for STMT_INFO, given that we
4631 have already done so for the users of its result. */
4633 void
4634 vect_determine_stmt_precisions (stmt_vec_info stmt_info)
4636 vect_determine_min_output_precision (stmt_info);
4637 if (gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt))
4639 vect_determine_precisions_from_range (stmt_info, stmt);
4640 vect_determine_precisions_from_users (stmt_info, stmt);
4644 /* Walk backwards through the vectorizable region to determine the
4645 values of these fields:
4647 - min_output_precision
4648 - min_input_precision
4649 - operation_precision
4650 - operation_sign. */
4652 void
4653 vect_determine_precisions (vec_info *vinfo)
4655 DUMP_VECT_SCOPE ("vect_determine_precisions");
4657 if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
4659 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4660 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
4661 unsigned int nbbs = loop->num_nodes;
4663 for (unsigned int i = 0; i < nbbs; i++)
4665 basic_block bb = bbs[nbbs - i - 1];
4666 for (gimple_stmt_iterator si = gsi_last_bb (bb);
4667 !gsi_end_p (si); gsi_prev (&si))
4668 vect_determine_stmt_precisions (vinfo_for_stmt (gsi_stmt (si)));
4671 else
4673 bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
4674 gimple_stmt_iterator si = bb_vinfo->region_end;
4675 gimple *stmt;
4678 if (!gsi_stmt (si))
4679 si = gsi_last_bb (bb_vinfo->bb);
4680 else
4681 gsi_prev (&si);
4682 stmt = gsi_stmt (si);
4683 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4684 if (stmt_info && STMT_VINFO_VECTORIZABLE (stmt_info))
4685 vect_determine_stmt_precisions (stmt_info);
4687 while (stmt != gsi_stmt (bb_vinfo->region_begin));
4691 typedef gimple *(*vect_recog_func_ptr) (stmt_vec_info, tree *);
4693 struct vect_recog_func
4695 vect_recog_func_ptr fn;
4696 const char *name;
4699 /* Note that ordering matters - the first pattern matching on a stmt is
4700 taken, which means that usually the more complex one needs to precede the
4701 less complex ones (widen_sum only after dot_prod or sad, for example). */
4702 static vect_recog_func vect_vect_recog_func_ptrs[] = {
4703 { vect_recog_over_widening_pattern, "over_widening" },
4704 /* Must come after over_widening, which narrows the shift as much as
4705 possible beforehand. */
4706 { vect_recog_average_pattern, "average" },
4707 { vect_recog_cast_forwprop_pattern, "cast_forwprop" },
4708 { vect_recog_widen_mult_pattern, "widen_mult" },
4709 { vect_recog_dot_prod_pattern, "dot_prod" },
4710 { vect_recog_sad_pattern, "sad" },
4711 { vect_recog_widen_sum_pattern, "widen_sum" },
4712 { vect_recog_pow_pattern, "pow" },
4713 { vect_recog_widen_shift_pattern, "widen_shift" },
4714 { vect_recog_rotate_pattern, "rotate" },
4715 { vect_recog_vector_vector_shift_pattern, "vector_vector_shift" },
4716 { vect_recog_divmod_pattern, "divmod" },
4717 { vect_recog_mult_pattern, "mult" },
4718 { vect_recog_mixed_size_cond_pattern, "mixed_size_cond" },
4719 { vect_recog_bool_pattern, "bool" },
4720 /* This must come before mask conversion, and includes the parts
4721 of mask conversion that are needed for gather and scatter
4722 internal functions. */
4723 { vect_recog_gather_scatter_pattern, "gather_scatter" },
4724 { vect_recog_mask_conversion_pattern, "mask_conversion" }
4727 const unsigned int NUM_PATTERNS = ARRAY_SIZE (vect_vect_recog_func_ptrs);
4729 /* Mark statements that are involved in a pattern. */
4731 static inline void
4732 vect_mark_pattern_stmts (gimple *orig_stmt, gimple *pattern_stmt,
4733 tree pattern_vectype)
4735 stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
4736 gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt_info);
4738 bool old_pattern_p = is_pattern_stmt_p (orig_stmt_info);
4739 if (old_pattern_p)
4741 /* We're replacing a statement in an existing pattern definition
4742 sequence. */
4743 if (dump_enabled_p ())
4745 dump_printf_loc (MSG_NOTE, vect_location,
4746 "replacing earlier pattern ");
4747 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, orig_stmt, 0);
4750 /* To keep the book-keeping simple, just swap the lhs of the
4751 old and new statements, so that the old one has a valid but
4752 unused lhs. */
4753 tree old_lhs = gimple_get_lhs (orig_stmt);
4754 gimple_set_lhs (orig_stmt, gimple_get_lhs (pattern_stmt));
4755 gimple_set_lhs (pattern_stmt, old_lhs);
4757 if (dump_enabled_p ())
4759 dump_printf_loc (MSG_NOTE, vect_location, "with ");
4760 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
4763 /* Switch to the statement that ORIG replaces. */
4764 orig_stmt_info
4765 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (orig_stmt_info));
4767 /* We shouldn't be replacing the main pattern statement. */
4768 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) != orig_stmt);
4771 if (def_seq)
4772 for (gimple_stmt_iterator si = gsi_start (def_seq);
4773 !gsi_end_p (si); gsi_next (&si))
4774 vect_init_pattern_stmt (gsi_stmt (si), orig_stmt_info, pattern_vectype);
4776 if (old_pattern_p)
4778 vect_init_pattern_stmt (pattern_stmt, orig_stmt_info, pattern_vectype);
4780 /* Insert all the new pattern statements before the original one. */
4781 gimple_seq *orig_def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt_info);
4782 gimple_stmt_iterator gsi = gsi_for_stmt (orig_stmt, orig_def_seq);
4783 gsi_insert_seq_before_without_update (&gsi, def_seq, GSI_SAME_STMT);
4784 gsi_insert_before_without_update (&gsi, pattern_stmt, GSI_SAME_STMT);
4786 /* Remove the pattern statement that this new pattern replaces. */
4787 gsi_remove (&gsi, false);
4789 else
4790 vect_set_pattern_stmt (pattern_stmt, orig_stmt_info, pattern_vectype);
4793 /* Function vect_pattern_recog_1
4795 Input:
4796 PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
4797 computation pattern.
4798 STMT: A stmt from which the pattern search should start.
4800 If PATTERN_RECOG_FUNC successfully detected the pattern, it creates
4801 a sequence of statements that has the same functionality and can be
4802 used to replace STMT. It returns the last statement in the sequence
4803 and adds any earlier statements to STMT's STMT_VINFO_PATTERN_DEF_SEQ.
4804 PATTERN_RECOG_FUNC also sets *TYPE_OUT to the vector type of the final
4805 statement, having first checked that the target supports the new operation
4806 in that type.
4808 This function also does some bookkeeping, as explained in the documentation
4809 for vect_pattern_recog. */
4811 static void
4812 vect_pattern_recog_1 (vect_recog_func *recog_func, gimple_stmt_iterator si)
4814 gimple *stmt = gsi_stmt (si), *pattern_stmt;
4815 stmt_vec_info stmt_info;
4816 loop_vec_info loop_vinfo;
4817 tree pattern_vectype;
4819 /* If this statement has already been replaced with pattern statements,
4820 leave the original statement alone, since the first match wins.
4821 Instead try to match against the definition statements that feed
4822 the main pattern statement. */
4823 stmt_info = vinfo_for_stmt (stmt);
4824 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
4826 gimple_stmt_iterator gsi;
4827 for (gsi = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
4828 !gsi_end_p (gsi); gsi_next (&gsi))
4829 vect_pattern_recog_1 (recog_func, gsi);
4830 return;
4833 gcc_assert (!STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
4834 pattern_stmt = recog_func->fn (stmt_info, &pattern_vectype);
4835 if (!pattern_stmt)
4837 /* Clear any half-formed pattern definition sequence. */
4838 STMT_VINFO_PATTERN_DEF_SEQ (stmt_info) = NULL;
4839 return;
4842 loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4843 gcc_assert (pattern_vectype);
4845 /* Found a vectorizable pattern. */
4846 if (dump_enabled_p ())
4848 dump_printf_loc (MSG_NOTE, vect_location,
4849 "%s pattern recognized: ", recog_func->name);
4850 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
4853 /* Mark the stmts that are involved in the pattern. */
4854 vect_mark_pattern_stmts (stmt, pattern_stmt, pattern_vectype);
4856 /* Patterns cannot be vectorized using SLP, because they change the order of
4857 computation. */
4858 if (loop_vinfo)
4860 unsigned ix, ix2;
4861 gimple **elem_ptr;
4862 VEC_ORDERED_REMOVE_IF (LOOP_VINFO_REDUCTIONS (loop_vinfo), ix, ix2,
4863 elem_ptr, *elem_ptr == stmt);
4868 /* Function vect_pattern_recog
4870 Input:
4871 VINFO - a vec_info describing the loop or basic-block region in which
4872 we want to look for computation idioms.
4874 Output - for each computation idiom that is detected we create a new stmt
4875 that provides the same functionality and that can be vectorized. We
4876 also record some information in the struct_stmt_info of the relevant
4877 stmts, as explained below:
4879 At the entry to this function we have the following stmts, with the
4880 following initial value in the STMT_VINFO fields:
4882 stmt in_pattern_p related_stmt vec_stmt
4883 S1: a_i = .... - - -
4884 S2: a_2 = ..use(a_i).. - - -
4885 S3: a_1 = ..use(a_2).. - - -
4886 S4: a_0 = ..use(a_1).. - - -
4887 S5: ... = ..use(a_0).. - - -
4889 Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
4890 represented by a single stmt. We then:
4891 - create a new stmt S6 equivalent to the pattern (the stmt is not
4892 inserted into the code)
4893 - fill in the STMT_VINFO fields as follows:
4895 in_pattern_p related_stmt vec_stmt
4896 S1: a_i = .... - - -
4897 S2: a_2 = ..use(a_i).. - - -
4898 S3: a_1 = ..use(a_2).. - - -
4899 S4: a_0 = ..use(a_1).. true S6 -
4900 '---> S6: a_new = .... - S4 -
4901 S5: ... = ..use(a_0).. - - -
4903 (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
4904 to each other through the RELATED_STMT field).
4906 S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
4907 of S4 because it will replace all its uses. Stmts {S1,S2,S3} will
4908 remain irrelevant unless used by stmts other than S4.
4910 If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
4911 (because they are marked as irrelevant). It will vectorize S6, and record
4912 a pointer to the new vector stmt VS6 from S6 (as usual).
4913 S4 will be skipped, and S5 will be vectorized as usual:
4915 in_pattern_p related_stmt vec_stmt
4916 S1: a_i = .... - - -
4917 S2: a_2 = ..use(a_i).. - - -
4918 S3: a_1 = ..use(a_2).. - - -
4919 > VS6: va_new = .... - - -
4920 S4: a_0 = ..use(a_1).. true S6 VS6
4921 '---> S6: a_new = .... - S4 VS6
4922 > VS5: ... = ..vuse(va_new).. - - -
4923 S5: ... = ..use(a_0).. - - -
4925 DCE could then get rid of {S1,S2,S3,S4,S5} (if their defs are not used
4926 elsewhere), and we'll end up with:
4928 VS6: va_new = ....
4929 VS5: ... = ..vuse(va_new)..
4931 In case of more than one pattern statement, e.g., widen-mult with
4932 intermediate type:
4934 S1 a_t = ;
4935 S2 a_T = (TYPE) a_t;
4936 '--> S3: a_it = (interm_type) a_t;
4937 S4 prod_T = a_T * CONST;
4938 '--> S5: prod_T' = a_it w* CONST;
4940 there may be other users of a_T outside the pattern. In that case S2 will
4941 be marked as relevant (as well as S3), and both S2 and S3 will be analyzed
4942 and vectorized. The vector stmt VS2 will be recorded in S2, and VS3 will
4943 be recorded in S3. */
4945 void
4946 vect_pattern_recog (vec_info *vinfo)
4948 struct loop *loop;
4949 basic_block *bbs;
4950 unsigned int nbbs;
4951 gimple_stmt_iterator si;
4952 unsigned int i, j;
4954 vect_determine_precisions (vinfo);
4956 DUMP_VECT_SCOPE ("vect_pattern_recog");
4958 if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
4960 loop = LOOP_VINFO_LOOP (loop_vinfo);
4961 bbs = LOOP_VINFO_BBS (loop_vinfo);
4962 nbbs = loop->num_nodes;
4964 /* Scan through the loop stmts, applying the pattern recognition
4965 functions starting at each stmt visited: */
4966 for (i = 0; i < nbbs; i++)
4968 basic_block bb = bbs[i];
4969 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4970 /* Scan over all generic vect_recog_xxx_pattern functions. */
4971 for (j = 0; j < NUM_PATTERNS; j++)
4972 vect_pattern_recog_1 (&vect_vect_recog_func_ptrs[j], si);
4975 else
4977 bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
4978 for (si = bb_vinfo->region_begin;
4979 gsi_stmt (si) != gsi_stmt (bb_vinfo->region_end); gsi_next (&si))
4981 gimple *stmt = gsi_stmt (si);
4982 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4983 if (stmt_info && !STMT_VINFO_VECTORIZABLE (stmt_info))
4984 continue;
4986 /* Scan over all generic vect_recog_xxx_pattern functions. */
4987 for (j = 0; j < NUM_PATTERNS; j++)
4988 vect_pattern_recog_1 (&vect_vect_recog_func_ptrs[j], si);