/* Lower vector operations to scalar operations.
   Copyright (C) 2004-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "expmed.h"
#include "optabs-tree.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "langhooks.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimplify.h"
#include "tree-cfg.h"

static void expand_vector_operations_1 (gimple_stmt_iterator *);

/* Build a constant of type TYPE, made of VALUE's bits replicated
   every TYPE_SIZE (INNER_TYPE) bits to fit TYPE's precision.  */

static tree
build_replicated_const (tree type, tree inner_type, HOST_WIDE_INT value)
{
  int width = tree_to_uhwi (TYPE_SIZE (inner_type));
  int n = (TYPE_PRECISION (type) + HOST_BITS_PER_WIDE_INT - 1)
	  / HOST_BITS_PER_WIDE_INT;
  unsigned HOST_WIDE_INT low, mask;
  HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
  int i;

  gcc_assert (n && n <= WIDE_INT_MAX_ELTS);

  if (width == HOST_BITS_PER_WIDE_INT)
    low = value;
  else
    {
      mask = ((HOST_WIDE_INT) 1 << width) - 1;
      low = (unsigned HOST_WIDE_INT) ~0 / mask * (value & mask);
    }

  for (i = 0; i < n; i++)
    a[i] = low;

  gcc_assert (TYPE_PRECISION (type) <= MAX_BITSIZE_MODE_ANY_INT);
  return wide_int_to_tree
    (type, wide_int::from_array (a, n, TYPE_PRECISION (type)));
}
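
/* As an illustration of the arithmetic above: with an 8-bit INNER_TYPE,
   MASK is 0xff and (unsigned HOST_WIDE_INT) ~0 / 0xff is the
   byte-replicating constant 0x0101...01, so multiplying it by
   (VALUE & 0xff) stamps the low byte of VALUE into every byte lane;
   VALUE 0x7f yields 0x7f7f...7f.  */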

static GTY(()) tree vector_inner_type;
static GTY(()) tree vector_last_type;
static GTY(()) int vector_last_nunits;

/* Return a suitable vector type made of NUNITS units each of mode
   "word_mode" (the global variable).  */

static tree
build_word_mode_vector_type (int nunits)
{
  if (!vector_inner_type)
    vector_inner_type = lang_hooks.types.type_for_mode (word_mode, 1);
  else if (vector_last_nunits == nunits)
    {
      gcc_assert (TREE_CODE (vector_last_type) == VECTOR_TYPE);
      return vector_last_type;
    }

  /* We build a new type, but we canonicalize it nevertheless,
     because it still saves some memory.  */
  vector_last_nunits = nunits;
  vector_last_type = type_hash_canon (nunits,
				      build_vector_type (vector_inner_type,
							 nunits));
  return vector_last_type;
}

/* Type of the per-element callbacks used when a vector operation is
   expanded one piece at a time.  */

typedef tree (*elem_op_func) (gimple_stmt_iterator *,
			      tree, tree, tree, tree, tree, enum tree_code,
			      tree);

/* Extract the piece of vector T of type TYPE covering BITSIZE bits at
   BITPOS, looking through intermediate VECTOR_CST and CONSTRUCTOR
   definitions; without a BITPOS, reinterpret T as TYPE.  */

static tree
tree_vec_extract (gimple_stmt_iterator *gsi, tree type,
		  tree t, tree bitsize, tree bitpos)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (t);
      if (is_gimple_assign (def_stmt)
	  && (gimple_assign_rhs_code (def_stmt) == VECTOR_CST
	      || (bitpos
		  && gimple_assign_rhs_code (def_stmt) == CONSTRUCTOR)))
	t = gimple_assign_rhs1 (def_stmt);
    }
  if (bitpos)
    {
      if (TREE_CODE (type) == BOOLEAN_TYPE)
	{
	  tree itype
	    = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 0);
	  tree field = gimplify_build3 (gsi, BIT_FIELD_REF, itype, t,
					bitsize, bitpos);
	  return gimplify_build2 (gsi, NE_EXPR, type, field,
				  build_zero_cst (itype));
	}
      else
	return gimplify_build3 (gsi, BIT_FIELD_REF, type, t, bitsize, bitpos);
    }
  else
    return gimplify_build1 (gsi, VIEW_CONVERT_EXPR, type, t);
}

static tree
do_unop (gimple_stmt_iterator *gsi, tree inner_type, tree a,
	 tree b ATTRIBUTE_UNUSED, tree bitpos, tree bitsize,
	 enum tree_code code, tree type ATTRIBUTE_UNUSED)
{
  a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
  return gimplify_build1 (gsi, code, inner_type, a);
}

static tree
do_binop (gimple_stmt_iterator *gsi, tree inner_type, tree a, tree b,
	  tree bitpos, tree bitsize, enum tree_code code,
	  tree type ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (TREE_TYPE (a)) == VECTOR_TYPE)
    a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
  if (TREE_CODE (TREE_TYPE (b)) == VECTOR_TYPE)
    b = tree_vec_extract (gsi, inner_type, b, bitsize, bitpos);
  return gimplify_build2 (gsi, code, inner_type, a, b);
}

/* Construct the expression (A[BITPOS] code B[BITPOS]) ? -1 : 0.

   INNER_TYPE is the type of A's and B's elements; the returned
   expression is of signed integer type with size equal to the size
   of INNER_TYPE.  */

static tree
do_compare (gimple_stmt_iterator *gsi, tree inner_type, tree a, tree b,
	    tree bitpos, tree bitsize, enum tree_code code, tree type)
{
  tree stype = TREE_TYPE (type);
  tree cst_false = build_zero_cst (stype);
  tree cst_true = build_all_ones_cst (stype);
  tree cmp;

  a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
  b = tree_vec_extract (gsi, inner_type, b, bitsize, bitpos);

  cmp = build2 (code, boolean_type_node, a, b);
  return gimplify_build3 (gsi, COND_EXPR, stype, cmp, cst_true, cst_false);
}

/* Expand vector addition to scalars.  This does bit twiddling
   in order to increase parallelism:

   a + b = (((int) a & 0x7f7f7f7f) + ((int) b & 0x7f7f7f7f)) ^
	   ((a ^ b) & 0x80808080)

   a - b = (((int) a | 0x80808080) - ((int) b & 0x7f7f7f7f)) ^
	   ((a ^ ~b) & 0x80808080)

   -b = (0x80808080 - ((int) b & 0x7f7f7f7f)) ^ (~b & 0x80808080)

   This optimization should be done only if 4 vector items or more
   fit into a word.  */
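
/* A worked instance of the addition identity above, with two 8-bit
   lanes packed into one word: for a = 0x00ff and b = 0x0001 the masked
   sum is 0x007f + 0x0001 = 0x0080, (a ^ b) & 0x8080 is 0x0080, and the
   final XOR gives 0x0000 -- the low lane wraps around to zero without
   leaking a carry into the high lane.  */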

static tree
do_plus_minus (gimple_stmt_iterator *gsi, tree word_type, tree a, tree b,
	       tree bitpos ATTRIBUTE_UNUSED, tree bitsize ATTRIBUTE_UNUSED,
	       enum tree_code code, tree type ATTRIBUTE_UNUSED)
{
  tree inner_type = TREE_TYPE (TREE_TYPE (a));
  unsigned HOST_WIDE_INT max;
  tree low_bits, high_bits, a_low, b_low, result_low, signs;

  max = GET_MODE_MASK (TYPE_MODE (inner_type));
  low_bits = build_replicated_const (word_type, inner_type, max >> 1);
  high_bits = build_replicated_const (word_type, inner_type, max & ~(max >> 1));

  a = tree_vec_extract (gsi, word_type, a, bitsize, bitpos);
  b = tree_vec_extract (gsi, word_type, b, bitsize, bitpos);

  signs = gimplify_build2 (gsi, BIT_XOR_EXPR, word_type, a, b);
  b_low = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, b, low_bits);
  if (code == PLUS_EXPR)
    a_low = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, a, low_bits);
  else
    {
      a_low = gimplify_build2 (gsi, BIT_IOR_EXPR, word_type, a, high_bits);
      signs = gimplify_build1 (gsi, BIT_NOT_EXPR, word_type, signs);
    }

  signs = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, signs, high_bits);
  result_low = gimplify_build2 (gsi, code, word_type, a_low, b_low);
  return gimplify_build2 (gsi, BIT_XOR_EXPR, word_type, result_low, signs);
}

static tree
do_negate (gimple_stmt_iterator *gsi, tree word_type, tree b,
	   tree unused ATTRIBUTE_UNUSED, tree bitpos ATTRIBUTE_UNUSED,
	   tree bitsize ATTRIBUTE_UNUSED,
	   enum tree_code code ATTRIBUTE_UNUSED,
	   tree type ATTRIBUTE_UNUSED)
{
  tree inner_type = TREE_TYPE (TREE_TYPE (b));
  unsigned HOST_WIDE_INT max;
  tree low_bits, high_bits, b_low, result_low, signs;

  max = GET_MODE_MASK (TYPE_MODE (inner_type));
  low_bits = build_replicated_const (word_type, inner_type, max >> 1);
  high_bits = build_replicated_const (word_type, inner_type, max & ~(max >> 1));

  b = tree_vec_extract (gsi, word_type, b, bitsize, bitpos);

  b_low = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, b, low_bits);
  signs = gimplify_build1 (gsi, BIT_NOT_EXPR, word_type, b);
  signs = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, signs, high_bits);
  result_low = gimplify_build2 (gsi, MINUS_EXPR, word_type, high_bits, b_low);
  return gimplify_build2 (gsi, BIT_XOR_EXPR, word_type, result_low, signs);
}

/* Expand a vector operation to scalars, by using many operations
   whose type is the vector type's inner type.  */

static tree
expand_vector_piecewise (gimple_stmt_iterator *gsi, elem_op_func f,
			 tree type, tree inner_type,
			 tree a, tree b, enum tree_code code)
{
  vec<constructor_elt, va_gc> *v;
  tree part_width = TYPE_SIZE (inner_type);
  tree index = bitsize_int (0);
  int nunits = TYPE_VECTOR_SUBPARTS (type);
  int delta = tree_to_uhwi (part_width)
	      / tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type)));
  int i;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  if (types_compatible_p (gimple_expr_type (gsi_stmt (*gsi)), type))
    warning_at (loc, OPT_Wvector_operation_performance,
		"vector operation will be expanded piecewise");
  else
    warning_at (loc, OPT_Wvector_operation_performance,
		"vector operation will be expanded in parallel");

  vec_alloc (v, (nunits + delta - 1) / delta);
  for (i = 0; i < nunits;
       i += delta, index = int_const_binop (PLUS_EXPR, index, part_width))
    {
      tree result = f (gsi, inner_type, a, b, index, part_width, code, type);
      constructor_elt ce = {NULL_TREE, result};
      v->quick_push (ce);
    }

  return build_constructor (type, v);
}

/* Expand a vector operation to scalars with the freedom to use
   a scalar integer type, or to use a different size for the items
   in the vector type.  */

static tree
expand_vector_parallel (gimple_stmt_iterator *gsi, elem_op_func f, tree type,
			tree a, tree b, enum tree_code code)
{
  tree result, compute_type;
  int n_words = tree_to_uhwi (TYPE_SIZE_UNIT (type)) / UNITS_PER_WORD;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  /* We have three strategies.  If the type is already correct, just do
     the operation an element at a time.  Else, if the vector is wider than
     one word, do it a word at a time; finally, if the vector is smaller
     than one word, do it as a scalar.  */
  if (TYPE_MODE (TREE_TYPE (type)) == word_mode)
    return expand_vector_piecewise (gsi, f,
				    type, TREE_TYPE (type),
				    a, b, code);
  else if (n_words > 1)
    {
      tree word_type = build_word_mode_vector_type (n_words);
      result = expand_vector_piecewise (gsi, f,
					word_type, TREE_TYPE (word_type),
					a, b, code);
      result = force_gimple_operand_gsi (gsi, result, true, NULL, true,
					 GSI_SAME_STMT);
    }
  else
    {
      /* Use a single scalar operation with a mode no wider than word_mode.  */
      scalar_int_mode mode
	= int_mode_for_size (tree_to_uhwi (TYPE_SIZE (type)), 0).require ();
      compute_type = lang_hooks.types.type_for_mode (mode, 1);
      result = f (gsi, compute_type, a, b, NULL_TREE, NULL_TREE, code, type);
      warning_at (loc, OPT_Wvector_operation_performance,
		  "vector operation will be expanded with a "
		  "single scalar operation");
    }

  return result;
}

/* Expand a vector operation to scalars; for integer types we can use
   special bit twiddling tricks to do the sums a word at a time, using
   function F_PARALLEL instead of F.  These tricks are done only if
   they can process at least four items, that is, only if the vector
   holds at least four items and if a word can hold four items.  */
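
/* For instance, with 8-byte words and 16-bit vector elements,
   PARTS_PER_WORD below is 4, so an addition on a vector of eight
   16-bit elements takes the word-parallel F_PARALLEL path, while a
   vector of 64-bit elements falls back to the piecewise expansion.  */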

static tree
expand_vector_addition (gimple_stmt_iterator *gsi,
			elem_op_func f, elem_op_func f_parallel,
			tree type, tree a, tree b, enum tree_code code)
{
  int parts_per_word = UNITS_PER_WORD
		       / tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type)));

  if (INTEGRAL_TYPE_P (TREE_TYPE (type))
      && parts_per_word >= 4
      && TYPE_VECTOR_SUBPARTS (type) >= 4)
    return expand_vector_parallel (gsi, f_parallel,
				   type, a, b, code);
  else
    return expand_vector_piecewise (gsi, f,
				    type, TREE_TYPE (type),
				    a, b, code);
}

/* Try to expand vector comparison expression OP0 CODE OP1 by
   querying optab if the following expression:
	VEC_COND_EXPR< OP0 CODE OP1, {-1,...}, {0,...}>
   can be expanded.  */

static tree
expand_vector_comparison (gimple_stmt_iterator *gsi, tree type, tree op0,
			  tree op1, enum tree_code code)
{
  tree t;
  if (!expand_vec_cmp_expr_p (TREE_TYPE (op0), type, code)
      && !expand_vec_cond_expr_p (type, TREE_TYPE (op0), code))
    t = expand_vector_piecewise (gsi, do_compare, type,
				 TREE_TYPE (TREE_TYPE (op0)), op0, op1, code);
  else
    t = NULL_TREE;

  return t;
}

/* Helper function of expand_vector_divmod.  Gimplify a RSHIFT_EXPR in type
   of OP0 with shift counts in SHIFTCNTS array and return the temporary holding
   the result if successful, otherwise return NULL_TREE.  */

static tree
add_rshift (gimple_stmt_iterator *gsi, tree type, tree op0, int *shiftcnts)
{
  optab op;
  unsigned int i, nunits = TYPE_VECTOR_SUBPARTS (type);
  bool scalar_shift = true;

  for (i = 1; i < nunits; i++)
    {
      if (shiftcnts[i] != shiftcnts[0])
	scalar_shift = false;
    }

  if (scalar_shift && shiftcnts[0] == 0)
    return op0;

  if (scalar_shift)
    {
      op = optab_for_tree_code (RSHIFT_EXPR, type, optab_scalar);
      if (op != unknown_optab
	  && optab_handler (op, TYPE_MODE (type)) != CODE_FOR_nothing)
	return gimplify_build2 (gsi, RSHIFT_EXPR, type, op0,
				build_int_cst (NULL_TREE, shiftcnts[0]));
    }

  op = optab_for_tree_code (RSHIFT_EXPR, type, optab_vector);
  if (op != unknown_optab
      && optab_handler (op, TYPE_MODE (type)) != CODE_FOR_nothing)
    {
      tree *vec = XALLOCAVEC (tree, nunits);
      for (i = 0; i < nunits; i++)
	vec[i] = build_int_cst (TREE_TYPE (type), shiftcnts[i]);
      return gimplify_build2 (gsi, RSHIFT_EXPR, type, op0,
			      build_vector (type, vec));
    }

  return NULL_TREE;
}

/* Try to expand integer vector division by constant using
   widening multiply, shifts and additions.  */
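
/* A classic instance of this transformation: unsigned 32-bit division
   by 3 becomes a high-part multiply by 0xAAAAAAAB (which is
   (2^33 + 1) / 3) followed by a right shift of 1, i.e.
   q = (n h* 0xAAAAAAAB) >> 1; choose_multiplier below computes such
   multiplier/shift pairs.  */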

static tree
expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
		      tree op1, enum tree_code code)
{
  bool use_pow2 = true;
  bool has_vector_shift = true;
  int mode = -1, this_mode;
  int pre_shift = -1, post_shift;
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (type);
  int *shifts = XALLOCAVEC (int, nunits * 4);
  int *pre_shifts = shifts + nunits;
  int *post_shifts = pre_shifts + nunits;
  int *shift_temps = post_shifts + nunits;
  unsigned HOST_WIDE_INT *mulc = XALLOCAVEC (unsigned HOST_WIDE_INT, nunits);
  int prec = TYPE_PRECISION (TREE_TYPE (type));
  int dummy_int;
  unsigned int i;
  signop sign_p = TYPE_SIGN (TREE_TYPE (type));
  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (TYPE_MODE (TREE_TYPE (type)));
  tree *vec;
  tree cur_op, mulcst, tem;
  optab op;

  if (prec > HOST_BITS_PER_WIDE_INT)
    return NULL_TREE;

  op = optab_for_tree_code (RSHIFT_EXPR, type, optab_vector);
  if (op == unknown_optab
      || optab_handler (op, TYPE_MODE (type)) == CODE_FOR_nothing)
    has_vector_shift = false;

  /* Analysis phase.  Determine if all op1 elements are either power
     of two and it is possible to expand it using shifts (or for remainder
     using masking).  Additionally compute the multiplicative constants
     and pre and post shifts if the division is to be expanded using
     widening or high part multiplication plus shifts.  */
  for (i = 0; i < nunits; i++)
    {
      tree cst = VECTOR_CST_ELT (op1, i);
      unsigned HOST_WIDE_INT ml;

      if (TREE_CODE (cst) != INTEGER_CST || integer_zerop (cst))
	return NULL_TREE;
      pre_shifts[i] = 0;
      post_shifts[i] = 0;
      mulc[i] = 0;
      if (use_pow2
	  && (!integer_pow2p (cst) || tree_int_cst_sgn (cst) != 1))
	use_pow2 = false;
      if (use_pow2)
	{
	  shifts[i] = tree_log2 (cst);
	  if (shifts[i] != shifts[0]
	      && code == TRUNC_DIV_EXPR
	      && !has_vector_shift)
	    use_pow2 = false;
	}
      if (mode == -2)
	continue;
      if (sign_p == UNSIGNED)
	{
	  unsigned HOST_WIDE_INT mh;
	  unsigned HOST_WIDE_INT d = TREE_INT_CST_LOW (cst) & mask;

	  if (d >= (HOST_WIDE_INT_1U << (prec - 1)))
	    /* FIXME: Can transform this into op0 >= op1 ? 1 : 0.  */
	    return NULL_TREE;

	  if (d <= 1)
	    {
	      mode = -2;
	      continue;
	    }

	  /* Find a suitable multiplier and right shift count
	     instead of multiplying with D.  */
	  mh = choose_multiplier (d, prec, prec, &ml, &post_shift, &dummy_int);

	  /* If the suggested multiplier is more than SIZE bits, we can
	     do better for even divisors, using an initial right shift.  */
	  if ((mh != 0 && (d & 1) == 0)
	      || (!has_vector_shift && pre_shift != -1))
	    {
	      if (has_vector_shift)
		pre_shift = ctz_or_zero (d);
	      else if (pre_shift == -1)
		{
		  unsigned int j;
		  for (j = 0; j < nunits; j++)
		    {
		      tree cst2 = VECTOR_CST_ELT (op1, j);
		      unsigned HOST_WIDE_INT d2;
		      int this_pre_shift;

		      if (!tree_fits_uhwi_p (cst2))
			return NULL_TREE;
		      d2 = tree_to_uhwi (cst2) & mask;
		      if (d2 == 0)
			return NULL_TREE;
		      this_pre_shift = floor_log2 (d2 & -d2);
		      if (pre_shift == -1 || this_pre_shift < pre_shift)
			pre_shift = this_pre_shift;
		    }
		  if (i != 0 && pre_shift != 0)
		    {
		      /* Restart.  */
		      i = -1U;
		      mode = -1;
		      continue;
		    }
		}
	      if (pre_shift != 0)
		{
		  if ((d >> pre_shift) <= 1)
		    {
		      mode = -2;
		      continue;
		    }
		  mh = choose_multiplier (d >> pre_shift, prec,
					  prec - pre_shift,
					  &ml, &post_shift, &dummy_int);
		  gcc_assert (!mh);
		  pre_shifts[i] = pre_shift;
		}
	    }
	  if (!mh)
	    this_mode = 0;
	  else
	    this_mode = 1;
	}
      else
	{
	  HOST_WIDE_INT d = TREE_INT_CST_LOW (cst);
	  unsigned HOST_WIDE_INT abs_d;

	  if (d == -1)
	    return NULL_TREE;

	  /* Since d might be INT_MIN, we have to cast to
	     unsigned HOST_WIDE_INT before negating to avoid
	     undefined signed overflow.  */
	  abs_d = (d >= 0
		   ? (unsigned HOST_WIDE_INT) d
		   : - (unsigned HOST_WIDE_INT) d);

	  /* n rem d = n rem -d */
	  if (code == TRUNC_MOD_EXPR && d < 0)
	    d = abs_d;
	  else if (abs_d == HOST_WIDE_INT_1U << (prec - 1))
	    {
	      /* This case is not handled correctly below.  */
	      mode = -2;
	      continue;
	    }
	  if (abs_d <= 1)
	    {
	      mode = -2;
	      continue;
	    }

	  choose_multiplier (abs_d, prec, prec - 1, &ml,
			     &post_shift, &dummy_int);
	  if (ml >= HOST_WIDE_INT_1U << (prec - 1))
	    {
	      this_mode = 4 + (d < 0);
	      ml |= HOST_WIDE_INT_M1U << (prec - 1);
	    }
	  else
	    this_mode = 2 + (d < 0);
	}
      mulc[i] = ml;
      post_shifts[i] = post_shift;
      if ((i && !has_vector_shift && post_shifts[0] != post_shift)
	  || post_shift >= prec
	  || pre_shifts[i] >= prec)
	this_mode = -2;

      if (i == 0)
	mode = this_mode;
      else if (mode != this_mode)
	mode = -2;
    }

  vec = XALLOCAVEC (tree, nunits);

  if (use_pow2)
    {
      tree addend = NULL_TREE;
      if (sign_p == SIGNED)
	{
	  tree uns_type;

	  /* Both division and remainder sequences need
	     op0 < 0 ? mask : 0 computed.  It can be either computed as
	     (type) (((uns_type) (op0 >> (prec - 1))) >> (prec - shifts[i]))
	     if none of the shifts is 0, or as the conditional.  */
	  for (i = 0; i < nunits; i++)
	    if (shifts[i] == 0)
	      break;
	  uns_type
	    = build_vector_type (build_nonstandard_integer_type (prec, 1),
				 nunits);
	  if (i == nunits && TYPE_MODE (uns_type) == TYPE_MODE (type))
	    {
	      for (i = 0; i < nunits; i++)
		shift_temps[i] = prec - 1;
	      cur_op = add_rshift (gsi, type, op0, shift_temps);
	      if (cur_op != NULL_TREE)
		{
		  cur_op = gimplify_build1 (gsi, VIEW_CONVERT_EXPR,
					    uns_type, cur_op);
		  for (i = 0; i < nunits; i++)
		    shift_temps[i] = prec - shifts[i];
		  cur_op = add_rshift (gsi, uns_type, cur_op, shift_temps);
		  if (cur_op != NULL_TREE)
		    addend = gimplify_build1 (gsi, VIEW_CONVERT_EXPR,
					      type, cur_op);
		}
	    }
	  if (addend == NULL_TREE
	      && expand_vec_cond_expr_p (type, type, LT_EXPR))
	    {
	      tree zero, cst, cond, mask_type;
	      gimple *stmt;

	      mask_type = build_same_sized_truth_vector_type (type);
	      zero = build_zero_cst (type);
	      cond = build2 (LT_EXPR, mask_type, op0, zero);
	      for (i = 0; i < nunits; i++)
		vec[i] = build_int_cst (TREE_TYPE (type),
					(HOST_WIDE_INT_1U
					 << shifts[i]) - 1);
	      cst = build_vector (type, vec);
	      addend = make_ssa_name (type);
	      stmt = gimple_build_assign (addend, VEC_COND_EXPR, cond,
					  cst, zero);
	      gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
	    }
	}
      if (code == TRUNC_DIV_EXPR)
	{
	  if (sign_p == UNSIGNED)
	    {
	      /* q = op0 >> shift;  */
	      cur_op = add_rshift (gsi, type, op0, shifts);
	      if (cur_op != NULL_TREE)
		return cur_op;
	    }
	  else if (addend != NULL_TREE)
	    {
	      /* t1 = op0 + addend;
		 q = t1 >> shift;  */
	      op = optab_for_tree_code (PLUS_EXPR, type, optab_default);
	      if (op != unknown_optab
		  && optab_handler (op, TYPE_MODE (type)) != CODE_FOR_nothing)
		{
		  cur_op = gimplify_build2 (gsi, PLUS_EXPR, type, op0, addend);
		  cur_op = add_rshift (gsi, type, cur_op, shifts);
		  if (cur_op != NULL_TREE)
		    return cur_op;
		}
	    }
	}
      else
	{
	  tree mask;
	  for (i = 0; i < nunits; i++)
	    vec[i] = build_int_cst (TREE_TYPE (type),
				    (HOST_WIDE_INT_1U
				     << shifts[i]) - 1);
	  mask = build_vector (type, vec);
	  op = optab_for_tree_code (BIT_AND_EXPR, type, optab_default);
	  if (op != unknown_optab
	      && optab_handler (op, TYPE_MODE (type)) != CODE_FOR_nothing)
	    {
	      if (sign_p == UNSIGNED)
		/* r = op0 & mask;  */
		return gimplify_build2 (gsi, BIT_AND_EXPR, type, op0, mask);
	      else if (addend != NULL_TREE)
		{
		  /* t1 = op0 + addend;
		     t2 = t1 & mask;
		     r = t2 - addend;  */
		  op = optab_for_tree_code (PLUS_EXPR, type, optab_default);
		  if (op != unknown_optab
		      && optab_handler (op, TYPE_MODE (type))
			 != CODE_FOR_nothing)
		    {
		      cur_op = gimplify_build2 (gsi, PLUS_EXPR, type, op0,
						addend);
		      cur_op = gimplify_build2 (gsi, BIT_AND_EXPR, type,
						cur_op, mask);
		      op = optab_for_tree_code (MINUS_EXPR, type,
						optab_default);
		      if (op != unknown_optab
			  && optab_handler (op, TYPE_MODE (type))
			     != CODE_FOR_nothing)
			return gimplify_build2 (gsi, MINUS_EXPR, type,
						cur_op, addend);
		    }
		}
	    }
	}
      return NULL_TREE;
    }

  if (mode == -2 || BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return NULL_TREE;

  if (!can_mult_highpart_p (TYPE_MODE (type), TYPE_UNSIGNED (type)))
    return NULL_TREE;

  cur_op = op0;

  switch (mode)
    {
    case 0:
      gcc_assert (sign_p == UNSIGNED);
      /* t1 = oprnd0 >> pre_shift;
	 t2 = t1 h* ml;
	 q = t2 >> post_shift;  */
      cur_op = add_rshift (gsi, type, cur_op, pre_shifts);
      if (cur_op == NULL_TREE)
	return NULL_TREE;
      break;
    case 1:
      gcc_assert (sign_p == UNSIGNED);
      for (i = 0; i < nunits; i++)
	{
	  shift_temps[i] = 1;
	  post_shifts[i]--;
	}
      break;
    case 2:
    case 3:
    case 4:
    case 5:
      gcc_assert (sign_p == SIGNED);
      for (i = 0; i < nunits; i++)
	shift_temps[i] = prec - 1;
      break;
    default:
      return NULL_TREE;
    }

  for (i = 0; i < nunits; i++)
    vec[i] = build_int_cst (TREE_TYPE (type), mulc[i]);
  mulcst = build_vector (type, vec);

  cur_op = gimplify_build2 (gsi, MULT_HIGHPART_EXPR, type, cur_op, mulcst);

  switch (mode)
    {
    case 0:
      /* t1 = oprnd0 >> pre_shift;
	 t2 = t1 h* ml;
	 q = t2 >> post_shift;  */
      cur_op = add_rshift (gsi, type, cur_op, post_shifts);
      break;
    case 1:
      /* t1 = oprnd0 h* ml;
	 t2 = oprnd0 - t1;
	 t3 = t2 >> 1;
	 t4 = t1 + t3;
	 q = t4 >> (post_shift - 1);  */
      op = optab_for_tree_code (MINUS_EXPR, type, optab_default);
      if (op == unknown_optab
	  || optab_handler (op, TYPE_MODE (type)) == CODE_FOR_nothing)
	return NULL_TREE;
      tem = gimplify_build2 (gsi, MINUS_EXPR, type, op0, cur_op);
      tem = add_rshift (gsi, type, tem, shift_temps);
      op = optab_for_tree_code (PLUS_EXPR, type, optab_default);
      if (op == unknown_optab
	  || optab_handler (op, TYPE_MODE (type)) == CODE_FOR_nothing)
	return NULL_TREE;
      tem = gimplify_build2 (gsi, PLUS_EXPR, type, cur_op, tem);
      cur_op = add_rshift (gsi, type, tem, post_shifts);
      if (cur_op == NULL_TREE)
	return NULL_TREE;
      break;
    case 2:
    case 3:
    case 4:
    case 5:
      /* t1 = oprnd0 h* ml;
	 t2 = t1; [ iff (mode & 2) != 0 ]
	 t2 = t1 + oprnd0; [ iff (mode & 2) == 0 ]
	 t3 = t2 >> post_shift;
	 t4 = oprnd0 >> (prec - 1);
	 q = t3 - t4; [ iff (mode & 1) == 0 ]
	 q = t4 - t3; [ iff (mode & 1) != 0 ]  */
      if ((mode & 2) == 0)
	{
	  op = optab_for_tree_code (PLUS_EXPR, type, optab_default);
	  if (op == unknown_optab
	      || optab_handler (op, TYPE_MODE (type)) == CODE_FOR_nothing)
	    return NULL_TREE;
	  cur_op = gimplify_build2 (gsi, PLUS_EXPR, type, cur_op, op0);
	}
      cur_op = add_rshift (gsi, type, cur_op, post_shifts);
      if (cur_op == NULL_TREE)
	return NULL_TREE;
      tem = add_rshift (gsi, type, op0, shift_temps);
      if (tem == NULL_TREE)
	return NULL_TREE;
      op = optab_for_tree_code (MINUS_EXPR, type, optab_default);
      if (op == unknown_optab
	  || optab_handler (op, TYPE_MODE (type)) == CODE_FOR_nothing)
	return NULL_TREE;
      if ((mode & 1) == 0)
	cur_op = gimplify_build2 (gsi, MINUS_EXPR, type, cur_op, tem);
      else
	cur_op = gimplify_build2 (gsi, MINUS_EXPR, type, tem, cur_op);
      break;
    default:
      gcc_unreachable ();
    }

  if (code == TRUNC_DIV_EXPR)
    return cur_op;

  /* We divided.  Now finish by:
     t1 = q * oprnd1;
     r = oprnd0 - t1;  */
  op = optab_for_tree_code (MULT_EXPR, type, optab_default);
  if (op == unknown_optab
      || optab_handler (op, TYPE_MODE (type)) == CODE_FOR_nothing)
    return NULL_TREE;
  tem = gimplify_build2 (gsi, MULT_EXPR, type, cur_op, op1);
  op = optab_for_tree_code (MINUS_EXPR, type, optab_default);
  if (op == unknown_optab
      || optab_handler (op, TYPE_MODE (type)) == CODE_FOR_nothing)
    return NULL_TREE;
  return gimplify_build2 (gsi, MINUS_EXPR, type, op0, tem);
}

/* Expand a vector condition to scalars, by using many conditions
   on the vector's elements.  */

static void
expand_vector_condition (gimple_stmt_iterator *gsi)
{
  gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
  tree type = gimple_expr_type (stmt);
  tree a = gimple_assign_rhs1 (stmt);
  tree a1 = a;
  tree a2 = NULL_TREE;
  bool a_is_comparison = false;
  tree b = gimple_assign_rhs2 (stmt);
  tree c = gimple_assign_rhs3 (stmt);
  vec<constructor_elt, va_gc> *v;
  tree constr;
  tree inner_type = TREE_TYPE (type);
  tree cond_type = TREE_TYPE (TREE_TYPE (a));
  tree comp_inner_type = cond_type;
  tree width = TYPE_SIZE (inner_type);
  tree index = bitsize_int (0);
  tree comp_width = width;
  tree comp_index = index;
  int nunits = TYPE_VECTOR_SUBPARTS (type);
  int i;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  if (!is_gimple_val (a))
    {
      gcc_assert (COMPARISON_CLASS_P (a));
      a_is_comparison = true;
      a1 = TREE_OPERAND (a, 0);
      a2 = TREE_OPERAND (a, 1);
      comp_inner_type = TREE_TYPE (TREE_TYPE (a1));
      comp_width = TYPE_SIZE (comp_inner_type);
    }

  if (expand_vec_cond_expr_p (type, TREE_TYPE (a1), TREE_CODE (a)))
    return;

  /* Handle vector boolean types with bitmasks.  If there is a comparison
     and we can expand the comparison into the vector boolean bitmask,
     or otherwise if it is compatible with type, we can transform
      vbfld_1 = x_2 < y_3 ? vbfld_4 : vbfld_5;
     into
      tmp_6 = x_2 < y_3;
      tmp_7 = tmp_6 & vbfld_4;
      tmp_8 = ~tmp_6;
      tmp_9 = tmp_8 & vbfld_5;
      vbfld_1 = tmp_7 | tmp_9;
     Similarly for vbfld_10 instead of x_2 < y_3.  */
  if (VECTOR_BOOLEAN_TYPE_P (type)
      && SCALAR_INT_MODE_P (TYPE_MODE (type))
      && (GET_MODE_BITSIZE (TYPE_MODE (type))
	  < (TYPE_VECTOR_SUBPARTS (type)
	     * GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (type)))))
      && (a_is_comparison
	  ? useless_type_conversion_p (type, TREE_TYPE (a))
	  : expand_vec_cmp_expr_p (TREE_TYPE (a1), type, TREE_CODE (a))))
    {
      if (a_is_comparison)
	a = gimplify_build2 (gsi, TREE_CODE (a), type, a1, a2);
      a1 = gimplify_build2 (gsi, BIT_AND_EXPR, type, a, b);
      a2 = gimplify_build1 (gsi, BIT_NOT_EXPR, type, a);
      a2 = gimplify_build2 (gsi, BIT_AND_EXPR, type, a2, c);
      a = gimplify_build2 (gsi, BIT_IOR_EXPR, type, a1, a2);
      gimple_assign_set_rhs_from_tree (gsi, a);
      update_stmt (gsi_stmt (*gsi));
      return;
    }

  /* TODO: try and find a smaller vector type.  */

  warning_at (loc, OPT_Wvector_operation_performance,
	      "vector condition will be expanded piecewise");

  vec_alloc (v, nunits);
  for (i = 0; i < nunits; i++)
    {
      tree aa, result;
      tree bb = tree_vec_extract (gsi, inner_type, b, width, index);
      tree cc = tree_vec_extract (gsi, inner_type, c, width, index);
      if (a_is_comparison)
	{
	  tree aa1 = tree_vec_extract (gsi, comp_inner_type, a1,
				       comp_width, comp_index);
	  tree aa2 = tree_vec_extract (gsi, comp_inner_type, a2,
				       comp_width, comp_index);
	  aa = fold_build2 (TREE_CODE (a), cond_type, aa1, aa2);
	}
      else
	aa = tree_vec_extract (gsi, cond_type, a, width, index);
      result = gimplify_build3 (gsi, COND_EXPR, inner_type, aa, bb, cc);
      constructor_elt ce = {NULL_TREE, result};
      v->quick_push (ce);
      index = int_const_binop (PLUS_EXPR, index, width);
      if (width == comp_width)
	comp_index = index;
      else
	comp_index = int_const_binop (PLUS_EXPR, comp_index, comp_width);
    }

  constr = build_constructor (type, v);
  gimple_assign_set_rhs_from_tree (gsi, constr);
  update_stmt (gsi_stmt (*gsi));
}

static tree
expand_vector_operation (gimple_stmt_iterator *gsi, tree type, tree compute_type,
			 gassign *assign, enum tree_code code)
{
  machine_mode compute_mode = TYPE_MODE (compute_type);

  /* If the compute mode is not a vector mode (hence we are not decomposing
     a BLKmode vector to smaller, hardware-supported vectors), we may want
     to expand the operations in parallel.  */
  if (GET_MODE_CLASS (compute_mode) != MODE_VECTOR_INT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_FLOAT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_FRACT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_UFRACT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_ACCUM
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_UACCUM)
    switch (code)
      {
      case PLUS_EXPR:
      case MINUS_EXPR:
	if (ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_TRAPS (type))
	  return expand_vector_addition (gsi, do_binop, do_plus_minus, type,
					 gimple_assign_rhs1 (assign),
					 gimple_assign_rhs2 (assign), code);
	break;

      case NEGATE_EXPR:
	if (ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_TRAPS (type))
	  return expand_vector_addition (gsi, do_unop, do_negate, type,
					 gimple_assign_rhs1 (assign),
					 NULL_TREE, code);
	break;

      case BIT_AND_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
	return expand_vector_parallel (gsi, do_binop, type,
				       gimple_assign_rhs1 (assign),
				       gimple_assign_rhs2 (assign), code);

      case BIT_NOT_EXPR:
	return expand_vector_parallel (gsi, do_unop, type,
				       gimple_assign_rhs1 (assign),
				       NULL_TREE, code);

      case EQ_EXPR:
      case NE_EXPR:
      case GT_EXPR:
      case LT_EXPR:
      case GE_EXPR:
      case LE_EXPR:
      case UNEQ_EXPR:
      case UNGT_EXPR:
      case UNLT_EXPR:
      case UNGE_EXPR:
      case UNLE_EXPR:
      case LTGT_EXPR:
      case ORDERED_EXPR:
      case UNORDERED_EXPR:
	{
	  tree rhs1 = gimple_assign_rhs1 (assign);
	  tree rhs2 = gimple_assign_rhs2 (assign);

	  return expand_vector_comparison (gsi, type, rhs1, rhs2, code);
	}

      case TRUNC_DIV_EXPR:
      case TRUNC_MOD_EXPR:
	{
	  tree rhs1 = gimple_assign_rhs1 (assign);
	  tree rhs2 = gimple_assign_rhs2 (assign);
	  tree ret;

	  if (!optimize
	      || !VECTOR_INTEGER_TYPE_P (type)
	      || TREE_CODE (rhs2) != VECTOR_CST
	      || !VECTOR_MODE_P (TYPE_MODE (type)))
	    break;

	  ret = expand_vector_divmod (gsi, type, rhs1, rhs2, code);
	  if (ret != NULL_TREE)
	    return ret;
	  break;
	}

      default:
	break;
      }

  if (TREE_CODE_CLASS (code) == tcc_unary)
    return expand_vector_piecewise (gsi, do_unop, type, compute_type,
				    gimple_assign_rhs1 (assign),
				    NULL_TREE, code);
  else
    return expand_vector_piecewise (gsi, do_binop, type, compute_type,
				    gimple_assign_rhs1 (assign),
				    gimple_assign_rhs2 (assign), code);
}

/* Try to optimize
   a_5 = { b_7, b_7 + 3, b_7 + 6, b_7 + 9 };
   style stmts into:
   _9 = { b_7, b_7, b_7, b_7 };
   a_5 = _9 + { 0, 3, 6, 9 };
   because vector splat operation is usually more efficient
   than piecewise initialization of the vector.  */

static void
optimize_vector_constructor (gimple_stmt_iterator *gsi)
{
  gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (rhs);
  unsigned int i, j, nelts = TYPE_VECTOR_SUBPARTS (type);
  bool all_same = true;
  constructor_elt *elt;
  tree *cst;
  gimple *g;
  tree base = NULL_TREE;
  optab op;

  if (nelts <= 2 || CONSTRUCTOR_NELTS (rhs) != nelts)
    return;
  op = optab_for_tree_code (PLUS_EXPR, type, optab_default);
  if (op == unknown_optab
      || optab_handler (op, TYPE_MODE (type)) == CODE_FOR_nothing)
    return;
  FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (rhs), i, elt)
    if (TREE_CODE (elt->value) != SSA_NAME
	|| TREE_CODE (TREE_TYPE (elt->value)) == VECTOR_TYPE)
      return;
    else
      {
	tree this_base = elt->value;
	if (this_base != CONSTRUCTOR_ELT (rhs, 0)->value)
	  all_same = false;
	for (j = 0; j < nelts + 1; j++)
	  {
	    g = SSA_NAME_DEF_STMT (this_base);
	    if (is_gimple_assign (g)
		&& gimple_assign_rhs_code (g) == PLUS_EXPR
		&& TREE_CODE (gimple_assign_rhs2 (g)) == INTEGER_CST
		&& TREE_CODE (gimple_assign_rhs1 (g)) == SSA_NAME
		&& !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_assign_rhs1 (g)))
	      this_base = gimple_assign_rhs1 (g);
	    else
	      break;
	  }
	if (i == 0)
	  base = this_base;
	else if (this_base != base)
	  return;
      }
  if (all_same)
    return;
  cst = XALLOCAVEC (tree, nelts);
  for (i = 0; i < nelts; i++)
    {
      tree this_base = CONSTRUCTOR_ELT (rhs, i)->value;
      cst[i] = build_zero_cst (TREE_TYPE (base));
      while (this_base != base)
	{
	  g = SSA_NAME_DEF_STMT (this_base);
	  cst[i] = fold_binary (PLUS_EXPR, TREE_TYPE (base),
				cst[i], gimple_assign_rhs2 (g));
	  if (cst[i] == NULL_TREE
	      || TREE_CODE (cst[i]) != INTEGER_CST
	      || TREE_OVERFLOW (cst[i]))
	    return;
	  this_base = gimple_assign_rhs1 (g);
	}
    }
  for (i = 0; i < nelts; i++)
    CONSTRUCTOR_ELT (rhs, i)->value = base;
  g = gimple_build_assign (make_ssa_name (type), rhs);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  g = gimple_build_assign (lhs, PLUS_EXPR, gimple_assign_lhs (g),
			   build_vector (type, cst));
  gsi_replace (gsi, g, false);
}

/* Return a type for the widest vector mode whose components are of type
   TYPE, or NULL_TREE if none is found.  */
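
/* For instance, if TYPE is the 32-bit float type and the target
   implements OP for both the four- and eight-element vector modes,
   the eight-element vector type is returned: the loop below keeps the
   candidate mode with the largest number of units.  */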

static tree
type_for_widest_vector_mode (tree type, optab op)
{
  machine_mode inner_mode = TYPE_MODE (type);
  machine_mode best_mode = VOIDmode, mode;
  int best_nunits = 0;

  if (SCALAR_FLOAT_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  FOR_EACH_MODE_FROM (mode, mode)
    if (GET_MODE_INNER (mode) == inner_mode
	&& GET_MODE_NUNITS (mode) > best_nunits
	&& optab_handler (op, mode) != CODE_FOR_nothing)
      best_mode = mode, best_nunits = GET_MODE_NUNITS (mode);

  if (best_mode == VOIDmode)
    return NULL_TREE;
  else
    return build_vector_type_for_mode (type, best_mode);
}

/* Build a reference to the element of the vector VECT.  Function
   returns either the element itself, or a BIT_FIELD_REF, or an
   ARRAY_REF expression.

   GSI is required to insert temporary variables while building a
   reference to the element of the vector VECT.

   PTMPVEC is a pointer to the temporary variable for caching
   purposes.  In case when PTMPVEC is NULL new temporary variable
   will be created.  */
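
/* For example, a constant index into the constant vector
   { 1, 2, 3, 4 } folds directly to the selected element (index 2
   yields 3; out-of-range indices are reduced modulo the vector
   length), whereas a variable index forces VECT into an addressable
   temporary that is then indexed as an array.  */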

static tree
vector_element (gimple_stmt_iterator *gsi, tree vect, tree idx, tree *ptmpvec)
{
  tree vect_type, vect_elt_type;
  gimple *asgn;
  tree tmpvec;
  tree arraytype;
  bool need_asgn = true;
  unsigned int elements;

  vect_type = TREE_TYPE (vect);
  vect_elt_type = TREE_TYPE (vect_type);
  elements = TYPE_VECTOR_SUBPARTS (vect_type);

  if (TREE_CODE (idx) == INTEGER_CST)
    {
      unsigned HOST_WIDE_INT index;

      /* Given that we're about to compute a binary modulus,
	 we don't care about the high bits of the value.  */
      index = TREE_INT_CST_LOW (idx);
      if (!tree_fits_uhwi_p (idx) || index >= elements)
	{
	  index &= elements - 1;
	  idx = build_int_cst (TREE_TYPE (idx), index);
	}

      /* When lowering a vector statement sequence do some easy
         simplification by looking through intermediate vector results.  */
      if (TREE_CODE (vect) == SSA_NAME)
	{
	  gimple *def_stmt = SSA_NAME_DEF_STMT (vect);
	  if (is_gimple_assign (def_stmt)
	      && (gimple_assign_rhs_code (def_stmt) == VECTOR_CST
		  || gimple_assign_rhs_code (def_stmt) == CONSTRUCTOR))
	    vect = gimple_assign_rhs1 (def_stmt);
	}

      if (TREE_CODE (vect) == VECTOR_CST)
	return VECTOR_CST_ELT (vect, index);
      else if (TREE_CODE (vect) == CONSTRUCTOR
	       && (CONSTRUCTOR_NELTS (vect) == 0
		   || TREE_CODE (TREE_TYPE (CONSTRUCTOR_ELT (vect, 0)->value))
		      != VECTOR_TYPE))
	{
	  if (index < CONSTRUCTOR_NELTS (vect))
	    return CONSTRUCTOR_ELT (vect, index)->value;
	  return build_zero_cst (vect_elt_type);
	}
      else
	{
	  tree size = TYPE_SIZE (vect_elt_type);
	  tree pos = fold_build2 (MULT_EXPR, bitsizetype, bitsize_int (index),
				  size);
	  return fold_build3 (BIT_FIELD_REF, vect_elt_type, vect, size, pos);
	}
    }

  if (!ptmpvec)
    tmpvec = create_tmp_var (vect_type, "vectmp");
  else if (!*ptmpvec)
    tmpvec = *ptmpvec = create_tmp_var (vect_type, "vectmp");
  else
    {
      tmpvec = *ptmpvec;
      need_asgn = false;
    }

  if (need_asgn)
    {
      TREE_ADDRESSABLE (tmpvec) = 1;
      asgn = gimple_build_assign (tmpvec, vect);
      gsi_insert_before (gsi, asgn, GSI_SAME_STMT);
    }

  arraytype = build_array_type_nelts (vect_elt_type, elements);
  return build4 (ARRAY_REF, vect_elt_type,
		 build1 (VIEW_CONVERT_EXPR, arraytype, tmpvec),
		 idx, NULL_TREE, NULL_TREE);
}

/* Check if VEC_PERM_EXPR within the given setting is supported
   by hardware, or lower it piecewise.

   When VEC_PERM_EXPR has the same first and second operands:
   VEC_PERM_EXPR <v0, v0, mask> the lowered version would be
   {v0[mask[0]], v0[mask[1]], ...}
   MASK and V0 must have the same number of elements.

   Otherwise VEC_PERM_EXPR <v0, v1, mask> is lowered to
   {mask[0] < len(v0) ? v0[mask[0]] : v1[mask[0]], ...}
   V0 and V1 must have the same type.  MASK, V0, V1 must have the
   same number of arguments.  */
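
/* Concretely, VEC_PERM_EXPR <{a,b,c,d}, {e,f,g,h}, {0,5,2,7}> selects
   {a, f, c, h}: mask values below four index the first vector and the
   remaining ones index the second vector modulo four.  */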

static void
lower_vec_perm (gimple_stmt_iterator *gsi)
{
  gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
  tree mask = gimple_assign_rhs3 (stmt);
  tree vec0 = gimple_assign_rhs1 (stmt);
  tree vec1 = gimple_assign_rhs2 (stmt);
  tree vect_type = TREE_TYPE (vec0);
  tree mask_type = TREE_TYPE (mask);
  tree vect_elt_type = TREE_TYPE (vect_type);
  tree mask_elt_type = TREE_TYPE (mask_type);
  unsigned int elements = TYPE_VECTOR_SUBPARTS (vect_type);
  vec<constructor_elt, va_gc> *v;
  tree constr, t, si, i_val;
  tree vec0tmp = NULL_TREE, vec1tmp = NULL_TREE, masktmp = NULL_TREE;
  bool two_operand_p = !operand_equal_p (vec0, vec1, 0);
  location_t loc = gimple_location (gsi_stmt (*gsi));
  unsigned i;

  if (TREE_CODE (mask) == SSA_NAME)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (mask);
      if (is_gimple_assign (def_stmt)
	  && gimple_assign_rhs_code (def_stmt) == VECTOR_CST)
	mask = gimple_assign_rhs1 (def_stmt);
    }

  if (TREE_CODE (mask) == VECTOR_CST)
    {
      unsigned char *sel_int = XALLOCAVEC (unsigned char, elements);

      for (i = 0; i < elements; ++i)
	sel_int[i] = (TREE_INT_CST_LOW (VECTOR_CST_ELT (mask, i))
		      & (2 * elements - 1));

      if (can_vec_perm_p (TYPE_MODE (vect_type), false, sel_int))
	{
	  gimple_assign_set_rhs3 (stmt, mask);
	  update_stmt (stmt);
	  return;
	}
      /* Also detect vec_shr pattern - VEC_PERM_EXPR with zero
	 vector as VEC1 and a right element shift MASK.  */
      if (optab_handler (vec_shr_optab, TYPE_MODE (vect_type))
	  != CODE_FOR_nothing
	  && TREE_CODE (vec1) == VECTOR_CST
	  && initializer_zerop (vec1)
	  && sel_int[0]
	  && sel_int[0] < elements)
	{
	  for (i = 1; i < elements; ++i)
	    {
	      unsigned int expected = i + sel_int[0];
	      /* Indices into the second vector are all equivalent.  */
	      if (MIN (elements, (unsigned) sel_int[i])
		  != MIN (elements, expected))
		break;
	    }
	  if (i == elements)
	    {
	      gimple_assign_set_rhs3 (stmt, mask);
	      update_stmt (stmt);
	      return;
	    }
	}
    }
  else if (can_vec_perm_p (TYPE_MODE (vect_type), true, NULL))
    return;

  warning_at (loc, OPT_Wvector_operation_performance,
	      "vector shuffling operation will be expanded piecewise");

  vec_alloc (v, elements);
  for (i = 0; i < elements; i++)
    {
      si = size_int (i);
      i_val = vector_element (gsi, mask, si, &masktmp);

      if (TREE_CODE (i_val) == INTEGER_CST)
	{
	  unsigned HOST_WIDE_INT index;

	  index = TREE_INT_CST_LOW (i_val);
	  if (!tree_fits_uhwi_p (i_val) || index >= elements)
	    i_val = build_int_cst (mask_elt_type, index & (elements - 1));

	  if (two_operand_p && (index & elements) != 0)
	    t = vector_element (gsi, vec1, i_val, &vec1tmp);
	  else
	    t = vector_element (gsi, vec0, i_val, &vec0tmp);

	  t = force_gimple_operand_gsi (gsi, t, true, NULL_TREE,
					true, GSI_SAME_STMT);
	}
      else
	{
	  tree cond = NULL_TREE, v0_val;

	  if (two_operand_p)
	    {
	      cond = fold_build2 (BIT_AND_EXPR, mask_elt_type, i_val,
				  build_int_cst (mask_elt_type, elements));
	      cond = force_gimple_operand_gsi (gsi, cond, true, NULL_TREE,
					       true, GSI_SAME_STMT);
	    }

	  i_val = fold_build2 (BIT_AND_EXPR, mask_elt_type, i_val,
			       build_int_cst (mask_elt_type, elements - 1));
	  i_val = force_gimple_operand_gsi (gsi, i_val, true, NULL_TREE,
					    true, GSI_SAME_STMT);

	  v0_val = vector_element (gsi, vec0, i_val, &vec0tmp);
	  v0_val = force_gimple_operand_gsi (gsi, v0_val, true, NULL_TREE,
					     true, GSI_SAME_STMT);

	  if (two_operand_p)
	    {
	      tree v1_val;

	      v1_val = vector_element (gsi, vec1, i_val, &vec1tmp);
	      v1_val = force_gimple_operand_gsi (gsi, v1_val, true, NULL_TREE,
						 true, GSI_SAME_STMT);

	      cond = fold_build2 (EQ_EXPR, boolean_type_node,
				  cond, build_zero_cst (mask_elt_type));
	      cond = fold_build3 (COND_EXPR, vect_elt_type,
				  cond, v0_val, v1_val);
	      t = force_gimple_operand_gsi (gsi, cond, true, NULL_TREE,
					    true, GSI_SAME_STMT);
	    }
	  else
	    t = v0_val;
	}

      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, t);
    }

  constr = build_constructor (vect_type, v);
  gimple_assign_set_rhs_from_tree (gsi, constr);
  update_stmt (gsi_stmt (*gsi));
}

/* If OP is a uniform vector return the element it is a splat from.  */

static tree
ssa_uniform_vector_p (tree op)
{
  if (TREE_CODE (op) == VECTOR_CST
      || TREE_CODE (op) == CONSTRUCTOR)
    return uniform_vector_p (op);
  if (TREE_CODE (op) == SSA_NAME)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (gimple_assign_single_p (def_stmt))
	return uniform_vector_p (gimple_assign_rhs1 (def_stmt));
    }
  return NULL_TREE;
}

/* Return type in which CODE operation with optab OP can be
   computed.  */

static tree
get_compute_type (enum tree_code code, optab op, tree type)
{
  /* For very wide vectors, try using a smaller vector mode.  */
  tree compute_type = type;
  if (op
      && (!VECTOR_MODE_P (TYPE_MODE (type))
	  || optab_handler (op, TYPE_MODE (type)) == CODE_FOR_nothing))
    {
      tree vector_compute_type
	= type_for_widest_vector_mode (TREE_TYPE (type), op);
      if (vector_compute_type != NULL_TREE
	  && (TYPE_VECTOR_SUBPARTS (vector_compute_type)
	      < TYPE_VECTOR_SUBPARTS (compute_type))
	  && TYPE_VECTOR_SUBPARTS (vector_compute_type) > 1
	  && (optab_handler (op, TYPE_MODE (vector_compute_type))
	      != CODE_FOR_nothing))
	compute_type = vector_compute_type;
    }

  /* If we are breaking a BLKmode vector into smaller pieces,
     type_for_widest_vector_mode has already looked into the optab,
     so skip these checks.  */
  if (compute_type == type)
    {
      machine_mode compute_mode = TYPE_MODE (compute_type);
      if (VECTOR_MODE_P (compute_mode))
	{
	  if (op && optab_handler (op, compute_mode) != CODE_FOR_nothing)
	    return compute_type;
	  if (code == MULT_HIGHPART_EXPR
	      && can_mult_highpart_p (compute_mode,
				      TYPE_UNSIGNED (compute_type)))
	    return compute_type;
	}
      /* There is no operation in hardware, so fall back to scalars.  */
      compute_type = TREE_TYPE (type);
    }

  return compute_type;
}

/* Helper function of expand_vector_operations_1.  Return number of
   vector elements for vector types or 1 for other types.  */

static inline int
count_type_subparts (tree type)
{
  return VECTOR_TYPE_P (type) ? TYPE_VECTOR_SUBPARTS (type) : 1;
}

/* Helper for expand_vector_scalar_condition: extract the relevant
   pieces of A and B and rebuild the scalar COND_EXPR.  */

static tree
do_cond (gimple_stmt_iterator *gsi, tree inner_type, tree a, tree b,
	 tree bitpos, tree bitsize, enum tree_code code,
	 tree type ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (TREE_TYPE (a)) == VECTOR_TYPE)
    a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
  if (TREE_CODE (TREE_TYPE (b)) == VECTOR_TYPE)
    b = tree_vec_extract (gsi, inner_type, b, bitsize, bitpos);
  tree cond = gimple_assign_rhs1 (gsi_stmt (*gsi));
  return gimplify_build3 (gsi, code, inner_type, unshare_expr (cond), a, b);
}

/* Expand a vector COND_EXPR to scalars, piecewise.  */

static void
expand_vector_scalar_condition (gimple_stmt_iterator *gsi)
{
  gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
  tree type = gimple_expr_type (stmt);
  tree compute_type = get_compute_type (COND_EXPR, mov_optab, type);
  machine_mode compute_mode = TYPE_MODE (compute_type);
  gcc_assert (compute_mode != BLKmode);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  tree rhs3 = gimple_assign_rhs3 (stmt);
  tree new_rhs;

  /* If the compute mode is not a vector mode (hence we are not decomposing
     a BLKmode vector to smaller, hardware-supported vectors), we may want
     to expand the operations in parallel.  */
  if (GET_MODE_CLASS (compute_mode) != MODE_VECTOR_INT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_FLOAT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_FRACT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_UFRACT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_ACCUM
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_UACCUM)
    new_rhs = expand_vector_parallel (gsi, do_cond, type, rhs2, rhs3,
				      COND_EXPR);
  else
    new_rhs = expand_vector_piecewise (gsi, do_cond, type, compute_type,
				       rhs2, rhs3, COND_EXPR);
  if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (new_rhs)))
    new_rhs = gimplify_build1 (gsi, VIEW_CONVERT_EXPR, TREE_TYPE (lhs),
			       new_rhs);

  /* NOTE:  We should avoid using gimple_assign_set_rhs_from_tree. One
     way to do it is change expand_vector_operation and its callees to
     return a tree_code, RHS1 and RHS2 instead of a tree. */
  gimple_assign_set_rhs_from_tree (gsi, new_rhs);
  update_stmt (gsi_stmt (*gsi));
}

/* Process one statement.  If we identify a vector operation, expand it.  */

static void
expand_vector_operations_1 (gimple_stmt_iterator *gsi)
{
  tree lhs, rhs1, rhs2 = NULL, type, compute_type = NULL_TREE;
  enum tree_code code;
  optab op = unknown_optab;
  enum gimple_rhs_class rhs_class;
  tree new_rhs;

  /* Only consider code == GIMPLE_ASSIGN.  */
  gassign *stmt = dyn_cast <gassign *> (gsi_stmt (*gsi));
  if (!stmt)
    return;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = get_gimple_rhs_class (code);
  lhs = gimple_assign_lhs (stmt);

  if (code == VEC_PERM_EXPR)
    {
      lower_vec_perm (gsi);
      return;
    }

  if (code == VEC_COND_EXPR)
    {
      expand_vector_condition (gsi);
      return;
    }

  if (code == COND_EXPR
      && TREE_CODE (TREE_TYPE (gimple_assign_lhs (stmt))) == VECTOR_TYPE
      && TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt))) == BLKmode)
    {
      expand_vector_scalar_condition (gsi);
      return;
    }

  if (code == CONSTRUCTOR
      && TREE_CODE (lhs) == SSA_NAME
      && VECTOR_MODE_P (TYPE_MODE (TREE_TYPE (lhs)))
      && !gimple_clobber_p (stmt)
      && optimize)
    {
      optimize_vector_constructor (gsi);
      return;
    }

  if (rhs_class != GIMPLE_UNARY_RHS && rhs_class != GIMPLE_BINARY_RHS)
    return;

  rhs1 = gimple_assign_rhs1 (stmt);
  type = gimple_expr_type (stmt);
  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (type) != VECTOR_TYPE)
    return;

  /* If the vector operation is operating on all same vector elements
     implement it with a scalar operation and a splat if the target
     supports the scalar operation.  */
  tree srhs1, srhs2 = NULL_TREE;
  if ((srhs1 = ssa_uniform_vector_p (rhs1)) != NULL_TREE
      && (rhs2 == NULL_TREE
	  || (! VECTOR_TYPE_P (TREE_TYPE (rhs2))
	      && (srhs2 = rhs2))
	  || (srhs2 = ssa_uniform_vector_p (rhs2)) != NULL_TREE)
      /* As we query direct optabs restrict to non-convert operations.  */
      && TYPE_MODE (TREE_TYPE (type)) == TYPE_MODE (TREE_TYPE (srhs1)))
    {
      op = optab_for_tree_code (code, TREE_TYPE (type), optab_scalar);
      if (op >= FIRST_NORM_OPTAB && op <= LAST_NORM_OPTAB
	  && optab_handler (op, TYPE_MODE (TREE_TYPE (type))) != CODE_FOR_nothing)
	{
	  tree slhs = make_ssa_name (TREE_TYPE (srhs1));
	  gimple *repl = gimple_build_assign (slhs, code, srhs1, srhs2);
	  gsi_insert_before (gsi, repl, GSI_SAME_STMT);
	  gimple_assign_set_rhs_from_tree (gsi,
					   build_vector_from_val (type, slhs));
	  update_stmt (stmt);
	  return;
	}
    }

  /* A scalar operation pretending to be a vector one.  */
  if (VECTOR_BOOLEAN_TYPE_P (type)
      && !VECTOR_MODE_P (TYPE_MODE (type))
      && TYPE_MODE (type) != BLKmode)
    return;

  if (CONVERT_EXPR_CODE_P (code)
      || code == FLOAT_EXPR
      || code == FIX_TRUNC_EXPR
      || code == VIEW_CONVERT_EXPR)
    return;

  /* The signedness is determined from input argument.  */
  if (code == VEC_UNPACK_FLOAT_HI_EXPR
      || code == VEC_UNPACK_FLOAT_LO_EXPR)
    type = TREE_TYPE (rhs1);

  /* For widening/narrowing vector operations, the relevant type is of the
     arguments, not the widened result.  VEC_UNPACK_FLOAT_*_EXPR is
     calculated in the same way above.  */
  if (code == WIDEN_SUM_EXPR
      || code == VEC_WIDEN_MULT_HI_EXPR
      || code == VEC_WIDEN_MULT_LO_EXPR
      || code == VEC_WIDEN_MULT_EVEN_EXPR
      || code == VEC_WIDEN_MULT_ODD_EXPR
      || code == VEC_UNPACK_HI_EXPR
      || code == VEC_UNPACK_LO_EXPR
      || code == VEC_PACK_TRUNC_EXPR
      || code == VEC_PACK_SAT_EXPR
      || code == VEC_PACK_FIX_TRUNC_EXPR
      || code == VEC_WIDEN_LSHIFT_HI_EXPR
      || code == VEC_WIDEN_LSHIFT_LO_EXPR)
    type = TREE_TYPE (rhs1);

  /* Choose between vector shift/rotate by vector and vector shift/rotate by
     scalar */
  if (code == LSHIFT_EXPR
      || code == RSHIFT_EXPR
      || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    {
      optab opv;

      /* Check whether we have vector <op> {x,x,x,x} where x
         could be a scalar variable or a constant.  Transform
         vector <op> {x,x,x,x} ==> vector <op> scalar.  */
      if (VECTOR_INTEGER_TYPE_P (TREE_TYPE (rhs2)))
        {
          tree first;

          if ((first = ssa_uniform_vector_p (rhs2)) != NULL_TREE)
            {
              gimple_assign_set_rhs2 (stmt, first);
              update_stmt (stmt);
              rhs2 = first;
            }
        }

      opv = optab_for_tree_code (code, type, optab_vector);
      if (VECTOR_INTEGER_TYPE_P (TREE_TYPE (rhs2)))
	op = opv;
      else
	{
          op = optab_for_tree_code (code, type, optab_scalar);

	  compute_type = get_compute_type (code, op, type);
	  if (compute_type == type)
	    return;
	  /* The rtl expander will expand vector/scalar as vector/vector
	     if necessary.  Pick one with wider vector type.  */
	  tree compute_vtype = get_compute_type (code, opv, type);
	  if (count_type_subparts (compute_vtype)
	      > count_type_subparts (compute_type))
	    {
	      compute_type = compute_vtype;
	      op = opv;
	    }
	}

      if (code == LROTATE_EXPR || code == RROTATE_EXPR)
	{
	  if (compute_type == NULL_TREE)
	    compute_type = get_compute_type (code, op, type);
	  if (compute_type == type)
	    return;
	  /* Before splitting vector rotates into scalar rotates,
	     see if we can't use vector shifts and BIT_IOR_EXPR
	     instead.  For vector by vector rotates we'd also
	     need to check BIT_AND_EXPR and NEGATE_EXPR, punt there
	     for now, fold doesn't seem to create such rotates anyway.  */
	  if (compute_type == TREE_TYPE (type)
	      && !VECTOR_INTEGER_TYPE_P (TREE_TYPE (rhs2)))
	    {
	      optab oplv = vashl_optab, opl = ashl_optab;
	      optab oprv = vlshr_optab, opr = lshr_optab, opo = ior_optab;
	      tree compute_lvtype = get_compute_type (LSHIFT_EXPR, oplv, type);
	      tree compute_rvtype = get_compute_type (RSHIFT_EXPR, oprv, type);
	      tree compute_otype = get_compute_type (BIT_IOR_EXPR, opo, type);
	      tree compute_ltype = get_compute_type (LSHIFT_EXPR, opl, type);
	      tree compute_rtype = get_compute_type (RSHIFT_EXPR, opr, type);
	      /* The rtl expander will expand vector/scalar as vector/vector
		 if necessary.  Pick one with wider vector type.  */
	      if (count_type_subparts (compute_lvtype)
		  > count_type_subparts (compute_ltype))
		{
		  compute_ltype = compute_lvtype;
		  opl = oplv;
		}
	      if (count_type_subparts (compute_rvtype)
		  > count_type_subparts (compute_rtype))
		{
		  compute_rtype = compute_rvtype;
		  opr = oprv;
		}
	      /* Pick the narrowest type from LSHIFT_EXPR, RSHIFT_EXPR and
		 BIT_IOR_EXPR.  */
	      compute_type = compute_ltype;
	      if (count_type_subparts (compute_type)
		  > count_type_subparts (compute_rtype))
		compute_type = compute_rtype;
	      if (count_type_subparts (compute_type)
		  > count_type_subparts (compute_otype))
		compute_type = compute_otype;
	      /* Verify all 3 operations can be performed in that type.  */
	      if (compute_type != TREE_TYPE (type))
		{
		  if (optab_handler (opl, TYPE_MODE (compute_type))
		      == CODE_FOR_nothing
		      || optab_handler (opr, TYPE_MODE (compute_type))
			 == CODE_FOR_nothing
		      || optab_handler (opo, TYPE_MODE (compute_type))
			 == CODE_FOR_nothing)
		    compute_type = TREE_TYPE (type);
		}
	    }
	}
    }
  else
    op = optab_for_tree_code (code, type, optab_default);

  /* Optabs will try converting a negation into a subtraction, so
     look for it as well.  TODO: negation of floating-point vectors
     might be turned into an exclusive OR toggling the sign bit.  */
  if (op == unknown_optab
      && code == NEGATE_EXPR
      && INTEGRAL_TYPE_P (TREE_TYPE (type)))
    op = optab_for_tree_code (MINUS_EXPR, type, optab_default);

  if (compute_type == NULL_TREE)
    compute_type = get_compute_type (code, op, type);
  if (compute_type == type)
    return;

  new_rhs = expand_vector_operation (gsi, type, compute_type, stmt, code);

  /* Leave expression untouched for later expansion.  */
  if (new_rhs == NULL_TREE)
    return;

  if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (new_rhs)))
    new_rhs = gimplify_build1 (gsi, VIEW_CONVERT_EXPR, TREE_TYPE (lhs),
			       new_rhs);

  /* NOTE:  We should avoid using gimple_assign_set_rhs_from_tree. One
     way to do it is change expand_vector_operation and its callees to
     return a tree_code, RHS1 and RHS2 instead of a tree. */
  gimple_assign_set_rhs_from_tree (gsi, new_rhs);
  update_stmt (gsi_stmt (*gsi));
}

/* Use this to lower vector operations introduced by the vectorizer,
   if it may need the bit-twiddling tricks implemented in this file.  */

static unsigned int
expand_vector_operations (void)
{
  gimple_stmt_iterator gsi;
  basic_block bb;
  bool cfg_changed = false;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  expand_vector_operations_1 (&gsi);
	  /* ???  If we do not cleanup EH then we will ICE in
	     verification.  But in reality we have created wrong-code
	     as we did not properly transition EH info and edges to
	     the piecewise computations.  */
	  if (maybe_clean_eh_stmt (gsi_stmt (gsi))
	      && gimple_purge_dead_eh_edges (bb))
	    cfg_changed = true;
	}
    }

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

namespace {

const pass_data pass_data_lower_vector =
{
  GIMPLE_PASS, /* type */
  "veclower", /* name */
  OPTGROUP_VEC, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_cfg, /* properties_required */
  PROP_gimple_lvec, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_lower_vector : public gimple_opt_pass
{
public:
  pass_lower_vector (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_vector, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *fun)
    {
      return !(fun->curr_properties & PROP_gimple_lvec);
    }

  virtual unsigned int execute (function *)
    {
      return expand_vector_operations ();
    }

}; // class pass_lower_vector

} // anon namespace

gimple_opt_pass *
make_pass_lower_vector (gcc::context *ctxt)
{
  return new pass_lower_vector (ctxt);
}

namespace {

const pass_data pass_data_lower_vector_ssa =
{
  GIMPLE_PASS, /* type */
  "veclower2", /* name */
  OPTGROUP_VEC, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_cfg, /* properties_required */
  PROP_gimple_lvec, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_update_ssa
    | TODO_cleanup_cfg ), /* todo_flags_finish */
};

class pass_lower_vector_ssa : public gimple_opt_pass
{
public:
  pass_lower_vector_ssa (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_vector_ssa, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_lower_vector_ssa (m_ctxt); }
  virtual unsigned int execute (function *)
    {
      return expand_vector_operations ();
    }

}; // class pass_lower_vector_ssa

} // anon namespace

gimple_opt_pass *
make_pass_lower_vector_ssa (gcc::context *ctxt)
{
  return new pass_lower_vector_ssa (ctxt);
}

#include "gt-tree-vect-generic.h"