/* Lower vector operations to scalar operations.
   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "langhooks.h"
#include "tree-flow.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-pass.h"
#include "flags.h"
#include "ggc.h"

/* Need to include rtl.h, expr.h, etc. for optabs.  */
#include "expr.h"
#include "optabs.h"
/* Build a constant of type TYPE, made of VALUE's bits replicated
   every TYPE_SIZE (INNER_TYPE) bits to fit TYPE's precision.  */
static tree
build_replicated_const (tree type, tree inner_type, HOST_WIDE_INT value)
{
  int width = tree_low_cst (TYPE_SIZE (inner_type), 1);
  int n = HOST_BITS_PER_WIDE_INT / width;
  unsigned HOST_WIDE_INT low, high, mask;
  tree ret;

  gcc_assert (n);

  if (width == HOST_BITS_PER_WIDE_INT)
    low = value;
  else
    {
      mask = ((HOST_WIDE_INT)1 << width) - 1;
      low = (unsigned HOST_WIDE_INT) ~0 / mask * (value & mask);
    }

  if (TYPE_PRECISION (type) < HOST_BITS_PER_WIDE_INT)
    low &= ((HOST_WIDE_INT)1 << TYPE_PRECISION (type)) - 1, high = 0;
  else if (TYPE_PRECISION (type) == HOST_BITS_PER_WIDE_INT)
    high = 0;
  else if (TYPE_PRECISION (type) == 2 * HOST_BITS_PER_WIDE_INT)
    high = low;
  else
    gcc_unreachable ();

  ret = build_int_cst_wide (type, low, high);
  return ret;
}
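/* For instance, with a 64-bit HOST_WIDE_INT, replicating the value
   0x7f over an 8-bit INNER_TYPE gives 0x7f7f7f7f7f7f7f7f: MASK is
   0xff, ~0 / MASK is the byte-replicating constant 0x0101010101010101,
   and multiplying it by (VALUE & MASK) stamps 0x7f into every byte.  */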
static GTY(()) tree vector_inner_type;
static GTY(()) tree vector_last_type;
static GTY(()) int vector_last_nunits;
/* Return a suitable vector type made of NUNITS units each of mode
   "word_mode" (the global variable).  */
static tree
build_word_mode_vector_type (int nunits)
{
  if (!vector_inner_type)
    vector_inner_type = lang_hooks.types.type_for_mode (word_mode, 1);
  else if (vector_last_nunits == nunits)
    {
      gcc_assert (TREE_CODE (vector_last_type) == VECTOR_TYPE);
      return vector_last_type;
    }

  /* We build a new type, but we canonicalize it nevertheless,
     because it still saves some memory.  */
  vector_last_nunits = nunits;
  vector_last_type = type_hash_canon (nunits,
                                      build_vector_type (vector_inner_type,
                                                         nunits));
  return vector_last_type;
}
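/* On a typical 64-bit target, for instance, word_mode is DImode, and
   build_word_mode_vector_type (2) returns (and caches) a two-element
   vector type of DImode units, which lets a 16-byte vector be
   processed one word at a time.  */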
typedef tree (*elem_op_func) (gimple_stmt_iterator *,
                              tree, tree, tree, tree, tree, enum tree_code);
static tree
tree_vec_extract (gimple_stmt_iterator *gsi, tree type,
                  tree t, tree bitsize, tree bitpos)
{
  if (bitpos)
    return gimplify_build3 (gsi, BIT_FIELD_REF, type, t, bitsize, bitpos);
  else
    return gimplify_build1 (gsi, VIEW_CONVERT_EXPR, type, t);
}
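/* With a non-null BITPOS the extraction is a GIMPLE BIT_FIELD_REF;
   e.g. taking the third 32-bit element of a vector A is
   BIT_FIELD_REF <A, 32, 64>, i.e. 32 bits starting at bit offset 64.
   With a null BITPOS the whole value is just reinterpreted in TYPE
   through a VIEW_CONVERT_EXPR.  */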
static tree
do_unop (gimple_stmt_iterator *gsi, tree inner_type, tree a,
         tree b ATTRIBUTE_UNUSED, tree bitpos, tree bitsize,
         enum tree_code code)
{
  a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
  return gimplify_build1 (gsi, code, inner_type, a);
}
static tree
do_binop (gimple_stmt_iterator *gsi, tree inner_type, tree a, tree b,
          tree bitpos, tree bitsize, enum tree_code code)
{
  a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
  b = tree_vec_extract (gsi, inner_type, b, bitsize, bitpos);
  return gimplify_build2 (gsi, code, inner_type, a, b);
}
/* Expand vector addition to scalars.  This does bit twiddling
   in order to increase parallelism:

   a + b = (((int) a & 0x7f7f7f7f) + ((int) b & 0x7f7f7f7f)) ^
           ((a ^ b) & 0x80808080)

   a - b = (((int) a | 0x80808080) - ((int) b & 0x7f7f7f7f)) ^
           ((a ^ ~b) & 0x80808080)

   -b = (0x80808080 - ((int) b & 0x7f7f7f7f)) ^ (~b & 0x80808080)

   This optimization should be done only if 4 vector items or more
   fit into a word.  */
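/* To see why the first identity holds, consider 8-bit lanes in a
   32-bit word.  Masking with 0x7f7f7f7f clears the top bit of every
   lane, so one word-wide addition of the masked values can never carry
   across a lane boundary.  The top bit of each lane of the true sum is
   the XOR of the two original top bits and the carry arriving from the
   low bits, which is exactly what XORing with (a ^ b) & 0x80808080
   restores.  For example, in a lane holding a = 0x85 and b = 0x83:
   (0x05 + 0x03) = 0x08 and (0x85 ^ 0x83) & 0x80 = 0, giving 0x08,
   which equals (0x85 + 0x83) modulo 0x100.  */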
static tree
do_plus_minus (gimple_stmt_iterator *gsi, tree word_type, tree a, tree b,
               tree bitpos ATTRIBUTE_UNUSED, tree bitsize ATTRIBUTE_UNUSED,
               enum tree_code code)
{
  tree inner_type = TREE_TYPE (TREE_TYPE (a));
  unsigned HOST_WIDE_INT max;
  tree low_bits, high_bits, a_low, b_low, result_low, signs;

  max = GET_MODE_MASK (TYPE_MODE (inner_type));
  low_bits = build_replicated_const (word_type, inner_type, max >> 1);
  high_bits = build_replicated_const (word_type, inner_type, max & ~(max >> 1));

  a = tree_vec_extract (gsi, word_type, a, bitsize, bitpos);
  b = tree_vec_extract (gsi, word_type, b, bitsize, bitpos);

  signs = gimplify_build2 (gsi, BIT_XOR_EXPR, word_type, a, b);
  b_low = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, b, low_bits);
  if (code == PLUS_EXPR)
    a_low = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, a, low_bits);
  else
    {
      a_low = gimplify_build2 (gsi, BIT_IOR_EXPR, word_type, a, high_bits);
      signs = gimplify_build1 (gsi, BIT_NOT_EXPR, word_type, signs);
    }

  signs = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, signs, high_bits);
  result_low = gimplify_build2 (gsi, code, word_type, a_low, b_low);
  return gimplify_build2 (gsi, BIT_XOR_EXPR, word_type, result_low, signs);
}
static tree
do_negate (gimple_stmt_iterator *gsi, tree word_type, tree b,
           tree unused ATTRIBUTE_UNUSED, tree bitpos ATTRIBUTE_UNUSED,
           tree bitsize ATTRIBUTE_UNUSED,
           enum tree_code code ATTRIBUTE_UNUSED)
{
  tree inner_type = TREE_TYPE (TREE_TYPE (b));
  HOST_WIDE_INT max;
  tree low_bits, high_bits, b_low, result_low, signs;

  max = GET_MODE_MASK (TYPE_MODE (inner_type));
  low_bits = build_replicated_const (word_type, inner_type, max >> 1);
  high_bits = build_replicated_const (word_type, inner_type, max & ~(max >> 1));

  b = tree_vec_extract (gsi, word_type, b, bitsize, bitpos);

  b_low = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, b, low_bits);
  signs = gimplify_build1 (gsi, BIT_NOT_EXPR, word_type, b);
  signs = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, signs, high_bits);
  result_low = gimplify_build2 (gsi, MINUS_EXPR, word_type, high_bits, b_low);
  return gimplify_build2 (gsi, BIT_XOR_EXPR, word_type, result_low, signs);
}
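/* This follows the -b identity quoted before do_plus_minus: it is the
   a - b sequence specialized for a == 0, so "a | 0x80808080" collapses
   to the constant high_bits and "a ^ ~b" to ~b.  E.g. for an 8-bit
   lane with b = 0x01: (0x80 - 0x01) ^ (0xfe & 0x80) = 0x7f ^ 0x80 =
   0xff, which is -1 modulo 0x100, the negation of 1.  */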
/* Expand a vector operation to scalars, by using many operations
   whose type is the vector type's inner type.  */
static tree
expand_vector_piecewise (gimple_stmt_iterator *gsi, elem_op_func f,
                         tree type, tree inner_type,
                         tree a, tree b, enum tree_code code)
{
  VEC(constructor_elt,gc) *v;
  tree part_width = TYPE_SIZE (inner_type);
  tree index = bitsize_int (0);
  int nunits = TYPE_VECTOR_SUBPARTS (type);
  int delta = tree_low_cst (part_width, 1)
              / tree_low_cst (TYPE_SIZE (TREE_TYPE (type)), 1);
  int i;

  v = VEC_alloc(constructor_elt, gc, (nunits + delta - 1) / delta);
  for (i = 0; i < nunits;
       i += delta, index = int_const_binop (PLUS_EXPR, index, part_width, 0))
    {
      tree result = f (gsi, inner_type, a, b, index, part_width, code);
      constructor_elt *ce = VEC_quick_push (constructor_elt, v, NULL);
      ce->index = NULL_TREE;
      ce->value = result;
    }

  return build_constructor (type, v);
}
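/* Each call to F above produces one PART_WIDTH-sized piece, and the
   pieces are collected into a CONSTRUCTOR of the original vector type.
   DELTA counts how many original elements one piece covers: for
   instance, lowering a 16-element vector of bytes through 32-bit
   pieces gives delta = 4 and four loop iterations.  */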
/* Expand a vector operation to scalars with the freedom to use
   a scalar integer type, or to use a different size for the items
   in the vector type.  */
static tree
expand_vector_parallel (gimple_stmt_iterator *gsi, elem_op_func f, tree type,
                        tree a, tree b,
                        enum tree_code code)
{
  tree result, compute_type;
  enum machine_mode mode;
  int n_words = tree_low_cst (TYPE_SIZE_UNIT (type), 1) / UNITS_PER_WORD;

  /* We have three strategies.  If the type is already correct, just do
     the operation an element at a time.  Else, if the vector is wider than
     one word, do it a word at a time; finally, if the vector is smaller
     than one word, do it as a scalar.  */
  if (TYPE_MODE (TREE_TYPE (type)) == word_mode)
    return expand_vector_piecewise (gsi, f,
                                    type, TREE_TYPE (type),
                                    a, b, code);
  else if (n_words > 1)
    {
      tree word_type = build_word_mode_vector_type (n_words);
      result = expand_vector_piecewise (gsi, f,
                                        word_type, TREE_TYPE (word_type),
                                        a, b, code);
      result = force_gimple_operand_gsi (gsi, result, true, NULL, true,
                                         GSI_SAME_STMT);
    }
  else
    {
      /* Use a single scalar operation with a mode no wider than word_mode.  */
      mode = mode_for_size (tree_low_cst (TYPE_SIZE (type), 1), MODE_INT, 0);
      compute_type = lang_hooks.types.type_for_mode (mode, 1);
      result = f (gsi, compute_type, a, b, NULL_TREE, NULL_TREE, code);
    }

  return result;
}
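/* For instance, a bitwise AND of two 16-byte vectors on a 32-bit
   target has n_words = 4 and takes the word-at-a-time path, while an
   operation on a 4-byte vector there falls through to the final
   branch and is done as one SImode scalar operation.  */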
/* Expand a vector operation to scalars; for integer types we can use
   special bit twiddling tricks to do the sums a word at a time, using
   function F_PARALLEL instead of F.  These tricks are done only if
   they can process at least four items, that is, only if the vector
   holds at least four items and if a word can hold four items.  */
static tree
expand_vector_addition (gimple_stmt_iterator *gsi,
                        elem_op_func f, elem_op_func f_parallel,
                        tree type, tree a, tree b, enum tree_code code)
{
  int parts_per_word = UNITS_PER_WORD
                       / tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (type)), 1);

  if (INTEGRAL_TYPE_P (TREE_TYPE (type))
      && parts_per_word >= 4
      && TYPE_VECTOR_SUBPARTS (type) >= 4)
    return expand_vector_parallel (gsi, f_parallel,
                                   type, a, b, code);
  else
    return expand_vector_piecewise (gsi, f,
                                    type, TREE_TYPE (type),
                                    a, b, code);
}
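/* On a 32-bit target, for example, adding vectors of 8-bit elements
   gives parts_per_word = 4, so the masked word-at-a-time trick from
   do_plus_minus pays off; with 16-bit elements only two parts fit in
   a word, and the plain element-by-element expansion is used
   instead.  */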
/* Check whether vector VEC consists of all equal elements and that
   the number of elements corresponds to the type of VEC.  Return the
   first element of the vector, or NULL_TREE if the vector is not
   uniform.  */
static tree
uniform_vector_p (tree vec)
{
  tree first, t, els;
  unsigned i;

  if (vec == NULL_TREE)
    return NULL_TREE;

  if (TREE_CODE (vec) == VECTOR_CST)
    {
      els = TREE_VECTOR_CST_ELTS (vec);
      first = TREE_VALUE (els);
      els = TREE_CHAIN (els);

      for (t = els; t; t = TREE_CHAIN (t))
        if (!operand_equal_p (first, TREE_VALUE (t), 0))
          return NULL_TREE;

      return first;
    }

  else if (TREE_CODE (vec) == CONSTRUCTOR)
    {
      first = error_mark_node;

      FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (vec), i, t)
        {
          if (i == 0)
            {
              first = t;
              continue;
            }
          if (!operand_equal_p (first, t, 0))
            return NULL_TREE;
        }
      if (i != TYPE_VECTOR_SUBPARTS (TREE_TYPE (vec)))
        return NULL_TREE;

      return first;
    }

  return NULL_TREE;
}
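/* For example, the VECTOR_CST {3, 3, 3, 3} yields the INTEGER_CST 3,
   and a CONSTRUCTOR {x_1, x_1, x_1, x_1} of one SSA name yields x_1;
   {3, 3, 3, 4}, or a CONSTRUCTOR that leaves trailing elements
   implicit, yields NULL_TREE.  */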
static tree
expand_vector_operation (gimple_stmt_iterator *gsi, tree type, tree compute_type,
                         gimple assign, enum tree_code code)
{
  enum machine_mode compute_mode = TYPE_MODE (compute_type);

  /* If the compute mode is not a vector mode (hence we are not decomposing
     a BLKmode vector to smaller, hardware-supported vectors), we may want
     to expand the operations in parallel.  */
  if (GET_MODE_CLASS (compute_mode) != MODE_VECTOR_INT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_FLOAT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_FRACT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_UFRACT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_ACCUM
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_UACCUM)
    switch (code)
      {
      case PLUS_EXPR:
      case MINUS_EXPR:
        if (!TYPE_OVERFLOW_TRAPS (type))
          return expand_vector_addition (gsi, do_binop, do_plus_minus, type,
                                         gimple_assign_rhs1 (assign),
                                         gimple_assign_rhs2 (assign), code);
        break;

      case NEGATE_EXPR:
        if (!TYPE_OVERFLOW_TRAPS (type))
          return expand_vector_addition (gsi, do_unop, do_negate, type,
                                         gimple_assign_rhs1 (assign),
                                         NULL_TREE, code);
        break;

      case BIT_AND_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
        return expand_vector_parallel (gsi, do_binop, type,
                                       gimple_assign_rhs1 (assign),
                                       gimple_assign_rhs2 (assign), code);

      case BIT_NOT_EXPR:
        return expand_vector_parallel (gsi, do_unop, type,
                                       gimple_assign_rhs1 (assign),
                                       NULL_TREE, code);

      default:
        break;
      }

  if (TREE_CODE_CLASS (code) == tcc_unary)
    return expand_vector_piecewise (gsi, do_unop, type, compute_type,
                                    gimple_assign_rhs1 (assign),
                                    NULL_TREE, code);
  else
    return expand_vector_piecewise (gsi, do_binop, type, compute_type,
                                    gimple_assign_rhs1 (assign),
                                    gimple_assign_rhs2 (assign), code);
}
/* Return a type for the widest vector mode whose components are of mode
   INNER_MODE, or NULL_TREE if none is found.
   SATP is true for saturating fixed-point types.  */

static tree
type_for_widest_vector_mode (enum machine_mode inner_mode, optab op, int satp)
{
  enum machine_mode best_mode = VOIDmode, mode;
  int best_nunits = 0;

  if (SCALAR_FLOAT_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_INNER (mode) == inner_mode
        && GET_MODE_NUNITS (mode) > best_nunits
        && optab_handler (op, mode) != CODE_FOR_nothing)
      best_mode = mode, best_nunits = GET_MODE_NUNITS (mode);

  if (best_mode == VOIDmode)
    return NULL_TREE;
  else
    {
      /* For fixed-point modes, we need to pass satp as the 2nd parameter.  */
      if (ALL_FIXED_POINT_MODE_P (best_mode))
        return lang_hooks.types.type_for_mode (best_mode, satp);

      return lang_hooks.types.type_for_mode (best_mode, 1);
    }
}
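/* The loop starts from the narrowest vector mode of the matching
   class and remembers the widest one the optab can actually handle;
   on a target whose OP supports, say, both V2SI and V4SI, an SImode
   INNER_MODE would come back as the type for V4SI.  */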
/* Process one statement.  If we identify a vector operation, expand it.  */

static void
expand_vector_operations_1 (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree lhs, rhs1, rhs2 = NULL, type, compute_type;
  enum tree_code code;
  enum machine_mode compute_mode;
  optab op = NULL;
  enum gimple_rhs_class rhs_class;
  tree new_rhs;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = get_gimple_rhs_class (code);

  if (rhs_class != GIMPLE_UNARY_RHS && rhs_class != GIMPLE_BINARY_RHS)
    return;

  lhs = gimple_assign_lhs (stmt);
  rhs1 = gimple_assign_rhs1 (stmt);
  type = gimple_expr_type (stmt);
  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (type) != VECTOR_TYPE)
    return;

  if (code == NOP_EXPR
      || code == FLOAT_EXPR
      || code == FIX_TRUNC_EXPR
      || code == VIEW_CONVERT_EXPR)
    return;

  gcc_assert (code != CONVERT_EXPR);

  /* The signedness is determined from the input argument.  */
  if (code == VEC_UNPACK_FLOAT_HI_EXPR
      || code == VEC_UNPACK_FLOAT_LO_EXPR)
    type = TREE_TYPE (rhs1);

  /* Choose between vector shift/rotate by vector and vector shift/rotate by
     scalar.  */
  if (code == LSHIFT_EXPR
      || code == RSHIFT_EXPR
      || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    {
      bool vector_scalar_shift;
      op = optab_for_tree_code (code, type, optab_scalar);

      /* Vector/Scalar shift is supported.  */
      vector_scalar_shift = (op && (optab_handler (op, TYPE_MODE (type))
                                    != CODE_FOR_nothing));

      /* If the 2nd argument is a vector, we need a vector/vector shift.
         Except when all the elements in the second vector are the same.  */
      if (VECTOR_MODE_P (TYPE_MODE (TREE_TYPE (rhs2))))
        {
          tree first;
          gimple def_stmt;

          /* Check whether we have vector <op> {x,x,x,x} where x
             could be a scalar variable or a constant.  Transform
             vector <op> {x,x,x,x} ==> vector <op> scalar.  */
          if (vector_scalar_shift
              && ((TREE_CODE (rhs2) == VECTOR_CST
                   && (first = uniform_vector_p (rhs2)) != NULL_TREE)
                  || (TREE_CODE (rhs2) == SSA_NAME
                      && (def_stmt = SSA_NAME_DEF_STMT (rhs2))
                      && gimple_assign_single_p (def_stmt)
                      && (first = uniform_vector_p
                            (gimple_assign_rhs1 (def_stmt))) != NULL_TREE)))
            {
              gimple_assign_set_rhs2 (stmt, first);
              update_stmt (stmt);
              rhs2 = first;
            }
          else
            op = optab_for_tree_code (code, type, optab_vector);
        }

      /* Try for a vector/scalar shift, and if we don't have one, see if we
         have a vector/vector shift.  */
      else if (!vector_scalar_shift)
        {
          op = optab_for_tree_code (code, type, optab_vector);

          if (op && (optab_handler (op, TYPE_MODE (type))
                     != CODE_FOR_nothing))
            {
              /* Transform vector <op> scalar => vector <op> {x,x,x,x}.  */
              int n_parts = TYPE_VECTOR_SUBPARTS (type);
              int part_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (type)), 1);
              tree part_type = lang_hooks.types.type_for_size (part_size, 1);
              tree vect_type = build_vector_type (part_type, n_parts);

              rhs2 = fold_convert (part_type, rhs2);
              rhs2 = build_vector_from_val (vect_type, rhs2);
              gimple_assign_set_rhs2 (stmt, rhs2);
              update_stmt (stmt);
            }
        }
    }
  else
    op = optab_for_tree_code (code, type, optab_default);

  /* For widening/narrowing vector operations, the relevant type is of the
     arguments, not the widened result.  VEC_UNPACK_FLOAT_*_EXPR is
     calculated in the same way above.  */
  if (code == WIDEN_SUM_EXPR
      || code == VEC_WIDEN_MULT_HI_EXPR
      || code == VEC_WIDEN_MULT_LO_EXPR
      || code == VEC_UNPACK_HI_EXPR
      || code == VEC_UNPACK_LO_EXPR
      || code == VEC_PACK_TRUNC_EXPR
      || code == VEC_PACK_SAT_EXPR
      || code == VEC_PACK_FIX_TRUNC_EXPR)
    type = TREE_TYPE (rhs1);

  /* Optabs will try converting a negation into a subtraction, so
     look for it as well.  TODO: negation of floating-point vectors
     might be turned into an exclusive OR toggling the sign bit.  */
  if (op == NULL
      && code == NEGATE_EXPR
      && INTEGRAL_TYPE_P (TREE_TYPE (type)))
    op = optab_for_tree_code (MINUS_EXPR, type, optab_default);

  /* For very wide vectors, try using a smaller vector mode.  */
  compute_type = type;
  if (TYPE_MODE (type) == BLKmode && op)
    {
      tree vector_compute_type
        = type_for_widest_vector_mode (TYPE_MODE (TREE_TYPE (type)), op,
                                       TYPE_SATURATING (TREE_TYPE (type)));
      if (vector_compute_type != NULL_TREE
          && (TYPE_VECTOR_SUBPARTS (vector_compute_type)
              < TYPE_VECTOR_SUBPARTS (compute_type)))
        compute_type = vector_compute_type;
    }

  /* If we are breaking a BLKmode vector into smaller pieces,
     type_for_widest_vector_mode has already looked into the optab,
     so skip these checks.  */
  if (compute_type == type)
    {
      compute_mode = TYPE_MODE (compute_type);
      if ((GET_MODE_CLASS (compute_mode) == MODE_VECTOR_INT
           || GET_MODE_CLASS (compute_mode) == MODE_VECTOR_FLOAT
           || GET_MODE_CLASS (compute_mode) == MODE_VECTOR_FRACT
           || GET_MODE_CLASS (compute_mode) == MODE_VECTOR_UFRACT
           || GET_MODE_CLASS (compute_mode) == MODE_VECTOR_ACCUM
           || GET_MODE_CLASS (compute_mode) == MODE_VECTOR_UACCUM)
          && op != NULL
          && optab_handler (op, compute_mode) != CODE_FOR_nothing)
        return;
      else
        /* There is no operation in hardware, so fall back to scalars.  */
        compute_type = TREE_TYPE (type);
    }

  gcc_assert (code != VEC_LSHIFT_EXPR && code != VEC_RSHIFT_EXPR);
  new_rhs = expand_vector_operation (gsi, type, compute_type, stmt, code);
  if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (new_rhs)))
    new_rhs = gimplify_build1 (gsi, VIEW_CONVERT_EXPR, TREE_TYPE (lhs),
                               new_rhs);

  /* NOTE:  We should avoid using gimple_assign_set_rhs_from_tree.  One
     way to do it is change expand_vector_operation and its callees to
     return a tree_code, RHS1 and RHS2 instead of a tree.  */
  gimple_assign_set_rhs_from_tree (gsi, new_rhs);
  update_stmt (gsi_stmt (*gsi));
}
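/* As an illustration of the shift canonicalization above: given
   v2 = v1 << {3, 3, 3, 3} on a target that only has a vector/scalar
   shift pattern, the uniform vector operand is replaced by the scalar
   3; conversely, a scalar shift count is broadcast into an
   {x, x, x, x} vector when only a vector/vector pattern exists.  */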
/* Use this to lower vector operations introduced by the vectorizer,
   if it may need the bit-twiddling tricks implemented in this file.  */

static bool
gate_expand_vector_operations (void)
{
  return flag_tree_vectorize != 0;
}
static unsigned int
expand_vector_operations (void)
{
  gimple_stmt_iterator gsi;
  basic_block bb;
  bool cfg_changed = false;

  FOR_EACH_BB (bb)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          expand_vector_operations_1 (&gsi);
          /* ???  If we do not cleanup EH then we will ICE in
             verification.  But in reality we have created wrong-code
             as we did not properly transition EH info and edges to
             the piecewise computations.  */
          if (maybe_clean_eh_stmt (gsi_stmt (gsi))
              && gimple_purge_dead_eh_edges (bb))
            cfg_changed = true;
        }
    }

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
struct gimple_opt_pass pass_lower_vector =
{
 {
  GIMPLE_PASS,
  "veclower",                           /* name */
  0,                                    /* gate */
  expand_vector_operations,             /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_cfg,                             /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func | TODO_update_ssa      /* todo_flags_finish */
    | TODO_verify_ssa
    | TODO_verify_stmts | TODO_verify_flow
 }
};
struct gimple_opt_pass pass_lower_vector_ssa =
{
 {
  GIMPLE_PASS,
  "veclower2",                          /* name */
  gate_expand_vector_operations,        /* gate */
  expand_vector_operations,             /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_cfg,                             /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func | TODO_update_ssa      /* todo_flags_finish */
    | TODO_verify_ssa
    | TODO_verify_stmts | TODO_verify_flow
 }
};
#include "gt-tree-vect-generic.h"