/* Convert tree expression to rtl instructions, for GNU compiler.
   Copyright (C) 1988-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "insn-attr.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "optabs-tree.h"
#include "langhooks.h"
#include "common/common-target.h"
#include "tree-ssa-live.h"
#include "tree-outof-ssa.h"
#include "tree-ssa-address.h"
#include "gimple-iterator.h"
#include "gimple-fold.h"
#include "rtx-vector-builder.h"
#include "tree-pretty-print.h"
/* If this is nonzero, we do not bother generating VOLATILE
   around volatile memory references, and we are willing to
   output indirect addresses.  If cse is to follow, we reject
   indirect addresses so a useful potential cse is generated;
   if it is used only once, instruction combination will produce
   the same indirect address eventually.  */
static bool block_move_libcall_safe_for_call_parm (void);
static bool emit_block_move_via_pattern (rtx, rtx, rtx, unsigned, unsigned,
					 HOST_WIDE_INT, unsigned HOST_WIDE_INT,
					 unsigned HOST_WIDE_INT,
					 unsigned HOST_WIDE_INT, bool);
static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned, int);
static void emit_block_move_via_sized_loop (rtx, rtx, rtx, unsigned, unsigned);
static void emit_block_move_via_oriented_loop (rtx, rtx, rtx, unsigned,
					       unsigned);
static rtx emit_block_cmp_via_loop (rtx, rtx, rtx, tree, rtx, bool,
				    unsigned, unsigned);
static rtx_insn *compress_float_constant (rtx, rtx);
static rtx get_subtarget (rtx);
static rtx store_field (rtx, poly_int64, poly_int64, poly_uint64, poly_uint64,
			machine_mode, tree, alias_set_type, bool, bool);
static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree,
							      const_tree);
static bool is_aligning_offset (const_tree, const_tree);
static rtx reduce_to_bit_field_precision (rtx, rtx, tree);
static rtx do_store_flag (const_sepops, rtx, machine_mode);
static void emit_single_push_insn (machine_mode, rtx, tree);
static void do_tablejump (rtx, machine_mode, rtx, rtx, rtx,
			  profile_probability);
static rtx const_vector_from_tree (tree);
static tree tree_expr_size (const_tree);
static void convert_mode_scalar (rtx, rtx, int);
/* This is run to set up which modes can be used
   directly in memory and to initialize the block move optab.  It is run
   at the beginning of compilation and when the target is reinitialized.  */

void
init_expr_target (void)
{
  /* Try indexing by frame ptr and try by stack ptr.
     It is known that on the Convex the stack ptr isn't a valid index.
     With luck, one or the other is valid on any machine.  */
  mem = gen_rtx_MEM (word_mode, stack_pointer_rtx);
  mem1 = gen_rtx_MEM (word_mode, frame_pointer_rtx);

  /* A scratch register we can modify in-place below to avoid
     useless RTL allocations.  */
  reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  rtx_insn *insn = as_a <rtx_insn *> (rtx_alloc (INSN));
  pat = gen_rtx_SET (NULL_RTX, NULL_RTX);
  PATTERN (insn) = pat;
  for (machine_mode mode = VOIDmode; (int) mode < NUM_MACHINE_MODES;
       mode = (machine_mode) ((int) mode + 1))
    {
      direct_load[(int) mode] = direct_store[(int) mode] = 0;
      PUT_MODE (mem, mode);
      PUT_MODE (mem1, mode);

      /* See if there is some register that can be used in this mode and
	 directly loaded or stored from memory.  */

      if (mode != VOIDmode && mode != BLKmode)
	for (regno = 0; regno < FIRST_PSEUDO_REGISTER
	     && (direct_load[(int) mode] == 0 || direct_store[(int) mode] == 0);
	     regno++)
	  {
	    if (!targetm.hard_regno_mode_ok (regno, mode))
	      continue;

	    set_mode_and_regno (reg, mode, regno);

	    SET_SRC (pat) = mem;
	    SET_DEST (pat) = reg;
	    if (recog (pat, insn, &num_clobbers) >= 0)
	      direct_load[(int) mode] = 1;

	    SET_SRC (pat) = mem1;
	    SET_DEST (pat) = reg;
	    if (recog (pat, insn, &num_clobbers) >= 0)
	      direct_load[(int) mode] = 1;

	    SET_SRC (pat) = reg;
	    SET_DEST (pat) = mem;
	    if (recog (pat, insn, &num_clobbers) >= 0)
	      direct_store[(int) mode] = 1;

	    SET_SRC (pat) = reg;
	    SET_DEST (pat) = mem1;
	    if (recog (pat, insn, &num_clobbers) >= 0)
	      direct_store[(int) mode] = 1;
	  }
    }
  mem = gen_rtx_MEM (VOIDmode, gen_raw_REG (Pmode, LAST_VIRTUAL_REGISTER + 1));

  opt_scalar_float_mode mode_iter;
  FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_FLOAT)
    {
      scalar_float_mode mode = mode_iter.require ();
      scalar_float_mode srcmode;
      FOR_EACH_MODE_UNTIL (srcmode, mode)
	{
	  enum insn_code ic;

	  ic = can_extend_p (mode, srcmode, 0);
	  if (ic == CODE_FOR_nothing)
	    continue;

	  PUT_MODE (mem, srcmode);

	  if (insn_operand_matches (ic, 1, mem))
	    float_extend_from_mem[mode][srcmode] = true;
	}
    }
}
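
/* Illustrative note: the tables filled in above are what later queries
   consult.  For example, direct_load[(int) SImode] being nonzero means a
   recognized insn can load an SImode register straight from memory, and
   float_extend_from_mem[DFmode][SFmode] answers whether a float_extend
   may take its SFmode input directly from a MEM.  */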
/* This is run at the start of compiling a function.  */

void
init_expr (void)
{
  memset (&crtl->expr, 0, sizeof (crtl->expr));
}
/* Copy data from FROM to TO, where the machine modes are not the same.
   Both modes may be integer, or both may be floating, or both may be
   fixed-point.

   UNSIGNEDP should be nonzero if FROM is an unsigned type.
   This causes zero-extension instead of sign-extension.  */

void
convert_move (rtx to, rtx from, int unsignedp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);

  gcc_assert (to_mode != BLKmode);
  gcc_assert (from_mode != BLKmode);
  /* If the source and destination are already the same, then there's
     nothing to do.  */
  if (to == from)
    return;

  /* If FROM is a SUBREG that indicates that we have already done at least
     the required extension, strip it.  We don't handle such SUBREGs as
     TO here.  */

  scalar_int_mode to_int_mode;
  if (GET_CODE (from) == SUBREG
      && SUBREG_PROMOTED_VAR_P (from)
      && is_a <scalar_int_mode> (to_mode, &to_int_mode)
      && (GET_MODE_PRECISION (subreg_promoted_mode (from))
	  >= GET_MODE_PRECISION (to_int_mode))
      && SUBREG_CHECK_PROMOTED_SIGN (from, unsignedp))
    {
      scalar_int_mode int_orig_mode;
      scalar_int_mode int_inner_mode;
      machine_mode orig_mode = GET_MODE (from);

      from = gen_lowpart (to_int_mode, SUBREG_REG (from));
      from_mode = to_int_mode;

      /* Preserve SUBREG_PROMOTED_VAR_P if the new mode is wider than
	 the original mode, but narrower than the inner mode.  */
      if (GET_CODE (from) == SUBREG
	  && is_a <scalar_int_mode> (orig_mode, &int_orig_mode)
	  && GET_MODE_PRECISION (to_int_mode)
	     > GET_MODE_PRECISION (int_orig_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (from)),
				     &int_inner_mode)
	  && GET_MODE_PRECISION (int_inner_mode)
	     > GET_MODE_PRECISION (to_int_mode))
	{
	  SUBREG_PROMOTED_VAR_P (from) = 1;
	  SUBREG_PROMOTED_SET (from, unsignedp);
	}
    }
  gcc_assert (GET_CODE (to) != SUBREG || !SUBREG_PROMOTED_VAR_P (to));

  if (to_mode == from_mode
      || (from_mode == VOIDmode && CONSTANT_P (from)))
    {
      emit_move_insn (to, from);
      return;
    }

  if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode))
    {
      if (GET_MODE_UNIT_PRECISION (to_mode)
	  > GET_MODE_UNIT_PRECISION (from_mode))
	{
	  optab op = unsignedp ? zext_optab : sext_optab;
	  insn_code icode = convert_optab_handler (op, to_mode, from_mode);
	  if (icode != CODE_FOR_nothing)
	    {
	      emit_unop_insn (icode, to, from,
			      unsignedp ? ZERO_EXTEND : SIGN_EXTEND);
	      return;
	    }
	}

      if (GET_MODE_UNIT_PRECISION (to_mode)
	  < GET_MODE_UNIT_PRECISION (from_mode))
	{
	  insn_code icode = convert_optab_handler (trunc_optab,
						   to_mode, from_mode);
	  if (icode != CODE_FOR_nothing)
	    {
	      emit_unop_insn (icode, to, from, TRUNCATE);
	      return;
	    }
	}

      gcc_assert (known_eq (GET_MODE_BITSIZE (from_mode),
			    GET_MODE_BITSIZE (to_mode)));

      if (VECTOR_MODE_P (to_mode))
	from = force_subreg (to_mode, from, GET_MODE (from), 0);
      else
	to = simplify_gen_subreg (from_mode, to, GET_MODE (to), 0);
      emit_move_insn (to, from);
      return;
    }

  if (GET_CODE (to) == CONCAT && GET_CODE (from) == CONCAT)
    {
      convert_move (XEXP (to, 0), XEXP (from, 0), unsignedp);
      convert_move (XEXP (to, 1), XEXP (from, 1), unsignedp);
      return;
    }

  convert_mode_scalar (to, from, unsignedp);
}
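
/* A minimal usage sketch (illustrative only; the registers are made up):

     rtx src = gen_reg_rtx (SImode);
     rtx dst = gen_reg_rtx (DImode);
     convert_move (dst, src, 0);   // sign-extend SImode into DImode
     convert_move (dst, src, 1);   // zero-extend instead

   UNSIGNEDP chooses between zero- and sign-extension when the
   destination is wider than the source.  */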
/* Like convert_move, but deals only with scalar modes.  */

static void
convert_mode_scalar (rtx to, rtx from, int unsignedp)
{
  /* Both modes should be scalar types.  */
  scalar_mode from_mode = as_a <scalar_mode> (GET_MODE (from));
  scalar_mode to_mode = as_a <scalar_mode> (GET_MODE (to));
  bool to_real = SCALAR_FLOAT_MODE_P (to_mode);
  bool from_real = SCALAR_FLOAT_MODE_P (from_mode);

  gcc_assert (to_real == from_real);

  /* rtx code for making an equivalent value.  */
  enum rtx_code equiv_code = (unsignedp < 0 ? UNKNOWN
			      : (unsignedp ? ZERO_EXTEND : SIGN_EXTEND));
  auto acceptable_same_precision_modes
    = [] (scalar_mode from_mode, scalar_mode to_mode) -> bool
  {
    if (DECIMAL_FLOAT_MODE_P (from_mode) != DECIMAL_FLOAT_MODE_P (to_mode))
      return true;

    /* arm_bfloat_half_format <-> ieee_half_format */
    if ((REAL_MODE_FORMAT (from_mode) == &arm_bfloat_half_format
	 && REAL_MODE_FORMAT (to_mode) == &ieee_half_format)
	|| (REAL_MODE_FORMAT (to_mode) == &arm_bfloat_half_format
	    && REAL_MODE_FORMAT (from_mode) == &ieee_half_format))
      return true;

    /* ibm_extended_format <-> ieee_quad_format */
    if ((REAL_MODE_FORMAT (from_mode) == &ibm_extended_format
	 && REAL_MODE_FORMAT (to_mode) == &ieee_quad_format)
	|| (REAL_MODE_FORMAT (from_mode) == &ieee_quad_format
	    && REAL_MODE_FORMAT (to_mode) == &ibm_extended_format))
      return true;

    return false;
  };

  if (to_real)
    {
      gcc_assert ((GET_MODE_PRECISION (from_mode)
		   != GET_MODE_PRECISION (to_mode))
		  || acceptable_same_precision_modes (from_mode, to_mode));

      if (GET_MODE_PRECISION (from_mode) == GET_MODE_PRECISION (to_mode))
	{
	  if ((REAL_MODE_FORMAT (to_mode) == &arm_bfloat_half_format
	       && REAL_MODE_FORMAT (from_mode) == &ieee_half_format)
	      || (REAL_MODE_FORMAT (to_mode) == &ieee_quad_format
		  && REAL_MODE_FORMAT (from_mode) == &ibm_extended_format))
	    /* libgcc implements just __trunchfbf2, not __extendhfbf2;
	       and __trunctfkf2, not __extendtfkf2.  */
	    tab = trunc_optab;
	  else
	    /* Conversion between decimal float and binary float, same
	       size.  */
	    tab = DECIMAL_FLOAT_MODE_P (from_mode) ? trunc_optab : sext_optab;
	}
      else if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode))
	tab = sext_optab;
      else
	tab = trunc_optab;

      /* Try converting directly if the insn is supported.  */

      code = convert_optab_handler (tab, to_mode, from_mode);
      if (code != CODE_FOR_nothing)
	{
	  emit_unop_insn (code, to, from,
			  tab == sext_optab ? FLOAT_EXTEND : FLOAT_TRUNCATE);
	  return;
	}
      if (REAL_MODE_FORMAT (from_mode) == &arm_bfloat_half_format
	  && REAL_MODE_FORMAT (SFmode) == &ieee_single_format)
	{
	  if (GET_MODE_PRECISION (to_mode) > GET_MODE_PRECISION (SFmode))
	    {
	      /* To cut down on libgcc size, implement
		 BFmode -> {DF,XF,TF}mode conversions by
		 BFmode -> SFmode -> {DF,XF,TF}mode conversions.  */
	      rtx temp = gen_reg_rtx (SFmode);
	      convert_mode_scalar (temp, from, unsignedp);
	      convert_mode_scalar (to, temp, unsignedp);
	      return;
	    }
	  if (REAL_MODE_FORMAT (to_mode) == &ieee_half_format)
	    {
	      /* Similarly, implement BFmode -> HFmode as
		 BFmode -> SFmode -> HFmode conversion where SFmode
		 has superset of BFmode values.  We don't need
		 to handle sNaNs by raising exception and turning
		 it into qNaN though, as that can be done in the
		 SFmode -> HFmode conversion too.  */
	      rtx temp = gen_reg_rtx (SFmode);
	      int save_flag_finite_math_only = flag_finite_math_only;
	      flag_finite_math_only = true;
	      convert_mode_scalar (temp, from, unsignedp);
	      flag_finite_math_only = save_flag_finite_math_only;
	      convert_mode_scalar (to, temp, unsignedp);
	      return;
	    }
	  if (to_mode == SFmode
	      && !HONOR_NANS (from_mode)
	      && !HONOR_NANS (to_mode)
	      && optimize_insn_for_speed_p ())
	    {
	      /* If we don't expect sNaNs, for BFmode -> SFmode we can just
		 shift the bits up.  */
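	      /* For instance (illustrative): the BFmode pattern 0x3f80 is
		 1.0; shifted left by 16 it becomes 0x3f800000, which is
		 exactly 1.0f in SFmode, so when sNaNs need not be
		 preserved the extension is a pure bit operation.  */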
	      machine_mode fromi_mode, toi_mode;
	      if (int_mode_for_size (GET_MODE_BITSIZE (from_mode),
				     0).exists (&fromi_mode)
		  && int_mode_for_size (GET_MODE_BITSIZE (to_mode),
					0).exists (&toi_mode))
		{
		  start_sequence ();
		  rtx fromi = force_lowpart_subreg (fromi_mode, from,
						    from_mode);
		  rtx toi;
		  if (GET_MODE (fromi) == VOIDmode)
		    toi = simplify_unary_operation (ZERO_EXTEND, toi_mode,
						    fromi, fromi_mode);
		  else
		    {
		      toi = gen_reg_rtx (toi_mode);
		      convert_mode_scalar (toi, fromi, 1);
		    }
		  toi
		    = maybe_expand_shift (LSHIFT_EXPR, toi_mode, toi,
					  GET_MODE_PRECISION (to_mode)
					  - GET_MODE_PRECISION (from_mode),
					  NULL_RTX, 1);
		  rtx tof = force_lowpart_subreg (to_mode, toi, toi_mode);
		  emit_move_insn (to, tof);
		  insns = get_insns ();
		  end_sequence ();
		  emit_insn (insns);
		  return;
		}
	    }
	  if (REAL_MODE_FORMAT (from_mode) == &ieee_single_format
	      && REAL_MODE_FORMAT (to_mode) == &arm_bfloat_half_format
	      && !HONOR_NANS (from_mode)
	      && !HONOR_NANS (to_mode)
	      && !flag_rounding_math
	      && optimize_insn_for_speed_p ())
	    {
	      /* If we don't expect qNaNs nor sNaNs and can assume rounding
		 to nearest, we can expand the conversion inline as
		 (fromi + 0x7fff + ((fromi >> 16) & 1)) >> 16.  */
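	      /* Worked example (illustrative): for the SFmode pattern
		 0x3f801234 the kept half is 0x3f80 (low bit 0) and the
		 discarded half 0x1234 is below 0x8000, so adding 0x7fff
		 does not carry and the result stays 0x3f80; a discarded
		 half above 0x8000 carries and rounds up, and an exact
		 0x8000 tie rounds to the value whose low bit is clear.  */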
	      machine_mode fromi_mode, toi_mode;
	      if (int_mode_for_size (GET_MODE_BITSIZE (from_mode),
				     0).exists (&fromi_mode)
		  && int_mode_for_size (GET_MODE_BITSIZE (to_mode),
					0).exists (&toi_mode))
		{
		  start_sequence ();
		  rtx fromi = force_lowpart_subreg (fromi_mode, from, from_mode);
		  int shift = (GET_MODE_PRECISION (from_mode)
			       - GET_MODE_PRECISION (to_mode));
		  rtx temp1
		    = maybe_expand_shift (RSHIFT_EXPR, fromi_mode, fromi,
					  shift, NULL_RTX, 1);
		  rtx temp2
		    = expand_binop (fromi_mode, and_optab, temp1, const1_rtx,
				    NULL_RTX, 1, OPTAB_DIRECT);
		  rtx temp3
		    = expand_binop (fromi_mode, add_optab, fromi,
				    gen_int_mode ((HOST_WIDE_INT_1U
						   << (shift - 1)) - 1,
						  fromi_mode), NULL_RTX,
				    1, OPTAB_DIRECT);
		  rtx temp4
		    = expand_binop (fromi_mode, add_optab, temp3, temp2,
				    NULL_RTX, 1, OPTAB_DIRECT);
		  rtx temp5 = maybe_expand_shift (RSHIFT_EXPR, fromi_mode,
						  temp4, shift, NULL_RTX, 1);
		  rtx temp6 = force_lowpart_subreg (toi_mode, temp5,
						    fromi_mode);
		  rtx tof = force_lowpart_subreg (to_mode, temp6, toi_mode);
		  emit_move_insn (to, tof);
		  insns = get_insns ();
		  end_sequence ();
		  emit_insn (insns);
		  return;
		}
	    }
      /* Otherwise use a libcall.  */
      libcall = convert_optab_libfunc (tab, to_mode, from_mode);

      /* Is this conversion implemented yet?  */
      gcc_assert (libcall);

      start_sequence ();
      value = emit_library_call_value (libcall, NULL_RTX, LCT_CONST, to_mode,
				       from, from_mode);
      insns = get_insns ();
      end_sequence ();
      emit_libcall_block (insns, to, value,
			  tab == trunc_optab ? gen_rtx_FLOAT_TRUNCATE (to_mode,
								       from)
			  : gen_rtx_FLOAT_EXTEND (to_mode, from));
      return;
    }
  /* Handle pointer conversion.  */			/* SPEE 900220.  */
  /* If the target has a converter from FROM_MODE to TO_MODE, use it.  */
  {
    convert_optab ctab;

    if (GET_MODE_PRECISION (from_mode) > GET_MODE_PRECISION (to_mode))
      ctab = trunc_optab;
    else if (unsignedp)
      ctab = zext_optab;
    else
      ctab = sext_optab;

    if (convert_optab_handler (ctab, to_mode, from_mode)
	!= CODE_FOR_nothing)
      {
	emit_unop_insn (convert_optab_handler (ctab, to_mode, from_mode),
			to, from, UNKNOWN);
	return;
      }
  }
  /* Targets are expected to provide conversion insns between PxImode and
     xImode for all MODE_PARTIAL_INT modes they use, but no others.  */
  if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT)
    {
      scalar_int_mode full_mode
	= smallest_int_mode_for_size (GET_MODE_BITSIZE (to_mode));

      gcc_assert (convert_optab_handler (trunc_optab, to_mode, full_mode)
		  != CODE_FOR_nothing);

      if (full_mode != from_mode)
	from = convert_to_mode (full_mode, from, unsignedp);
      emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, full_mode),
		      to, from, UNKNOWN);
      return;
    }
  if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT)
    {
      rtx new_from;
      scalar_int_mode full_mode
	= smallest_int_mode_for_size (GET_MODE_BITSIZE (from_mode));
      convert_optab ctab = unsignedp ? zext_optab : sext_optab;
      enum insn_code icode;

      icode = convert_optab_handler (ctab, full_mode, from_mode);
      gcc_assert (icode != CODE_FOR_nothing);

      if (to_mode == full_mode)
	{
	  emit_unop_insn (icode, to, from, UNKNOWN);
	  return;
	}

      new_from = gen_reg_rtx (full_mode);
      emit_unop_insn (icode, new_from, from, UNKNOWN);

      /* else proceed to integer conversions below.  */
      from_mode = full_mode;
      from = new_from;
    }
   /* Make sure both are fixed-point modes or both are not.  */
   gcc_assert (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode) ==
	       ALL_SCALAR_FIXED_POINT_MODE_P (to_mode));
   if (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode))
    {
      /* If we widen from_mode to to_mode and they are in the same class,
	 we won't saturate the result.
	 Otherwise, always saturate the result to play safe.  */
      if (GET_MODE_CLASS (from_mode) == GET_MODE_CLASS (to_mode)
	  && GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
	expand_fixed_convert (to, from, 0, 0);
      else
	expand_fixed_convert (to, from, 0, 1);
      return;
    }
  /* Now both modes are integers.  */

  /* Handle expanding beyond a word.  */
  if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode)
      && GET_MODE_PRECISION (to_mode) > BITS_PER_WORD)
    {
      rtx_insn *insns;
      rtx lowpart;
      rtx fill_value;
      rtx lowfrom;
      int i;
      scalar_mode lowpart_mode;
      int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD);

      /* Try converting directly if the insn is supported.  */
      if ((code = can_extend_p (to_mode, from_mode, unsignedp))
	  != CODE_FOR_nothing)
	{
	  /* If FROM is a SUBREG, put it into a register.  Do this
	     so that we always generate the same set of insns for
	     better cse'ing; if an intermediate assignment occurred,
	     we won't be doing the operation directly on the SUBREG.  */
	  if (optimize > 0 && GET_CODE (from) == SUBREG)
	    from = force_reg (from_mode, from);
	  emit_unop_insn (code, to, from, equiv_code);
	  return;
	}
      /* Next, try converting via full word.  */
      else if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD
	       && ((code = can_extend_p (to_mode, word_mode, unsignedp))
		   != CODE_FOR_nothing))
	{
	  rtx word_to = gen_reg_rtx (word_mode);
	  if (reg_overlap_mentioned_p (to, from))
	    from = force_reg (from_mode, from);
	  convert_move (word_to, from, unsignedp);
	  emit_unop_insn (code, to, word_to, equiv_code);
	  return;
	}

      /* No special multiword conversion insn; do it by hand.  */
      start_sequence ();

      /* Since we will turn this into a no conflict block, we must ensure
	 the source does not overlap the target so force it into an isolated
	 register when maybe so.  Likewise for any MEM input, since the
	 conversion sequence might require several references to it and we
	 must ensure we're getting the same value every time.  */

      if (MEM_P (from) || reg_overlap_mentioned_p (to, from))
	from = force_reg (from_mode, from);

      /* Get a copy of FROM widened to a word, if necessary.  */
      if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD)
	lowpart_mode = word_mode;
      else
	lowpart_mode = from_mode;

      lowfrom = convert_to_mode (lowpart_mode, from, unsignedp);

      lowpart = gen_lowpart (lowpart_mode, to);
      emit_move_insn (lowpart, lowfrom);

      /* Compute the value to put in each remaining word.  */
      if (unsignedp)
	fill_value = const0_rtx;
      else
	fill_value = emit_store_flag_force (gen_reg_rtx (word_mode),
					    LT, lowfrom, const0_rtx,
					    lowpart_mode, 0, -1);

      /* Fill the remaining words.  */
      for (i = GET_MODE_SIZE (lowpart_mode) / UNITS_PER_WORD; i < nwords; i++)
	{
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx subword = operand_subword (to, index, 1, to_mode);

	  gcc_assert (subword);

	  if (fill_value != subword)
	    emit_move_insn (subword, fill_value);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return;
    }
  /* Truncating multi-word to a word or less.  */
  if (GET_MODE_PRECISION (from_mode) > BITS_PER_WORD
      && GET_MODE_PRECISION (to_mode) <= BITS_PER_WORD)
    {
      if (!((MEM_P (from)
	     && ! MEM_VOLATILE_P (from)
	     && direct_load[(int) to_mode]
	     && ! mode_dependent_address_p (XEXP (from, 0),
					    MEM_ADDR_SPACE (from)))
	    || REG_P (from)
	    || GET_CODE (from) == SUBREG))
	from = force_reg (from_mode, from);
      convert_move (to, gen_lowpart (word_mode, from), 0);
      return;
    }
  /* Now follow all the conversions between integers
     no more than a word long.  */

  /* For truncation, usually we can just refer to FROM in a narrower mode.  */
  if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode)
      && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, from_mode))
    {
      if (!((MEM_P (from)
	     && ! MEM_VOLATILE_P (from)
	     && direct_load[(int) to_mode]
	     && ! mode_dependent_address_p (XEXP (from, 0),
					    MEM_ADDR_SPACE (from)))
	    || REG_P (from)
	    || GET_CODE (from) == SUBREG))
	from = force_reg (from_mode, from);
      if (REG_P (from) && REGNO (from) < FIRST_PSEUDO_REGISTER
	  && !targetm.hard_regno_mode_ok (REGNO (from), to_mode))
	from = copy_to_reg (from);
      emit_move_insn (to, gen_lowpart (to_mode, from));
      return;
    }
  /* Handle extension.  */
  if (GET_MODE_PRECISION (to_mode) > GET_MODE_PRECISION (from_mode))
    {
      /* Convert directly if that works.  */
      if ((code = can_extend_p (to_mode, from_mode, unsignedp))
	  != CODE_FOR_nothing)
	{
	  emit_unop_insn (code, to, from, equiv_code);
	  return;
	}
      else
	{
	  rtx tmp;
	  int shift_amount;

	  /* Search for a mode to convert via.  */
	  opt_scalar_mode intermediate_iter;
	  FOR_EACH_MODE_FROM (intermediate_iter, from_mode)
	    {
	      scalar_mode intermediate = intermediate_iter.require ();
	      if (((can_extend_p (to_mode, intermediate, unsignedp)
		    != CODE_FOR_nothing)
		   || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
		       && TRULY_NOOP_TRUNCATION_MODES_P (to_mode,
							 intermediate)))
		  && (can_extend_p (intermediate, from_mode, unsignedp)
		      != CODE_FOR_nothing))
		{
		  convert_move (to, convert_to_mode (intermediate, from,
						     unsignedp), unsignedp);
		  return;
		}
	    }

	  /* No suitable intermediate mode.
	     Generate what we need with shifts.  */
	  shift_amount = (GET_MODE_PRECISION (to_mode)
			  - GET_MODE_PRECISION (from_mode));
	  from = gen_lowpart (to_mode, force_reg (from_mode, from));
	  tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount,
			      to, unsignedp);
	  tmp = expand_shift (RSHIFT_EXPR, to_mode, tmp, shift_amount,
			      to, unsignedp);
	  if (tmp != to)
	    emit_move_insn (to, tmp);
	  return;
	}
    }
  /* Support special truncate insns for certain modes.  */
  if (convert_optab_handler (trunc_optab, to_mode,
			     from_mode) != CODE_FOR_nothing)
    {
      emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, from_mode),
		      to, from, UNKNOWN);
      return;
    }

  /* Handle truncation of volatile memrefs, and so on;
     the things that couldn't be truncated directly,
     and for which there was no special instruction.

     ??? Code above formerly short-circuited this, for most integer
     mode pairs, with a force_reg in from_mode followed by a recursive
     call to this routine.  Appears always to have been wrong.  */
  if (GET_MODE_PRECISION (to_mode) < GET_MODE_PRECISION (from_mode))
    {
      rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from));
      emit_move_insn (to, temp);
      return;
    }

  /* Mode combination is not recognized.  */
  gcc_unreachable ();
}
/* Return an rtx for a value that would result
   from converting X to mode MODE.
   Both X and MODE may be floating, or both integer.
   UNSIGNEDP is nonzero if X is an unsigned value.
   This can be done by referring to a part of X in place
   or by copying to a new temporary with conversion.  */

rtx
convert_to_mode (machine_mode mode, rtx x, int unsignedp)
{
  return convert_modes (mode, VOIDmode, x, unsignedp);
}
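
/* Illustrative usage sketch: convert_to_mode is the value-producing
   counterpart of convert_move, e.g.

     rtx val  = gen_reg_rtx (SImode);
     rtx wide = convert_to_mode (DImode, val, 1);   // zero-extended copy

   returns a fresh DImode rtx rather than storing into an existing
   destination.  */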
/* Return an rtx for a value that would result
   from converting X from mode OLDMODE to mode MODE.
   Both modes may be floating, or both integer.
   UNSIGNEDP is nonzero if X is an unsigned value.

   This can be done by referring to a part of X in place
   or by copying to a new temporary with conversion.

   You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode.  */

rtx
convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp)
{
  rtx temp;
  scalar_int_mode int_mode;

  /* If FROM is a SUBREG that indicates that we have already done at least
     the required extension, strip it.  */

  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && (GET_MODE_PRECISION (subreg_promoted_mode (x))
	  >= GET_MODE_PRECISION (int_mode))
      && SUBREG_CHECK_PROMOTED_SIGN (x, unsignedp))
    {
      scalar_int_mode int_orig_mode;
      scalar_int_mode int_inner_mode;
      machine_mode orig_mode = GET_MODE (x);
      x = gen_lowpart (int_mode, SUBREG_REG (x));

      /* Preserve SUBREG_PROMOTED_VAR_P if the new mode is wider than
	 the original mode, but narrower than the inner mode.  */
      if (GET_CODE (x) == SUBREG
	  && is_a <scalar_int_mode> (orig_mode, &int_orig_mode)
	  && GET_MODE_PRECISION (int_mode)
	     > GET_MODE_PRECISION (int_orig_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)),
				     &int_inner_mode)
	  && GET_MODE_PRECISION (int_inner_mode)
	     > GET_MODE_PRECISION (int_mode))
	{
	  SUBREG_PROMOTED_VAR_P (x) = 1;
	  SUBREG_PROMOTED_SET (x, unsignedp);
	}
    }

  if (GET_MODE (x) != VOIDmode)
    oldmode = GET_MODE (x);

  if (mode == oldmode)
    return x;

  if (CONST_SCALAR_INT_P (x)
      && is_a <scalar_int_mode> (mode, &int_mode))
    {
      /* If the caller did not tell us the old mode, then there is not
	 much to do with respect to canonicalization.  We have to
	 assume that all the bits are significant.  */
      if (!is_a <scalar_int_mode> (oldmode))
	oldmode = MAX_MODE_INT;
      wide_int w = wide_int::from (rtx_mode_t (x, oldmode),
				   GET_MODE_PRECISION (int_mode),
				   unsignedp ? UNSIGNED : SIGNED);
      return immed_wide_int_const (w, int_mode);
    }

  /* We can do this with a gen_lowpart if both desired and current modes
     are integer, and this is either a constant integer, a register, or a
     non-volatile MEM.  */
  scalar_int_mode int_oldmode;
  if (is_int_mode (mode, &int_mode)
      && is_int_mode (oldmode, &int_oldmode)
      && GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (int_oldmode)
      && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) int_mode])
	  || CONST_POLY_INT_P (x)
	  || (REG_P (x)
	      && (!HARD_REGISTER_P (x)
		  || targetm.hard_regno_mode_ok (REGNO (x), int_mode))
	      && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, GET_MODE (x)))))
    return gen_lowpart (int_mode, x);

  /* Converting from integer constant into mode is always equivalent to an
     subreg operation.  */
  if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode)
    {
      gcc_assert (known_eq (GET_MODE_BITSIZE (mode),
			    GET_MODE_BITSIZE (oldmode)));
      return force_subreg (mode, x, oldmode, 0);
    }

  temp = gen_reg_rtx (mode);
  convert_move (temp, x, unsignedp);
  return temp;
}
/* Variant of convert_modes for ABI parameter passing/return.
   Return an rtx for a value that would result from converting X from
   a floating point mode FMODE to wider integer mode MODE.  */

rtx
convert_float_to_wider_int (machine_mode mode, machine_mode fmode, rtx x)
{
  gcc_assert (SCALAR_INT_MODE_P (mode) && SCALAR_FLOAT_MODE_P (fmode));
  scalar_int_mode tmp_mode = int_mode_for_mode (fmode).require ();
  rtx tmp = force_reg (tmp_mode, gen_lowpart (tmp_mode, x));
  return convert_modes (mode, tmp_mode, tmp, 1);
}

/* Variant of convert_modes for ABI parameter passing/return.
   Return an rtx for a value that would result from converting X from
   an integer mode IMODE to a narrower floating point mode MODE.  */

rtx
convert_wider_int_to_float (machine_mode mode, machine_mode imode, rtx x)
{
  gcc_assert (SCALAR_FLOAT_MODE_P (mode) && SCALAR_INT_MODE_P (imode));
  scalar_int_mode tmp_mode = int_mode_for_mode (mode).require ();
  rtx tmp = force_reg (tmp_mode, gen_lowpart (tmp_mode, x));
  return gen_lowpart_SUBREG (mode, tmp);
}
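
/* For example (illustrative, FVAL being some hypothetical HFmode rtx):
   passing a _Float16 argument in a 32-bit integer register can be
   expanded roughly as

     rtx ireg = convert_float_to_wider_int (SImode, HFmode, fval);

   i.e. the HFmode bits are reinterpreted as an integer and widened,
   not converted numerically.  */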
/* Return the largest alignment we can use for doing a move (or store)
   of MAX_PIECES.  ALIGN is the largest alignment we could use.  */

static unsigned int
alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align)
{
  scalar_int_mode tmode
    = int_mode_for_size (max_pieces * BITS_PER_UNIT, 0).require ();

  if (align >= GET_MODE_ALIGNMENT (tmode))
    align = GET_MODE_ALIGNMENT (tmode);
  else
    {
      scalar_int_mode xmode = NARROWEST_INT_MODE;
      opt_scalar_int_mode mode_iter;
      FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
	{
	  tmode = mode_iter.require ();
	  if (GET_MODE_SIZE (tmode) > max_pieces
	      || targetm.slow_unaligned_access (tmode, align))
	    break;
	  xmode = tmode;
	}

      align = MAX (align, GET_MODE_ALIGNMENT (xmode));
    }

  return align;
}
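
/* For instance (illustrative): with MAX_PIECES == 8 on a target whose
   DImode alignment is 64 bits, an incoming ALIGN of 256 bits is clamped
   to 64, while a smaller ALIGN may be raised to the alignment of the
   widest mode the target can access unaligned without a penalty.  */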
/* Return true if we know how to implement OP using vectors of bytes.  */
static bool
can_use_qi_vectors (by_pieces_operation op)
{
  return (op == COMPARE_BY_PIECES
	  || op == SET_BY_PIECES
	  || op == CLEAR_BY_PIECES);
}

/* Return true if optabs exists for the mode and certain by pieces
   operations.  */
static bool
by_pieces_mode_supported_p (fixed_size_mode mode, by_pieces_operation op)
{
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    return false;

  if ((op == SET_BY_PIECES || op == CLEAR_BY_PIECES)
      && VECTOR_MODE_P (mode)
      && optab_handler (vec_duplicate_optab, mode) == CODE_FOR_nothing)
    return false;

  if (op == COMPARE_BY_PIECES
      && !can_compare_p (EQ, mode, ccp_jump))
    return false;

  return true;
}
/* Return the widest mode that can be used to perform part of an
   operation OP on SIZE bytes.  Try to use QI vector modes where
   possible.  */

static fixed_size_mode
widest_fixed_size_mode_for_size (unsigned int size, by_pieces_operation op)
{
  fixed_size_mode result = NARROWEST_INT_MODE;

  gcc_checking_assert (size > 1);

  /* Use QI vector only if size is wider than a WORD.  */
  if (can_use_qi_vectors (op) && size > UNITS_PER_WORD)
    {
      machine_mode mode;
      fixed_size_mode candidate;
      FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
	if (is_a <fixed_size_mode> (mode, &candidate)
	    && GET_MODE_INNER (candidate) == QImode)
	  {
	    if (GET_MODE_SIZE (candidate) >= size)
	      break;
	    if (by_pieces_mode_supported_p (candidate, op))
	      result = candidate;
	  }

      if (result != NARROWEST_INT_MODE)
	return result;
    }

  opt_scalar_int_mode tmode;
  scalar_int_mode mode;
  FOR_EACH_MODE_IN_CLASS (tmode, MODE_INT)
    {
      mode = tmode.require ();
      if (GET_MODE_SIZE (mode) < size
	  && by_pieces_mode_supported_p (mode, op))
	result = mode;
    }

  return result;
}
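
/* For instance (illustrative): asked for SIZE == 17 bytes of a
   COMPARE_BY_PIECES, a target with 16-byte QImode vector moves gets that
   vector mode back (the widest candidate strictly smaller than SIZE),
   while a purely scalar target falls through to the MODE_INT loop and
   gets its widest supported integer mode narrower than 17 bytes.  */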
/* Determine whether an operation OP on LEN bytes with alignment ALIGN can
   and should be performed piecewise.  */

static bool
can_do_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align,
		  enum by_pieces_operation op)
{
  return targetm.use_by_pieces_infrastructure_p (len, align, op,
						 optimize_insn_for_speed_p ());
}

/* Determine whether the LEN bytes can be moved by using several move
   instructions.  Return nonzero if a call to move_by_pieces should
   succeed.  */

bool
can_move_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align)
{
  return can_do_by_pieces (len, align, MOVE_BY_PIECES);
}
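
/* Illustrative check (hypothetical values): can_move_by_pieces (32, 64)
   asks whether a 32-byte copy with 64-bit alignment should be open-coded;
   it returns true only when the target's use_by_pieces_infrastructure_p
   hook accepts it, and only then is the copy expanded inline instead of
   calling memcpy.  */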
/* Return number of insns required to perform operation OP by pieces
   for L bytes.  ALIGN (in bits) is maximum alignment we can assume.  */

unsigned HOST_WIDE_INT
by_pieces_ninsns (unsigned HOST_WIDE_INT l, unsigned int align,
		  unsigned int max_size, by_pieces_operation op)
{
  unsigned HOST_WIDE_INT n_insns = 0;
  fixed_size_mode mode;

  if (targetm.overlap_op_by_pieces_p ())
    {
      /* NB: Round up L and ALIGN to the widest integer mode for
	 MAX_SIZE.  */
      mode = widest_fixed_size_mode_for_size (max_size, op);
      gcc_assert (optab_handler (mov_optab, mode) != CODE_FOR_nothing);
      unsigned HOST_WIDE_INT up = ROUND_UP (l, GET_MODE_SIZE (mode));
      l = up;
      align = GET_MODE_ALIGNMENT (mode);
    }

  align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);

  while (max_size > 1 && l > 0)
    {
      mode = widest_fixed_size_mode_for_size (max_size, op);
      gcc_assert (optab_handler (mov_optab, mode) != CODE_FOR_nothing);

      unsigned int modesize = GET_MODE_SIZE (mode);

      if (align >= GET_MODE_ALIGNMENT (mode))
	{
	  unsigned HOST_WIDE_INT n_pieces = l / modesize;
	  l %= modesize;
	  switch (op)
	    {
	    default:
	      n_insns += n_pieces;
	      break;

	    case COMPARE_BY_PIECES:
	      int batch = targetm.compare_by_pieces_branch_ratio (mode);
	      int batch_ops = 4 * batch - 1;
	      unsigned HOST_WIDE_INT full = n_pieces / batch;
	      n_insns += full * batch_ops;
	      if (n_pieces % batch != 0)
		n_insns += batch_ops;
	      break;
	    }
	}
      max_size = modesize;
    }

  gcc_assert (!l);
  return n_insns;
}
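
/* Worked example (illustrative): for COMPARE_BY_PIECES with a
   compare_by_pieces_branch_ratio of 4, each full batch of 4 pieces is
   costed at 4 * 4 - 1 = 15 insns and a trailing partial batch is charged
   the same 15, whereas MOVE_BY_PIECES simply counts one insn per piece.  */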
1175 /* Used when performing piecewise block operations, holds information
1176 about one of the memory objects involved. The member functions
1177 can be used to generate code for loading from the object and
1178 updating the address when iterating. */
1182 /* The object being referenced, a MEM. Can be NULL_RTX to indicate
1185 /* The address of the object. Can differ from that seen in the
1186 MEM rtx if we copied the address to a register. */
1188 /* Nonzero if the address on the object has an autoincrement already,
1189 signifies whether that was an increment or decrement. */
1190 signed char m_addr_inc
;
1191 /* Nonzero if we intend to use autoinc without the address already
1192 having autoinc form. We will insert add insns around each memory
1193 reference, expecting later passes to form autoinc addressing modes.
1194 The only supported options are predecrement and postincrement. */
1195 signed char m_explicit_inc
;
1196 /* True if we have either of the two possible cases of using
1199 /* True if this is an address to be used for load operations rather
1203 /* Optionally, a function to obtain constants for any given offset into
1204 the objects, and data associated with it. */
1205 by_pieces_constfn m_constfn
;
1208 pieces_addr (rtx
, bool, by_pieces_constfn
, void *);
1209 rtx
adjust (fixed_size_mode
, HOST_WIDE_INT
, by_pieces_prev
* = nullptr);
1210 void increment_address (HOST_WIDE_INT
);
1211 void maybe_predec (HOST_WIDE_INT
);
1212 void maybe_postinc (HOST_WIDE_INT
);
1213 void decide_autoinc (machine_mode
, bool, HOST_WIDE_INT
);
1220 /* Initialize a pieces_addr structure from an object OBJ. IS_LOAD is
1221 true if the operation to be performed on this object is a load
1222 rather than a store. For stores, OBJ can be NULL, in which case we
1223 assume the operation is a stack push. For loads, the optional
1224 CONSTFN and its associated CFNDATA can be used in place of the
1227 pieces_addr::pieces_addr (rtx obj
, bool is_load
, by_pieces_constfn constfn
,
1229 : m_obj (obj
), m_is_load (is_load
), m_constfn (constfn
), m_cfndata (cfndata
)
1235 rtx addr
= XEXP (obj
, 0);
1236 rtx_code code
= GET_CODE (addr
);
1238 bool dec
= code
== PRE_DEC
|| code
== POST_DEC
;
1239 bool inc
= code
== PRE_INC
|| code
== POST_INC
;
1240 m_auto
= inc
|| dec
;
1242 m_addr_inc
= dec
? -1 : 1;
1244 /* While we have always looked for these codes here, the code
1245 implementing the memory operation has never handled them.
1246 Support could be added later if necessary or beneficial. */
1247 gcc_assert (code
!= PRE_INC
&& code
!= POST_DEC
);
1255 if (STACK_GROWS_DOWNWARD
)
1261 gcc_assert (constfn
!= NULL
);
1265 gcc_assert (is_load
);
1268 /* Decide whether to use autoinc for an address involved in a memory op.
1269 MODE is the mode of the accesses, REVERSE is true if we've decided to
1270 perform the operation starting from the end, and LEN is the length of
1271 the operation. Don't override an earlier decision to set m_auto. */
1274 pieces_addr::decide_autoinc (machine_mode
ARG_UNUSED (mode
), bool reverse
,
1277 if (m_auto
|| m_obj
== NULL_RTX
)
1280 bool use_predec
= (m_is_load
1281 ? USE_LOAD_PRE_DECREMENT (mode
)
1282 : USE_STORE_PRE_DECREMENT (mode
));
1283 bool use_postinc
= (m_is_load
1284 ? USE_LOAD_POST_INCREMENT (mode
)
1285 : USE_STORE_POST_INCREMENT (mode
));
1286 machine_mode addr_mode
= get_address_mode (m_obj
);
1288 if (use_predec
&& reverse
)
1290 m_addr
= copy_to_mode_reg (addr_mode
,
1291 plus_constant (addr_mode
,
1294 m_explicit_inc
= -1;
1296 else if (use_postinc
&& !reverse
)
1298 m_addr
= copy_to_mode_reg (addr_mode
, m_addr
);
1302 else if (CONSTANT_P (m_addr
))
1303 m_addr
= copy_to_mode_reg (addr_mode
, m_addr
);
1306 /* Adjust the address to refer to the data at OFFSET in MODE. If we
1307 are using autoincrement for this address, we don't add the offset,
1308 but we still modify the MEM's properties. */
1311 pieces_addr::adjust (fixed_size_mode mode
, HOST_WIDE_INT offset
,
1312 by_pieces_prev
*prev
)
1315 /* Pass the previous data to m_constfn. */
1316 return m_constfn (m_cfndata
, prev
, offset
, mode
);
1317 if (m_obj
== NULL_RTX
)
1320 return adjust_automodify_address (m_obj
, mode
, m_addr
, offset
);
1322 return adjust_address (m_obj
, mode
, offset
);
1325 /* Emit an add instruction to increment the address by SIZE. */
1328 pieces_addr::increment_address (HOST_WIDE_INT size
)
1330 rtx amount
= gen_int_mode (size
, GET_MODE (m_addr
));
1331 emit_insn (gen_add2_insn (m_addr
, amount
));
1334 /* If we are supposed to decrement the address after each access, emit code
1335 to do so now. Increment by SIZE (which has should have the correct sign
1339 pieces_addr::maybe_predec (HOST_WIDE_INT size
)
1341 if (m_explicit_inc
>= 0)
1343 gcc_assert (HAVE_PRE_DECREMENT
);
1344 increment_address (size
);
1347 /* If we are supposed to decrement the address after each access, emit code
1348 to do so now. Increment by SIZE. */
1351 pieces_addr::maybe_postinc (HOST_WIDE_INT size
)
1353 if (m_explicit_inc
<= 0)
1355 gcc_assert (HAVE_POST_INCREMENT
);
1356 increment_address (size
);
1359 /* This structure is used by do_op_by_pieces to describe the operation
1362 class op_by_pieces_d
1365 fixed_size_mode
get_usable_mode (fixed_size_mode
, unsigned int);
1366 fixed_size_mode
smallest_fixed_size_mode_for_size (unsigned int);
1369 pieces_addr m_to
, m_from
;
1370 /* Make m_len read-only so that smallest_fixed_size_mode_for_size can
1371 use it to check the valid mode size. */
1372 const unsigned HOST_WIDE_INT m_len
;
1373 HOST_WIDE_INT m_offset
;
1374 unsigned int m_align
;
1375 unsigned int m_max_size
;
1377 /* True if this is a stack push. */
1379 /* True if targetm.overlap_op_by_pieces_p () returns true. */
1380 bool m_overlap_op_by_pieces
;
1381 /* The type of operation that we're performing. */
1382 by_pieces_operation m_op
;
1384 /* Virtual functions, overriden by derived classes for the specific
1386 virtual void generate (rtx
, rtx
, machine_mode
) = 0;
1387 virtual bool prepare_mode (machine_mode
, unsigned int) = 0;
1388 virtual void finish_mode (machine_mode
)
1393 op_by_pieces_d (unsigned int, rtx
, bool, rtx
, bool, by_pieces_constfn
,
1394 void *, unsigned HOST_WIDE_INT
, unsigned int, bool,
1395 by_pieces_operation
);
1399 /* The constructor for an op_by_pieces_d structure. We require two
1400 objects named TO and FROM, which are identified as loads or stores
1401 by TO_LOAD and FROM_LOAD. If FROM is a load, the optional FROM_CFN
1402 and its associated FROM_CFN_DATA can be used to replace loads with
1403 constant values. MAX_PIECES describes the maximum number of bytes
1404 at a time which can be moved efficiently. LEN describes the length
1405 of the operation. */
1407 op_by_pieces_d::op_by_pieces_d (unsigned int max_pieces
, rtx to
,
1408 bool to_load
, rtx from
, bool from_load
,
1409 by_pieces_constfn from_cfn
,
1410 void *from_cfn_data
,
1411 unsigned HOST_WIDE_INT len
,
1412 unsigned int align
, bool push
,
1413 by_pieces_operation op
)
1414 : m_to (to
, to_load
, NULL
, NULL
),
1415 m_from (from
, from_load
, from_cfn
, from_cfn_data
),
1416 m_len (len
), m_max_size (max_pieces
+ 1),
1417 m_push (push
), m_op (op
)
1419 int toi
= m_to
.get_addr_inc ();
1420 int fromi
= m_from
.get_addr_inc ();
1421 if (toi
>= 0 && fromi
>= 0)
1423 else if (toi
<= 0 && fromi
<= 0)
1428 m_offset
= m_reverse
? len
: 0;
1429 align
= MIN (to
? MEM_ALIGN (to
) : align
,
1430 from
? MEM_ALIGN (from
) : align
);
1432 /* If copying requires more than two move insns,
1433 copy addresses to registers (to make displacements shorter)
1434 and use post-increment if available. */
1435 if (by_pieces_ninsns (len
, align
, m_max_size
, MOVE_BY_PIECES
) > 2)
1437 /* Find the mode of the largest comparison. */
1438 fixed_size_mode mode
1439 = widest_fixed_size_mode_for_size (m_max_size
, m_op
);
1441 m_from
.decide_autoinc (mode
, m_reverse
, len
);
1442 m_to
.decide_autoinc (mode
, m_reverse
, len
);
1445 align
= alignment_for_piecewise_move (MOVE_MAX_PIECES
, align
);
1448 m_overlap_op_by_pieces
= targetm
.overlap_op_by_pieces_p ();
1451 /* This function returns the largest usable integer mode for LEN bytes
1452 whose size is no bigger than size of MODE. */
1455 op_by_pieces_d::get_usable_mode (fixed_size_mode mode
, unsigned int len
)
1460 size
= GET_MODE_SIZE (mode
);
1461 if (len
>= size
&& prepare_mode (mode
, m_align
))
1463 /* widest_fixed_size_mode_for_size checks SIZE > 1. */
1464 mode
= widest_fixed_size_mode_for_size (size
, m_op
);
1470 /* Return the smallest integer or QI vector mode that is not narrower
1474 op_by_pieces_d::smallest_fixed_size_mode_for_size (unsigned int size
)
1476 /* Use QI vector only for > size of WORD. */
1477 if (can_use_qi_vectors (m_op
) && size
> UNITS_PER_WORD
)
1480 fixed_size_mode candidate
;
1481 FOR_EACH_MODE_IN_CLASS (mode
, MODE_VECTOR_INT
)
1482 if (is_a
<fixed_size_mode
> (mode
, &candidate
)
1483 && GET_MODE_INNER (candidate
) == QImode
)
1485 /* Don't return a mode wider than M_LEN. */
1486 if (GET_MODE_SIZE (candidate
) > m_len
)
1489 if (GET_MODE_SIZE (candidate
) >= size
1490 && by_pieces_mode_supported_p (candidate
, m_op
))
1495 return smallest_int_mode_for_size (size
* BITS_PER_UNIT
);
1498 /* This function contains the main loop used for expanding a block
1499 operation. First move what we can in the largest integer mode,
1500 then go to successively smaller modes. For every access, call
1501 GENFUN with the two operands and the EXTRA_DATA. */
1504 op_by_pieces_d::run ()
1509 unsigned HOST_WIDE_INT length
= m_len
;
1511 /* widest_fixed_size_mode_for_size checks M_MAX_SIZE > 1. */
1512 fixed_size_mode mode
1513 = widest_fixed_size_mode_for_size (m_max_size
, m_op
);
1514 mode
= get_usable_mode (mode
, length
);
1516 by_pieces_prev to_prev
= { nullptr, mode
};
1517 by_pieces_prev from_prev
= { nullptr, mode
};
1521 unsigned int size
= GET_MODE_SIZE (mode
);
1522 rtx to1
= NULL_RTX
, from1
;
1524 while (length
>= size
)
1529 to1
= m_to
.adjust (mode
, m_offset
, &to_prev
);
1531 to_prev
.mode
= mode
;
1532 from1
= m_from
.adjust (mode
, m_offset
, &from_prev
);
1533 from_prev
.data
= from1
;
1534 from_prev
.mode
= mode
;
1536 m_to
.maybe_predec (-(HOST_WIDE_INT
)size
);
1537 m_from
.maybe_predec (-(HOST_WIDE_INT
)size
);
1539 generate (to1
, from1
, mode
);
1541 m_to
.maybe_postinc (size
);
1542 m_from
.maybe_postinc (size
);
1555 if (!m_push
&& m_overlap_op_by_pieces
)
1557 /* NB: Generate overlapping operations if it is not a stack
1558 push since stack push must not overlap. Get the smallest
1559 fixed size mode for M_LEN bytes. */
1560 mode
= smallest_fixed_size_mode_for_size (length
);
1561 mode
= get_usable_mode (mode
, GET_MODE_SIZE (mode
));
1562 int gap
= GET_MODE_SIZE (mode
) - length
;
1565 /* If size of MODE > M_LEN, generate the last operation
1566 in MODE for the remaining bytes with ovelapping memory
1567 from the previois operation. */
1577 /* widest_fixed_size_mode_for_size checks SIZE > 1. */
1578 mode
= widest_fixed_size_mode_for_size (size
, m_op
);
1579 mode
= get_usable_mode (mode
, length
);
1585 /* Derived class from op_by_pieces_d, providing support for block move
1588 #ifdef PUSH_ROUNDING
1589 #define PUSHG_P(to) ((to) == nullptr)
1591 #define PUSHG_P(to) false
1594 class move_by_pieces_d
: public op_by_pieces_d
1596 insn_gen_fn m_gen_fun
;
1597 void generate (rtx
, rtx
, machine_mode
) final override
;
1598 bool prepare_mode (machine_mode
, unsigned int) final override
;
1601 move_by_pieces_d (rtx to
, rtx from
, unsigned HOST_WIDE_INT len
,
1603 : op_by_pieces_d (MOVE_MAX_PIECES
, to
, false, from
, true, NULL
,
1604 NULL
, len
, align
, PUSHG_P (to
), MOVE_BY_PIECES
)
1607 rtx
finish_retmode (memop_ret
);
1610 /* Return true if MODE can be used for a set of copies, given an
1611 alignment ALIGN. Prepare whatever data is necessary for later
1612 calls to generate. */
1615 move_by_pieces_d::prepare_mode (machine_mode mode
, unsigned int align
)
1617 insn_code icode
= optab_handler (mov_optab
, mode
);
1618 m_gen_fun
= GEN_FCN (icode
);
1619 return icode
!= CODE_FOR_nothing
&& align
>= GET_MODE_ALIGNMENT (mode
);
1622 /* A callback used when iterating for a compare_by_pieces_operation.
1623 OP0 and OP1 are the values that have been loaded and should be
1624 compared in MODE. If OP0 is NULL, this means we should generate a
1625 push; otherwise EXTRA_DATA holds a pointer to a pointer to the insn
1626 gen function that should be used to generate the mode. */
1629 move_by_pieces_d::generate (rtx op0
, rtx op1
,
1630 machine_mode mode ATTRIBUTE_UNUSED
)
1632 #ifdef PUSH_ROUNDING
1633 if (op0
== NULL_RTX
)
1635 emit_single_push_insn (mode
, op1
, NULL
);
1639 emit_insn (m_gen_fun (op0
, op1
));
1642 /* Perform the final adjustment at the end of a string to obtain the
1643 correct return value for the block operation.
1644 Return value is based on RETMODE argument. */
1647 move_by_pieces_d::finish_retmode (memop_ret retmode
)
1649 gcc_assert (!m_reverse
);
1650 if (retmode
== RETURN_END_MINUS_ONE
)
1652 m_to
.maybe_postinc (-1);
1655 return m_to
.adjust (QImode
, m_offset
);
1658 /* Generate several move instructions to copy LEN bytes from block FROM to
1659 block TO. (These are MEM rtx's with BLKmode).
1661 If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
1662 used to push FROM to the stack.
1664 ALIGN is maximum stack alignment we can assume.
1666 Return value is based on RETMODE argument. */
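
/* Illustrative note: emit_block_move_hints routes short constant-length
   copies here, e.g. a memcpy of 16 well-aligned bytes is typically
   expanded as a couple of word or vector moves rather than a library
   call.  */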
1669 move_by_pieces (rtx to
, rtx from
, unsigned HOST_WIDE_INT len
,
1670 unsigned int align
, memop_ret retmode
)
1672 #ifndef PUSH_ROUNDING
1677 move_by_pieces_d
data (to
, from
, len
, align
);
1681 if (retmode
!= RETURN_BEGIN
)
1682 return data
.finish_retmode (retmode
);
1687 /* Derived class from op_by_pieces_d, providing support for block move
1690 class store_by_pieces_d
: public op_by_pieces_d
1692 insn_gen_fn m_gen_fun
;
1694 void generate (rtx
, rtx
, machine_mode
) final override
;
1695 bool prepare_mode (machine_mode
, unsigned int) final override
;
1698 store_by_pieces_d (rtx to
, by_pieces_constfn cfn
, void *cfn_data
,
1699 unsigned HOST_WIDE_INT len
, unsigned int align
,
1700 by_pieces_operation op
)
1701 : op_by_pieces_d (STORE_MAX_PIECES
, to
, false, NULL_RTX
, true, cfn
,
1702 cfn_data
, len
, align
, false, op
)
1705 rtx
finish_retmode (memop_ret
);
1708 /* Return true if MODE can be used for a set of stores, given an
1709 alignment ALIGN. Prepare whatever data is necessary for later
1710 calls to generate. */
1713 store_by_pieces_d::prepare_mode (machine_mode mode
, unsigned int align
)
1715 insn_code icode
= optab_handler (mov_optab
, mode
);
1716 m_gen_fun
= GEN_FCN (icode
);
1717 return icode
!= CODE_FOR_nothing
&& align
>= GET_MODE_ALIGNMENT (mode
);
1720 /* A callback used when iterating for a store_by_pieces_operation.
1721 OP0 and OP1 are the values that have been loaded and should be
1722 compared in MODE. If OP0 is NULL, this means we should generate a
1723 push; otherwise EXTRA_DATA holds a pointer to a pointer to the insn
1724 gen function that should be used to generate the mode. */
1727 store_by_pieces_d::generate (rtx op0
, rtx op1
, machine_mode
)
1729 emit_insn (m_gen_fun (op0
, op1
));
1732 /* Perform the final adjustment at the end of a string to obtain the
1733 correct return value for the block operation.
1734 Return value is based on RETMODE argument. */
1737 store_by_pieces_d::finish_retmode (memop_ret retmode
)
1739 gcc_assert (!m_reverse
);
1740 if (retmode
== RETURN_END_MINUS_ONE
)
1742 m_to
.maybe_postinc (-1);
1745 return m_to
.adjust (QImode
, m_offset
);
1748 /* Determine whether the LEN bytes generated by CONSTFUN can be
1749 stored to memory using several move instructions. CONSTFUNDATA is
1750 a pointer which will be passed as argument in every CONSTFUN call.
1751 ALIGN is maximum alignment we can assume. MEMSETP is true if this is
1752 a memset operation and false if it's a copy of a constant string.
1753 Return true if a call to store_by_pieces should succeed. */
1756 can_store_by_pieces (unsigned HOST_WIDE_INT len
,
1757 by_pieces_constfn constfun
,
1758 void *constfundata
, unsigned int align
, bool memsetp
)
1760 unsigned HOST_WIDE_INT l
;
1761 unsigned int max_size
;
1762 HOST_WIDE_INT offset
= 0;
1763 enum insn_code icode
;
1765 /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it. */
1766 rtx cst ATTRIBUTE_UNUSED
;
1771 if (!targetm
.use_by_pieces_infrastructure_p (len
, align
,
1775 optimize_insn_for_speed_p ()))
1778 align
= alignment_for_piecewise_move (STORE_MAX_PIECES
, align
);
1780 /* We would first store what we can in the largest integer mode, then go to
1781 successively smaller modes. */
1784 reverse
<= (HAVE_PRE_DECREMENT
|| HAVE_POST_DECREMENT
);
1788 max_size
= STORE_MAX_PIECES
+ 1;
1789 while (max_size
> 1 && l
> 0)
1791 auto op
= memsetp
? SET_BY_PIECES
: STORE_BY_PIECES
;
1792 auto mode
= widest_fixed_size_mode_for_size (max_size
, op
);
1794 icode
= optab_handler (mov_optab
, mode
);
1795 if (icode
!= CODE_FOR_nothing
1796 && align
>= GET_MODE_ALIGNMENT (mode
))
1798 unsigned int size
= GET_MODE_SIZE (mode
);
1805 cst
= (*constfun
) (constfundata
, nullptr, offset
, mode
);
1806 /* All CONST_VECTORs can be loaded for memset since
1807 vec_duplicate_optab is a precondition to pick a
1808 vector mode for the memset expander. */
1809 if (!((memsetp
&& VECTOR_MODE_P (mode
))
1810 || targetm
.legitimate_constant_p (mode
, cst
)))
1820 max_size
= GET_MODE_SIZE (mode
);
1823 /* The code above should have handled everything. */
1830 /* Generate several move instructions to store LEN bytes generated by
1831 CONSTFUN to block TO. (A MEM rtx with BLKmode). CONSTFUNDATA is a
1832 pointer which will be passed as argument in every CONSTFUN call.
1833 ALIGN is maximum alignment we can assume. MEMSETP is true if this is
1834 a memset operation and false if it's a copy of a constant string.
1835 Return value is based on RETMODE argument. */
1838 store_by_pieces (rtx to
, unsigned HOST_WIDE_INT len
,
1839 by_pieces_constfn constfun
,
1840 void *constfundata
, unsigned int align
, bool memsetp
,
1845 gcc_assert (retmode
!= RETURN_END_MINUS_ONE
);
1849 gcc_assert (targetm
.use_by_pieces_infrastructure_p
1851 memsetp
? SET_BY_PIECES
: STORE_BY_PIECES
,
1852 optimize_insn_for_speed_p ()));
1854 store_by_pieces_d
data (to
, constfun
, constfundata
, len
, align
,
1855 memsetp
? SET_BY_PIECES
: STORE_BY_PIECES
);
1858 if (retmode
!= RETURN_BEGIN
)
1859 return data
.finish_retmode (retmode
);
1865 clear_by_pieces (rtx to
, unsigned HOST_WIDE_INT len
, unsigned int align
)
1870 /* Use builtin_memset_read_str to support vector mode broadcast. */
1872 store_by_pieces_d
data (to
, builtin_memset_read_str
, &c
, len
, align
,
1877 /* Context used by compare_by_pieces_genfn. It stores the fail label
1878 to jump to in case of miscomparison, and for branch ratios greater than 1,
1879 it stores an accumulator and the current and maximum counts before
1880 emitting another branch. */
1882 class compare_by_pieces_d
: public op_by_pieces_d
1884 rtx_code_label
*m_fail_label
;
1886 int m_count
, m_batch
;
1888 void generate (rtx
, rtx
, machine_mode
) final override
;
1889 bool prepare_mode (machine_mode
, unsigned int) final override
;
1890 void finish_mode (machine_mode
) final override
;
1893 compare_by_pieces_d (rtx op0
, rtx op1
, by_pieces_constfn op1_cfn
,
1894 void *op1_cfn_data
, HOST_WIDE_INT len
, int align
,
1895 rtx_code_label
*fail_label
)
1896 : op_by_pieces_d (COMPARE_MAX_PIECES
, op0
, true, op1
, true, op1_cfn
,
1897 op1_cfn_data
, len
, align
, false, COMPARE_BY_PIECES
)
1899 m_fail_label
= fail_label
;
1903 /* A callback used when iterating for a compare_by_pieces_operation.
1904 OP0 and OP1 are the values that have been loaded and should be
1905 compared in MODE. DATA holds a pointer to the compare_by_pieces_data
1906 context structure. */
1909 compare_by_pieces_d::generate (rtx op0
, rtx op1
, machine_mode mode
)
1913 rtx temp
= expand_binop (mode
, sub_optab
, op0
, op1
, NULL_RTX
,
1914 true, OPTAB_LIB_WIDEN
);
1916 temp
= expand_binop (mode
, ior_optab
, m_accumulator
, temp
, temp
,
1917 true, OPTAB_LIB_WIDEN
);
1918 m_accumulator
= temp
;
1920 if (++m_count
< m_batch
)
1924 op0
= m_accumulator
;
1926 m_accumulator
= NULL_RTX
;
1928 do_compare_rtx_and_jump (op0
, op1
, NE
, true, mode
, NULL_RTX
, NULL
,
1929 m_fail_label
, profile_probability::uninitialized ());
1932 /* Return true if MODE can be used for a set of moves and comparisons,
1933 given an alignment ALIGN. Prepare whatever data is necessary for
1934 later calls to generate. */
1937 compare_by_pieces_d::prepare_mode (machine_mode mode
, unsigned int align
)
1939 insn_code icode
= optab_handler (mov_optab
, mode
);
1940 if (icode
== CODE_FOR_nothing
1941 || align
< GET_MODE_ALIGNMENT (mode
)
1942 || !can_compare_p (EQ
, mode
, ccp_jump
))
1944 m_batch
= targetm
.compare_by_pieces_branch_ratio (mode
);
1947 m_accumulator
= NULL_RTX
;
1952 /* Called after expanding a series of comparisons in MODE. If we have
1953 accumulated results for which we haven't emitted a branch yet, do
1957 compare_by_pieces_d::finish_mode (machine_mode mode
)
1959 if (m_accumulator
!= NULL_RTX
)
1960 do_compare_rtx_and_jump (m_accumulator
, const0_rtx
, NE
, true, mode
,
1961 NULL_RTX
, NULL
, m_fail_label
,
1962 profile_probability::uninitialized ());
1965 /* Generate several move instructions to compare LEN bytes from blocks
1966 ARG0 and ARG1. (These are MEM rtx's with BLKmode).
1968 If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
1969 used to push FROM to the stack.
1971 ALIGN is maximum stack alignment we can assume.
1973 Optionally, the caller can pass a constfn and associated data in A1_CFN
1974 and A1_CFN_DATA. describing that the second operand being compared is a
1975 known constant and how to obtain its data. */
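
/* Illustrative note: the result register produced below is 0 when every
   piece compared equal and 1 on the first mismatch, so this expansion is
   only suitable when the caller tests the memcmp result for equality
   rather than for ordering.  */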
1978 compare_by_pieces (rtx arg0
, rtx arg1
, unsigned HOST_WIDE_INT len
,
1979 rtx target
, unsigned int align
,
1980 by_pieces_constfn a1_cfn
, void *a1_cfn_data
)
1982 rtx_code_label
*fail_label
= gen_label_rtx ();
1983 rtx_code_label
*end_label
= gen_label_rtx ();
1985 if (target
== NULL_RTX
1986 || !REG_P (target
) || REGNO (target
) < FIRST_PSEUDO_REGISTER
)
1987 target
= gen_reg_rtx (TYPE_MODE (integer_type_node
));
1989 compare_by_pieces_d
data (arg0
, arg1
, a1_cfn
, a1_cfn_data
, len
, align
,
1994 emit_move_insn (target
, const0_rtx
);
1995 emit_jump (end_label
);
1997 emit_label (fail_label
);
1998 emit_move_insn (target
, const1_rtx
);
1999 emit_label (end_label
);
/* Emit code to move a block Y to a block X.  This may be done with
   string-move instructions, with multiple scalar move instructions,
   or with a library call.

   Both X and Y must be MEM rtx's (perhaps inside VOLATILE) with mode BLKmode.
   SIZE is an rtx that says how long they are.
   ALIGN is the maximum alignment we can assume they have.
   METHOD describes what kind of copy this is, and what mechanisms may be used.
   MIN_SIZE is the minimal size of block to move.
   MAX_SIZE is the maximal size of block to move; if it cannot be represented
   in unsigned HOST_WIDE_INT, then it is a mask of all ones.
   CTZ_SIZE is the trailing-zeros count of SIZE; even a nonconstant SIZE is
   known to be a multiple of 1<<CTZ_SIZE.

   Return the address of the new block, if memcpy is called and returns it,
   0 otherwise.  */

rtx
emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
		       unsigned int expected_align, HOST_WIDE_INT expected_size,
		       unsigned HOST_WIDE_INT min_size,
		       unsigned HOST_WIDE_INT max_size,
		       unsigned HOST_WIDE_INT probable_max_size,
		       bool bail_out_libcall, bool *is_move_done,
		       bool might_overlap, unsigned ctz_size)
{
  int may_use_call;
  rtx retval = 0;
  unsigned int align;

  if (is_move_done)
    *is_move_done = true;

  if (CONST_INT_P (size) && INTVAL (size) == 0)
    return 0;

  switch (method)
    {
    case BLOCK_OP_NORMAL:
    case BLOCK_OP_TAILCALL:
      may_use_call = 1;
      break;

    case BLOCK_OP_CALL_PARM:
      may_use_call = block_move_libcall_safe_for_call_parm ();

      /* Make inhibit_defer_pop nonzero around the library call
	 to force it to pop the arguments right away.  */
      NO_DEFER_POP;
      break;

    case BLOCK_OP_NO_LIBCALL:
      may_use_call = 0;
      break;

    case BLOCK_OP_NO_LIBCALL_RET:
      may_use_call = -1;
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (MEM_P (x) && MEM_P (y));
  align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
  gcc_assert (align >= BITS_PER_UNIT);

  /* Make sure we've got BLKmode addresses; store_one_arg can decide that
     block copy is more efficient for other large modes, e.g. DCmode.  */
  x = adjust_address (x, BLKmode, 0);
  y = adjust_address (y, BLKmode, 0);

  /* If source and destination are the same, no need to copy anything.  */
  if (rtx_equal_p (x, y)
      && !MEM_VOLATILE_P (x)
      && !MEM_VOLATILE_P (y))
    return 0;

  /* Set MEM_SIZE as appropriate for this block copy.  The main place this
     can be incorrect is coming from __builtin_memcpy.  */
  poly_int64 const_size;
  if (poly_int_rtx_p (size, &const_size))
    {
      x = shallow_copy_rtx (x);
      y = shallow_copy_rtx (y);
      set_mem_size (x, const_size);
      set_mem_size (y, const_size);
    }

  bool pieces_ok = CONST_INT_P (size)
    && can_move_by_pieces (INTVAL (size), align);
  bool pattern_ok = false;

  if (!pieces_ok || might_overlap)
    pattern_ok
      = emit_block_move_via_pattern (x, y, size, align,
				     expected_align, expected_size,
				     min_size, max_size, probable_max_size,
				     might_overlap);

  if (!pattern_ok && might_overlap)
    {
      /* Do not try any of the other methods below as they are not safe
	 for overlapping moves.  */
      *is_move_done = false;
      return retval;
    }

  bool dynamic_direction = false;
  if (!pattern_ok && !pieces_ok && may_use_call
      && (flag_inline_stringops & (might_overlap ? ILSOP_MEMMOVE : ILSOP_MEMCPY)))
    {
      may_use_call = 0;
      dynamic_direction = might_overlap;
    }

  if (pattern_ok)
    ;
  else if (pieces_ok)
    move_by_pieces (x, y, INTVAL (size), align, RETURN_BEGIN);
  else if (may_use_call && !might_overlap
	   && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x))
	   && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (y)))
    {
      if (bail_out_libcall)
	{
	  if (is_move_done)
	    *is_move_done = false;
	  return retval;
	}

      if (may_use_call < 0)
	return pc_rtx;

      retval = emit_block_copy_via_libcall (x, y, size,
					    method == BLOCK_OP_TAILCALL);
    }
  else if (dynamic_direction)
    emit_block_move_via_oriented_loop (x, y, size, align, ctz_size);
  else if (might_overlap)
    *is_move_done = false;
  else
    emit_block_move_via_sized_loop (x, y, size, align, ctz_size);

  if (method == BLOCK_OP_CALL_PARM)
    OK_DEFER_POP;

  return retval;
}
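/* For instance, a constant 32-byte __builtin_memcpy with word-aligned
   operands normally satisfies PIECES_OK and is expanded inline by
   move_by_pieces, whereas a variable-length __builtin_memmove must rely
   on the movmem pattern, an inline loop, or the libcall fallback above.  */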
rtx
emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method,
		 unsigned int ctz_size)
{
  unsigned HOST_WIDE_INT max, min = 0;
  if (GET_CODE (size) == CONST_INT)
    min = max = UINTVAL (size);
  else
    max = GET_MODE_MASK (GET_MODE (size));
  return emit_block_move_hints (x, y, size, method, 0, -1,
				min, max, max,
				false, NULL, false, ctz_size);
}
/* A subroutine of emit_block_move.  Returns true if calling the
   block move libcall will not clobber any parameters which may have
   already been placed on the stack.  */

static bool
block_move_libcall_safe_for_call_parm (void)
{
  tree fn;

  /* If arguments are pushed on the stack, then they're safe.  */
  if (targetm.calls.push_argument (0))
    return true;

  /* If registers go on the stack anyway, any argument is sure to clobber
     an outgoing argument.  */
#if defined (REG_PARM_STACK_SPACE)
  fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
  /* Avoid set but not used warning if *REG_PARM_STACK_SPACE doesn't
     depend on its argument.  */
  (void) fn;
  if (OUTGOING_REG_PARM_STACK_SPACE ((!fn ? NULL_TREE : TREE_TYPE (fn)))
      && REG_PARM_STACK_SPACE (fn) != 0)
    return false;
#endif

  /* If any argument goes in memory, then it might clobber an outgoing
     argument.  */
  CUMULATIVE_ARGS args_so_far_v;
  cumulative_args_t args_so_far;
  tree arg;

  fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
  INIT_CUMULATIVE_ARGS (args_so_far_v, TREE_TYPE (fn), NULL_RTX, 0, 3);
  args_so_far = pack_cumulative_args (&args_so_far_v);

  arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
  for ( ; arg != void_list_node; arg = TREE_CHAIN (arg))
    {
      machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
      function_arg_info arg_info (mode, /*named=*/true);
      rtx tmp = targetm.calls.function_arg (args_so_far, arg_info);
      if (!tmp || !REG_P (tmp))
	return false;
      if (targetm.calls.arg_partial_bytes (args_so_far, arg_info))
	return false;
      targetm.calls.function_arg_advance (args_so_far, arg_info);
    }
  return true;
}
/* A subroutine of emit_block_move.  Expand a cpymem or movmem pattern;
   return true if successful.

   X is the destination of the copy or move.
   Y is the source of the copy or move.
   SIZE is the size of the block to be moved.

   MIGHT_OVERLAP indicates this originated with expansion of a
   builtin_memmove() and the source and destination blocks may
   overlap.  */

static bool
emit_block_move_via_pattern (rtx x, rtx y, rtx size, unsigned int align,
			     unsigned int expected_align,
			     HOST_WIDE_INT expected_size,
			     unsigned HOST_WIDE_INT min_size,
			     unsigned HOST_WIDE_INT max_size,
			     unsigned HOST_WIDE_INT probable_max_size,
			     bool might_overlap)
{
  if (expected_align < align)
    expected_align = align;
  if (expected_size != -1)
    {
      if ((unsigned HOST_WIDE_INT)expected_size > probable_max_size)
	expected_size = probable_max_size;
      if ((unsigned HOST_WIDE_INT)expected_size < min_size)
	expected_size = min_size;
    }

  /* Since this is a move insn, we don't care about volatility.  */
  temporary_volatile_ok v (true);

  /* Try the most limited insn first, because there's no point
     including more than one in the machine description unless
     the more limited one has some advantage.  */

  opt_scalar_int_mode mode_iter;
  FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
    {
      scalar_int_mode mode = mode_iter.require ();
      enum insn_code code;
      if (might_overlap)
	code = direct_optab_handler (movmem_optab, mode);
      else
	code = direct_optab_handler (cpymem_optab, mode);

      if (code != CODE_FOR_nothing
	  /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
	     here because if SIZE is less than the mode mask, as it is
	     returned by the macro, it will definitely be less than the
	     actual mode mask.  Since SIZE is within the Pmode address
	     space, we limit MODE to Pmode.  */
	  && ((CONST_INT_P (size)
	       && ((unsigned HOST_WIDE_INT) INTVAL (size)
		   <= (GET_MODE_MASK (mode) >> 1)))
	      || max_size <= (GET_MODE_MASK (mode) >> 1)
	      || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
	{
	  class expand_operand ops[9];
	  unsigned int nops;

	  /* ??? When called via emit_block_move_for_call, it'd be
	     nice if there were some way to inform the backend, so
	     that it doesn't fail the expansion because it thinks
	     emitting the libcall would be more efficient.  */
	  nops = insn_data[(int) code].n_generator_args;
	  gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);

	  create_fixed_operand (&ops[0], x);
	  create_fixed_operand (&ops[1], y);
	  /* The check above guarantees that this size conversion is valid.  */
	  create_convert_operand_to (&ops[2], size, mode, true);
	  create_integer_operand (&ops[3], align / BITS_PER_UNIT);
	  if (nops >= 6)
	    {
	      create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
	      create_integer_operand (&ops[5], expected_size);
	    }
	  if (nops >= 8)
	    {
	      create_integer_operand (&ops[6], min_size);
	      /* If we cannot represent the maximal size,
		 make parameter NULL.  */
	      if ((HOST_WIDE_INT) max_size != -1)
		create_integer_operand (&ops[7], max_size);
	      else
		create_fixed_operand (&ops[7], NULL);
	    }
	  if (nops == 9)
	    {
	      /* If we cannot represent the maximal size,
		 make parameter NULL.  */
	      if ((HOST_WIDE_INT) probable_max_size != -1)
		create_integer_operand (&ops[8], probable_max_size);
	      else
		create_fixed_operand (&ops[8], NULL);
	    }
	  if (maybe_expand_insn (code, nops, ops))
	    return true;
	}
    }

  return false;
}
/* Like emit_block_move_via_loop, but choose a suitable INCR based on
   ALIGN and CTZ_SIZE.  */

static void
emit_block_move_via_sized_loop (rtx x, rtx y, rtx size,
				unsigned int align,
				unsigned int ctz_size)
{
  int incr = align / BITS_PER_UNIT;

  if (CONST_INT_P (size))
    ctz_size = MAX (ctz_size, (unsigned) wi::ctz (UINTVAL (size)));

  if (HOST_WIDE_INT_1U << ctz_size < (unsigned HOST_WIDE_INT) incr)
    incr = HOST_WIDE_INT_1U << ctz_size;

  while (incr > 1 && !can_move_by_pieces (incr, align))
    incr >>= 1;

  gcc_checking_assert (incr);

  return emit_block_move_via_loop (x, y, size, align, incr);
}
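/* For example, with ALIGN of 64 bits INCR starts at 8; a constant SIZE of 12
   caps it at 1 << ctz (12) == 4, and it is halved further only while the
   target cannot move pieces of that size.  */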
/* Like emit_block_move_via_sized_loop, but besides choosing INCR so
   as to ensure safe moves even in case of overlap, output dynamic
   tests to choose between two loops, one moving downwards, another
   moving upwards.  */

static void
emit_block_move_via_oriented_loop (rtx x, rtx y, rtx size,
				   unsigned int align,
				   unsigned int ctz_size)
{
  int incr = align / BITS_PER_UNIT;

  if (CONST_INT_P (size))
    ctz_size = MAX (ctz_size, (unsigned) wi::ctz (UINTVAL (size)));

  if (HOST_WIDE_INT_1U << ctz_size < (unsigned HOST_WIDE_INT) incr)
    incr = HOST_WIDE_INT_1U << ctz_size;

  while (incr > 1 && !int_mode_for_size (incr, 0).exists ())
    incr >>= 1;

  gcc_checking_assert (incr);

  rtx_code_label *upw_label, *end_label;
  upw_label = gen_label_rtx ();
  end_label = gen_label_rtx ();

  rtx x_addr = force_operand (XEXP (x, 0), NULL_RTX);
  rtx y_addr = force_operand (XEXP (y, 0), NULL_RTX);
  do_pending_stack_adjust ();

  machine_mode mode = GET_MODE (x_addr);
  if (mode != GET_MODE (y_addr))
    {
      scalar_int_mode xmode
	= smallest_int_mode_for_size (GET_MODE_BITSIZE (mode));
      scalar_int_mode ymode
	= smallest_int_mode_for_size (GET_MODE_BITSIZE
				      (GET_MODE (y_addr)));
      if (GET_MODE_BITSIZE (xmode) < GET_MODE_BITSIZE (ymode))
	mode = ymode;
      else
	mode = xmode;

#ifndef POINTERS_EXTEND_UNSIGNED
      const int POINTERS_EXTEND_UNSIGNED = 1;
#endif
      x_addr = convert_modes (mode, GET_MODE (x_addr), x_addr,
			      POINTERS_EXTEND_UNSIGNED);
      y_addr = convert_modes (mode, GET_MODE (y_addr), y_addr,
			      POINTERS_EXTEND_UNSIGNED);
    }

  /* Test for overlap: if (x >= y || x + size <= y) goto upw_label.  */
  emit_cmp_and_jump_insns (x_addr, y_addr, GEU, NULL_RTX, mode,
			   true, upw_label,
			   profile_probability::guessed_always ()
			   .apply_scale (5, 10));
  rtx tmp = convert_modes (GET_MODE (x_addr), GET_MODE (size), size, true);
  tmp = simplify_gen_binary (PLUS, GET_MODE (x_addr), x_addr, tmp);

  emit_cmp_and_jump_insns (tmp, y_addr, LEU, NULL_RTX, mode,
			   true, upw_label,
			   profile_probability::guessed_always ()
			   .apply_scale (8, 10));

  emit_block_move_via_loop (x, y, size, align, -incr);

  emit_jump (end_label);
  emit_label (upw_label);

  emit_block_move_via_loop (x, y, size, align, incr);

  emit_label (end_label);
}
/* A subroutine of emit_block_move.  Copy the data via an explicit
   loop.  This is used only when libcalls are forbidden, or when
   inlining is required.  INCR is the block size to be copied in each
   loop iteration.  If it is negative, the absolute value is used, and
   the block is copied backwards.  INCR must be a power of two, an
   exact divisor for SIZE and ALIGN, and imply a mode that can be
   safely copied per iteration assuming no overlap.  */

static void
emit_block_move_via_loop (rtx x, rtx y, rtx size,
			  unsigned int align, int incr)
{
  rtx_code_label *cmp_label, *top_label;
  rtx iter, x_addr, y_addr, tmp;
  machine_mode x_addr_mode = get_address_mode (x);
  machine_mode y_addr_mode = get_address_mode (y);
  machine_mode iter_mode;

  iter_mode = GET_MODE (size);
  if (iter_mode == VOIDmode)
    iter_mode = word_mode;

  top_label = gen_label_rtx ();
  cmp_label = gen_label_rtx ();
  iter = gen_reg_rtx (iter_mode);

  bool downwards = incr < 0;
  rtx iter_init;
  rtx_code iter_cond;
  rtx iter_limit;
  rtx iter_incr;
  machine_mode move_mode;
  if (downwards)
    {
      iter_init = size;
      iter_cond = GE;
      iter_limit = const0_rtx;
      iter_incr = GEN_INT (incr);
    }
  else
    {
      iter_init = const0_rtx;
      iter_cond = LT;
      iter_limit = size;
      iter_incr = GEN_INT (incr);
    }
  emit_move_insn (iter, iter_init);

  if (downwards)
    incr = -incr;

  opt_scalar_int_mode int_move_mode
    = int_mode_for_size (incr * BITS_PER_UNIT, 1);
  if (!int_move_mode.exists (&move_mode)
      || GET_MODE_BITSIZE (int_move_mode.require ()) != incr * BITS_PER_UNIT)
    {
      move_mode = BLKmode;
      gcc_checking_assert (can_move_by_pieces (incr, align));
    }

  x_addr = force_operand (XEXP (x, 0), NULL_RTX);
  y_addr = force_operand (XEXP (y, 0), NULL_RTX);
  do_pending_stack_adjust ();

  emit_jump (cmp_label);
  emit_label (top_label);

  tmp = convert_modes (x_addr_mode, iter_mode, iter, true);
  x_addr = simplify_gen_binary (PLUS, x_addr_mode, x_addr, tmp);

  if (x_addr_mode != y_addr_mode)
    tmp = convert_modes (y_addr_mode, iter_mode, iter, true);
  y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp);

  x = change_address (x, move_mode, x_addr);
  y = change_address (y, move_mode, y_addr);

  if (move_mode == BLKmode)
    {
      bool done;
      emit_block_move_hints (x, y, iter_incr, BLOCK_OP_NO_LIBCALL,
			     align, incr, incr, incr, incr,
			     false, &done, false);
      gcc_checking_assert (done);
    }
  else
    emit_move_insn (x, y);

  if (downwards)
    emit_label (cmp_label);

  tmp = expand_simple_binop (iter_mode, PLUS, iter, iter_incr, iter,
			     true, OPTAB_LIB_WIDEN);
  if (tmp != iter)
    emit_move_insn (iter, tmp);

  if (!downwards)
    emit_label (cmp_label);

  emit_cmp_and_jump_insns (iter, iter_limit, iter_cond, NULL_RTX, iter_mode,
			   false, top_label,
			   profile_probability::guessed_always ()
			   .apply_scale (9, 10));
}
/* Expand a call to memcpy or memmove or memcmp, and return the result.
   TAILCALL is true if this is a tail call.  */

rtx
emit_block_op_via_libcall (enum built_in_function fncode, rtx dst, rtx src,
			   rtx size, bool tailcall)
{
  rtx dst_addr, src_addr;
  tree call_expr, dst_tree, src_tree, size_tree;
  machine_mode size_mode;

  /* Since dst and src are passed to a libcall, mark the corresponding
     tree EXPR as addressable.  */
  tree dst_expr = MEM_EXPR (dst);
  tree src_expr = MEM_EXPR (src);
  if (dst_expr)
    mark_addressable (dst_expr);
  if (src_expr)
    mark_addressable (src_expr);

  dst_addr = copy_addr_to_reg (XEXP (dst, 0));
  dst_addr = convert_memory_address (ptr_mode, dst_addr);
  dst_tree = make_tree (ptr_type_node, dst_addr);

  src_addr = copy_addr_to_reg (XEXP (src, 0));
  src_addr = convert_memory_address (ptr_mode, src_addr);
  src_tree = make_tree (ptr_type_node, src_addr);

  size_mode = TYPE_MODE (sizetype);
  size = convert_to_mode (size_mode, size, 1);
  size = copy_to_mode_reg (size_mode, size);
  size_tree = make_tree (sizetype, size);

  /* It is incorrect to use the libcall calling conventions for calls to
     memcpy/memmove/memcmp because they can be provided by the user.  */
  tree fn = builtin_decl_implicit (fncode);
  call_expr = build_call_expr (fn, 3, dst_tree, src_tree, size_tree);
  CALL_EXPR_TAILCALL (call_expr) = tailcall;

  return expand_call (call_expr, NULL_RTX, false);
}
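/* The memcpy fallback in emit_block_move_hints above reaches this helper
   via emit_block_copy_via_libcall, presumably a thin wrapper that forwards
   BUILT_IN_MEMCPY as FNCODE.  */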
/* Try to expand cmpstrn or cmpmem operation ICODE with the given operands.
   ARG3_TYPE is the type of ARG3_RTX.  Return the result rtx on success,
   otherwise return null.  */

rtx
expand_cmpstrn_or_cmpmem (insn_code icode, rtx target, rtx arg1_rtx,
			  rtx arg2_rtx, tree arg3_type, rtx arg3_rtx,
			  HOST_WIDE_INT align)
{
  machine_mode insn_mode = insn_data[icode].operand[0].mode;

  if (target && (!REG_P (target) || HARD_REGISTER_P (target)))
    target = NULL_RTX;

  class expand_operand ops[5];
  create_output_operand (&ops[0], target, insn_mode);
  create_fixed_operand (&ops[1], arg1_rtx);
  create_fixed_operand (&ops[2], arg2_rtx);
  create_convert_operand_from (&ops[3], arg3_rtx, TYPE_MODE (arg3_type),
			       TYPE_UNSIGNED (arg3_type));
  create_integer_operand (&ops[4], align);
  if (maybe_expand_insn (icode, 5, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* Expand a block compare between X and Y with length LEN using the
   cmpmem optab, placing the result in TARGET.  LEN_TYPE is the type
   of the expression that was used to calculate the length.  ALIGN
   gives the known minimum common alignment.  */

static rtx
emit_block_cmp_via_cmpmem (rtx x, rtx y, rtx len, tree len_type, rtx target,
			   unsigned align)
{
  /* Note: The cmpstrnsi pattern, if it exists, is not suitable for
     implementing memcmp because it will stop if it encounters two
     zero bytes.  */
  insn_code icode = direct_optab_handler (cmpmem_optab, SImode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  return expand_cmpstrn_or_cmpmem (icode, target, x, y, len_type, len, align);
}
/* Emit code to compare a block Y to a block X.  This may be done with
   string-compare instructions, with multiple scalar instructions,
   or with a library call.

   Both X and Y must be MEM rtx's.  LEN is an rtx that says how long
   they are.  LEN_TYPE is the type of the expression that was used to
   calculate it, and CTZ_LEN is the known trailing-zeros count of LEN,
   so LEN must be a multiple of 1<<CTZ_LEN even if it's not constant.

   If EQUALITY_ONLY is true, it means we don't have to return the tri-state
   value of a normal memcmp call, instead we can just compare for equality.
   If FORCE_LIBCALL is true, we should emit a call to memcmp rather than
   expanding the comparison inline.

   Optionally, the caller can pass a constfn and associated data in Y_CFN
   and Y_CFN_DATA, describing that the second operand being compared is a
   known constant and how to obtain its data.
   Return the result of the comparison, or NULL_RTX if we failed to
   perform the operation.  */

rtx
emit_block_cmp_hints (rtx x, rtx y, rtx len, tree len_type, rtx target,
		      bool equality_only, by_pieces_constfn y_cfn,
		      void *y_cfndata, unsigned ctz_len)
{
  rtx result = 0;

  if (CONST_INT_P (len) && INTVAL (len) == 0)
    return const0_rtx;

  gcc_assert (MEM_P (x) && MEM_P (y));
  unsigned int align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
  gcc_assert (align >= BITS_PER_UNIT);

  x = adjust_address (x, BLKmode, 0);
  y = adjust_address (y, BLKmode, 0);

  if (equality_only
      && CONST_INT_P (len)
      && can_do_by_pieces (INTVAL (len), align, COMPARE_BY_PIECES))
    result = compare_by_pieces (x, y, INTVAL (len), target, align,
				y_cfn, y_cfndata);
  else
    result = emit_block_cmp_via_cmpmem (x, y, len, len_type, target, align);

  if (!result && (flag_inline_stringops & ILSOP_MEMCMP))
    result = emit_block_cmp_via_loop (x, y, len, len_type,
				      target, equality_only,
				      align, ctz_len);

  return result;
}
/* Like emit_block_cmp_hints, but with known alignment and no support
   for constants.  Always expand to a loop with iterations that compare
   blocks of the largest compare-by-pieces size that divides both len
   and align, and then, if !EQUALITY_ONLY, identify the word and then
   the unit that first differs to return the result.  */

static rtx
emit_block_cmp_via_loop (rtx x, rtx y, rtx len, tree len_type, rtx target,
			 bool equality_only, unsigned align, unsigned ctz_len)
{
  unsigned incr = align / BITS_PER_UNIT;

  if (CONST_INT_P (len))
    ctz_len = MAX (ctz_len, (unsigned) wi::ctz (UINTVAL (len)));

  if (HOST_WIDE_INT_1U << ctz_len < (unsigned HOST_WIDE_INT) incr)
    incr = HOST_WIDE_INT_1U << ctz_len;

  while (incr > 1
	 && !can_do_by_pieces (incr, align, COMPARE_BY_PIECES))
    incr >>= 1;

  rtx_code_label *cmp_label, *top_label, *ne_label, *res_label;
  rtx iter, x_addr, y_addr, tmp;
  machine_mode x_addr_mode = get_address_mode (x);
  machine_mode y_addr_mode = get_address_mode (y);
  machine_mode iter_mode;

  iter_mode = GET_MODE (len);
  if (iter_mode == VOIDmode)
    iter_mode = word_mode;

  rtx iter_init = const0_rtx;
  rtx_code iter_cond = LTU;
  rtx_code entry_cond = GEU;
  rtx iter_limit = len;
  rtx iter_incr = GEN_INT (incr);
  machine_mode cmp_mode;

  /* We can drop the loop back edge if we know there's exactly one
     iteration.  */
  top_label = (!rtx_equal_p (len, iter_incr)
	       ? gen_label_rtx ()
	       : NULL);
  /* We need not test before entering the loop if len is known
     nonzero.  ??? This could be even stricter, testing whether a
     nonconstant LEN could possibly be zero.  */
  cmp_label = (!CONSTANT_P (len) || rtx_equal_p (len, iter_init)
	       ? gen_label_rtx ()
	       : NULL);
  ne_label = gen_label_rtx ();
  res_label = gen_label_rtx ();

  iter = gen_reg_rtx (iter_mode);
  emit_move_insn (iter, iter_init);

  opt_scalar_int_mode int_cmp_mode
    = int_mode_for_size (incr * BITS_PER_UNIT, 1);
  if (!int_cmp_mode.exists (&cmp_mode)
      || GET_MODE_BITSIZE (int_cmp_mode.require ()) != incr * BITS_PER_UNIT
      || !can_compare_p (NE, cmp_mode, ccp_jump))
    {
      cmp_mode = BLKmode;
      gcc_checking_assert (incr != 1);
    }

  /* Save the base addresses.  */
  x_addr = force_operand (XEXP (x, 0), NULL_RTX);
  y_addr = force_operand (XEXP (y, 0), NULL_RTX);
  do_pending_stack_adjust ();

  if (cmp_label)
    {
      if (top_label)
	emit_jump (cmp_label);
      else
	emit_cmp_and_jump_insns (iter, iter_limit, entry_cond,
				 NULL_RTX, iter_mode,
				 true, res_label,
				 profile_probability::guessed_always ()
				 .apply_scale (1, 10));
    }

  if (top_label)
    emit_label (top_label);

  /* Offset the base addresses by ITER.  */
  tmp = convert_modes (x_addr_mode, iter_mode, iter, true);
  x_addr = simplify_gen_binary (PLUS, x_addr_mode, x_addr, tmp);

  if (x_addr_mode != y_addr_mode)
    tmp = convert_modes (y_addr_mode, iter_mode, iter, true);
  y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp);

  x = change_address (x, cmp_mode, x_addr);
  y = change_address (y, cmp_mode, y_addr);

  /* Compare one block.  */
  rtx part_res;
  if (cmp_mode == BLKmode)
    part_res = compare_by_pieces (x, y, incr, target, align, 0, 0);
  else
    part_res = expand_binop (cmp_mode, sub_optab, x, y, NULL_RTX,
			     true, OPTAB_LIB_WIDEN);

  /* Stop if we found a difference.  */
  emit_cmp_and_jump_insns (part_res, GEN_INT (0), NE, NULL_RTX,
			   GET_MODE (part_res), true, ne_label,
			   profile_probability::guessed_always ()
			   .apply_scale (1, 10));

  /* Increment ITER.  */
  tmp = expand_simple_binop (iter_mode, PLUS, iter, iter_incr, iter,
			     true, OPTAB_LIB_WIDEN);
  if (tmp != iter)
    emit_move_insn (iter, tmp);

  if (cmp_label)
    emit_label (cmp_label);
  /* Loop until we reach the limit.  */
  if (top_label)
    emit_cmp_and_jump_insns (iter, iter_limit, iter_cond, NULL_RTX, iter_mode,
			     true, top_label,
			     profile_probability::guessed_always ()
			     .apply_scale (9, 10));

  /* We got to the end without differences, so the result is zero.  */
  if (target == NULL_RTX
      || !REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
    target = gen_reg_rtx (TYPE_MODE (integer_type_node));

  emit_move_insn (target, const0_rtx);
  emit_jump (res_label);

  emit_label (ne_label);

  /* Return nonzero, or pinpoint the difference to return the expected
     result for non-equality tests.  */
  if (equality_only)
    emit_move_insn (target, const1_rtx);
  else
    {
      if (incr > UNITS_PER_WORD)
	/* ??? Re-compare the block found to be different one word at a
	   time.  */
	part_res = emit_block_cmp_via_loop (x, y, GEN_INT (incr), len_type,
					    target, equality_only,
					    BITS_PER_WORD, 0);
      else if (incr > 1)
	/* ??? Re-compare the block found to be different one byte at a
	   time.  We could do better using part_res, and being careful
	   about endianness.  */
	part_res = emit_block_cmp_via_loop (x, y, GEN_INT (incr), len_type,
					    target, equality_only,
					    BITS_PER_UNIT, 0);
      else if (known_gt (GET_MODE_BITSIZE (GET_MODE (target)),
			 GET_MODE_BITSIZE (cmp_mode)))
	part_res = expand_binop (GET_MODE (target), sub_optab, x, y, target,
				 true, OPTAB_LIB_WIDEN);
      else
	{
	  /* In the odd chance target is QImode, we can't count on
	     widening subtract to capture the result of the unsigned
	     compares.  */
	  rtx_code_label *ltu_label;
	  ltu_label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (x, y, LTU, NULL_RTX,
				   cmp_mode, true, ltu_label,
				   profile_probability::guessed_always ()
				   .apply_scale (5, 10));

	  emit_move_insn (target, const1_rtx);
	  emit_jump (res_label);

	  emit_label (ltu_label);
	  emit_move_insn (target, constm1_rtx);
	  part_res = target;
	}

      if (target != part_res)
	convert_move (target, part_res, false);
    }

  emit_label (res_label);

  return target;
}
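/* The expansion above therefore yields 0 when the blocks compare equal;
   when they differ it yields 1 for EQUALITY_ONLY callers, and otherwise a
   value whose sign reflects the first differing unit, as tri-state memcmp
   callers expect.  */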
/* Copy all or part of a value X into registers starting at REGNO.
   The number of registers to be filled is NREGS.  */

void
move_block_to_reg (int regno, rtx x, int nregs, machine_mode mode)
{
  if (nregs == 0)
    return;

  if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
    x = validize_mem (force_const_mem (mode, x));

  /* See if the machine can do this with a load multiple insn.  */
  if (targetm.have_load_multiple ())
    {
      rtx_insn *last = get_last_insn ();
      rtx first = gen_rtx_REG (word_mode, regno);
      if (rtx_insn *pat = targetm.gen_load_multiple (first, x,
						     GEN_INT (nregs)))
	{
	  emit_insn (pat);
	  return;
	}
      else
	delete_insns_since (last);
    }

  for (int i = 0; i < nregs; i++)
    emit_move_insn (gen_rtx_REG (word_mode, regno + i),
		    operand_subword_force (x, i, mode));
}
/* Copy all or part of a BLKmode value X out of registers starting at REGNO.
   The number of registers to be filled is NREGS.  */

void
move_block_from_reg (int regno, rtx x, int nregs)
{
  if (nregs == 0)
    return;

  /* See if the machine can do this with a store multiple insn.  */
  if (targetm.have_store_multiple ())
    {
      rtx_insn *last = get_last_insn ();
      rtx first = gen_rtx_REG (word_mode, regno);
      if (rtx_insn *pat = targetm.gen_store_multiple (x, first,
						      GEN_INT (nregs)))
	{
	  emit_insn (pat);
	  return;
	}
      else
	delete_insns_since (last);
    }

  for (int i = 0; i < nregs; i++)
    {
      rtx tem = operand_subword (x, i, 1, BLKmode);

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (word_mode, regno + i));
    }
}
/* Generate a PARALLEL rtx for a new non-consecutive group of registers from
   ORIG, where ORIG is a non-consecutive group of registers represented by
   a PARALLEL.  The clone is identical to the original except in that the
   original set of registers is replaced by a new set of pseudo registers.
   The new set has the same modes as the original set.  */

rtx
gen_group_rtx (rtx orig)
{
  int i, length;
  rtx *tmps;

  gcc_assert (GET_CODE (orig) == PARALLEL);

  length = XVECLEN (orig, 0);
  tmps = XALLOCAVEC (rtx, length);

  /* Skip a NULL entry in first slot.  */
  i = XEXP (XVECEXP (orig, 0, 0), 0) ? 0 : 1;

  if (i == 1)
    tmps[0] = 0;

  for (; i < length; i++)
    {
      machine_mode mode = GET_MODE (XEXP (XVECEXP (orig, 0, i), 0));
      rtx offset = XEXP (XVECEXP (orig, 0, i), 1);

      tmps[i] = gen_rtx_EXPR_LIST (VOIDmode, gen_reg_rtx (mode), offset);
    }

  return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps));
}
/* A subroutine of emit_group_load.  Arguments as for emit_group_load,
   except that values are placed in TMPS[i], and must later be moved
   into corresponding XEXP (XVECEXP (DST, 0, i), 0) element.  */

static void
emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type,
		   poly_int64 ssize)
{
  rtx src;
  int start, i;
  machine_mode m = GET_MODE (orig_src);

  gcc_assert (GET_CODE (dst) == PARALLEL);

  if (m != VOIDmode
      && !SCALAR_INT_MODE_P (m)
      && !MEM_P (orig_src)
      && GET_CODE (orig_src) != CONCAT)
    {
      scalar_int_mode imode;
      if (int_mode_for_mode (GET_MODE (orig_src)).exists (&imode))
	{
	  src = gen_reg_rtx (imode);
	  emit_move_insn (gen_lowpart (GET_MODE (orig_src), src), orig_src);
	}
      else
	{
	  src = assign_stack_temp (GET_MODE (orig_src), ssize);
	  emit_move_insn (src, orig_src);
	}
      emit_group_load_1 (tmps, dst, src, type, ssize);
      return;
    }

  /* Check for a NULL entry, used to indicate that the parameter goes
     both on the stack and in registers.  */
  if (XEXP (XVECEXP (dst, 0, 0), 0))
    start = 0;
  else
    start = 1;

  /* Process the pieces.  */
  for (i = start; i < XVECLEN (dst, 0); i++)
    {
      machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
      poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (dst, 0, i), 1));
      poly_int64 bytelen = GET_MODE_SIZE (mode);
      poly_int64 shift = 0;

      /* Handle trailing fragments that run over the size of the struct.
	 It's the target's responsibility to make sure that the fragment
	 cannot be strictly smaller in some cases and strictly larger
	 in others.  */
      gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
      if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
	{
	  /* Arrange to shift the fragment to where it belongs.
	     extract_bit_field loads to the lsb of the reg.  */
	  if (
#ifdef BLOCK_REG_PADDING
	      BLOCK_REG_PADDING (GET_MODE (orig_src), type, i == start)
	      == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
#else
	      BYTES_BIG_ENDIAN
#endif
	      )
	    shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
	  bytelen = ssize - bytepos;
	  gcc_assert (maybe_gt (bytelen, 0));
	}

      /* If we won't be loading directly from memory, protect the real source
	 from strange tricks we might play; but make sure that the source can
	 be loaded directly into the destination.  */
      src = orig_src;
      if (!MEM_P (orig_src)
	  && (!REG_P (orig_src) || HARD_REGISTER_P (orig_src))
	  && !CONSTANT_P (orig_src))
	{
	  gcc_assert (GET_MODE (orig_src) != VOIDmode);
	  src = force_reg (GET_MODE (orig_src), orig_src);
	}

      /* Optimize the access just a bit.  */
      if (MEM_P (src)
	  && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src))
	      || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
	  && multiple_p (bytepos * BITS_PER_UNIT, GET_MODE_ALIGNMENT (mode))
	  && known_eq (bytelen, GET_MODE_SIZE (mode)))
	{
	  tmps[i] = gen_reg_rtx (mode);
	  emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
	}
      else if (COMPLEX_MODE_P (mode)
	       && GET_MODE (src) == mode
	       && known_eq (bytelen, GET_MODE_SIZE (mode)))
	/* Let emit_move_complex do the bulk of the work.  */
	tmps[i] = src;
      else if (GET_CODE (src) == CONCAT)
	{
	  poly_int64 slen = GET_MODE_SIZE (GET_MODE (src));
	  poly_int64 slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
	  unsigned int elt;
	  poly_int64 subpos;

	  if (can_div_trunc_p (bytepos, slen0, &elt, &subpos)
	      && known_le (subpos + bytelen, slen0))
	    {
	      /* The following assumes that the concatenated objects all
		 have the same size.  In this case, a simple calculation
		 can be used to determine the object and the bit field
		 to be extracted.  */
	      tmps[i] = XEXP (src, elt);
	      if (maybe_ne (subpos, 0)
		  || maybe_ne (subpos + bytelen, slen0)
		  || (!CONSTANT_P (tmps[i])
		      && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode)))
		tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
					     subpos * BITS_PER_UNIT,
					     1, NULL_RTX, mode, mode, false,
					     NULL);
	    }
	  else
	    {
	      rtx mem;

	      gcc_assert (known_eq (bytepos, 0));
	      mem = assign_stack_temp (GET_MODE (src), slen);
	      emit_move_insn (mem, src);
	      tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
					   0, 1, NULL_RTX, mode, mode, false,
					   NULL);
	    }
	}
      else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode
	       && XVECLEN (dst, 0) > 1)
	tmps[i] = force_subreg (mode, src, GET_MODE (dst), bytepos);
      else if (CONSTANT_P (src))
	{
	  if (known_eq (bytelen, ssize))
	    tmps[i] = src;
	  else
	    {
	      rtx first, second;

	      /* TODO: const_wide_int can have sizes other than this...  */
	      gcc_assert (known_eq (2 * bytelen, ssize));
	      split_double (src, &first, &second);
	      if (i)
		tmps[i] = second;
	      else
		tmps[i] = first;
	    }
	}
      else if (REG_P (src) && GET_MODE (src) == mode)
	tmps[i] = src;
      else
	tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
				     bytepos * BITS_PER_UNIT, 1, NULL_RTX,
				     mode, mode, false, NULL);

      if (maybe_ne (shift, 0))
	tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
				shift, tmps[i], 0);
    }
}
/* Emit code to move a block SRC of type TYPE to a block DST,
   where DST is non-consecutive registers represented by a PARALLEL.
   SSIZE represents the total size of block ORIG_SRC in bytes, or -1
   if not known.  */

void
emit_group_load (rtx dst, rtx src, tree type, poly_int64 ssize)
{
  rtx *tmps;
  int i;

  tmps = XALLOCAVEC (rtx, XVECLEN (dst, 0));
  emit_group_load_1 (tmps, dst, src, type, ssize);

  /* Copy the extracted pieces into the proper (probable) hard regs.  */
  for (i = 0; i < XVECLEN (dst, 0); i++)
    {
      rtx d = XEXP (XVECEXP (dst, 0, i), 0);
      if (d == NULL)
	continue;
      emit_move_insn (d, tmps[i]);
    }
}
/* Similar, but load SRC into new pseudos in a format that looks like
   PARALLEL.  This can later be fed to emit_group_move to get things
   in the right place.  */

rtx
emit_group_load_into_temps (rtx parallel, rtx src, tree type, poly_int64 ssize)
{
  rtvec vec;
  int i;

  vec = rtvec_alloc (XVECLEN (parallel, 0));
  emit_group_load_1 (&RTVEC_ELT (vec, 0), parallel, src, type, ssize);

  /* Convert the vector to look just like the original PARALLEL, except
     with the computed values.  */
  for (i = 0; i < XVECLEN (parallel, 0); i++)
    {
      rtx e = XVECEXP (parallel, 0, i);
      rtx d = XEXP (e, 0);

      if (d)
	{
	  d = force_reg (GET_MODE (d), RTVEC_ELT (vec, i));
	  e = alloc_EXPR_LIST (REG_NOTE_KIND (e), d, XEXP (e, 1));
	}
      RTVEC_ELT (vec, i) = e;
    }

  return gen_rtx_PARALLEL (GET_MODE (parallel), vec);
}
/* Emit code to move a block SRC to block DST, where SRC and DST are
   non-consecutive groups of registers, each represented by a PARALLEL.  */

void
emit_group_move (rtx dst, rtx src)
{
  int i;

  gcc_assert (GET_CODE (src) == PARALLEL
	      && GET_CODE (dst) == PARALLEL
	      && XVECLEN (src, 0) == XVECLEN (dst, 0));

  /* Skip first entry if NULL.  */
  for (i = XEXP (XVECEXP (src, 0, 0), 0) ? 0 : 1; i < XVECLEN (src, 0); i++)
    emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0),
		    XEXP (XVECEXP (src, 0, i), 0));
}
/* Move a group of registers represented by a PARALLEL into pseudos.  */

rtx
emit_group_move_into_temps (rtx src)
{
  rtvec vec = rtvec_alloc (XVECLEN (src, 0));
  int i;

  for (i = 0; i < XVECLEN (src, 0); i++)
    {
      rtx e = XVECEXP (src, 0, i);
      rtx d = XEXP (e, 0);

      if (d)
	e = alloc_EXPR_LIST (REG_NOTE_KIND (e), copy_to_reg (d), XEXP (e, 1));
      RTVEC_ELT (vec, i) = e;
    }

  return gen_rtx_PARALLEL (GET_MODE (src), vec);
}
/* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
   where SRC is non-consecutive registers represented by a PARALLEL.
   SSIZE represents the total size of block ORIG_DST, or -1 if not
   known.  */

void
emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
		  poly_int64 ssize)
{
  rtx *tmps, dst;
  int start, finish, i;
  machine_mode m = GET_MODE (orig_dst);

  gcc_assert (GET_CODE (src) == PARALLEL);

  if (!SCALAR_INT_MODE_P (m)
      && !MEM_P (orig_dst) && GET_CODE (orig_dst) != CONCAT)
    {
      scalar_int_mode imode;
      if (int_mode_for_mode (GET_MODE (orig_dst)).exists (&imode))
	{
	  dst = gen_reg_rtx (imode);
	  emit_group_store (dst, src, type, ssize);
	  dst = gen_lowpart (GET_MODE (orig_dst), dst);
	}
      else
	{
	  dst = assign_stack_temp (GET_MODE (orig_dst), ssize);
	  emit_group_store (dst, src, type, ssize);
	}
      emit_move_insn (orig_dst, dst);
      return;
    }

  /* Check for a NULL entry, used to indicate that the parameter goes
     both on the stack and in registers.  */
  if (XEXP (XVECEXP (src, 0, 0), 0))
    start = 0;
  else
    start = 1;
  finish = XVECLEN (src, 0);

  tmps = XALLOCAVEC (rtx, finish);

  /* Copy the (probable) hard regs into pseudos.  */
  for (i = start; i < finish; i++)
    {
      rtx reg = XEXP (XVECEXP (src, 0, i), 0);
      if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
	{
	  tmps[i] = gen_reg_rtx (GET_MODE (reg));
	  emit_move_insn (tmps[i], reg);
	}
      else
	tmps[i] = reg;
    }

  /* If we won't be storing directly into memory, protect the real destination
     from strange tricks we might play.  */
  dst = orig_dst;
  if (GET_CODE (dst) == PARALLEL)
    {
      rtx temp;

      /* We can get a PARALLEL dst if there is a conditional expression in
	 a return statement.  In that case, the dst and src are the same,
	 so no action is necessary.  */
      if (rtx_equal_p (dst, src))
	return;

      /* It is unclear if we can ever reach here, but we may as well handle
	 it.  Allocate a temporary, and split this into a store/load to/from
	 the temporary.  */
      temp = assign_stack_temp (GET_MODE (dst), ssize);
      emit_group_store (temp, src, type, ssize);
      emit_group_load (dst, temp, type, ssize);
      return;
    }
  else if (!MEM_P (dst) && GET_CODE (dst) != CONCAT)
    {
      machine_mode outer = GET_MODE (dst);
      machine_mode inner;
      poly_int64 bytepos;
      bool done = false;
      rtx temp;

      if (!REG_P (dst) || REGNO (dst) < FIRST_PSEUDO_REGISTER)
	dst = gen_reg_rtx (outer);

      /* Make life a bit easier for combine: if the first element of the
	 vector is the low part of the destination mode, use a paradoxical
	 subreg to initialize the destination.  */
      if (start < finish)
	{
	  inner = GET_MODE (tmps[start]);
	  bytepos = subreg_lowpart_offset (inner, outer);
	  if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, start), 1)),
			bytepos))
	    {
	      temp = force_subreg (outer, tmps[start], inner, 0);
	      if (temp)
		{
		  emit_move_insn (dst, temp);
		  done = true;
		  start++;
		}
	    }
	}

      /* If the first element wasn't the low part, try the last.  */
      if (!done
	  && start < finish - 1)
	{
	  inner = GET_MODE (tmps[finish - 1]);
	  bytepos = subreg_lowpart_offset (inner, outer);
	  if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0,
							  finish - 1), 1)),
			bytepos))
	    {
	      temp = force_subreg (outer, tmps[finish - 1], inner, 0);
	      if (temp)
		{
		  emit_move_insn (dst, temp);
		  done = true;
		  finish--;
		}
	    }
	}

      /* Otherwise, simply initialize the result to zero.  */
      if (!done)
	emit_move_insn (dst, CONST0_RTX (outer));
    }

  /* Process the pieces.  */
  for (i = start; i < finish; i++)
    {
      poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, i), 1));
      machine_mode mode = GET_MODE (tmps[i]);
      poly_int64 bytelen = GET_MODE_SIZE (mode);
      poly_uint64 adj_bytelen;
      rtx dest = dst;

      /* Handle trailing fragments that run over the size of the struct.
	 It's the target's responsibility to make sure that the fragment
	 cannot be strictly smaller in some cases and strictly larger
	 in others.  */
      gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
      if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
	adj_bytelen = ssize - bytepos;
      else
	adj_bytelen = bytelen;

      /* Deal with destination CONCATs by either storing into one of the parts
	 or doing a copy after storing into a register or stack temporary.  */
      if (GET_CODE (dst) == CONCAT)
	{
	  if (known_le (bytepos + adj_bytelen,
			GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
	    dest = XEXP (dst, 0);

	  else if (known_ge (bytepos, GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
	    {
	      bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)));
	      dest = XEXP (dst, 1);
	    }

	  else
	    {
	      machine_mode dest_mode = GET_MODE (dest);
	      machine_mode tmp_mode = GET_MODE (tmps[i]);
	      scalar_int_mode dest_imode;

	      gcc_assert (known_eq (bytepos, 0) && XVECLEN (src, 0));

	      /* If the source is a single scalar integer register, and the
		 destination has a complex mode for which a same-sized integer
		 mode exists, then we can take the left-justified part of the
		 source in the complex mode.  */
	      if (finish == start + 1
		  && REG_P (tmps[i])
		  && SCALAR_INT_MODE_P (tmp_mode)
		  && COMPLEX_MODE_P (dest_mode)
		  && int_mode_for_mode (dest_mode).exists (&dest_imode))
		{
		  const scalar_int_mode tmp_imode
		    = as_a <scalar_int_mode> (tmp_mode);

		  if (GET_MODE_BITSIZE (dest_imode)
		      < GET_MODE_BITSIZE (tmp_imode))
		    {
		      dest = gen_reg_rtx (dest_imode);
		      if (BYTES_BIG_ENDIAN)
			tmps[i] = expand_shift (RSHIFT_EXPR, tmp_mode, tmps[i],
						GET_MODE_BITSIZE (tmp_imode)
						- GET_MODE_BITSIZE (dest_imode),
						NULL_RTX, 1);
		      emit_move_insn (dest, gen_lowpart (dest_imode, tmps[i]));
		      dst = gen_lowpart (dest_mode, dest);
		    }
		  else
		    dst = gen_lowpart (dest_mode, tmps[i]);
		}

	      /* Otherwise spill the source onto the stack using the more
		 aligned of the two modes.  */
	      else if (GET_MODE_ALIGNMENT (dest_mode)
		       >= GET_MODE_ALIGNMENT (tmp_mode))
		{
		  dest = assign_stack_temp (dest_mode,
					    GET_MODE_SIZE (dest_mode));
		  emit_move_insn (adjust_address (dest, tmp_mode, bytepos),
				  tmps[i]);
		  dst = dest;
		}

	      else
		{
		  dest = assign_stack_temp (tmp_mode,
					    GET_MODE_SIZE (tmp_mode));
		  emit_move_insn (dest, tmps[i]);
		  dst = adjust_address (dest, dest_mode, bytepos);
		}

	      break;
	    }
	}

      /* Handle trailing fragments that run over the size of the struct.  */
      if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
	{
	  /* store_bit_field always takes its value from the lsb.
	     Move the fragment to the lsb if it's not already there.  */
	  if (
#ifdef BLOCK_REG_PADDING
	      BLOCK_REG_PADDING (GET_MODE (orig_dst), type, i == start)
	      == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
#else
	      BYTES_BIG_ENDIAN
#endif
	      )
	    {
	      poly_int64 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
	      tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
				      shift, tmps[i], 0);
	    }

	  /* Make sure not to write past the end of the struct.  */
	  store_bit_field (dest,
			   adj_bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
			   bytepos * BITS_PER_UNIT, ssize * BITS_PER_UNIT - 1,
			   VOIDmode, tmps[i], false, false);
	}

      /* Optimize the access just a bit.  */
      else if (MEM_P (dest)
	       && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (dest))
		   || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
	       && multiple_p (bytepos * BITS_PER_UNIT,
			      GET_MODE_ALIGNMENT (mode))
	       && known_eq (bytelen, GET_MODE_SIZE (mode)))
	emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);

      else
	store_bit_field (dest, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
			 0, 0, mode, tmps[i], false, false);
    }

  /* Copy from the pseudo into the (probable) hard reg.  */
  if (orig_dst != dst)
    emit_move_insn (orig_dst, dst);
}
/* Return a form of X that does not use a PARALLEL.  TYPE is the type
   of the value stored in X.  */

rtx
maybe_emit_group_store (rtx x, tree type)
{
  machine_mode mode = TYPE_MODE (type);
  gcc_checking_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode);
  if (GET_CODE (x) == PARALLEL)
    {
      rtx result = gen_reg_rtx (mode);
      emit_group_store (result, x, type, int_size_in_bytes (type));
      x = result;
    }
  return x;
}
/* Copy a BLKmode object of TYPE out of a register SRCREG into TARGET.

   This is used on targets that return BLKmode values in registers.  */

void
copy_blkmode_from_reg (rtx target, rtx srcreg, tree type)
{
  unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type);
  rtx src = NULL, dst = NULL;
  unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
  unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0;
  /* No current ABI uses variable-sized modes to pass a BLKmode type.  */
  fixed_size_mode mode = as_a <fixed_size_mode> (GET_MODE (srcreg));
  fixed_size_mode tmode = as_a <fixed_size_mode> (GET_MODE (target));
  fixed_size_mode copy_mode;

  /* BLKmode registers created in the back-end shouldn't have survived.  */
  gcc_assert (mode != BLKmode);

  /* If the structure doesn't take up a whole number of words, see whether
     SRCREG is padded on the left or on the right.  If it's on the left,
     set PADDING_CORRECTION to the number of bits to skip.

     In most ABIs, the structure will be returned at the least end of
     the register, which translates to right padding on little-endian
     targets and left padding on big-endian targets.  The opposite
     holds if the structure is returned at the most significant
     end of the register.  */
  if (bytes % UNITS_PER_WORD != 0
      && (targetm.calls.return_in_msb (type)
	  ? !BYTES_BIG_ENDIAN
	  : BYTES_BIG_ENDIAN))
    padding_correction
      = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));

  /* We can use a single move if we have an exact mode for the size.  */
  else if (MEM_P (target)
	   && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (target))
	       || MEM_ALIGN (target) >= GET_MODE_ALIGNMENT (mode))
	   && bytes == GET_MODE_SIZE (mode))
    {
      emit_move_insn (adjust_address (target, mode, 0), srcreg);
      return;
    }

  /* And if we additionally have the same mode for a register.  */
  else if (REG_P (target)
	   && GET_MODE (target) == mode
	   && bytes == GET_MODE_SIZE (mode))
    {
      emit_move_insn (target, srcreg);
      return;
    }

  /* This code assumes srcreg is at least a full word.  If it isn't, copy it
     into a new pseudo which is a full word.  */
  if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
    {
      srcreg = convert_to_mode (word_mode, srcreg, TYPE_UNSIGNED (type));
      mode = word_mode;
    }

  /* Copy the structure BITSIZE bits at a time.  If the target lives in
     memory, take care of not reading/writing past its end by selecting
     a copy mode suited to BITSIZE.  This should always be possible given
     how it is computed.

     If the target lives in register, make sure not to select a copy mode
     larger than the mode of the register.

     We could probably emit more efficient code for machines which do not use
     strict alignment, but it doesn't seem worth the effort at the current
     time.  */

  copy_mode = word_mode;
  if (MEM_P (target))
    {
      opt_scalar_int_mode mem_mode = int_mode_for_size (bitsize, 1);
      if (mem_mode.exists ())
	copy_mode = mem_mode.require ();
    }
  else if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
    copy_mode = tmode;

  for (bitpos = 0, xbitpos = padding_correction;
       bitpos < bytes * BITS_PER_UNIT;
       bitpos += bitsize, xbitpos += bitsize)
    {
      /* We need a new source operand each time xbitpos is on a
	 word boundary and when xbitpos == padding_correction
	 (the first time through).  */
      if (xbitpos % BITS_PER_WORD == 0 || xbitpos == padding_correction)
	src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD, mode);

      /* We need a new destination operand each time bitpos is on
	 a word boundary.  */
      if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
	dst = target;
      else if (bitpos % BITS_PER_WORD == 0)
	dst = operand_subword (target, bitpos / BITS_PER_WORD, 1, tmode);

      /* Use xbitpos for the source extraction (right justified) and
	 bitpos for the destination store (left justified).  */
      store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, 0, 0, copy_mode,
		       extract_bit_field (src, bitsize,
					  xbitpos % BITS_PER_WORD, 1,
					  NULL_RTX, copy_mode, copy_mode,
					  false, NULL),
		       false, false);
    }
}
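/* As an example of the padding logic above: for a 6-byte structure on a
   64-bit big-endian target that returns aggregates at the least significant
   end of the register, PADDING_CORRECTION becomes 64 - 6*8 = 16, so the
   extraction starts 16 bits into the first word while the store still
   begins at bit 0 of TARGET.  */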
/* Copy BLKmode value SRC into a register of mode MODE_IN.  Return the
   register if it contains any data, otherwise return null.

   This is used on targets that return BLKmode values in registers.  */

rtx
copy_blkmode_to_reg (machine_mode mode_in, tree src)
{
  int i, n_regs;
  unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0, bytes;
  unsigned int bitsize;
  rtx *dst_words, dst, x, src_word = NULL_RTX, dst_word = NULL_RTX;
  /* No current ABI uses variable-sized modes to pass a BLKmode type.  */
  fixed_size_mode mode = as_a <fixed_size_mode> (mode_in);
  fixed_size_mode dst_mode;
  scalar_int_mode min_mode;

  gcc_assert (TYPE_MODE (TREE_TYPE (src)) == BLKmode);

  x = expand_normal (src);

  bytes = arg_int_size_in_bytes (TREE_TYPE (src));
  if (bytes == 0)
    return NULL_RTX;

  /* If the structure doesn't take up a whole number of words, see
     whether the register value should be padded on the left or on
     the right.  Set PADDING_CORRECTION to the number of padding
     bits needed on the left side.

     In most ABIs, the structure will be returned at the least end of
     the register, which translates to right padding on little-endian
     targets and left padding on big-endian targets.  The opposite
     holds if the structure is returned at the most significant
     end of the register.  */
  if (bytes % UNITS_PER_WORD != 0
      && (targetm.calls.return_in_msb (TREE_TYPE (src))
	  ? !BYTES_BIG_ENDIAN
	  : BYTES_BIG_ENDIAN))
    padding_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD)
					   * BITS_PER_UNIT));

  n_regs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
  dst_words = XALLOCAVEC (rtx, n_regs);
  bitsize = MIN (TYPE_ALIGN (TREE_TYPE (src)), BITS_PER_WORD);
  min_mode = smallest_int_mode_for_size (bitsize);

  /* Copy the structure BITSIZE bits at a time.  */
  for (bitpos = 0, xbitpos = padding_correction;
       bitpos < bytes * BITS_PER_UNIT;
       bitpos += bitsize, xbitpos += bitsize)
    {
      /* We need a new destination pseudo each time xbitpos is
	 on a word boundary and when xbitpos == padding_correction
	 (the first time through).  */
      if (xbitpos % BITS_PER_WORD == 0
	  || xbitpos == padding_correction)
	{
	  /* Generate an appropriate register.  */
	  dst_word = gen_reg_rtx (word_mode);
	  dst_words[xbitpos / BITS_PER_WORD] = dst_word;

	  /* Clear the destination before we move anything into it.  */
	  emit_move_insn (dst_word, CONST0_RTX (word_mode));
	}

      /* Find the largest integer mode that can be used to copy all or as
	 many bits as possible of the structure if the target supports larger
	 copies.  There are too many corner cases here w.r.t to alignments on
	 the read/writes.  So if there is any padding just use single byte
	 operations.  */
      opt_scalar_int_mode mode_iter;
      if (padding_correction == 0 && !STRICT_ALIGNMENT)
	{
	  FOR_EACH_MODE_FROM (mode_iter, min_mode)
	    {
	      unsigned int msize = GET_MODE_BITSIZE (mode_iter.require ());
	      if (msize <= ((bytes * BITS_PER_UNIT) - bitpos)
		  && msize <= BITS_PER_WORD)
		bitsize = msize;
	      else
		break;
	    }
	}

      /* We need a new source operand each time bitpos is on a word
	 boundary.  */
      if (bitpos % BITS_PER_WORD == 0)
	src_word = operand_subword_force (x, bitpos / BITS_PER_WORD, BLKmode);

      /* Use bitpos for the source extraction (left justified) and
	 xbitpos for the destination store (right justified).  */
      store_bit_field (dst_word, bitsize, xbitpos % BITS_PER_WORD,
		       0, 0, word_mode,
		       extract_bit_field (src_word, bitsize,
					  bitpos % BITS_PER_WORD, 1,
					  NULL_RTX, word_mode, word_mode,
					  false, NULL),
		       false, false);
    }

  if (mode == BLKmode)
    {
      /* Find the smallest integer mode large enough to hold the
	 entire structure.  */
      opt_scalar_int_mode mode_iter;
      FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
	if (GET_MODE_SIZE (mode_iter.require ()) >= bytes)
	  break;

      /* A suitable mode should have been found.  */
      mode = mode_iter.require ();
    }

  if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode))
    dst_mode = word_mode;
  else
    dst_mode = mode;
  dst = gen_reg_rtx (dst_mode);

  for (i = 0; i < n_regs; i++)
    emit_move_insn (operand_subword (dst, i, 0, dst_mode), dst_words[i]);

  if (mode != dst_mode)
    dst = gen_lowpart (mode, dst);

  return dst;
}
/* Add a USE expression for REG to the (possibly empty) list pointed
   to by CALL_FUSAGE.  REG must denote a hard register.  */

void
use_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
{
  gcc_assert (REG_P (reg));

  if (!HARD_REGISTER_P (reg))
    return;

  *call_fusage
    = gen_rtx_EXPR_LIST (mode, gen_rtx_USE (VOIDmode, reg), *call_fusage);
}

/* Add a CLOBBER expression for REG to the (possibly empty) list pointed
   to by CALL_FUSAGE.  REG must denote a hard register.  */

void
clobber_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
{
  gcc_assert (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER);

  *call_fusage
    = gen_rtx_EXPR_LIST (mode, gen_rtx_CLOBBER (VOIDmode, reg), *call_fusage);
}

/* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs,
   starting at REGNO.  All of these registers must be hard registers.  */

void
use_regs (rtx *call_fusage, int regno, int nregs)
{
  int i;

  gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER);

  for (i = 0; i < nregs; i++)
    use_reg (call_fusage, regno_reg_rtx[regno + i]);
}

/* Add USE expressions to *CALL_FUSAGE for each REG contained in the
   PARALLEL REGS.  This is for calls that pass values in multiple
   non-contiguous locations.  The Irix 6 ABI has examples of this.  */

void
use_group_regs (rtx *call_fusage, rtx regs)
{
  int i;

  for (i = 0; i < XVECLEN (regs, 0); i++)
    {
      rtx reg = XEXP (XVECEXP (regs, 0, i), 0);

      /* A NULL entry means the parameter goes both on the stack and in
	 registers.  This can also be a MEM for targets that pass values
	 partially on the stack and partially in registers.  */
      if (reg != 0 && REG_P (reg))
	use_reg (call_fusage, reg);
    }
}
/* Return the defining gimple statement for SSA_NAME NAME if it is an
   assignment and the code of the expression on the RHS is CODE.  Return
   NULL otherwise.  */

static gimple *
get_def_for_expr (tree name, enum tree_code code)
{
  gimple *def_stmt;

  if (TREE_CODE (name) != SSA_NAME)
    return NULL;

  def_stmt = get_gimple_for_ssa_name (name);
  if (!def_stmt
      || gimple_assign_rhs_code (def_stmt) != code)
    return NULL;

  return def_stmt;
}

/* Return the defining gimple statement for SSA_NAME NAME if it is an
   assignment and the class of the expression on the RHS is CLASS.  Return
   NULL otherwise.  */

static gimple *
get_def_for_expr_class (tree name, enum tree_code_class tclass)
{
  gimple *def_stmt;

  if (TREE_CODE (name) != SSA_NAME)
    return NULL;

  def_stmt = get_gimple_for_ssa_name (name);
  if (!def_stmt
      || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) != tclass)
    return NULL;

  return def_stmt;
}
/* Write zeros through the storage of OBJECT.  If OBJECT has BLKmode, SIZE is
   its length in bytes.  */

rtx
clear_storage_hints (rtx object, rtx size, enum block_op_methods method,
		     unsigned int expected_align, HOST_WIDE_INT expected_size,
		     unsigned HOST_WIDE_INT min_size,
		     unsigned HOST_WIDE_INT max_size,
		     unsigned HOST_WIDE_INT probable_max_size,
		     unsigned ctz_size)
{
  machine_mode mode = GET_MODE (object);
  unsigned int align;

  gcc_assert (method == BLOCK_OP_NORMAL || method == BLOCK_OP_TAILCALL);

  /* If OBJECT is not BLKmode and SIZE is the same size as its mode,
     just move a zero.  Otherwise, do this a piece at a time.  */
  poly_int64 size_val;
  if (mode != BLKmode
      && poly_int_rtx_p (size, &size_val)
      && known_eq (size_val, GET_MODE_SIZE (mode)))
    {
      rtx zero = CONST0_RTX (mode);
      if (zero != NULL)
	{
	  emit_move_insn (object, zero);
	  return NULL;
	}

      if (COMPLEX_MODE_P (mode))
	{
	  zero = CONST0_RTX (GET_MODE_INNER (mode));
	  if (zero != NULL)
	    {
	      write_complex_part (object, zero, 0, true);
	      write_complex_part (object, zero, 1, false);
	      return NULL;
	    }
	}
    }

  if (size == const0_rtx)
    return NULL;

  align = MEM_ALIGN (object);

  if (CONST_INT_P (size)
      && targetm.use_by_pieces_infrastructure_p (INTVAL (size), align,
						 CLEAR_BY_PIECES,
						 optimize_insn_for_speed_p ()))
    clear_by_pieces (object, INTVAL (size), align);
  else if (set_storage_via_setmem (object, size, const0_rtx, align,
				   expected_align, expected_size,
				   min_size, max_size, probable_max_size))
    ;
  else if (try_store_by_multiple_pieces (object, size, ctz_size,
					 min_size, max_size,
					 NULL_RTX, 0, align))
    ;
  else if (ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (object)))
    return set_storage_via_libcall (object, size, const0_rtx,
				    method == BLOCK_OP_TAILCALL);
  else
    gcc_unreachable ();

  return NULL;
}

rtx
clear_storage (rtx object, rtx size, enum block_op_methods method)
{
  unsigned HOST_WIDE_INT max, min = 0;
  if (GET_CODE (size) == CONST_INT)
    min = max = UINTVAL (size);
  else
    max = GET_MODE_MASK (GET_MODE (size));
  return clear_storage_hints (object, size, method, 0, -1, min, max, max, 0);
}
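/* So a clear_storage call with a CONST_INT size of, say, 32 forwards
   min_size == max_size == 32 to clear_storage_hints, while a variable
   SImode size forwards min_size 0 and max_size 0xffffffff.  */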
/* A subroutine of clear_storage.  Expand a call to memset.
   Return the return value of memset, 0 otherwise.  */

rtx
set_storage_via_libcall (rtx object, rtx size, rtx val, bool tailcall)
{
  tree call_expr, fn, object_tree, size_tree, val_tree;
  machine_mode size_mode;

  object = copy_addr_to_reg (XEXP (object, 0));
  object_tree = make_tree (ptr_type_node, object);

  if (!CONST_INT_P (val))
    val = convert_to_mode (TYPE_MODE (integer_type_node), val, 1);
  val_tree = make_tree (integer_type_node, val);

  size_mode = TYPE_MODE (sizetype);
  size = convert_to_mode (size_mode, size, 1);
  size = copy_to_mode_reg (size_mode, size);
  size_tree = make_tree (sizetype, size);

  /* It is incorrect to use the libcall calling conventions for calls to
     memset because it can be provided by the user.  */
  fn = builtin_decl_implicit (BUILT_IN_MEMSET);
  call_expr = build_call_expr (fn, 3, object_tree, val_tree, size_tree);
  CALL_EXPR_TAILCALL (call_expr) = tailcall;

  return expand_call (call_expr, NULL_RTX, false);
}
3967 /* Expand a setmem pattern; return true if successful. */
3970 set_storage_via_setmem (rtx object
, rtx size
, rtx val
, unsigned int align
,
3971 unsigned int expected_align
, HOST_WIDE_INT expected_size
,
3972 unsigned HOST_WIDE_INT min_size
,
3973 unsigned HOST_WIDE_INT max_size
,
3974 unsigned HOST_WIDE_INT probable_max_size
)
3976 /* Try the most limited insn first, because there's no point
3977 including more than one in the machine description unless
3978 the more limited one has some advantage. */
3980 if (expected_align
< align
)
3981 expected_align
= align
;
3982 if (expected_size
!= -1)
3984 if ((unsigned HOST_WIDE_INT
)expected_size
> max_size
)
3985 expected_size
= max_size
;
3986 if ((unsigned HOST_WIDE_INT
)expected_size
< min_size
)
3987 expected_size
= min_size
;
3990 opt_scalar_int_mode mode_iter
;
3991 FOR_EACH_MODE_IN_CLASS (mode_iter
, MODE_INT
)
3993 scalar_int_mode mode
= mode_iter
.require ();
3994 enum insn_code code
= direct_optab_handler (setmem_optab
, mode
);
3996 if (code
!= CODE_FOR_nothing
3997 /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
3998 here because if SIZE is less than the mode mask, as it is
3999 returned by the macro, it will definitely be less than the
4000 actual mode mask. Since SIZE is within the Pmode address
4001 space, we limit MODE to Pmode. */
4002 && ((CONST_INT_P (size
)
4003 && ((unsigned HOST_WIDE_INT
) INTVAL (size
)
4004 <= (GET_MODE_MASK (mode
) >> 1)))
4005 || max_size
<= (GET_MODE_MASK (mode
) >> 1)
4006 || GET_MODE_BITSIZE (mode
) >= GET_MODE_BITSIZE (Pmode
)))
4008 class expand_operand ops
[9];
4011 nops
= insn_data
[(int) code
].n_generator_args
;
4012 gcc_assert (nops
== 4 || nops
== 6 || nops
== 8 || nops
== 9);
4014 create_fixed_operand (&ops
[0], object
);
4015 /* The check above guarantees that this size conversion is valid. */
4016 create_convert_operand_to (&ops
[1], size
, mode
, true);
4017 create_convert_operand_from (&ops
[2], val
, byte_mode
, true);
4018 create_integer_operand (&ops
[3], align
/ BITS_PER_UNIT
);
4021 create_integer_operand (&ops
[4], expected_align
/ BITS_PER_UNIT
);
4022 create_integer_operand (&ops
[5], expected_size
);
4026 create_integer_operand (&ops
[6], min_size
);
4027 /* If we cannot represent the maximal size,
4028 make parameter NULL. */
4029 if ((HOST_WIDE_INT
) max_size
!= -1)
4030 create_integer_operand (&ops
[7], max_size
);
4032 create_fixed_operand (&ops
[7], NULL
);
4036 /* If we cannot represent the maximal size,
4037 make parameter NULL. */
4038 if ((HOST_WIDE_INT
) probable_max_size
!= -1)
4039 create_integer_operand (&ops
[8], probable_max_size
);
4041 create_fixed_operand (&ops
[8], NULL
);
4043 if (maybe_expand_insn (code
, nops
, ops
))
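/* Worked example of the size check above (illustrative, not from the
   original source): for QImode, GET_MODE_MASK (QImode) >> 1 is 0x7f, so a
   setmemqi pattern is only considered when the size is a constant of at
   most 127 or the (probable) maximum size is at most 127 bytes; wider
   integer modes, or any mode at least as wide as Pmode, lift that limit.  */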
/* Write to one of the components of the complex value CPLX.  Write VAL to
   the real part if IMAG_P is false, and the imaginary part if it's true.
   If UNDEFINED_P then the value in CPLX is currently undefined.  */

void
write_complex_part (rtx cplx, rtx val, bool imag_p, bool undefined_p)
{
  machine_mode cmode;
  scalar_mode imode;
  unsigned ibitsize;

  if (GET_CODE (cplx) == CONCAT)
    {
      emit_move_insn (XEXP (cplx, imag_p), val);
      return;
    }

  cmode = GET_MODE (cplx);
  imode = GET_MODE_INNER (cmode);
  ibitsize = GET_MODE_BITSIZE (imode);

  /* For MEMs simplify_gen_subreg may generate an invalid new address
     because, e.g., the original address is considered mode-dependent
     by the target, which restricts simplify_subreg from invoking
     adjust_address_nv.  Instead of preparing fallback support for an
     invalid address, we call adjust_address_nv directly.  */
  if (MEM_P (cplx))
    {
      emit_move_insn (adjust_address_nv (cplx, imode,
					 imag_p ? GET_MODE_SIZE (imode) : 0),
		      val);
      return;
    }

  /* If the sub-object is at least word sized, then we know that subregging
     will work.  This special case is important, since store_bit_field
     wants to operate on integer modes, and there's rarely an OImode to
     correspond to TCmode.  */
  if (ibitsize >= BITS_PER_WORD
      /* For hard regs we have exact predicates.  Assume we can split
	 the original object if it spans an even number of hard regs.
	 This special case is important for SCmode on 64-bit platforms
	 where the natural size of floating-point regs is 32-bit.  */
      || (REG_P (cplx)
	  && REGNO (cplx) < FIRST_PSEUDO_REGISTER
	  && REG_NREGS (cplx) % 2 == 0))
    {
      rtx part = simplify_gen_subreg (imode, cplx, cmode,
				      imag_p ? GET_MODE_SIZE (imode) : 0);
      if (part)
	{
	  emit_move_insn (part, val);
	  return;
	}
      else
	/* simplify_gen_subreg may fail for sub-word MEMs.  */
	gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
    }

  store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, 0, 0, imode, val,
		   false, undefined_p);
}
/* Extract one of the components of the complex value CPLX.  Extract the
   real part if IMAG_P is false, and the imaginary part if it's true.  */

rtx
read_complex_part (rtx cplx, bool imag_p)
{
  machine_mode cmode;
  scalar_mode imode;
  unsigned ibitsize;

  if (GET_CODE (cplx) == CONCAT)
    return XEXP (cplx, imag_p);

  cmode = GET_MODE (cplx);
  imode = GET_MODE_INNER (cmode);
  ibitsize = GET_MODE_BITSIZE (imode);

  /* Special case reads from complex constants that got spilled to memory.  */
  if (MEM_P (cplx) && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
    {
      tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
      if (decl && TREE_CODE (decl) == COMPLEX_CST)
	{
	  tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
	  if (CONSTANT_CLASS_P (part))
	    return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
	}
    }

  /* For MEMs simplify_gen_subreg may generate an invalid new address
     because, e.g., the original address is considered mode-dependent
     by the target, which restricts simplify_subreg from invoking
     adjust_address_nv.  Instead of preparing fallback support for an
     invalid address, we call adjust_address_nv directly.  */
  if (MEM_P (cplx))
    return adjust_address_nv (cplx, imode,
			      imag_p ? GET_MODE_SIZE (imode) : 0);

  /* If the sub-object is at least word sized, then we know that subregging
     will work.  This special case is important, since extract_bit_field
     wants to operate on integer modes, and there's rarely an OImode to
     correspond to TCmode.  */
  if (ibitsize >= BITS_PER_WORD
      /* For hard regs we have exact predicates.  Assume we can split
	 the original object if it spans an even number of hard regs.
	 This special case is important for SCmode on 64-bit platforms
	 where the natural size of floating-point regs is 32-bit.  */
      || (REG_P (cplx)
	  && REGNO (cplx) < FIRST_PSEUDO_REGISTER
	  && REG_NREGS (cplx) % 2 == 0))
    {
      rtx ret = simplify_gen_subreg (imode, cplx, cmode,
				     imag_p ? GET_MODE_SIZE (imode) : 0);
      if (ret)
	return ret;
      else
	/* simplify_gen_subreg may fail for sub-word MEMs.  */
	gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
    }

  return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
			    true, NULL_RTX, imode, imode, false, NULL);
}
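/* Illustrative sketch (not part of the original source): moving a complex
   value part by part with the two helpers above, which is essentially
   what emit_move_complex_parts does further down.  */
#if 0
static void
example_copy_complex (rtx dst, rtx src)
{
  write_complex_part (dst, read_complex_part (src, false), false, true);
  write_complex_part (dst, read_complex_part (src, true), true, false);
}
#endif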
/* A subroutine of emit_move_insn_1.  Yet another lowpart generator.
   NEW_MODE and OLD_MODE are the same size.  Return NULL if X cannot be
   represented in NEW_MODE.  If FORCE is true, this will never happen, as
   we'll force-create a SUBREG if needed.  */

static rtx
emit_move_change_mode (machine_mode new_mode,
		       machine_mode old_mode, rtx x, bool force)
{
  rtx ret;

  if (push_operand (x, GET_MODE (x)))
    {
      ret = gen_rtx_MEM (new_mode, XEXP (x, 0));
      MEM_COPY_ATTRIBUTES (ret, x);
    }
  else if (MEM_P (x))
    {
      /* We don't have to worry about changing the address since the
	 size in bytes is supposed to be the same.  */
      if (reload_in_progress)
	{
	  /* Copy the MEM to change the mode and move any
	     substitutions from the old MEM to the new one.  */
	  ret = adjust_address_nv (x, new_mode, 0);
	  copy_replacements (x, ret);
	}
      else
	ret = adjust_address (x, new_mode, 0);
    }
  else
    {
      /* Note that we do want simplify_subreg's behavior of validating
	 that the new mode is ok for a hard register.  If we were to use
	 simplify_gen_subreg, we would create the subreg, but would
	 probably run into the target not being able to implement it.  */
      /* Except, of course, when FORCE is true, when this is exactly what
	 we want.  Which is needed for CCmodes on some targets.  */
      if (force)
	ret = simplify_gen_subreg (new_mode, x, old_mode, 0);
      else
	ret = simplify_subreg (new_mode, x, old_mode, 0);
    }

  return ret;
}
/* A subroutine of emit_move_insn_1.  Generate a move from Y into X using
   an integer mode of the same size as MODE.  Returns the instruction
   emitted, or NULL if such a move could not be generated.  */

static rtx_insn *
emit_move_via_integer (machine_mode mode, rtx x, rtx y, bool force)
{
  scalar_int_mode imode;
  enum insn_code code;

  /* There must exist a mode of the exact size we require.  */
  if (!int_mode_for_mode (mode).exists (&imode))
    return NULL;

  /* The target must support moves in this mode.  */
  code = optab_handler (mov_optab, imode);
  if (code == CODE_FOR_nothing)
    return NULL;

  x = emit_move_change_mode (imode, mode, x, force);
  if (x == NULL_RTX)
    return NULL;
  y = emit_move_change_mode (imode, mode, y, force);
  if (y == NULL_RTX)
    return NULL;
  return emit_insn (GEN_FCN (code) (x, y));
}
4254 /* A subroutine of emit_move_insn_1. X is a push_operand in MODE.
4255 Return an equivalent MEM that does not use an auto-increment. */
4258 emit_move_resolve_push (machine_mode mode
, rtx x
)
4260 enum rtx_code code
= GET_CODE (XEXP (x
, 0));
4263 poly_int64 adjust
= GET_MODE_SIZE (mode
);
4264 #ifdef PUSH_ROUNDING
4265 adjust
= PUSH_ROUNDING (adjust
);
4267 if (code
== PRE_DEC
|| code
== POST_DEC
)
4269 else if (code
== PRE_MODIFY
|| code
== POST_MODIFY
)
4271 rtx expr
= XEXP (XEXP (x
, 0), 1);
4273 gcc_assert (GET_CODE (expr
) == PLUS
|| GET_CODE (expr
) == MINUS
);
4274 poly_int64 val
= rtx_to_poly_int64 (XEXP (expr
, 1));
4275 if (GET_CODE (expr
) == MINUS
)
4277 gcc_assert (known_eq (adjust
, val
) || known_eq (adjust
, -val
));
4281 /* Do not use anti_adjust_stack, since we don't want to update
4282 stack_pointer_delta. */
4283 temp
= expand_simple_binop (Pmode
, PLUS
, stack_pointer_rtx
,
4284 gen_int_mode (adjust
, Pmode
), stack_pointer_rtx
,
4285 0, OPTAB_LIB_WIDEN
);
4286 if (temp
!= stack_pointer_rtx
)
4287 emit_move_insn (stack_pointer_rtx
, temp
);
4294 temp
= stack_pointer_rtx
;
4299 temp
= plus_constant (Pmode
, stack_pointer_rtx
, -adjust
);
4305 return replace_equiv_address (x
, temp
);
4308 /* A subroutine of emit_move_complex. Generate a move from Y into X.
4309 X is known to satisfy push_operand, and MODE is known to be complex.
4310 Returns the last instruction emitted. */
4313 emit_move_complex_push (machine_mode mode
, rtx x
, rtx y
)
4315 scalar_mode submode
= GET_MODE_INNER (mode
);
4318 #ifdef PUSH_ROUNDING
4319 poly_int64 submodesize
= GET_MODE_SIZE (submode
);
4321 /* In case we output to the stack, but the size is smaller than the
4322 machine can push exactly, we need to use move instructions. */
4323 if (maybe_ne (PUSH_ROUNDING (submodesize
), submodesize
))
4325 x
= emit_move_resolve_push (mode
, x
);
4326 return emit_move_insn (x
, y
);
4330 /* Note that the real part always precedes the imag part in memory
4331 regardless of machine's endianness. */
4332 switch (GET_CODE (XEXP (x
, 0)))
4346 emit_move_insn (gen_rtx_MEM (submode
, XEXP (x
, 0)),
4347 read_complex_part (y
, imag_first
));
4348 return emit_move_insn (gen_rtx_MEM (submode
, XEXP (x
, 0)),
4349 read_complex_part (y
, !imag_first
));
/* A subroutine of emit_move_complex.  Perform the move from Y to X
   via two moves of the parts.  Returns the last instruction emitted.  */

rtx_insn *
emit_move_complex_parts (rtx x, rtx y)
{
  /* Show the output dies here.  This is necessary for SUBREGs
     of pseudos since we cannot track their lifetimes correctly;
     hard regs shouldn't appear here except as return values.  */
  if (!reload_completed && !reload_in_progress
      && REG_P (x) && !reg_overlap_mentioned_p (x, y))
    emit_clobber (x);

  write_complex_part (x, read_complex_part (y, false), false, true);
  write_complex_part (x, read_complex_part (y, true), true, false);

  return get_last_insn ();
}
4371 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
4372 MODE is known to be complex. Returns the last instruction emitted. */
4375 emit_move_complex (machine_mode mode
, rtx x
, rtx y
)
4379 /* Need to take special care for pushes, to maintain proper ordering
4380 of the data, and possibly extra padding. */
4381 if (push_operand (x
, mode
))
4382 return emit_move_complex_push (mode
, x
, y
);
4384 /* See if we can coerce the target into moving both values at once, except
4385 for floating point where we favor moving as parts if this is easy. */
4386 if (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
4387 && optab_handler (mov_optab
, GET_MODE_INNER (mode
)) != CODE_FOR_nothing
4389 && HARD_REGISTER_P (x
)
4390 && REG_NREGS (x
) == 1)
4392 && HARD_REGISTER_P (y
)
4393 && REG_NREGS (y
) == 1))
4395 /* Not possible if the values are inherently not adjacent. */
4396 else if (GET_CODE (x
) == CONCAT
|| GET_CODE (y
) == CONCAT
)
4398 /* Is possible if both are registers (or subregs of registers). */
4399 else if (register_operand (x
, mode
) && register_operand (y
, mode
))
4401 /* If one of the operands is a memory, and alignment constraints
4402 are friendly enough, we may be able to do combined memory operations.
4403 We do not attempt this if Y is a constant because that combination is
4404 usually better with the by-parts thing below. */
4405 else if ((MEM_P (x
) ? !CONSTANT_P (y
) : MEM_P (y
))
4406 && (!STRICT_ALIGNMENT
4407 || get_mode_alignment (mode
) == BIGGEST_ALIGNMENT
))
4416 /* For memory to memory moves, optimal behavior can be had with the
4417 existing block move logic. But use normal expansion if optimizing
4419 if (MEM_P (x
) && MEM_P (y
))
4421 emit_block_move (x
, y
, gen_int_mode (GET_MODE_SIZE (mode
), Pmode
),
4422 (optimize_insn_for_speed_p()
4423 ? BLOCK_OP_NO_LIBCALL
: BLOCK_OP_NORMAL
));
4424 return get_last_insn ();
4427 ret
= emit_move_via_integer (mode
, x
, y
, true);
4432 return emit_move_complex_parts (x
, y
);
/* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
   MODE is known to be MODE_CC.  Returns the last instruction emitted.  */

static rtx_insn *
emit_move_ccmode (machine_mode mode, rtx x, rtx y)
{
  rtx_insn *ret;

  /* Assume all MODE_CC modes are equivalent; if we have movcc, use it.  */
  if (mode != CCmode)
    {
      enum insn_code code = optab_handler (mov_optab, CCmode);
      if (code != CODE_FOR_nothing)
	{
	  x = emit_move_change_mode (CCmode, mode, x, true);
	  y = emit_move_change_mode (CCmode, mode, y, true);
	  return emit_insn (GEN_FCN (code) (x, y));
	}
    }

  /* Otherwise, find the MODE_INT mode of the same width.  */
  ret = emit_move_via_integer (mode, x, y, false);
  gcc_assert (ret != NULL);
  return ret;
}
/* Return true if word I of OP lies entirely in the
   undefined bits of a paradoxical subreg.  */

static bool
undefined_operand_subword_p (const_rtx op, int i)
{
  if (GET_CODE (op) != SUBREG)
    return false;
  machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
  poly_int64 offset = i * UNITS_PER_WORD + subreg_memory_offset (op);
  return (known_ge (offset, GET_MODE_SIZE (innermostmode))
	  || known_le (offset, -UNITS_PER_WORD));
}
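/* For example (illustration, not from the original source), on a target
   with 8-byte words, word 1 of (subreg:TI (reg:DI R) 0) starts at byte
   offset 8, which is at or past the 8-byte DImode source, so that word is
   entirely undefined and emit_move_multi_word below can skip it.  */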
4475 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
4476 MODE is any multi-word or full-word mode that lacks a move_insn
4477 pattern. Note that you will get better code if you define such
4478 patterns, even if they must turn into multiple assembler instructions. */
4481 emit_move_multi_word (machine_mode mode
, rtx x
, rtx y
)
4483 rtx_insn
*last_insn
= 0;
4489 /* This function can only handle cases where the number of words is
4490 known at compile time. */
4491 mode_size
= GET_MODE_SIZE (mode
).to_constant ();
4492 gcc_assert (mode_size
>= UNITS_PER_WORD
);
4494 /* If X is a push on the stack, do the push now and replace
4495 X with a reference to the stack pointer. */
4496 if (push_operand (x
, mode
))
4497 x
= emit_move_resolve_push (mode
, x
);
4499 /* If we are in reload, see if either operand is a MEM whose address
4500 is scheduled for replacement. */
4501 if (reload_in_progress
&& MEM_P (x
)
4502 && (inner
= find_replacement (&XEXP (x
, 0))) != XEXP (x
, 0))
4503 x
= replace_equiv_address_nv (x
, inner
);
4504 if (reload_in_progress
&& MEM_P (y
)
4505 && (inner
= find_replacement (&XEXP (y
, 0))) != XEXP (y
, 0))
4506 y
= replace_equiv_address_nv (y
, inner
);
4510 need_clobber
= false;
4511 for (i
= 0; i
< CEIL (mode_size
, UNITS_PER_WORD
); i
++)
4513 /* Do not generate code for a move if it would go entirely
4514 to the non-existing bits of a paradoxical subreg. */
4515 if (undefined_operand_subword_p (x
, i
))
4518 rtx xpart
= operand_subword (x
, i
, 1, mode
);
4521 /* Do not generate code for a move if it would come entirely
4522 from the undefined bits of a paradoxical subreg. */
4523 if (undefined_operand_subword_p (y
, i
))
4526 ypart
= operand_subword (y
, i
, 1, mode
);
4528 /* If we can't get a part of Y, put Y into memory if it is a
4529 constant. Otherwise, force it into a register. Then we must
4530 be able to get a part of Y. */
4531 if (ypart
== 0 && CONSTANT_P (y
))
4533 y
= use_anchored_address (force_const_mem (mode
, y
));
4534 ypart
= operand_subword (y
, i
, 1, mode
);
4536 else if (ypart
== 0)
4537 ypart
= operand_subword_force (y
, i
, mode
);
4539 gcc_assert (xpart
&& ypart
);
4541 need_clobber
|= (GET_CODE (xpart
) == SUBREG
);
4543 last_insn
= emit_move_insn (xpart
, ypart
);
4549 /* Show the output dies here. This is necessary for SUBREGs
4550 of pseudos since we cannot track their lifetimes correctly;
4551 hard regs shouldn't appear here except as return values.
4552 We never want to emit such a clobber after reload. */
4554 && ! (reload_in_progress
|| reload_completed
)
4555 && need_clobber
!= 0)
4563 /* Low level part of emit_move_insn.
4564 Called just like emit_move_insn, but assumes X and Y
4565 are basically valid. */
4568 emit_move_insn_1 (rtx x
, rtx y
)
4570 machine_mode mode
= GET_MODE (x
);
4571 enum insn_code code
;
4573 gcc_assert ((unsigned int) mode
< (unsigned int) MAX_MACHINE_MODE
);
4575 code
= optab_handler (mov_optab
, mode
);
4576 if (code
!= CODE_FOR_nothing
)
4577 return emit_insn (GEN_FCN (code
) (x
, y
));
4579 /* Expand complex moves by moving real part and imag part. */
4580 if (COMPLEX_MODE_P (mode
))
4581 return emit_move_complex (mode
, x
, y
);
4583 if (GET_MODE_CLASS (mode
) == MODE_DECIMAL_FLOAT
4584 || ALL_FIXED_POINT_MODE_P (mode
))
4586 rtx_insn
*result
= emit_move_via_integer (mode
, x
, y
, true);
4588 /* If we can't find an integer mode, use multi words. */
4592 return emit_move_multi_word (mode
, x
, y
);
4595 if (GET_MODE_CLASS (mode
) == MODE_CC
)
4596 return emit_move_ccmode (mode
, x
, y
);
4598 /* Try using a move pattern for the corresponding integer mode. This is
4599 only safe when simplify_subreg can convert MODE constants into integer
4600 constants. At present, it can only do this reliably if the value
4601 fits within a HOST_WIDE_INT. */
4603 || known_le (GET_MODE_BITSIZE (mode
), HOST_BITS_PER_WIDE_INT
))
4605 rtx_insn
*ret
= emit_move_via_integer (mode
, x
, y
, lra_in_progress
);
4609 if (! lra_in_progress
|| recog (PATTERN (ret
), ret
, 0) >= 0)
4614 return emit_move_multi_word (mode
, x
, y
);
4617 /* Generate code to copy Y into X.
4618 Both Y and X must have the same mode, except that
4619 Y can be a constant with VOIDmode.
4620 This mode cannot be BLKmode; use emit_block_move for that.
4622 Return the last instruction emitted. */
4625 emit_move_insn (rtx x
, rtx y
)
4627 machine_mode mode
= GET_MODE (x
);
4628 rtx y_cst
= NULL_RTX
;
4629 rtx_insn
*last_insn
;
4632 gcc_assert (mode
!= BLKmode
4633 && (GET_MODE (y
) == mode
|| GET_MODE (y
) == VOIDmode
));
4635 /* If we have a copy that looks like one of the following patterns:
4636 (set (subreg:M1 (reg:M2 ...)) (subreg:M1 (reg:M2 ...)))
4637 (set (subreg:M1 (reg:M2 ...)) (mem:M1 ADDR))
4638 (set (mem:M1 ADDR) (subreg:M1 (reg:M2 ...)))
4639 (set (subreg:M1 (reg:M2 ...)) (constant C))
4640 where mode M1 is equal in size to M2, try to detect whether the
4641 mode change involves an implicit round trip through memory.
4642 If so, see if we can avoid that by removing the subregs and
4643 doing the move in mode M2 instead. */
4645 rtx x_inner
= NULL_RTX
;
4646 rtx y_inner
= NULL_RTX
;
4648 auto candidate_subreg_p
= [&](rtx subreg
) {
4649 return (REG_P (SUBREG_REG (subreg
))
4650 && known_eq (GET_MODE_SIZE (GET_MODE (SUBREG_REG (subreg
))),
4651 GET_MODE_SIZE (GET_MODE (subreg
)))
4652 && optab_handler (mov_optab
, GET_MODE (SUBREG_REG (subreg
)))
4653 != CODE_FOR_nothing
);
4656 auto candidate_mem_p
= [&](machine_mode innermode
, rtx mem
) {
4657 return (!targetm
.can_change_mode_class (innermode
, GET_MODE (mem
), ALL_REGS
)
4658 && !push_operand (mem
, GET_MODE (mem
))
4659 /* Not a candidate if innermode requires too much alignment. */
4660 && (MEM_ALIGN (mem
) >= GET_MODE_ALIGNMENT (innermode
)
4661 || targetm
.slow_unaligned_access (GET_MODE (mem
),
4663 || !targetm
.slow_unaligned_access (innermode
,
4667 if (SUBREG_P (x
) && candidate_subreg_p (x
))
4668 x_inner
= SUBREG_REG (x
);
4670 if (SUBREG_P (y
) && candidate_subreg_p (y
))
4671 y_inner
= SUBREG_REG (y
);
4673 if (x_inner
!= NULL_RTX
4674 && y_inner
!= NULL_RTX
4675 && GET_MODE (x_inner
) == GET_MODE (y_inner
)
4676 && !targetm
.can_change_mode_class (GET_MODE (x_inner
), mode
, ALL_REGS
))
4680 mode
= GET_MODE (x_inner
);
4682 else if (x_inner
!= NULL_RTX
4684 && candidate_mem_p (GET_MODE (x_inner
), y
))
4687 y
= adjust_address (y
, GET_MODE (x_inner
), 0);
4688 mode
= GET_MODE (x_inner
);
4690 else if (y_inner
!= NULL_RTX
4692 && candidate_mem_p (GET_MODE (y_inner
), x
))
4694 x
= adjust_address (x
, GET_MODE (y_inner
), 0);
4696 mode
= GET_MODE (y_inner
);
4698 else if (x_inner
!= NULL_RTX
4700 && !targetm
.can_change_mode_class (GET_MODE (x_inner
),
4702 && (y_inner
= simplify_subreg (GET_MODE (x_inner
), y
, mode
, 0)))
4706 mode
= GET_MODE (x_inner
);
4712 && SCALAR_FLOAT_MODE_P (GET_MODE (x
))
4713 && (last_insn
= compress_float_constant (x
, y
)))
4718 if (!targetm
.legitimate_constant_p (mode
, y
))
4720 y
= force_const_mem (mode
, y
);
4722 /* If the target's cannot_force_const_mem prevented the spill,
4723 assume that the target's move expanders will also take care
4724 of the non-legitimate constant. */
4728 y
= use_anchored_address (y
);
4732 /* If X or Y are memory references, verify that their addresses are valid
4735 && (! memory_address_addr_space_p (GET_MODE (x
), XEXP (x
, 0),
4737 && ! push_operand (x
, GET_MODE (x
))))
4738 x
= validize_mem (x
);
4741 && ! memory_address_addr_space_p (GET_MODE (y
), XEXP (y
, 0),
4742 MEM_ADDR_SPACE (y
)))
4743 y
= validize_mem (y
);
4745 gcc_assert (mode
!= BLKmode
);
4747 last_insn
= emit_move_insn_1 (x
, y
);
4749 if (y_cst
&& REG_P (x
)
4750 && (set
= single_set (last_insn
)) != NULL_RTX
4751 && SET_DEST (set
) == x
4752 && ! rtx_equal_p (y_cst
, SET_SRC (set
)))
4753 set_unique_reg_note (last_insn
, REG_EQUAL
, copy_rtx (y_cst
));
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx_insn *
gen_move_insn (rtx x, rtx y)
{
  rtx_insn *seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}
4773 /* If Y is representable exactly in a narrower mode, and the target can
4774 perform the extension directly from constant or memory, then emit the
4775 move as an extension. */
4778 compress_float_constant (rtx x
, rtx y
)
4780 machine_mode dstmode
= GET_MODE (x
);
4781 machine_mode orig_srcmode
= GET_MODE (y
);
4782 machine_mode srcmode
;
4783 const REAL_VALUE_TYPE
*r
;
4784 int oldcost
, newcost
;
4785 bool speed
= optimize_insn_for_speed_p ();
4787 r
= CONST_DOUBLE_REAL_VALUE (y
);
4789 if (targetm
.legitimate_constant_p (dstmode
, y
))
4790 oldcost
= set_src_cost (y
, orig_srcmode
, speed
);
4792 oldcost
= set_src_cost (force_const_mem (dstmode
, y
), dstmode
, speed
);
4794 FOR_EACH_MODE_UNTIL (srcmode
, orig_srcmode
)
4798 rtx_insn
*last_insn
;
4800 /* Skip if the target can't extend this way. */
4801 ic
= can_extend_p (dstmode
, srcmode
, 0);
4802 if (ic
== CODE_FOR_nothing
)
4805 /* Skip if the narrowed value isn't exact. */
4806 if (! exact_real_truncate (srcmode
, r
))
4809 trunc_y
= const_double_from_real_value (*r
, srcmode
);
4811 if (targetm
.legitimate_constant_p (srcmode
, trunc_y
))
4813 /* Skip if the target needs extra instructions to perform
4815 if (!insn_operand_matches (ic
, 1, trunc_y
))
4817 /* This is valid, but may not be cheaper than the original. */
4818 newcost
= set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode
, trunc_y
),
4820 if (oldcost
< newcost
)
4823 else if (float_extend_from_mem
[dstmode
][srcmode
])
4825 trunc_y
= force_const_mem (srcmode
, trunc_y
);
4826 /* This is valid, but may not be cheaper than the original. */
4827 newcost
= set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode
, trunc_y
),
4829 if (oldcost
< newcost
)
4831 trunc_y
= validize_mem (trunc_y
);
4836 /* For CSE's benefit, force the compressed constant pool entry
4837 into a new pseudo. This constant may be used in different modes,
4838 and if not, combine will put things back together for us. */
4839 trunc_y
= force_reg (srcmode
, trunc_y
);
4841 /* If x is a hard register, perform the extension into a pseudo,
4842 so that e.g. stack realignment code is aware of it. */
4844 if (REG_P (x
) && HARD_REGISTER_P (x
))
4845 target
= gen_reg_rtx (dstmode
);
4847 emit_unop_insn (ic
, target
, trunc_y
, UNKNOWN
);
4848 last_insn
= get_last_insn ();
4851 set_unique_reg_note (last_insn
, REG_EQUAL
, y
);
4854 return emit_move_insn (x
, target
);
4861 /* Pushing data onto the stack. */
4863 /* Push a block of length SIZE (perhaps variable)
4864 and return an rtx to address the beginning of the block.
4865 The value may be virtual_outgoing_args_rtx.
4867 EXTRA is the number of bytes of padding to push in addition to SIZE.
4868 BELOW nonzero means this padding comes at low addresses;
4869 otherwise, the padding comes at high addresses. */
4872 push_block (rtx size
, poly_int64 extra
, int below
)
4876 size
= convert_modes (Pmode
, ptr_mode
, size
, 1);
4877 if (CONSTANT_P (size
))
4878 anti_adjust_stack (plus_constant (Pmode
, size
, extra
));
4879 else if (REG_P (size
) && known_eq (extra
, 0))
4880 anti_adjust_stack (size
);
4883 temp
= copy_to_mode_reg (Pmode
, size
);
4884 if (maybe_ne (extra
, 0))
4885 temp
= expand_binop (Pmode
, add_optab
, temp
,
4886 gen_int_mode (extra
, Pmode
),
4887 temp
, 0, OPTAB_LIB_WIDEN
);
4888 anti_adjust_stack (temp
);
4891 if (STACK_GROWS_DOWNWARD
)
4893 temp
= virtual_outgoing_args_rtx
;
4894 if (maybe_ne (extra
, 0) && below
)
4895 temp
= plus_constant (Pmode
, temp
, extra
);
4900 if (poly_int_rtx_p (size
, &csize
))
4901 temp
= plus_constant (Pmode
, virtual_outgoing_args_rtx
,
4902 -csize
- (below
? 0 : extra
));
4903 else if (maybe_ne (extra
, 0) && !below
)
4904 temp
= gen_rtx_PLUS (Pmode
, virtual_outgoing_args_rtx
,
4905 negate_rtx (Pmode
, plus_constant (Pmode
, size
,
4908 temp
= gen_rtx_PLUS (Pmode
, virtual_outgoing_args_rtx
,
4909 negate_rtx (Pmode
, size
));
4912 return memory_address (NARROWEST_INT_MODE
, temp
);
/* A utility routine that returns the base of an auto-inc memory, or NULL.  */

static rtx
mem_autoinc_base (rtx mem)
{
  if (MEM_P (mem))
    {
      rtx addr = XEXP (mem, 0);
      if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
	return XEXP (addr, 0);
    }
  return NULL;
}
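/* For instance, for (mem:SI (post_inc (reg sp))) this returns (reg sp),
   while for a plain (mem:SI (reg sp)) it returns NULL.  */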
4929 /* A utility routine used here, in reload, and in try_split. The insns
4930 after PREV up to and including LAST are known to adjust the stack,
4931 with a final value of END_ARGS_SIZE. Iterate backward from LAST
4932 placing notes as appropriate. PREV may be NULL, indicating the
4933 entire insn sequence prior to LAST should be scanned.
4935 The set of allowed stack pointer modifications is small:
4936 (1) One or more auto-inc style memory references (aka pushes),
4937 (2) One or more addition/subtraction with the SP as destination,
4938 (3) A single move insn with the SP as destination,
4939 (4) A call_pop insn,
4940 (5) Noreturn call insns if !ACCUMULATE_OUTGOING_ARGS.
4942 Insns in the sequence that do not modify the SP are ignored,
4943 except for noreturn calls.
4945 The return value is the amount of adjustment that can be trivially
4946 verified, via immediate operand or auto-inc. If the adjustment
4947 cannot be trivially extracted, the return value is HOST_WIDE_INT_MIN. */
4950 find_args_size_adjust (rtx_insn
*insn
)
4955 pat
= PATTERN (insn
);
4958 /* Look for a call_pop pattern. */
4961 /* We have to allow non-call_pop patterns for the case
4962 of emit_single_push_insn of a TLS address. */
4963 if (GET_CODE (pat
) != PARALLEL
)
4966 /* All call_pop have a stack pointer adjust in the parallel.
4967 The call itself is always first, and the stack adjust is
4968 usually last, so search from the end. */
4969 for (i
= XVECLEN (pat
, 0) - 1; i
> 0; --i
)
4971 set
= XVECEXP (pat
, 0, i
);
4972 if (GET_CODE (set
) != SET
)
4974 dest
= SET_DEST (set
);
4975 if (dest
== stack_pointer_rtx
)
4978 /* We'd better have found the stack pointer adjust. */
4981 /* Fall through to process the extracted SET and DEST
4982 as if it was a standalone insn. */
4984 else if (GET_CODE (pat
) == SET
)
4986 else if ((set
= single_set (insn
)) != NULL
)
4988 else if (GET_CODE (pat
) == PARALLEL
)
4990 /* ??? Some older ports use a parallel with a stack adjust
4991 and a store for a PUSH_ROUNDING pattern, rather than a
4992 PRE/POST_MODIFY rtx. Don't force them to update yet... */
4993 /* ??? See h8300 and m68k, pushqi1. */
4994 for (i
= XVECLEN (pat
, 0) - 1; i
>= 0; --i
)
4996 set
= XVECEXP (pat
, 0, i
);
4997 if (GET_CODE (set
) != SET
)
4999 dest
= SET_DEST (set
);
5000 if (dest
== stack_pointer_rtx
)
5003 /* We do not expect an auto-inc of the sp in the parallel. */
5004 gcc_checking_assert (mem_autoinc_base (dest
) != stack_pointer_rtx
);
5005 gcc_checking_assert (mem_autoinc_base (SET_SRC (set
))
5006 != stack_pointer_rtx
);
5014 dest
= SET_DEST (set
);
5016 /* Look for direct modifications of the stack pointer. */
5017 if (REG_P (dest
) && REGNO (dest
) == STACK_POINTER_REGNUM
)
5019 /* Look for a trivial adjustment, otherwise assume nothing. */
5020 /* Note that the SPU restore_stack_block pattern refers to
5021 the stack pointer in V4SImode. Consider that non-trivial. */
5023 if (SCALAR_INT_MODE_P (GET_MODE (dest
))
5024 && strip_offset (SET_SRC (set
), &offset
) == stack_pointer_rtx
)
5026 /* ??? Reload can generate no-op moves, which will be cleaned
5027 up later. Recognize it and continue searching. */
5028 else if (rtx_equal_p (dest
, SET_SRC (set
)))
5031 return HOST_WIDE_INT_MIN
;
5037 /* Otherwise only think about autoinc patterns. */
5038 if (mem_autoinc_base (dest
) == stack_pointer_rtx
)
5041 gcc_checking_assert (mem_autoinc_base (SET_SRC (set
))
5042 != stack_pointer_rtx
);
5044 else if (mem_autoinc_base (SET_SRC (set
)) == stack_pointer_rtx
)
5045 mem
= SET_SRC (set
);
5049 addr
= XEXP (mem
, 0);
5050 switch (GET_CODE (addr
))
5054 return GET_MODE_SIZE (GET_MODE (mem
));
5057 return -GET_MODE_SIZE (GET_MODE (mem
));
5060 addr
= XEXP (addr
, 1);
5061 gcc_assert (GET_CODE (addr
) == PLUS
);
5062 gcc_assert (XEXP (addr
, 0) == stack_pointer_rtx
);
5063 return rtx_to_poly_int64 (XEXP (addr
, 1));
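/* Illustrative example (not from the original source): a DImode push
   expressed as (set (mem:DI (pre_dec (reg sp))) ...) is handled by the
   auto-inc cases above and reported as -GET_MODE_SIZE (DImode), i.e. -8
   on a 64-bit target, whereas an explicit "sp = sp - 32" is recognized
   earlier through the strip_offset check on the SET_SRC.  */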
5071 fixup_args_size_notes (rtx_insn
*prev
, rtx_insn
*last
,
5072 poly_int64 end_args_size
)
5074 poly_int64 args_size
= end_args_size
;
5075 bool saw_unknown
= false;
5078 for (insn
= last
; insn
!= prev
; insn
= PREV_INSN (insn
))
5080 if (!NONDEBUG_INSN_P (insn
))
5083 /* We might have existing REG_ARGS_SIZE notes, e.g. when pushing
5084 a call argument containing a TLS address that itself requires
5085 a call to __tls_get_addr. The handling of stack_pointer_delta
5086 in emit_single_push_insn is supposed to ensure that any such
5087 notes are already correct. */
5088 rtx note
= find_reg_note (insn
, REG_ARGS_SIZE
, NULL_RTX
);
5089 gcc_assert (!note
|| known_eq (args_size
, get_args_size (note
)));
5091 poly_int64 this_delta
= find_args_size_adjust (insn
);
5092 if (known_eq (this_delta
, 0))
5095 || ACCUMULATE_OUTGOING_ARGS
5096 || find_reg_note (insn
, REG_NORETURN
, NULL_RTX
) == NULL_RTX
)
5100 gcc_assert (!saw_unknown
);
5101 if (known_eq (this_delta
, HOST_WIDE_INT_MIN
))
5105 add_args_size_note (insn
, args_size
);
5106 if (STACK_GROWS_DOWNWARD
)
5107 this_delta
= -poly_uint64 (this_delta
);
5110 args_size
= HOST_WIDE_INT_MIN
;
5112 args_size
-= this_delta
;
5118 #ifdef PUSH_ROUNDING
5119 /* Emit single push insn. */
5122 emit_single_push_insn_1 (machine_mode mode
, rtx x
, tree type
)
5125 poly_int64 rounded_size
= PUSH_ROUNDING (GET_MODE_SIZE (mode
));
5127 enum insn_code icode
;
5129 /* If there is push pattern, use it. Otherwise try old way of throwing
5130 MEM representing push operation to move expander. */
5131 icode
= optab_handler (push_optab
, mode
);
5132 if (icode
!= CODE_FOR_nothing
)
5134 class expand_operand ops
[1];
5136 create_input_operand (&ops
[0], x
, mode
);
5137 if (maybe_expand_insn (icode
, 1, ops
))
5140 if (known_eq (GET_MODE_SIZE (mode
), rounded_size
))
5141 dest_addr
= gen_rtx_fmt_e (STACK_PUSH_CODE
, Pmode
, stack_pointer_rtx
);
5142 /* If we are to pad downward, adjust the stack pointer first and
5143 then store X into the stack location using an offset. This is
5144 because emit_move_insn does not know how to pad; it does not have
5146 else if (targetm
.calls
.function_arg_padding (mode
, type
) == PAD_DOWNWARD
)
5148 emit_move_insn (stack_pointer_rtx
,
5149 expand_binop (Pmode
,
5150 STACK_GROWS_DOWNWARD
? sub_optab
5153 gen_int_mode (rounded_size
, Pmode
),
5154 NULL_RTX
, 0, OPTAB_LIB_WIDEN
));
5156 poly_int64 offset
= rounded_size
- GET_MODE_SIZE (mode
);
5157 if (STACK_GROWS_DOWNWARD
&& STACK_PUSH_CODE
== POST_DEC
)
5158 /* We have already decremented the stack pointer, so get the
5160 offset
+= rounded_size
;
5162 if (!STACK_GROWS_DOWNWARD
&& STACK_PUSH_CODE
== POST_INC
)
5163 /* We have already incremented the stack pointer, so get the
5165 offset
-= rounded_size
;
5167 dest_addr
= plus_constant (Pmode
, stack_pointer_rtx
, offset
);
5171 if (STACK_GROWS_DOWNWARD
)
5172 /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
5173 dest_addr
= plus_constant (Pmode
, stack_pointer_rtx
, -rounded_size
);
5175 /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
5176 dest_addr
= plus_constant (Pmode
, stack_pointer_rtx
, rounded_size
);
5178 dest_addr
= gen_rtx_PRE_MODIFY (Pmode
, stack_pointer_rtx
, dest_addr
);
5181 dest
= gen_rtx_MEM (mode
, dest_addr
);
5185 set_mem_attributes (dest
, type
, 1);
5187 if (cfun
->tail_call_marked
)
5188 /* Function incoming arguments may overlap with sibling call
5189 outgoing arguments and we cannot allow reordering of reads
5190 from function arguments with stores to outgoing arguments
5191 of sibling calls. */
5192 set_mem_alias_set (dest
, 0);
5194 emit_move_insn (dest
, x
);
5197 /* Emit and annotate a single push insn. */
5200 emit_single_push_insn (machine_mode mode
, rtx x
, tree type
)
5202 poly_int64 delta
, old_delta
= stack_pointer_delta
;
5203 rtx_insn
*prev
= get_last_insn ();
5206 emit_single_push_insn_1 (mode
, x
, type
);
5208 /* Adjust stack_pointer_delta to describe the situation after the push
5209 we just performed. Note that we must do this after the push rather
5210 than before the push in case calculating X needs pushes and pops of
5211 its own (e.g. if calling __tls_get_addr). The REG_ARGS_SIZE notes
5212 for such pushes and pops must not include the effect of the future
5214 stack_pointer_delta
+= PUSH_ROUNDING (GET_MODE_SIZE (mode
));
5216 last
= get_last_insn ();
5218 /* Notice the common case where we emitted exactly one insn. */
5219 if (PREV_INSN (last
) == prev
)
5221 add_args_size_note (last
, stack_pointer_delta
);
5225 delta
= fixup_args_size_notes (prev
, last
, stack_pointer_delta
);
5226 gcc_assert (known_eq (delta
, HOST_WIDE_INT_MIN
)
5227 || known_eq (delta
, old_delta
));
/* If reading SIZE bytes from X will end up reading from
   Y return the number of bytes that overlap.  Return -1
   if there is no overlap or -2 if we can't determine
   (for example when X and Y have different base registers).  */

static int
memory_load_overlap (rtx x, rtx y, HOST_WIDE_INT size)
{
  rtx tmp = plus_constant (Pmode, x, size);
  rtx sub = simplify_gen_binary (MINUS, Pmode, tmp, y);

  if (!CONST_INT_P (sub))
    return -2;

  HOST_WIDE_INT val = INTVAL (sub);

  return IN_RANGE (val, 1, size) ? val : -1;
}
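/* Worked example (illustrative): with x = sp, y = sp + 8 and size = 16,
   sub simplifies to (sp + 16) - (sp + 8) = 8, which lies in [1, 16], so
   the routine reports an overlap of 8 bytes.  */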
5250 /* Generate code to push X onto the stack, assuming it has mode MODE and
5252 MODE is redundant except when X is a CONST_INT (since they don't
5254 SIZE is an rtx for the size of data to be copied (in bytes),
5255 needed only if X is BLKmode.
5256 Return true if successful. May return false if asked to push a
5257 partial argument during a sibcall optimization (as specified by
5258 SIBCALL_P) and the incoming and outgoing pointers cannot be shown
5261 ALIGN (in bits) is maximum alignment we can assume.
5263 If PARTIAL and REG are both nonzero, then copy that many of the first
5264 bytes of X into registers starting with REG, and push the rest of X.
5265 The amount of space pushed is decreased by PARTIAL bytes.
5266 REG must be a hard register in this case.
5267 If REG is zero but PARTIAL is not, take all other actions for an
5268 argument partially in registers, but do not actually load any
5271 EXTRA is the amount in bytes of extra space to leave next to this arg.
5272 This is ignored if an argument block has already been allocated.
5274 On a machine that lacks real push insns, ARGS_ADDR is the address of
5275 the bottom of the argument block for this call. We use indexing off there
5276 to store the arg. On machines with push insns, ARGS_ADDR is 0 when an
5277 argument block has not been preallocated.
5279 ARGS_SO_FAR is the size of args previously pushed for this call.
5281 REG_PARM_STACK_SPACE is nonzero if functions require stack space
5282 for arguments passed in registers. If nonzero, it will be the number
5283 of bytes required. */
5286 emit_push_insn (rtx x
, machine_mode mode
, tree type
, rtx size
,
5287 unsigned int align
, int partial
, rtx reg
, poly_int64 extra
,
5288 rtx args_addr
, rtx args_so_far
, int reg_parm_stack_space
,
5289 rtx alignment_pad
, bool sibcall_p
)
5292 pad_direction stack_direction
5293 = STACK_GROWS_DOWNWARD
? PAD_DOWNWARD
: PAD_UPWARD
;
5295 /* Decide where to pad the argument: PAD_DOWNWARD for below,
5296 PAD_UPWARD for above, or PAD_NONE for don't pad it.
5297 Default is below for small data on big-endian machines; else above. */
5298 pad_direction where_pad
= targetm
.calls
.function_arg_padding (mode
, type
);
5300 /* Invert direction if stack is post-decrement.
5302 if (STACK_PUSH_CODE
== POST_DEC
)
5303 if (where_pad
!= PAD_NONE
)
5304 where_pad
= (where_pad
== PAD_DOWNWARD
? PAD_UPWARD
: PAD_DOWNWARD
);
5308 int nregs
= partial
/ UNITS_PER_WORD
;
5309 rtx
*tmp_regs
= NULL
;
5310 int overlapping
= 0;
5313 || (STRICT_ALIGNMENT
&& align
< GET_MODE_ALIGNMENT (mode
)))
5315 /* Copy a block into the stack, entirely or partially. */
5322 offset
= partial
% (PARM_BOUNDARY
/ BITS_PER_UNIT
);
5323 used
= partial
- offset
;
5325 if (mode
!= BLKmode
)
5327 /* A value is to be stored in an insufficiently aligned
5328 stack slot; copy via a suitably aligned slot if
5330 size
= gen_int_mode (GET_MODE_SIZE (mode
), Pmode
);
5331 if (!MEM_P (xinner
))
5333 temp
= assign_temp (type
, 1, 1);
5334 emit_move_insn (temp
, xinner
);
5341 /* USED is now the # of bytes we need not copy to the stack
5342 because registers will take care of them. */
5345 xinner
= adjust_address (xinner
, BLKmode
, used
);
5347 /* If the partial register-part of the arg counts in its stack size,
5348 skip the part of stack space corresponding to the registers.
5349 Otherwise, start copying to the beginning of the stack space,
5350 by setting SKIP to 0. */
5351 skip
= (reg_parm_stack_space
== 0) ? 0 : used
;
5353 #ifdef PUSH_ROUNDING
5354 /* NB: Let the backend know the number of bytes to push and
5355 decide if push insns should be generated. */
5356 unsigned int push_size
;
5357 if (CONST_INT_P (size
))
5358 push_size
= INTVAL (size
);
5362 /* Do it with several push insns if that doesn't take lots of insns
5363 and if there is no difficulty with push insns that skip bytes
5364 on the stack for alignment purposes. */
5366 && targetm
.calls
.push_argument (push_size
)
5367 && CONST_INT_P (size
)
5369 && MEM_ALIGN (xinner
) >= align
5370 && can_move_by_pieces ((unsigned) INTVAL (size
) - used
, align
)
5371 /* Here we avoid the case of a structure whose weak alignment
5372 forces many pushes of a small amount of data,
5373 and such small pushes do rounding that causes trouble. */
5374 && ((!targetm
.slow_unaligned_access (word_mode
, align
))
5375 || align
>= BIGGEST_ALIGNMENT
5376 || known_eq (PUSH_ROUNDING (align
/ BITS_PER_UNIT
),
5377 align
/ BITS_PER_UNIT
))
5378 && known_eq (PUSH_ROUNDING (INTVAL (size
)), INTVAL (size
)))
5380 /* Push padding now if padding above and stack grows down,
5381 or if padding below and stack grows up.
5382 But if space already allocated, this has already been done. */
5383 if (maybe_ne (extra
, 0)
5385 && where_pad
!= PAD_NONE
5386 && where_pad
!= stack_direction
)
5387 anti_adjust_stack (gen_int_mode (extra
, Pmode
));
5389 move_by_pieces (NULL
, xinner
, INTVAL (size
) - used
, align
,
5393 #endif /* PUSH_ROUNDING */
5397 /* Otherwise make space on the stack and copy the data
5398 to the address of that space. */
5400 /* Deduct words put into registers from the size we must copy. */
5403 if (CONST_INT_P (size
))
5404 size
= GEN_INT (INTVAL (size
) - used
);
5406 size
= expand_binop (GET_MODE (size
), sub_optab
, size
,
5407 gen_int_mode (used
, GET_MODE (size
)),
5408 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
5411 /* Get the address of the stack space.
5412 In this case, we do not deal with EXTRA separately.
5413 A single stack adjust will do. */
5414 poly_int64 const_args_so_far
;
5417 temp
= push_block (size
, extra
, where_pad
== PAD_DOWNWARD
);
5420 else if (poly_int_rtx_p (args_so_far
, &const_args_so_far
))
5421 temp
= memory_address (BLKmode
,
5422 plus_constant (Pmode
, args_addr
,
5423 skip
+ const_args_so_far
));
5425 temp
= memory_address (BLKmode
,
5426 plus_constant (Pmode
,
5427 gen_rtx_PLUS (Pmode
,
5432 if (!ACCUMULATE_OUTGOING_ARGS
)
5434 /* If the source is referenced relative to the stack pointer,
5435 copy it to another register to stabilize it. We do not need
5436 to do this if we know that we won't be changing sp. */
5438 if (reg_mentioned_p (virtual_stack_dynamic_rtx
, temp
)
5439 || reg_mentioned_p (virtual_outgoing_args_rtx
, temp
))
5440 temp
= copy_to_reg (temp
);
5443 target
= gen_rtx_MEM (BLKmode
, temp
);
5445 /* We do *not* set_mem_attributes here, because incoming arguments
5446 may overlap with sibling call outgoing arguments and we cannot
5447 allow reordering of reads from function arguments with stores
5448 to outgoing arguments of sibling calls. We do, however, want
5449 to record the alignment of the stack slot. */
5450 /* ALIGN may well be better aligned than TYPE, e.g. due to
5451 PARM_BOUNDARY. Assume the caller isn't lying. */
5452 set_mem_align (target
, align
);
5454 /* If part should go in registers and pushing to that part would
5455 overwrite some of the values that need to go into regs, load the
5456 overlapping values into temporary pseudos to be moved into the hard
5457 regs at the end after the stack pushing has completed.
5458 We cannot load them directly into the hard regs here because
5459 they can be clobbered by the block move expansions.
5462 if (partial
> 0 && reg
!= 0 && mode
== BLKmode
5463 && GET_CODE (reg
) != PARALLEL
)
5465 overlapping
= memory_load_overlap (XEXP (x
, 0), temp
, partial
);
5466 if (overlapping
> 0)
5468 gcc_assert (overlapping
% UNITS_PER_WORD
== 0);
5469 overlapping
/= UNITS_PER_WORD
;
5471 tmp_regs
= XALLOCAVEC (rtx
, overlapping
);
5473 for (int i
= 0; i
< overlapping
; i
++)
5474 tmp_regs
[i
] = gen_reg_rtx (word_mode
);
5476 for (int i
= 0; i
< overlapping
; i
++)
5477 emit_move_insn (tmp_regs
[i
],
5478 operand_subword_force (target
, i
, mode
));
5480 else if (overlapping
== -1)
5482 /* Could not determine whether there is overlap.
5483 Fail the sibcall. */
5492 /* If source is a constant VAR_DECL with a simple constructor,
5493 store the constructor to the stack instead of moving it. */
5498 && SYMBOL_REF_P (XEXP (xinner
, 0))
5499 && (decl
= SYMBOL_REF_DECL (XEXP (xinner
, 0))) != NULL_TREE
5501 && TREE_READONLY (decl
)
5502 && !TREE_SIDE_EFFECTS (decl
)
5503 && immediate_const_ctor_p (DECL_INITIAL (decl
), 2)
5504 && (sz
= int_expr_size (DECL_INITIAL (decl
))) > 0
5505 && CONST_INT_P (size
)
5506 && INTVAL (size
) == sz
)
5507 store_constructor (DECL_INITIAL (decl
), target
, 0, sz
, false);
5509 emit_block_move (target
, xinner
, size
, BLOCK_OP_CALL_PARM
);
5512 else if (partial
> 0)
5514 /* Scalar partly in registers. This case is only supported
5515 for fixed-width modes. */
5516 int num_words
= GET_MODE_SIZE (mode
).to_constant ();
5517 num_words
/= UNITS_PER_WORD
;
5520 /* # bytes of start of argument
5521 that we must make space for but need not store. */
5522 int offset
= partial
% (PARM_BOUNDARY
/ BITS_PER_UNIT
);
5523 int args_offset
= INTVAL (args_so_far
);
5526 /* Push padding now if padding above and stack grows down,
5527 or if padding below and stack grows up.
5528 But if space already allocated, this has already been done. */
5529 if (maybe_ne (extra
, 0)
5531 && where_pad
!= PAD_NONE
5532 && where_pad
!= stack_direction
)
5533 anti_adjust_stack (gen_int_mode (extra
, Pmode
));
5535 /* If we make space by pushing it, we might as well push
5536 the real data. Otherwise, we can leave OFFSET nonzero
5537 and leave the space uninitialized. */
5541 /* Now NOT_STACK gets the number of words that we don't need to
5542 allocate on the stack. Convert OFFSET to words too. */
5543 not_stack
= (partial
- offset
) / UNITS_PER_WORD
;
5544 offset
/= UNITS_PER_WORD
;
5546 /* If the partial register-part of the arg counts in its stack size,
5547 skip the part of stack space corresponding to the registers.
5548 Otherwise, start copying to the beginning of the stack space,
5549 by setting SKIP to 0. */
5550 skip
= (reg_parm_stack_space
== 0) ? 0 : not_stack
;
5552 if (CONSTANT_P (x
) && !targetm
.legitimate_constant_p (mode
, x
))
5553 x
= validize_mem (force_const_mem (mode
, x
));
5555 /* If X is a hard register in a non-integer mode, copy it into a pseudo;
5556 SUBREGs of such registers are not allowed. */
5557 if ((REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
5558 && GET_MODE_CLASS (GET_MODE (x
)) != MODE_INT
))
5559 x
= copy_to_reg (x
);
5561 /* Loop over all the words allocated on the stack for this arg. */
5562 /* We can do it by words, because any scalar bigger than a word
5563 has a size a multiple of a word. */
5564 tree word_mode_type
= lang_hooks
.types
.type_for_mode (word_mode
, 1);
5565 for (i
= num_words
- 1; i
>= not_stack
; i
--)
5566 if (i
>= not_stack
+ offset
)
5567 if (!emit_push_insn (operand_subword_force (x
, i
, mode
),
5568 word_mode
, word_mode_type
, NULL_RTX
, align
, 0,
5569 NULL_RTX
, 0, args_addr
,
5570 GEN_INT (args_offset
+ ((i
- not_stack
+ skip
)
5572 reg_parm_stack_space
, alignment_pad
, sibcall_p
))
5580 /* Push padding now if padding above and stack grows down,
5581 or if padding below and stack grows up.
5582 But if space already allocated, this has already been done. */
5583 if (maybe_ne (extra
, 0)
5585 && where_pad
!= PAD_NONE
5586 && where_pad
!= stack_direction
)
5587 anti_adjust_stack (gen_int_mode (extra
, Pmode
));
5589 #ifdef PUSH_ROUNDING
5590 if (args_addr
== 0 && targetm
.calls
.push_argument (0))
5591 emit_single_push_insn (mode
, x
, type
);
5595 addr
= simplify_gen_binary (PLUS
, Pmode
, args_addr
, args_so_far
);
5596 dest
= gen_rtx_MEM (mode
, memory_address (mode
, addr
));
5598 /* We do *not* set_mem_attributes here, because incoming arguments
5599 may overlap with sibling call outgoing arguments and we cannot
5600 allow reordering of reads from function arguments with stores
5601 to outgoing arguments of sibling calls. We do, however, want
5602 to record the alignment of the stack slot. */
5603 /* ALIGN may well be better aligned than TYPE, e.g. due to
5604 PARM_BOUNDARY. Assume the caller isn't lying. */
5605 set_mem_align (dest
, align
);
5607 emit_move_insn (dest
, x
);
5611 /* Move the partial arguments into the registers and any overlapping
5612 values that we moved into the pseudos in tmp_regs. */
5613 if (partial
> 0 && reg
!= 0)
5615 /* Handle calls that pass values in multiple non-contiguous locations.
5616 The Irix 6 ABI has examples of this. */
5617 if (GET_CODE (reg
) == PARALLEL
)
5618 emit_group_load (reg
, x
, type
, -1);
5621 gcc_assert (partial
% UNITS_PER_WORD
== 0);
5622 move_block_to_reg (REGNO (reg
), x
, nregs
- overlapping
, mode
);
5624 for (int i
= 0; i
< overlapping
; i
++)
5625 emit_move_insn (gen_rtx_REG (word_mode
, REGNO (reg
)
5626 + nregs
- overlapping
+ i
),
5632 if (maybe_ne (extra
, 0) && args_addr
== 0 && where_pad
== stack_direction
)
5633 anti_adjust_stack (gen_int_mode (extra
, Pmode
));
5635 if (alignment_pad
&& args_addr
== 0)
5636 anti_adjust_stack (alignment_pad
);
/* Return X if X can be used as a subtarget in a sequence of arithmetic
   operations.  */

static rtx
get_subtarget (rtx x)
{
  return (optimize
	  || x == 0
	  /* Only registers can be subtargets.  */
	  || !REG_P (x)
	  /* Don't use hard regs to avoid extending their life.  */
	  || REGNO (x) < FIRST_PSEUDO_REGISTER
	  ? 0 : x);
}
5656 /* A subroutine of expand_assignment. Optimize FIELD op= VAL, where
5657 FIELD is a bitfield. Returns true if the optimization was successful,
5658 and there's nothing else to do. */
5661 optimize_bitfield_assignment_op (poly_uint64 pbitsize
,
5662 poly_uint64 pbitpos
,
5663 poly_uint64 pbitregion_start
,
5664 poly_uint64 pbitregion_end
,
5665 machine_mode mode1
, rtx str_rtx
,
5666 tree to
, tree src
, bool reverse
)
5668 /* str_mode is not guaranteed to be a scalar type. */
5669 machine_mode str_mode
= GET_MODE (str_rtx
);
5670 unsigned int str_bitsize
;
5675 enum tree_code code
;
5677 unsigned HOST_WIDE_INT bitsize
, bitpos
, bitregion_start
, bitregion_end
;
5678 if (mode1
!= VOIDmode
5679 || !pbitsize
.is_constant (&bitsize
)
5680 || !pbitpos
.is_constant (&bitpos
)
5681 || !pbitregion_start
.is_constant (&bitregion_start
)
5682 || !pbitregion_end
.is_constant (&bitregion_end
)
5683 || bitsize
>= BITS_PER_WORD
5684 || !GET_MODE_BITSIZE (str_mode
).is_constant (&str_bitsize
)
5685 || str_bitsize
> BITS_PER_WORD
5686 || TREE_SIDE_EFFECTS (to
)
5687 || TREE_THIS_VOLATILE (to
))
5691 if (TREE_CODE (src
) != SSA_NAME
)
5693 if (TREE_CODE (TREE_TYPE (src
)) != INTEGER_TYPE
)
5696 srcstmt
= get_gimple_for_ssa_name (src
);
5698 || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt
)) != tcc_binary
)
5701 code
= gimple_assign_rhs_code (srcstmt
);
5703 op0
= gimple_assign_rhs1 (srcstmt
);
5705 /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
5706 to find its initialization. Hopefully the initialization will
5707 be from a bitfield load. */
5708 if (TREE_CODE (op0
) == SSA_NAME
)
5710 gimple
*op0stmt
= get_gimple_for_ssa_name (op0
);
5712 /* We want to eventually have OP0 be the same as TO, which
5713 should be a bitfield. */
5715 || !is_gimple_assign (op0stmt
)
5716 || gimple_assign_rhs_code (op0stmt
) != TREE_CODE (to
))
5718 op0
= gimple_assign_rhs1 (op0stmt
);
5721 op1
= gimple_assign_rhs2 (srcstmt
);
5723 if (!operand_equal_p (to
, op0
, 0))
5726 if (MEM_P (str_rtx
))
5728 unsigned HOST_WIDE_INT offset1
;
5730 if (str_bitsize
== 0 || str_bitsize
> BITS_PER_WORD
)
5731 str_bitsize
= BITS_PER_WORD
;
5733 scalar_int_mode best_mode
;
5734 if (!get_best_mode (bitsize
, bitpos
, bitregion_start
, bitregion_end
,
5735 MEM_ALIGN (str_rtx
), str_bitsize
, false, &best_mode
))
5737 str_mode
= best_mode
;
5738 str_bitsize
= GET_MODE_BITSIZE (best_mode
);
5741 bitpos
%= str_bitsize
;
5742 offset1
= (offset1
- bitpos
) / BITS_PER_UNIT
;
5743 str_rtx
= adjust_address (str_rtx
, str_mode
, offset1
);
5745 else if (!REG_P (str_rtx
) && GET_CODE (str_rtx
) != SUBREG
)
5748 /* If the bit field covers the whole REG/MEM, store_field
5749 will likely generate better code. */
5750 if (bitsize
>= str_bitsize
)
5753 /* We can't handle fields split across multiple entities. */
5754 if (bitpos
+ bitsize
> str_bitsize
)
5757 if (reverse
? !BYTES_BIG_ENDIAN
: BYTES_BIG_ENDIAN
)
5758 bitpos
= str_bitsize
- bitpos
- bitsize
;
5764 /* For now, just optimize the case of the topmost bitfield
5765 where we don't need to do any masking and also
5766 1 bit bitfields where xor can be used.
5767 We might win by one instruction for the other bitfields
5768 too if insv/extv instructions aren't used, so that
5769 can be added later. */
5770 if ((reverse
|| bitpos
+ bitsize
!= str_bitsize
)
5771 && (bitsize
!= 1 || TREE_CODE (op1
) != INTEGER_CST
))
5774 value
= expand_expr (op1
, NULL_RTX
, str_mode
, EXPAND_NORMAL
);
5775 value
= convert_modes (str_mode
,
5776 TYPE_MODE (TREE_TYPE (op1
)), value
,
5777 TYPE_UNSIGNED (TREE_TYPE (op1
)));
5779 /* We may be accessing data outside the field, which means
5780 we can alias adjacent data. */
5781 if (MEM_P (str_rtx
))
5783 str_rtx
= shallow_copy_rtx (str_rtx
);
5784 set_mem_alias_set (str_rtx
, 0);
5785 set_mem_expr (str_rtx
, 0);
5788 if (bitsize
== 1 && (reverse
|| bitpos
+ bitsize
!= str_bitsize
))
5790 value
= expand_and (str_mode
, value
, const1_rtx
, NULL
);
5794 binop
= code
== PLUS_EXPR
? add_optab
: sub_optab
;
5796 value
= expand_shift (LSHIFT_EXPR
, str_mode
, value
, bitpos
, NULL_RTX
, 1);
5798 value
= flip_storage_order (str_mode
, value
);
5799 result
= expand_binop (str_mode
, binop
, str_rtx
,
5800 value
, str_rtx
, 1, OPTAB_WIDEN
);
5801 if (result
!= str_rtx
)
5802 emit_move_insn (str_rtx
, result
);
5807 if (TREE_CODE (op1
) != INTEGER_CST
)
5809 value
= expand_expr (op1
, NULL_RTX
, str_mode
, EXPAND_NORMAL
);
5810 value
= convert_modes (str_mode
,
5811 TYPE_MODE (TREE_TYPE (op1
)), value
,
5812 TYPE_UNSIGNED (TREE_TYPE (op1
)));
5814 /* We may be accessing data outside the field, which means
5815 we can alias adjacent data. */
5816 if (MEM_P (str_rtx
))
5818 str_rtx
= shallow_copy_rtx (str_rtx
);
5819 set_mem_alias_set (str_rtx
, 0);
5820 set_mem_expr (str_rtx
, 0);
5823 binop
= code
== BIT_IOR_EXPR
? ior_optab
: xor_optab
;
5824 if (bitpos
+ bitsize
!= str_bitsize
)
5826 rtx mask
= gen_int_mode ((HOST_WIDE_INT_1U
<< bitsize
) - 1,
5828 value
= expand_and (str_mode
, value
, mask
, NULL_RTX
);
5830 value
= expand_shift (LSHIFT_EXPR
, str_mode
, value
, bitpos
, NULL_RTX
, 1);
5832 value
= flip_storage_order (str_mode
, value
);
5833 result
= expand_binop (str_mode
, binop
, str_rtx
,
5834 value
, str_rtx
, 1, OPTAB_WIDEN
);
5835 if (result
!= str_rtx
)
5836 emit_move_insn (str_rtx
, result
);
5846 /* In the C++ memory model, consecutive bit fields in a structure are
5847 considered one memory location.
5849 Given a COMPONENT_REF EXP at position (BITPOS, OFFSET), this function
5850 returns the bit range of consecutive bits in which this COMPONENT_REF
5851 belongs. The values are returned in *BITSTART and *BITEND. *BITPOS
5852 and *OFFSET may be adjusted in the process.
5854 If the access does not need to be restricted, 0 is returned in both
5855 *BITSTART and *BITEND. */
5858 get_bit_range (poly_uint64
*bitstart
, poly_uint64
*bitend
, tree exp
,
5859 poly_int64
*bitpos
, tree
*offset
)
5861 poly_int64 bitoffset
;
5864 gcc_assert (TREE_CODE (exp
) == COMPONENT_REF
);
5866 field
= TREE_OPERAND (exp
, 1);
5867 repr
= DECL_BIT_FIELD_REPRESENTATIVE (field
);
5868 /* If we do not have a DECL_BIT_FIELD_REPRESENTATIVE there is no
5869 need to limit the range we can access. */
5872 *bitstart
= *bitend
= 0;
5876 /* If we have a DECL_BIT_FIELD_REPRESENTATIVE but the enclosing record is
5877 part of a larger bit field, then the representative does not serve any
5878 useful purpose. This can occur in Ada. */
5879 if (handled_component_p (TREE_OPERAND (exp
, 0)))
5882 poly_int64 rbitsize
, rbitpos
;
5884 int unsignedp
, reversep
, volatilep
= 0;
5885 get_inner_reference (TREE_OPERAND (exp
, 0), &rbitsize
, &rbitpos
,
5886 &roffset
, &rmode
, &unsignedp
, &reversep
,
5888 if (!multiple_p (rbitpos
, BITS_PER_UNIT
))
5890 *bitstart
= *bitend
= 0;
5895 /* Compute the adjustment to bitpos from the offset of the field
5896 relative to the representative. DECL_FIELD_OFFSET of field and
5897 repr are the same by construction if they are not constants,
5898 see finish_bitfield_layout. */
5899 poly_uint64 field_offset
, repr_offset
;
5900 if (poly_int_tree_p (DECL_FIELD_OFFSET (field
), &field_offset
)
5901 && poly_int_tree_p (DECL_FIELD_OFFSET (repr
), &repr_offset
))
5902 bitoffset
= (field_offset
- repr_offset
) * BITS_PER_UNIT
;
5905 bitoffset
+= (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field
))
5906 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr
)));
5908 /* If the adjustment is larger than bitpos, we would have a negative bit
5909 position for the lower bound and this may wreak havoc later. Adjust
5910 offset and bitpos to make the lower bound non-negative in that case. */
5911 if (maybe_gt (bitoffset
, *bitpos
))
5913 poly_int64 adjust_bits
= upper_bound (bitoffset
, *bitpos
) - *bitpos
;
5914 poly_int64 adjust_bytes
= exact_div (adjust_bits
, BITS_PER_UNIT
);
5916 *bitpos
+= adjust_bits
;
5917 if (*offset
== NULL_TREE
)
5918 *offset
= size_int (-adjust_bytes
);
5920 *offset
= size_binop (MINUS_EXPR
, *offset
, size_int (adjust_bytes
));
5924 *bitstart
= *bitpos
- bitoffset
;
5926 *bitend
= *bitstart
+ tree_to_poly_uint64 (DECL_SIZE (repr
)) - 1;
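/* Illustrative example (not from the original source): for
   struct S { int a : 3; int b : 5; char c; };
   the bit-field representative of A and B typically covers bits 0..7 of
   the record, so a store to B gets *BITSTART/*BITEND limited to that
   byte and is never allowed to touch C, as the C++ memory model
   described above requires.  */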
/* Returns true if BASE is a DECL that does not reside in memory and
   has non-BLKmode.  DECL_RTL must not be a MEM; if
   DECL_RTL was not set yet, return false.  */

static bool
non_mem_decl_p (tree base)
{
  if (!DECL_P (base)
      || TREE_ADDRESSABLE (base)
      || DECL_MODE (base) == BLKmode)
    return false;

  if (!DECL_RTL_SET_P (base))
    return false;

  return (!MEM_P (DECL_RTL (base)));
}
/* Returns true if REF refers to an object that does not
   reside in memory and has non-BLKmode.  */

static bool
mem_ref_refers_to_non_mem_p (tree ref)
{
  tree base;

  if (TREE_CODE (ref) == MEM_REF
      || TREE_CODE (ref) == TARGET_MEM_REF)
    {
      tree addr = TREE_OPERAND (ref, 0);

      if (TREE_CODE (addr) != ADDR_EXPR)
	return false;

      base = TREE_OPERAND (addr, 0);
    }
  else
    base = ref;

  return non_mem_decl_p (base);
}
/* Expand an assignment that stores the value of FROM into TO.  If NONTEMPORAL
   is true, try generating a nontemporal store.  */

void
expand_assignment (tree to, tree from, bool nontemporal)
{
  rtx to_rtx = 0;
  rtx result;
  machine_mode mode;
  unsigned int align;
  enum insn_code icode;

  /* Don't crash if the lhs of the assignment was erroneous.  */
  if (TREE_CODE (to) == ERROR_MARK)
    {
      expand_normal (from);
      return;
    }

  /* Optimize away no-op moves without side-effects.  */
  if (operand_equal_p (to, from, 0))
    return;

  /* Handle misaligned stores.  */
  mode = TYPE_MODE (TREE_TYPE (to));
  if ((TREE_CODE (to) == MEM_REF
       || TREE_CODE (to) == TARGET_MEM_REF)
      && !mem_ref_refers_to_non_mem_p (to)
      && ((align = get_object_alignment (to))
	  < GET_MODE_ALIGNMENT (mode))
      && (((icode = optab_handler (movmisalign_optab, mode))
	   != CODE_FOR_nothing)
	  || targetm.slow_unaligned_access (mode, align)))
    {
      rtx reg, mem;

      reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      /* Handle PARALLEL.  */
      reg = maybe_emit_group_store (reg, TREE_TYPE (from));
      reg = force_not_mem (reg);
      mem = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (TREE_CODE (to) == MEM_REF && REF_REVERSE_STORAGE_ORDER (to))
	reg = flip_storage_order (mode, reg);

      if (icode != CODE_FOR_nothing)
	{
	  class expand_operand ops[2];

	  create_fixed_operand (&ops[0], mem);
	  create_input_operand (&ops[1], reg, mode);
	  /* The movmisalign<mode> pattern cannot fail, else the assignment
	     would silently be omitted.  */
	  expand_insn (icode, 2, ops);
	}
      else
	store_bit_field (mem, GET_MODE_BITSIZE (mode), 0, 0, 0, mode, reg,
			 false, false);
      return;
    }
  /* Assignment of a structure component needs special treatment
     if the structure component's rtx is not simply a MEM.
     Assignment of an array element at a constant index, and assignment of
     an array element in an unaligned packed structure field, has the same
     problem.  Same for (partially) storing into a non-memory object.  */
  if (handled_component_p (to)
      || (TREE_CODE (to) == MEM_REF
	  && (REF_REVERSE_STORAGE_ORDER (to)
	      || mem_ref_refers_to_non_mem_p (to)))
      || TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
    {
      machine_mode mode1;
      poly_int64 bitsize, bitpos;
      poly_uint64 bitregion_start = 0;
      poly_uint64 bitregion_end = 0;
      tree offset;
      int unsignedp, reversep, volatilep = 0;
      tree tem;

      push_temp_slots ();
      tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
				 &unsignedp, &reversep, &volatilep);

      /* Make sure bitpos is not negative, it can wreak havoc later.  */
      if (maybe_lt (bitpos, 0))
	{
	  gcc_assert (offset == NULL_TREE);
	  offset = size_int (bits_to_bytes_round_down (bitpos));
	  bitpos = num_trailing_bits (bitpos);
	}

      if (TREE_CODE (to) == COMPONENT_REF
	  && DECL_BIT_FIELD_TYPE (TREE_OPERAND (to, 1)))
	get_bit_range (&bitregion_start, &bitregion_end, to, &bitpos, &offset);
      /* The C++ memory model naturally applies to byte-aligned fields.
	 However, if we do not have a DECL_BIT_FIELD_TYPE but BITPOS or
	 BITSIZE are not byte-aligned, there is no need to limit the range
	 we can access.  This can occur with packed structures in Ada.  */
      else if (maybe_gt (bitsize, 0)
	       && multiple_p (bitsize, BITS_PER_UNIT)
	       && multiple_p (bitpos, BITS_PER_UNIT))
	{
	  bitregion_start = bitpos;
	  bitregion_end = bitpos + bitsize - 1;
	}

      to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_WRITE);

      /* If the field has a mode, we want to access it in the
	 field's mode, not the computed mode.
	 If a MEM has VOIDmode (external with incomplete type),
	 use BLKmode for it instead.  */
      if (MEM_P (to_rtx))
	{
	  if (mode1 != VOIDmode)
	    to_rtx = adjust_address (to_rtx, mode1, 0);
	  else if (GET_MODE (to_rtx) == VOIDmode)
	    to_rtx = adjust_address (to_rtx, BLKmode, 0);
	}

      rtx stemp = NULL_RTX, old_to_rtx = NULL_RTX;
      if (offset != 0)
	{
	  machine_mode address_mode;
	  rtx offset_rtx;

	  if (!MEM_P (to_rtx))
	    {
	      /* We can get constant negative offsets into arrays with broken
		 user code.  Translate this to a trap instead of ICEing.  */
	      if (TREE_CODE (offset) == INTEGER_CST)
		{
		  expand_builtin_trap ();
		  to_rtx = gen_rtx_MEM (BLKmode, const0_rtx);
		}
	      /* Else spill for variable offset to the destination.  We expect
		 to run into this only for hard registers.  */
	      else
		{
		  gcc_assert (VAR_P (tem) && DECL_HARD_REGISTER (tem));
		  stemp = assign_stack_temp (GET_MODE (to_rtx),
					     GET_MODE_SIZE (GET_MODE (to_rtx)));
		  emit_move_insn (stemp, to_rtx);
		  old_to_rtx = to_rtx;
		  to_rtx = stemp;
		}
	    }

	  offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
	  address_mode = get_address_mode (to_rtx);
	  if (GET_MODE (offset_rtx) != address_mode)
	    {
	      /* We cannot be sure that the RTL in offset_rtx is valid outside
		 of a memory address context, so force it into a register
		 before attempting to convert it to the desired mode.  */
	      offset_rtx = force_operand (offset_rtx, NULL_RTX);
	      offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
	    }

	  /* If we have an expression in OFFSET_RTX and a non-zero
	     byte offset in BITPOS, adding the byte offset before the
	     OFFSET_RTX results in better intermediate code, which makes
	     later rtl optimization passes perform better.

	     We prefer intermediate code like this:

	     r124:DI=r123:DI+0x18
	     [r124:DI]=r121:DI

	     ... instead of ...

	     r124:DI=r123:DI+0x10
	     [r124:DI+0x8]=r121:DI

	     This is only done for aligned data values, as these can
	     be expected to result in single move instructions.  */
	  poly_int64 bytepos;
	  if (mode1 != VOIDmode
	      && maybe_ne (bitpos, 0)
	      && maybe_gt (bitsize, 0)
	      && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
	      && multiple_p (bitpos, bitsize)
	      && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
	      && MEM_ALIGN (to_rtx) >= GET_MODE_ALIGNMENT (mode1))
	    {
	      to_rtx = adjust_address (to_rtx, mode1, bytepos);
	      bitregion_start = 0;
	      if (known_ge (bitregion_end, poly_uint64 (bitpos)))
		bitregion_end -= bitpos;
	      bitpos = 0;
	    }

	  to_rtx = offset_address (to_rtx, offset_rtx,
				   highest_pow2_factor_for_target (to,
								   offset));
	}

      /* No action is needed if the target is not a memory and the field
	 lies completely outside that target.  This can occur if the source
	 code contains an out-of-bounds access to a small array.  */
      if (!MEM_P (to_rtx)
	  && GET_MODE (to_rtx) != BLKmode
	  && known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (to_rtx))))
	{
	  expand_normal (from);
	  result = NULL;
	}
      /* Handle expand_expr of a complex value returning a CONCAT.  */
      else if (GET_CODE (to_rtx) == CONCAT)
	{
	  machine_mode to_mode = GET_MODE (to_rtx);
	  gcc_checking_assert (COMPLEX_MODE_P (to_mode));
	  poly_int64 mode_bitsize = GET_MODE_BITSIZE (to_mode);
	  unsigned short inner_bitsize = GET_MODE_UNIT_BITSIZE (to_mode);
	  if (TYPE_MODE (TREE_TYPE (from)) == to_mode
	      && known_eq (bitpos, 0)
	      && known_eq (bitsize, mode_bitsize))
	    result = store_expr (from, to_rtx, false, nontemporal, reversep);
	  else if (TYPE_MODE (TREE_TYPE (from)) == GET_MODE_INNER (to_mode)
		   && known_eq (bitsize, inner_bitsize)
		   && (known_eq (bitpos, 0)
		       || known_eq (bitpos, inner_bitsize)))
	    result = store_expr (from, XEXP (to_rtx, maybe_ne (bitpos, 0)),
				 false, nontemporal, reversep);
	  else if (known_le (bitpos + bitsize, inner_bitsize))
	    result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
				  bitregion_start, bitregion_end,
				  mode1, from, get_alias_set (to),
				  nontemporal, reversep);
	  else if (known_ge (bitpos, inner_bitsize))
	    result = store_field (XEXP (to_rtx, 1), bitsize,
				  bitpos - inner_bitsize,
				  bitregion_start, bitregion_end,
				  mode1, from, get_alias_set (to),
				  nontemporal, reversep);
	  else if (known_eq (bitpos, 0) && known_eq (bitsize, mode_bitsize))
	    {
	      result = expand_normal (from);
	      if (GET_CODE (result) == CONCAT)
		{
		  to_mode = GET_MODE_INNER (to_mode);
		  machine_mode from_mode = GET_MODE_INNER (GET_MODE (result));
		  rtx from_real
		    = force_subreg (to_mode, XEXP (result, 0), from_mode, 0);
		  rtx from_imag
		    = force_subreg (to_mode, XEXP (result, 1), from_mode, 0);
		  if (!from_real || !from_imag)
		    goto concat_store_slow;
		  emit_move_insn (XEXP (to_rtx, 0), from_real);
		  emit_move_insn (XEXP (to_rtx, 1), from_imag);
		}
	      else
		{
		  machine_mode from_mode
		    = GET_MODE (result) == VOIDmode
		      ? TYPE_MODE (TREE_TYPE (from))
		      : GET_MODE (result);
		  rtx from_rtx;
		  if (MEM_P (result))
		    from_rtx = change_address (result, to_mode, NULL_RTX);
		  else
		    from_rtx = force_subreg (to_mode, result, from_mode, 0);
		  if (from_rtx)
		    {
		      emit_move_insn (XEXP (to_rtx, 0),
				      read_complex_part (from_rtx, false));
		      emit_move_insn (XEXP (to_rtx, 1),
				      read_complex_part (from_rtx, true));
		    }
		  else
		    {
		      to_mode = GET_MODE_INNER (to_mode);
		      rtx from_real
			= force_subreg (to_mode, result, from_mode, 0);
		      rtx from_imag
			= force_subreg (to_mode, result, from_mode,
					GET_MODE_SIZE (to_mode));
		      if (!from_real || !from_imag)
			goto concat_store_slow;
		      emit_move_insn (XEXP (to_rtx, 0), from_real);
		      emit_move_insn (XEXP (to_rtx, 1), from_imag);
		    }
		}
	    }
	  else
	    {
	    concat_store_slow:;
	      rtx temp = assign_stack_temp (GET_MODE (to_rtx),
					    GET_MODE_SIZE (GET_MODE (to_rtx)));
	      write_complex_part (temp, XEXP (to_rtx, 0), false, true);
	      write_complex_part (temp, XEXP (to_rtx, 1), true, false);
	      result = store_field (temp, bitsize, bitpos,
				    bitregion_start, bitregion_end,
				    mode1, from, get_alias_set (to),
				    nontemporal, reversep);
	      emit_move_insn (XEXP (to_rtx, 0), read_complex_part (temp, false));
	      emit_move_insn (XEXP (to_rtx, 1), read_complex_part (temp, true));
	    }
	}
      /* For calls to functions returning variable length structures, if TO_RTX
	 is not a MEM, go through a MEM because we must not create temporaries
	 of the variable-sized type.  */
      else if (!MEM_P (to_rtx)
	       && TREE_CODE (from) == CALL_EXPR
	       && COMPLETE_TYPE_P (TREE_TYPE (from))
	       && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) != INTEGER_CST)
	{
	  rtx temp = assign_stack_temp (GET_MODE (to_rtx),
					GET_MODE_SIZE (GET_MODE (to_rtx)));
	  result = store_field (temp, bitsize, bitpos, bitregion_start,
				bitregion_end, mode1, from, get_alias_set (to),
				nontemporal, reversep);
	  emit_move_insn (to_rtx, temp);
	}
      else
	{
	  if (MEM_P (to_rtx))
	    {
	      /* If the field is at offset zero, we could have been given the
		 DECL_RTX of the parent struct.  Don't munge it.  */
	      to_rtx = shallow_copy_rtx (to_rtx);
	      set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
	      if (volatilep)
		MEM_VOLATILE_P (to_rtx) = 1;
	    }

	  gcc_checking_assert (known_ge (bitpos, 0));
	  if (optimize_bitfield_assignment_op (bitsize, bitpos,
					       bitregion_start, bitregion_end,
					       mode1, to_rtx, to, from,
					       reversep))
	    result = NULL;
	  else if (SUBREG_P (to_rtx)
		   && SUBREG_PROMOTED_VAR_P (to_rtx))
	    {
	      /* If to_rtx is a promoted subreg, we need to zero or sign
		 extend the value afterwards.  */
	      if (TREE_CODE (to) == MEM_REF
		  && TYPE_MODE (TREE_TYPE (from)) != BLKmode
		  && !REF_REVERSE_STORAGE_ORDER (to)
		  && known_eq (bitpos, 0)
		  && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (to_rtx))))
		result = store_expr (from, to_rtx, 0, nontemporal, false);
	      /* Check if the field overlaps the MSB, requiring extension.  */
	      else if (maybe_eq (bitpos + bitsize,
				 GET_MODE_BITSIZE (GET_MODE (to_rtx))))
		{
		  scalar_int_mode imode = subreg_unpromoted_mode (to_rtx);
		  scalar_int_mode omode = subreg_promoted_mode (to_rtx);
		  rtx to_rtx1 = lowpart_subreg (imode, SUBREG_REG (to_rtx),
						omode);
		  result = store_field (to_rtx1, bitsize, bitpos,
					bitregion_start, bitregion_end,
					mode1, from, get_alias_set (to),
					nontemporal, reversep);
		  /* If the target usually keeps IMODE appropriately
		     extended in OMODE it's unsafe to refer to it using
		     a SUBREG whilst this invariant doesn't hold.  */
		  if (targetm.mode_rep_extended (imode, omode) != UNKNOWN)
		    to_rtx1 = simplify_gen_unary (TRUNCATE, imode,
						  SUBREG_REG (to_rtx), omode);
		  convert_move (SUBREG_REG (to_rtx), to_rtx1,
				SUBREG_PROMOTED_SIGN (to_rtx));
		}
	      else
		result = store_field (to_rtx, bitsize, bitpos,
				      bitregion_start, bitregion_end,
				      mode1, from, get_alias_set (to),
				      nontemporal, reversep);
	    }
	  else
	    result = store_field (to_rtx, bitsize, bitpos,
				  bitregion_start, bitregion_end,
				  mode1, from, get_alias_set (to),
				  nontemporal, reversep);
	  /* Move the temporary storage back to the non-MEM_P.  */
	  if (stemp)
	    emit_move_insn (old_to_rtx, stemp);
	}

      if (result)
	preserve_temp_slots (result);
      free_temp_slots ();
      pop_temp_slots ();
      return;
    }
  /* If the rhs is a function call and its value is not an aggregate,
     call the function before we start to compute the lhs.
     This is needed for correct code for cases such as
     val = setjmp (buf) on machines where reference to val
     requires loading up part of an address in a separate insn.

     Don't do this if TO is a VAR_DECL or PARM_DECL whose DECL_RTL is REG
     since it might be a promoted variable where the zero- or sign- extension
     needs to be done.  Handling this in the normal way is safe because no
     computation is done before the call.  The same is true for SSA names.  */
  if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from, from)
      && COMPLETE_TYPE_P (TREE_TYPE (from))
      && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST
      && ! (((VAR_P (to)
	      || TREE_CODE (to) == PARM_DECL
	      || TREE_CODE (to) == RESULT_DECL)
	     && REG_P (DECL_RTL (to)))
	    || TREE_CODE (to) == SSA_NAME))
    {
      rtx value;

      push_temp_slots ();
      value = expand_normal (from);

      if (to_rtx == 0)
	to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);

      /* Handle calls that return values in multiple non-contiguous locations.
	 The Irix 6 ABI has examples of this.  */
      if (GET_CODE (to_rtx) == PARALLEL)
	{
	  if (GET_CODE (value) == PARALLEL)
	    emit_group_move (to_rtx, value);
	  else
	    emit_group_load (to_rtx, value, TREE_TYPE (from),
			     int_size_in_bytes (TREE_TYPE (from)));
	}
      else if (GET_CODE (value) == PARALLEL)
	emit_group_store (to_rtx, value, TREE_TYPE (from),
			  int_size_in_bytes (TREE_TYPE (from)));
      else if (GET_MODE (to_rtx) == BLKmode)
	{
	  /* Handle calls that return BLKmode values in registers.  */
	  if (REG_P (value))
	    copy_blkmode_from_reg (to_rtx, value, TREE_TYPE (from));
	  else
	    emit_block_move (to_rtx, value, expr_size (from), BLOCK_OP_NORMAL);
	}
      else
	{
	  if (POINTER_TYPE_P (TREE_TYPE (to)))
	    value = convert_memory_address_addr_space
	      (as_a <scalar_int_mode> (GET_MODE (to_rtx)), value,
	       TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (to))));

	  emit_move_insn (to_rtx, value);
	}

      preserve_temp_slots (to_rtx);
      pop_temp_slots ();
      return;
    }

  /* Ordinary treatment.  Expand TO to get a REG or MEM rtx.  */
  to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);

  /* Don't move directly into a return register.  */
  if (TREE_CODE (to) == RESULT_DECL
      && (REG_P (to_rtx) || GET_CODE (to_rtx) == PARALLEL))
    {
      rtx temp;

      push_temp_slots ();

      /* If the source is itself a return value, it still is in a pseudo at
	 this point so we can move it back to the return register directly.  */
      if (REG_P (to_rtx)
	  && TYPE_MODE (TREE_TYPE (from)) == BLKmode
	  && TREE_CODE (from) != CALL_EXPR)
	temp = copy_blkmode_to_reg (GET_MODE (to_rtx), from);
      else
	temp = expand_expr (from, NULL_RTX, GET_MODE (to_rtx), EXPAND_NORMAL);

      /* Handle calls that return values in multiple non-contiguous locations.
	 The Irix 6 ABI has examples of this.  */
      if (GET_CODE (to_rtx) == PARALLEL)
	{
	  if (GET_CODE (temp) == PARALLEL)
	    emit_group_move (to_rtx, temp);
	  else
	    emit_group_load (to_rtx, temp, TREE_TYPE (from),
			     int_size_in_bytes (TREE_TYPE (from)));
	}
      else if (temp)
	emit_move_insn (to_rtx, temp);

      preserve_temp_slots (to_rtx);
      pop_temp_slots ();
      return;
    }

  /* In case we are returning the contents of an object which overlaps
     the place the value is being stored, use a safe function when copying
     a value through a pointer into a structure value return block.  */
  if (TREE_CODE (to) == RESULT_DECL
      && TREE_CODE (from) == INDIRECT_REF
      && ADDR_SPACE_GENERIC_P
	 (TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (from, 0)))))
      && refs_may_alias_p (to, from)
      && cfun->returns_struct
      && !cfun->returns_pcc_struct)
    {
      rtx from_rtx, size;

      push_temp_slots ();
      size = expr_size (from);
      from_rtx = expand_normal (from);

      emit_block_move_via_libcall (XEXP (to_rtx, 0), XEXP (from_rtx, 0), size);

      preserve_temp_slots (to_rtx);
      pop_temp_slots ();
      return;
    }

  /* Compute FROM and store the value in the rtx we got.  */

  push_temp_slots ();
  result = store_expr (from, to_rtx, 0, nontemporal, false);
  preserve_temp_slots (result);
  free_temp_slots ();
  pop_temp_slots ();
}
/* Emits nontemporal store insn that moves FROM to TO.  Returns true if this
   succeeded, false otherwise.  */

bool
emit_storent_insn (rtx to, rtx from)
{
  class expand_operand ops[2];
  machine_mode mode = GET_MODE (to);
  enum insn_code code = optab_handler (storent_optab, mode);

  if (code == CODE_FOR_nothing)
    return false;

  create_fixed_operand (&ops[0], to);
  create_input_operand (&ops[1], from, mode);
  return maybe_expand_insn (code, 2, ops);
}
/* Helper function for store_expr storing of STRING_CST.  */

static rtx
string_cst_read_str (void *data, void *, HOST_WIDE_INT offset,
		     fixed_size_mode mode)
{
  tree str = (tree) data;

  gcc_assert (offset >= 0);
  if (offset >= TREE_STRING_LENGTH (str))
    return const0_rtx;

  if ((unsigned HOST_WIDE_INT) offset + GET_MODE_SIZE (mode)
      > (unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (str))
    {
      char *p = XALLOCAVEC (char, GET_MODE_SIZE (mode));
      size_t l = TREE_STRING_LENGTH (str) - offset;
      memcpy (p, TREE_STRING_POINTER (str) + offset, l);
      memset (p + l, '\0', GET_MODE_SIZE (mode) - l);
      return c_readstr (p, mode, false);
    }

  return c_readstr (TREE_STRING_POINTER (str) + offset, mode, false);
}
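
/* Illustrative example (not part of GCC): for

       char buf[8] = "hi";

   store_by_pieces may call the callback above with a word-sized MODE; a
   read that runs past TREE_STRING_LENGTH is zero-padded by the memset
   above, and a read entirely past the string returns const0_rtx, so the
   tail of BUF can be initialized without touching the string constant.  */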
/* Generate code for computing expression EXP,
   and storing the value into TARGET.

   If the mode is BLKmode then we may return TARGET itself.
   It turns out that in BLKmode it doesn't cause a problem.
   because C has no operators that could combine two different
   assignments into the same BLKmode object with different values
   with no sequence point.  Will other languages need this to
   be more thorough?

   If CALL_PARAM_P is nonzero, this is a store into a call param on the
   stack, and block moves may need to be treated specially.

   If NONTEMPORAL is true, try using a nontemporal store instruction.

   If REVERSE is true, the store is to be done in reverse order.  */

rtx
store_expr (tree exp, rtx target, int call_param_p,
	    bool nontemporal, bool reverse)
{
  rtx temp;
  rtx alt_rtl = NULL_RTX;
  location_t loc = curr_insn_location ();
  bool shortened_string_cst = false;

  if (VOID_TYPE_P (TREE_TYPE (exp)))
    {
      /* C++ can generate ?: expressions with a throw expression in one
	 branch and an rvalue in the other.  Here, we resolve attempts to
	 store the throw expression's nonexistent result.  */
      gcc_assert (!call_param_p);
      expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
      return NULL_RTX;
    }
  if (TREE_CODE (exp) == COMPOUND_EXPR)
    {
      /* Perform first part of compound expression, then assign from second
	 part.  */
      expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
		   call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
      return store_expr (TREE_OPERAND (exp, 1), target,
			 call_param_p, nontemporal, reverse);
    }
  else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode)
    {
      /* For conditional expression, get safe form of the target.  Then
	 test the condition, doing the appropriate assignment on either
	 side.  This avoids the creation of unnecessary temporaries.
	 For non-BLKmode, it is more efficient not to do this.  */

      rtx_code_label *lab1 = gen_label_rtx (), *lab2 = gen_label_rtx ();

      do_pending_stack_adjust ();
      jumpifnot (TREE_OPERAND (exp, 0), lab1,
		 profile_probability::uninitialized ());
      store_expr (TREE_OPERAND (exp, 1), target, call_param_p,
		  nontemporal, reverse);
      emit_jump_insn (targetm.gen_jump (lab2));
      emit_barrier ();
      emit_label (lab1);
      store_expr (TREE_OPERAND (exp, 2), target, call_param_p,
		  nontemporal, reverse);
      emit_label (lab2);

      return NULL_RTX;
    }
  else if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    {
      rtx inner_target = 0;
      scalar_int_mode outer_mode = subreg_unpromoted_mode (target);
      scalar_int_mode inner_mode = subreg_promoted_mode (target);

      /* We can do the conversion inside EXP, which will often result
	 in some optimizations.  Do the conversion in two steps: first
	 change the signedness, if needed, then the extend.  But don't
	 do this if the type of EXP is a subtype of something else
	 since then the conversion might involve more than just
	 converting modes.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (exp))
	  && TREE_TYPE (TREE_TYPE (exp)) == 0
	  && GET_MODE_PRECISION (outer_mode)
	     == TYPE_PRECISION (TREE_TYPE (exp)))
	{
	  if (!SUBREG_CHECK_PROMOTED_SIGN (target,
					   TYPE_UNSIGNED (TREE_TYPE (exp))))
	    {
	      /* Some types, e.g. Fortran's logical*4, won't have a signed
		 version, so use the mode instead.  */
	      tree ntype
		= (signed_or_unsigned_type_for
		   (SUBREG_PROMOTED_SIGN (target), TREE_TYPE (exp)));
	      if (ntype == NULL)
		ntype = lang_hooks.types.type_for_mode
		  (TYPE_MODE (TREE_TYPE (exp)),
		   SUBREG_PROMOTED_SIGN (target));

	      exp = fold_convert_loc (loc, ntype, exp);
	    }

	  exp = fold_convert_loc (loc, lang_hooks.types.type_for_mode
				  (inner_mode, SUBREG_PROMOTED_SIGN (target)),
				  exp);

	  inner_target = SUBREG_REG (target);
	}

      temp = expand_expr (exp, inner_target, VOIDmode,
			  call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);

      /* If TEMP is a VOIDmode constant, use convert_modes to make
	 sure that we properly convert it.  */
      if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode)
	{
	  temp = convert_modes (outer_mode, TYPE_MODE (TREE_TYPE (exp)),
				temp, SUBREG_PROMOTED_SIGN (target));
	  temp = convert_modes (inner_mode, outer_mode, temp,
				SUBREG_PROMOTED_SIGN (target));
	}
      else if (!SCALAR_INT_MODE_P (GET_MODE (temp)))
	temp = convert_modes (outer_mode, TYPE_MODE (TREE_TYPE (exp)),
			      temp, SUBREG_PROMOTED_SIGN (target));

      convert_move (SUBREG_REG (target), temp,
		    SUBREG_PROMOTED_SIGN (target));

      return NULL_RTX;
    }
  else if ((TREE_CODE (exp) == STRING_CST
	    || (TREE_CODE (exp) == MEM_REF
		&& TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
		&& TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
		   == STRING_CST
		&& integer_zerop (TREE_OPERAND (exp, 1))))
	   && !nontemporal && !call_param_p
	   && MEM_P (target))
    {
      /* Optimize initialization of an array with a STRING_CST.  */
      HOST_WIDE_INT exp_len, str_copy_len;
      rtx dest_mem;
      tree str = TREE_CODE (exp) == STRING_CST
		 ? exp : TREE_OPERAND (TREE_OPERAND (exp, 0), 0);

      exp_len = int_expr_size (exp);
      if (exp_len <= 0)
	goto normal_expr;

      if (TREE_STRING_LENGTH (str) <= 0)
	goto normal_expr;

      if (can_store_by_pieces (exp_len, string_cst_read_str, (void *) str,
			       MEM_ALIGN (target), false))
	{
	  store_by_pieces (target, exp_len, string_cst_read_str, (void *) str,
			   MEM_ALIGN (target), false, RETURN_BEGIN);
	  return NULL_RTX;
	}

      str_copy_len = TREE_STRING_LENGTH (str);

      /* Trailing NUL bytes in EXP will be handled by the call to
	 clear_storage, which is more efficient than copying them from
	 the STRING_CST, so trim those from STR_COPY_LEN.  */
      while (str_copy_len)
	{
	  if (TREE_STRING_POINTER (str)[str_copy_len - 1])
	    break;
	  str_copy_len--;
	}

      if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0)
	{
	  str_copy_len += STORE_MAX_PIECES - 1;
	  str_copy_len &= ~(STORE_MAX_PIECES - 1);
	}
      if (str_copy_len >= exp_len)
	goto normal_expr;

      if (!can_store_by_pieces (str_copy_len, string_cst_read_str,
				(void *) str, MEM_ALIGN (target), false))
	goto normal_expr;

      dest_mem = store_by_pieces (target, str_copy_len, string_cst_read_str,
				  (void *) str, MEM_ALIGN (target), false,
				  RETURN_END);
      clear_storage (adjust_address_1 (dest_mem, BLKmode, 0, 1, 1, 0,
				       exp_len - str_copy_len),
		     GEN_INT (exp_len - str_copy_len), BLOCK_OP_NORMAL);
      return NULL_RTX;
    }
  else
    {
      rtx tmp_target;

  normal_expr:
      /* If we want to use a nontemporal or a reverse order store, force the
	 value into a register first.  */
      tmp_target = nontemporal || reverse ? NULL_RTX : target;
      tree rexp = exp;
      if (TREE_CODE (exp) == STRING_CST
	  && tmp_target == target
	  && GET_MODE (target) == BLKmode
	  && TYPE_MODE (TREE_TYPE (exp)) == BLKmode)
	{
	  rtx size = expr_size (exp);
	  if (CONST_INT_P (size)
	      && size != const0_rtx
	      && (UINTVAL (size)
		  > ((unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (exp) + 32)))
	    {
	      /* If the STRING_CST has much larger array type than
		 TREE_STRING_LENGTH, only emit the TREE_STRING_LENGTH part of
		 it into the rodata section as the code later on will use
		 memset zero for the remainder anyway.  See PR95052.  */
	      tmp_target = NULL_RTX;
	      rexp = copy_node (exp);
	      tree index
		= build_index_type (size_int (TREE_STRING_LENGTH (exp) - 1));
	      TREE_TYPE (rexp) = build_array_type (TREE_TYPE (TREE_TYPE (exp)),
						   index);
	      shortened_string_cst = true;
	    }
	}
      temp = expand_expr_real (rexp, tmp_target, GET_MODE (target),
			       (call_param_p
				? EXPAND_STACK_PARM : EXPAND_NORMAL),
			       &alt_rtl, false);
      if (shortened_string_cst)
	{
	  gcc_assert (MEM_P (temp));
	  temp = change_address (temp, BLKmode, NULL_RTX);
	}
    }
  /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
     the same as that of TARGET, adjust the constant.  This is needed, for
     example, in case it is a CONST_DOUBLE or CONST_WIDE_INT and we want
     only a word-sized value.  */
  if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode
      && TREE_CODE (exp) != ERROR_MARK
      && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
    {
      gcc_assert (!shortened_string_cst);
      if (GET_MODE_CLASS (GET_MODE (target))
	  != GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp)))
	  && known_eq (GET_MODE_BITSIZE (GET_MODE (target)),
		       GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp)))))
	{
	  rtx t = simplify_gen_subreg (GET_MODE (target), temp,
				       TYPE_MODE (TREE_TYPE (exp)), 0);
	  if (t)
	    temp = t;
	}
      if (GET_MODE (temp) == VOIDmode)
	temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)),
			      temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
    }
  /* If value was not generated in the target, store it there.
     Convert the value to TARGET's type first if necessary and emit the
     pending incrementations that have been queued when expanding EXP.
     Note that we cannot emit the whole queue blindly because this will
     effectively disable the POST_INC optimization later.

     If TEMP and TARGET compare equal according to rtx_equal_p, but
     one or both of them are volatile memory refs, we have to distinguish
     two cases:
     - expand_expr has used TARGET.  In this case, we must not generate
       another copy.  This can be detected by TARGET being equal according
       to == .
     - expand_expr has not used TARGET - that means that the source just
       happens to have the same RTX form.  Since temp will have been created
       by expand_expr, it will compare unequal according to == .
       We must generate a copy in this case, to reach the correct number
       of volatile memory references.  */

  if ((! rtx_equal_p (temp, target)
       || (temp != target && (side_effects_p (temp)
			      || side_effects_p (target)
			      || (MEM_P (temp)
				  && !mems_same_for_tbaa_p (temp, target)))))
      && TREE_CODE (exp) != ERROR_MARK
      /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
	 but TARGET is not valid memory reference, TEMP will differ
	 from TARGET although it is really the same location.  */
      && !(alt_rtl
	   && rtx_equal_p (alt_rtl, target)
	   && !side_effects_p (alt_rtl)
	   && !side_effects_p (target))
      /* If there's nothing to copy, don't bother.  Don't call
	 expr_size unless necessary, because some front-ends (C++)
	 expr_size-hook must not be given objects that are not
	 supposed to be bit-copied or bit-initialized.  */
      && expr_size (exp) != const0_rtx)
    {
      if (GET_MODE (temp) != GET_MODE (target) && GET_MODE (temp) != VOIDmode)
	{
	  gcc_assert (!shortened_string_cst);
	  if (GET_MODE (target) == BLKmode)
	    {
	      /* Handle calls that return BLKmode values in registers.  */
	      if (REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
		copy_blkmode_from_reg (target, temp, TREE_TYPE (exp));
	      else
		store_bit_field (target,
				 rtx_to_poly_int64 (expr_size (exp))
				 * BITS_PER_UNIT,
				 0, 0, 0, GET_MODE (temp), temp, reverse,
				 false);
	    }
	  else
	    convert_move (target, temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
	}
      else if (GET_MODE (temp) == BLKmode && TREE_CODE (exp) == STRING_CST)
	{
	  /* Handle copying a string constant into an array.  The string
	     constant may be shorter than the array.  So copy just the string's
	     actual length, and clear the rest.  First get the size of the data
	     type of the string, which is actually the size of the target.  */
	  rtx size = expr_size (exp);

	  if (CONST_INT_P (size)
	      && INTVAL (size) < TREE_STRING_LENGTH (exp))
	    emit_block_move (target, temp, size,
			     (call_param_p
			      ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
	  else
	    {
	      machine_mode pointer_mode
		= targetm.addr_space.pointer_mode (MEM_ADDR_SPACE (target));
	      machine_mode address_mode = get_address_mode (target);

	      /* Compute the size of the data to copy from the string.  */
	      tree copy_size
		= size_binop_loc (loc, MIN_EXPR,
				  make_tree (sizetype, size),
				  size_int (TREE_STRING_LENGTH (exp)));
	      rtx copy_size_rtx
		= expand_expr (copy_size, NULL_RTX, VOIDmode,
			       (call_param_p
				? EXPAND_STACK_PARM : EXPAND_NORMAL));
	      rtx_code_label *label = 0;

	      /* Copy that much.  */
	      copy_size_rtx = convert_to_mode (pointer_mode, copy_size_rtx,
					       TYPE_UNSIGNED (sizetype));
	      emit_block_move (target, temp, copy_size_rtx,
			       (call_param_p
				? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));

	      /* Figure out how much is left in TARGET that we have to clear.
		 Do all calculations in pointer_mode.  */
	      poly_int64 const_copy_size;
	      if (poly_int_rtx_p (copy_size_rtx, &const_copy_size))
		{
		  size = plus_constant (address_mode, size, -const_copy_size);
		  target = adjust_address (target, BLKmode, const_copy_size);
		}
	      else
		{
		  size = expand_binop (TYPE_MODE (sizetype), sub_optab, size,
				       copy_size_rtx, NULL_RTX, 0,
				       OPTAB_LIB_WIDEN);

		  if (GET_MODE (copy_size_rtx) != address_mode)
		    copy_size_rtx = convert_to_mode (address_mode,
						     copy_size_rtx,
						     TYPE_UNSIGNED (sizetype));

		  target = offset_address (target, copy_size_rtx,
					   highest_pow2_factor (copy_size));
		  label = gen_label_rtx ();
		  emit_cmp_and_jump_insns (size, const0_rtx, LT, NULL_RTX,
					   GET_MODE (size), 0, label);
		}

	      if (size != const0_rtx)
		clear_storage (target, size, BLOCK_OP_NORMAL);

	      if (label)
		emit_label (label);
	    }
	}
      else if (shortened_string_cst)
	gcc_unreachable ();
      /* Handle calls that return values in multiple non-contiguous locations.
	 The Irix 6 ABI has examples of this.  */
      else if (GET_CODE (target) == PARALLEL)
	{
	  if (GET_CODE (temp) == PARALLEL)
	    emit_group_move (target, temp);
	  else
	    emit_group_load (target, temp, TREE_TYPE (exp),
			     int_size_in_bytes (TREE_TYPE (exp)));
	}
      else if (GET_CODE (temp) == PARALLEL)
	emit_group_store (target, temp, TREE_TYPE (exp),
			  int_size_in_bytes (TREE_TYPE (exp)));
      else if (GET_MODE (temp) == BLKmode)
	emit_block_move (target, temp, expr_size (exp),
			 (call_param_p
			  ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
      /* If we emit a nontemporal store, there is nothing else to do.  */
      else if (nontemporal && emit_storent_insn (target, temp))
	;
      else
	{
	  if (reverse)
	    temp = flip_storage_order (GET_MODE (target), temp);
	  temp = force_operand (temp, target);
	  if (temp != target)
	    emit_move_insn (target, temp);
	}
    }
  else
    gcc_assert (!shortened_string_cst);

  return NULL_RTX;
}
/* Return true if field F of structure TYPE is a flexible array.  */

static bool
flexible_array_member_p (const_tree f, const_tree type)
{
  const_tree tf;

  tf = TREE_TYPE (f);
  return (DECL_CHAIN (f) == NULL
	  && TREE_CODE (tf) == ARRAY_TYPE
	  && TYPE_DOMAIN (tf)
	  && TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
	  && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf)))
	  && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf))
	  && int_size_in_bytes (type) >= 0);
}
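
/* Illustrative example (not part of GCC): in

       struct msg { int len; char data[]; };

   DATA is the last field, has array type with a zero lower bound and no
   upper bound, and the enclosing struct still has a constant size, so
   flexible_array_member_p returns true for it and count_type_elements
   below does not require an initializer element for it.  */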
/* If FOR_CTOR_P, return the number of top-level elements that a constructor
   must have in order for it to completely initialize a value of type TYPE.
   Return -1 if the number isn't known.

   If !FOR_CTOR_P, return an estimate of the number of scalars in TYPE.  */

static HOST_WIDE_INT
count_type_elements (const_tree type, bool for_ctor_p)
{
  switch (TREE_CODE (type))
    {
    case ARRAY_TYPE:
      {
	tree nelts = array_type_nelts (type);
	if (nelts && tree_fits_uhwi_p (nelts))
	  {
	    unsigned HOST_WIDE_INT n;

	    n = tree_to_uhwi (nelts) + 1;
	    if (n == 0 || for_ctor_p)
	      return n;
	    else
	      return n * count_type_elements (TREE_TYPE (type), false);
	  }
	return for_ctor_p ? -1 : 1;
      }

    case RECORD_TYPE:
      {
	unsigned HOST_WIDE_INT n = 0;
	tree f;

	for (f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
	  if (TREE_CODE (f) == FIELD_DECL)
	    {
	      if (!for_ctor_p)
		n += count_type_elements (TREE_TYPE (f), false);
	      else if (!flexible_array_member_p (f, type))
		/* Don't count flexible arrays, which are not supposed
		   to be initialized.  */
		n += 1;
	    }

	return n;
      }

    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	tree f;
	HOST_WIDE_INT n, m;

	gcc_assert (!for_ctor_p);
	/* Estimate the number of scalars in each field and pick the
	   maximum.  Other estimates would do instead; the idea is simply
	   to make sure that the estimate is not sensitive to the ordering
	   of the fields.  */
	n = 1;
	for (f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
	  if (TREE_CODE (f) == FIELD_DECL)
	    {
	      m = count_type_elements (TREE_TYPE (f), false);
	      /* If the field doesn't span the whole union, add an extra
		 scalar for the rest.  */
	      if (simple_cst_equal (TYPE_SIZE (TREE_TYPE (f)),
				    TYPE_SIZE (type)) != 1)
		m++;
	      if (n < m)
		n = m;
	    }
	return n;
      }

    case COMPLEX_TYPE:
      return 2;

    case VECTOR_TYPE:
      {
	unsigned HOST_WIDE_INT nelts;
	if (TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
	  return nelts;
	else
	  return -1;
      }

    case INTEGER_TYPE:
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case POINTER_TYPE:
    case OFFSET_TYPE:
    case REFERENCE_TYPE:
      return 1;

    default:
      gcc_unreachable ();
    }
}
/* Helper for categorize_ctor_elements.  Identical interface.  */

static bool
categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
			    HOST_WIDE_INT *p_unique_nz_elts,
			    HOST_WIDE_INT *p_init_elts, bool *p_complete)
{
  unsigned HOST_WIDE_INT idx;
  HOST_WIDE_INT nz_elts, unique_nz_elts, init_elts, num_fields;
  tree value, purpose, elt_type;

  /* Whether CTOR is a valid constant initializer, in accordance with what
     initializer_constant_valid_p does.  If inferred from the constructor
     elements, true until proven otherwise.  */
  bool const_from_elts_p = constructor_static_from_elts_p (ctor);
  bool const_p = const_from_elts_p ? true : TREE_STATIC (ctor);

  nz_elts = 0;
  unique_nz_elts = 0;
  init_elts = 0;
  num_fields = 0;
  elt_type = NULL_TREE;

  FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), idx, purpose, value)
    {
      HOST_WIDE_INT mult = 1;

      if (purpose && TREE_CODE (purpose) == RANGE_EXPR)
	{
	  tree lo_index = TREE_OPERAND (purpose, 0);
	  tree hi_index = TREE_OPERAND (purpose, 1);

	  if (tree_fits_uhwi_p (lo_index) && tree_fits_uhwi_p (hi_index))
	    mult = (tree_to_uhwi (hi_index)
		    - tree_to_uhwi (lo_index) + 1);
	}
      num_fields += mult;
      elt_type = TREE_TYPE (value);

      switch (TREE_CODE (value))
	{
	case CONSTRUCTOR:
	  {
	    HOST_WIDE_INT nz = 0, unz = 0, ic = 0;

	    bool const_elt_p = categorize_ctor_elements_1 (value, &nz, &unz,
							   &ic, p_complete);

	    nz_elts += mult * nz;
	    unique_nz_elts += unz;
	    init_elts += mult * ic;

	    if (const_from_elts_p && const_p)
	      const_p = const_elt_p;
	  }
	  break;

	case INTEGER_CST:
	case REAL_CST:
	case FIXED_CST:
	  if (!initializer_zerop (value))
	    {
	      nz_elts += mult;
	      unique_nz_elts++;
	    }
	  init_elts += mult;
	  break;

	case STRING_CST:
	  nz_elts += mult * TREE_STRING_LENGTH (value);
	  unique_nz_elts += TREE_STRING_LENGTH (value);
	  init_elts += mult * TREE_STRING_LENGTH (value);
	  break;

	case COMPLEX_CST:
	  if (!initializer_zerop (TREE_REALPART (value)))
	    {
	      nz_elts += mult;
	      unique_nz_elts++;
	    }
	  if (!initializer_zerop (TREE_IMAGPART (value)))
	    {
	      nz_elts += mult;
	      unique_nz_elts++;
	    }
	  init_elts += 2 * mult;
	  break;

	case VECTOR_CST:
	  {
	    /* We can only construct constant-length vectors using
	       CONSTRUCTOR.  */
	    unsigned int nunits = VECTOR_CST_NELTS (value).to_constant ();
	    for (unsigned int i = 0; i < nunits; ++i)
	      {
		tree v = VECTOR_CST_ELT (value, i);
		if (!initializer_zerop (v))
		  {
		    nz_elts += mult;
		    unique_nz_elts++;
		  }
		init_elts += mult;
	      }
	  }
	  break;

	default:
	  {
	    HOST_WIDE_INT tc = count_type_elements (elt_type, false);
	    nz_elts += mult * tc;
	    unique_nz_elts += tc;
	    init_elts += mult * tc;

	    if (const_from_elts_p && const_p)
	      const_p
		= initializer_constant_valid_p (value,
						elt_type,
						TYPE_REVERSE_STORAGE_ORDER
						(TREE_TYPE (ctor)))
		  != NULL_TREE;
	  }
	  break;
	}
    }

  if (*p_complete && !complete_ctor_at_level_p (TREE_TYPE (ctor),
						num_fields, elt_type))
    *p_complete = false;

  *p_nz_elts += nz_elts;
  *p_unique_nz_elts += unique_nz_elts;
  *p_init_elts += init_elts;

  return const_p;
}
/* Examine CTOR to discover:
   * how many scalar fields are set to nonzero values,
     and place it in *P_NZ_ELTS;
   * the same, but counting RANGE_EXPRs as multiplier of 1 instead of
     high - low + 1 (this can be useful for callers to determine ctors
     that could be cheaply initialized with - perhaps nested - loops
     compared to copied from huge read-only data),
     and place it in *P_UNIQUE_NZ_ELTS;
   * how many scalar fields in total are in CTOR,
     and place it in *P_ELT_COUNT.
   * whether the constructor is complete -- in the sense that every
     meaningful byte is explicitly given a value --
     and place it in *P_COMPLETE.

   Return whether or not CTOR is a valid static constant initializer, the same
   as "initializer_constant_valid_p (CTOR, TREE_TYPE (CTOR)) != 0".  */

bool
categorize_ctor_elements (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
			  HOST_WIDE_INT *p_unique_nz_elts,
			  HOST_WIDE_INT *p_init_elts, bool *p_complete)
{
  *p_nz_elts = 0;
  *p_unique_nz_elts = 0;
  *p_init_elts = 0;
  *p_complete = true;

  return categorize_ctor_elements_1 (ctor, p_nz_elts, p_unique_nz_elts,
				     p_init_elts, p_complete);
}
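
/* Illustrative example (not part of GCC): for the GNU range initializer

       int a[100] = { [0 ... 99] = 5 };

   the RANGE_EXPR purpose gives MULT == 100, so *P_NZ_ELTS becomes 100
   while *P_UNIQUE_NZ_ELTS is only 1; a caller can use the smaller count
   to decide that a small runtime loop is cheaper than copying 100
   identical constants from read-only data.  */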
/* Return true if constructor CTOR is simple enough to be materialized
   in an integer mode register.  Limit the size to WORDS words, which
   is 1 by default.  */

bool
immediate_const_ctor_p (const_tree ctor, unsigned int words)
{
  /* Allow function to be called with a VAR_DECL's DECL_INITIAL.  */
  if (!ctor || TREE_CODE (ctor) != CONSTRUCTOR)
    return false;

  return TREE_CONSTANT (ctor)
	 && !TREE_ADDRESSABLE (ctor)
	 && CONSTRUCTOR_NELTS (ctor)
	 && TREE_CODE (TREE_TYPE (ctor)) != ARRAY_TYPE
	 && int_expr_size (ctor) <= words * UNITS_PER_WORD
	 && initializer_constant_valid_for_bitfield_p (ctor);
}
/* TYPE is initialized by a constructor with NUM_ELTS elements, the last
   of which had type LAST_TYPE.  Each element was itself a complete
   initializer, in the sense that every meaningful byte was explicitly
   given a value.  Return true if the same is true for the constructor
   as a whole.  */

bool
complete_ctor_at_level_p (const_tree type, HOST_WIDE_INT num_elts,
			  const_tree last_type)
{
  if (TREE_CODE (type) == UNION_TYPE
      || TREE_CODE (type) == QUAL_UNION_TYPE)
    {
      if (num_elts == 0)
	return false;

      gcc_assert (num_elts == 1 && last_type);

      /* ??? We could look at each element of the union, and find the
	 largest element.  Which would avoid comparing the size of the
	 initialized element against any tail padding in the union.
	 Doesn't seem worth the effort...  */
      return simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (last_type)) == 1;
    }

  return count_type_elements (type, true) == num_elts;
}
/* Return true if EXP contains mostly (3/4) zeros.  */

static bool
mostly_zeros_p (const_tree exp)
{
  if (TREE_CODE (exp) == CONSTRUCTOR)
    {
      HOST_WIDE_INT nz_elts, unz_elts, init_elts;
      bool complete_p;

      categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
				&complete_p);
      return !complete_p || nz_elts < init_elts / 4;
    }

  return initializer_zerop (exp);
}
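
/* Illustrative example (not part of GCC):

       int v[8] = { 0, 0, 0, 0, 0, 0, 7, 0 };

   has one nonzero element out of eight initialized ones, so mostly_zeros_p
   returns true and store_constructor below prefers to clear the whole
   object first and then store only the single nonzero element.  */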
/* Return true if EXP contains all zeros.  */

static bool
all_zeros_p (const_tree exp)
{
  if (TREE_CODE (exp) == CONSTRUCTOR)
    {
      HOST_WIDE_INT nz_elts, unz_elts, init_elts;
      bool complete_p;

      categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
				&complete_p);
      return nz_elts == 0;
    }

  return initializer_zerop (exp);
}
/* Helper function for store_constructor.
   TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
   CLEARED is as for store_constructor.
   ALIAS_SET is the alias set to use for any stores.
   If REVERSE is true, the store is to be done in reverse order.

   This provides a recursive shortcut back to store_constructor when it isn't
   necessary to go through store_field.  This is so that we can pass through
   the cleared field to let store_constructor know that we may not have to
   clear a substructure if the outer structure has already been cleared.  */

static void
store_constructor_field (rtx target, poly_uint64 bitsize, poly_int64 bitpos,
			 poly_uint64 bitregion_start,
			 poly_uint64 bitregion_end,
			 machine_mode mode,
			 tree exp, int cleared,
			 alias_set_type alias_set, bool reverse)
{
  poly_int64 bytepos;
  poly_uint64 bytesize;
  if (TREE_CODE (exp) == CONSTRUCTOR
      /* We can only call store_constructor recursively if the size and
	 bit position are on a byte boundary.  */
      && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
      && maybe_ne (bitsize, 0U)
      && multiple_p (bitsize, BITS_PER_UNIT, &bytesize)
      /* If we have a nonzero bitpos for a register target, then we just
	 let store_field do the bitfield handling.  This is unlikely to
	 generate unnecessary clear instructions anyways.  */
      && (known_eq (bitpos, 0) || MEM_P (target)))
    {
      if (MEM_P (target))
	{
	  machine_mode target_mode = GET_MODE (target);
	  if (target_mode != BLKmode
	      && !multiple_p (bitpos, GET_MODE_ALIGNMENT (target_mode)))
	    target_mode = BLKmode;
	  target = adjust_address (target, target_mode, bytepos);
	}

      /* Update the alias set, if required.  */
      if (MEM_P (target) && ! MEM_KEEP_ALIAS_SET_P (target)
	  && MEM_ALIAS_SET (target) != 0)
	{
	  target = copy_rtx (target);
	  set_mem_alias_set (target, alias_set);
	}

      store_constructor (exp, target, cleared, bytesize, reverse);
    }
  else
    store_field (target, bitsize, bitpos, bitregion_start, bitregion_end, mode,
		 exp, alias_set, false, reverse);
}
/* Returns the number of FIELD_DECLs in TYPE.  */

static int
fields_length (const_tree type)
{
  tree t = TYPE_FIELDS (type);
  int count = 0;

  for (; t; t = DECL_CHAIN (t))
    if (TREE_CODE (t) == FIELD_DECL)
      ++count;

  return count;
}
/* Store the value of constructor EXP into the rtx TARGET.
   TARGET is either a REG or a MEM; we know it cannot conflict, since
   safe_from_p has been called.
   CLEARED is true if TARGET is known to have been zero'd.
   SIZE is the number of bytes of TARGET we are allowed to modify: this
   may not be the same as the size of EXP if we are assigning to a field
   which has been packed to exclude padding bits.
   If REVERSE is true, the store is to be done in reverse order.  */

void
store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
		   bool reverse)
{
  tree type = TREE_TYPE (exp);
  HOST_WIDE_INT exp_size = int_size_in_bytes (type);
  poly_int64 bitregion_end = known_gt (size, 0) ? size * BITS_PER_UNIT - 1 : 0;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	unsigned HOST_WIDE_INT idx;
	tree field, value;

	/* The storage order is specified for every aggregate type.  */
	reverse = TYPE_REVERSE_STORAGE_ORDER (type);

	/* If size is zero or the target is already cleared, do nothing.  */
	if (known_eq (size, 0) || cleared)
	  cleared = 1;
	/* We either clear the aggregate or indicate the value is dead.  */
	else if ((TREE_CODE (type) == UNION_TYPE
		  || TREE_CODE (type) == QUAL_UNION_TYPE)
		 && ! CONSTRUCTOR_ELTS (exp))
	  /* If the constructor is empty, clear the union.  */
	  {
	    clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
	    cleared = 1;
	  }

	/* If we are building a static constructor into a register,
	   set the initial value as zero so we can fold the value into
	   a constant.  But if more than one register is involved,
	   this probably loses.  */
	else if (REG_P (target) && TREE_STATIC (exp)
		 && known_le (GET_MODE_SIZE (GET_MODE (target)),
			      REGMODE_NATURAL_SIZE (GET_MODE (target))))
	  {
	    emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
	    cleared = 1;
	  }

	/* If the constructor has fewer fields than the structure or
	   if we are initializing the structure to mostly zeros, clear
	   the whole structure first.  Don't do this if TARGET is a
	   register whose mode size isn't equal to SIZE since
	   clear_storage can't handle this case.  */
	else if (known_size_p (size)
		 && (((int) CONSTRUCTOR_NELTS (exp) != fields_length (type))
		     || mostly_zeros_p (exp))
		 && (!REG_P (target)
		     || known_eq (GET_MODE_SIZE (GET_MODE (target)), size)))
	  {
	    clear_storage (target, gen_int_mode (size, Pmode),
			   BLOCK_OP_NORMAL);
	    cleared = 1;
	  }

	if (REG_P (target) && !cleared)
	  emit_clobber (target);

	/* Store each element of the constructor into the
	   corresponding field of TARGET.  */
	FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, field, value)
	  {
	    machine_mode mode;
	    HOST_WIDE_INT bitsize;
	    HOST_WIDE_INT bitpos = 0;
	    tree offset;
	    rtx to_rtx = target;

	    /* Just ignore missing fields.  We cleared the whole
	       structure, above, if any fields are missing.  */
	    if (field == 0)
	      continue;

	    if (cleared && initializer_zerop (value))
	      continue;

	    if (tree_fits_uhwi_p (DECL_SIZE (field)))
	      bitsize = tree_to_uhwi (DECL_SIZE (field));

	    mode = DECL_MODE (field);
	    if (DECL_BIT_FIELD (field))
	      mode = VOIDmode;

	    offset = DECL_FIELD_OFFSET (field);
	    if (tree_fits_shwi_p (offset)
		&& tree_fits_shwi_p (bit_position (field)))
	      bitpos = int_bit_position (field);

	    /* If this initializes a field that is smaller than a
	       word, at the start of a word, try to widen it to a full
	       word.  This special case allows us to output C++ member
	       function initializations in a form that the optimizers
	       can understand.  */
	    if (WORD_REGISTER_OPERATIONS
		&& bitsize < BITS_PER_WORD
		&& bitpos % BITS_PER_WORD == 0
		&& GET_MODE_CLASS (mode) == MODE_INT
		&& TREE_CODE (value) == INTEGER_CST
		&& bitpos + BITS_PER_WORD <= exp_size * BITS_PER_UNIT)
	      {
		type = TREE_TYPE (value);

		if (TYPE_PRECISION (type) < BITS_PER_WORD)
		  {
		    type = lang_hooks.types.type_for_mode
		      (word_mode, TYPE_UNSIGNED (type));
		    value = fold_convert (type, value);
		    /* Make sure the bits beyond the original bitsize are zero
		       so that we can correctly avoid extra zeroing stores in
		       later constructor elements.  */
		    tree bitsize_mask
		      = wide_int_to_tree (type, wi::mask (bitsize, false,
							  BITS_PER_WORD));
		    value = fold_build2 (BIT_AND_EXPR, type, value, bitsize_mask);
		  }

		if (BYTES_BIG_ENDIAN)
		  value
		    = fold_build2 (LSHIFT_EXPR, type, value,
				   build_int_cst (type,
						  BITS_PER_WORD - bitsize));
		bitsize = BITS_PER_WORD;
		mode = word_mode;
	      }

	    if (MEM_P (to_rtx) && !MEM_KEEP_ALIAS_SET_P (to_rtx)
		&& DECL_NONADDRESSABLE_P (field))
	      {
		to_rtx = copy_rtx (to_rtx);
		MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
	      }

	    store_constructor_field (to_rtx, bitsize, bitpos,
				     0, bitregion_end, mode,
				     value, cleared,
				     get_alias_set (TREE_TYPE (field)),
				     reverse);
	  }
	break;
      }
    case ARRAY_TYPE:
      {
	tree value, index;
	unsigned HOST_WIDE_INT i;
	bool need_to_clear;
	tree domain;
	tree elttype = TREE_TYPE (type);
	bool const_bounds_p;
	HOST_WIDE_INT minelt = 0;
	HOST_WIDE_INT maxelt = 0;

	/* The storage order is specified for every aggregate type.  */
	reverse = TYPE_REVERSE_STORAGE_ORDER (type);

	domain = TYPE_DOMAIN (type);
	const_bounds_p = (TYPE_MIN_VALUE (domain)
			  && TYPE_MAX_VALUE (domain)
			  && tree_fits_shwi_p (TYPE_MIN_VALUE (domain))
			  && tree_fits_shwi_p (TYPE_MAX_VALUE (domain)));

	/* If we have constant bounds for the range of the type, get them.  */
	if (const_bounds_p)
	  {
	    minelt = tree_to_shwi (TYPE_MIN_VALUE (domain));
	    maxelt = tree_to_shwi (TYPE_MAX_VALUE (domain));
	  }

	/* If the constructor has fewer elements than the array, clear
	   the whole array first.  Similarly if this is static
	   constructor of a non-BLKmode object.  */
	if (cleared)
	  need_to_clear = false;
	else if (REG_P (target) && TREE_STATIC (exp))
	  need_to_clear = true;
	else
	  {
	    unsigned HOST_WIDE_INT idx;
	    HOST_WIDE_INT count = 0, zero_count = 0;
	    need_to_clear = ! const_bounds_p;

	    /* This loop is a more accurate version of the loop in
	       mostly_zeros_p (it handles RANGE_EXPR in an index).  It
	       is also needed to check for missing elements.  */
	    FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, index, value)
	      {
		HOST_WIDE_INT this_node_count;

		if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
		  {
		    tree lo_index = TREE_OPERAND (index, 0);
		    tree hi_index = TREE_OPERAND (index, 1);

		    if (! tree_fits_uhwi_p (lo_index)
			|| ! tree_fits_uhwi_p (hi_index))
		      {
			need_to_clear = true;
			break;
		      }

		    this_node_count = (tree_to_uhwi (hi_index)
				       - tree_to_uhwi (lo_index) + 1);
		  }
		else
		  this_node_count = 1;

		count += this_node_count;
		if (mostly_zeros_p (value))
		  zero_count += this_node_count;
	      }

	    /* Clear the entire array first if there are any missing
	       elements, or if the incidence of zero elements is >=
	       75%.  */
	    if (! need_to_clear
		&& (count < maxelt - minelt + 1
		    || 4 * zero_count >= 3 * count))
	      need_to_clear = true;
	  }

	if (need_to_clear && maybe_gt (size, 0))
	  {
	    if (REG_P (target))
	      emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
	    else
	      clear_storage (target, gen_int_mode (size, Pmode),
			     BLOCK_OP_NORMAL);
	    cleared = 1;
	  }

	if (!cleared && REG_P (target))
	  /* Inform later passes that the old value is dead.  */
	  emit_clobber (target);

	/* Store each element of the constructor into the
	   corresponding element of TARGET, determined by counting the
	   elements.  */
	FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), i, index, value)
	  {
	    machine_mode mode;
	    poly_int64 bitsize;
	    HOST_WIDE_INT bitpos;
	    rtx xtarget = target;

	    if (cleared && initializer_zerop (value))
	      continue;

	    mode = TYPE_MODE (elttype);
	    if (mode != BLKmode)
	      bitsize = GET_MODE_BITSIZE (mode);
	    else if (!poly_int_tree_p (TYPE_SIZE (elttype), &bitsize))
	      bitsize = -1;

	    if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
	      {
		tree lo_index = TREE_OPERAND (index, 0);
		tree hi_index = TREE_OPERAND (index, 1);
		rtx index_r, pos_rtx;
		HOST_WIDE_INT lo, hi, count;
		tree position;

		/* If the range is constant and "small", unroll the loop.  */
		if (const_bounds_p
		    && tree_fits_shwi_p (lo_index)
		    && tree_fits_shwi_p (hi_index)
		    && (lo = tree_to_shwi (lo_index),
			hi = tree_to_shwi (hi_index),
			count = hi - lo + 1,
			(!MEM_P (target)
			 || count <= 2
			 || (tree_fits_uhwi_p (TYPE_SIZE (elttype))
			     && (tree_to_uhwi (TYPE_SIZE (elttype)) * count
				 <= 40 * 8)))))
		  {
		    lo -= minelt;  hi -= minelt;
		    for (; lo <= hi; lo++)
		      {
			bitpos = lo * tree_to_shwi (TYPE_SIZE (elttype));

			if (MEM_P (target)
			    && !MEM_KEEP_ALIAS_SET_P (target)
			    && TREE_CODE (type) == ARRAY_TYPE
			    && TYPE_NONALIASED_COMPONENT (type))
			  {
			    target = copy_rtx (target);
			    MEM_KEEP_ALIAS_SET_P (target) = 1;
			  }

			store_constructor_field
			  (target, bitsize, bitpos, 0, bitregion_end,
			   mode, value, cleared,
			   get_alias_set (elttype), reverse);
		      }
		  }
		else
		  {
		    rtx_code_label *loop_start = gen_label_rtx ();
		    rtx_code_label *loop_end = gen_label_rtx ();
		    tree exit_cond;

		    expand_normal (hi_index);

		    index = build_decl (EXPR_LOCATION (exp),
					VAR_DECL, NULL_TREE, domain);
		    index_r = gen_reg_rtx (promote_decl_mode (index, NULL));
		    SET_DECL_RTL (index, index_r);
		    store_expr (lo_index, index_r, 0, false, reverse);

		    /* Build the head of the loop.  */
		    do_pending_stack_adjust ();
		    emit_label (loop_start);

		    /* Assign value to element index.  */
		    position =
		      fold_convert (ssizetype,
				    fold_build2 (MINUS_EXPR,
						 TREE_TYPE (index),
						 index,
						 TYPE_MIN_VALUE (domain)));

		    position =
		      size_binop (MULT_EXPR, position,
				  fold_convert (ssizetype,
						TYPE_SIZE_UNIT (elttype)));

		    pos_rtx = expand_normal (position);
		    xtarget = offset_address (target, pos_rtx,
					      highest_pow2_factor (position));
		    xtarget = adjust_address (xtarget, mode, 0);
		    if (TREE_CODE (value) == CONSTRUCTOR)
		      store_constructor (value, xtarget, cleared,
					 exact_div (bitsize, BITS_PER_UNIT),
					 reverse);
		    else
		      store_expr (value, xtarget, 0, false, reverse);

		    /* Generate a conditional jump to exit the loop.  */
		    exit_cond = build2 (LT_EXPR, integer_type_node,
					index, hi_index);
		    jumpif (exit_cond, loop_end,
			    profile_probability::uninitialized ());

		    /* Update the loop counter, and jump to the head of
		       the loop.  */
		    expand_assignment (index,
				       build2 (PLUS_EXPR, TREE_TYPE (index),
					       index, integer_one_node),
				       false);

		    emit_jump (loop_start);

		    /* Build the end of the loop.  */
		    emit_label (loop_end);
		  }
	      }
	    else if ((index != 0 && ! tree_fits_shwi_p (index))
		     || ! tree_fits_uhwi_p (TYPE_SIZE (elttype)))
	      {
		tree position;

		if (index == 0)
		  index = ssize_int (1);

		if (minelt)
		  index = fold_convert (ssizetype,
					fold_build2 (MINUS_EXPR,
						     TREE_TYPE (index),
						     index,
						     TYPE_MIN_VALUE (domain)));

		position =
		  size_binop (MULT_EXPR, index,
			      fold_convert (ssizetype,
					    TYPE_SIZE_UNIT (elttype)));
		xtarget = offset_address (target,
					  expand_normal (position),
					  highest_pow2_factor (position));
		xtarget = adjust_address (xtarget, mode, 0);
		store_expr (value, xtarget, 0, false, reverse);
	      }
	    else
	      {
		if (index != 0)
		  bitpos = ((tree_to_shwi (index) - minelt)
			    * tree_to_uhwi (TYPE_SIZE (elttype)));
		else
		  bitpos = (i * tree_to_uhwi (TYPE_SIZE (elttype)));

		if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target)
		    && TREE_CODE (type) == ARRAY_TYPE
		    && TYPE_NONALIASED_COMPONENT (type))
		  {
		    target = copy_rtx (target);
		    MEM_KEEP_ALIAS_SET_P (target) = 1;
		  }

		store_constructor_field (target, bitsize, bitpos, 0,
					 bitregion_end, mode, value,
					 cleared, get_alias_set (elttype),
					 reverse);
	      }
	  }
	break;
      }
7850 unsigned HOST_WIDE_INT idx
;
7851 constructor_elt
*ce
;
7854 insn_code icode
= CODE_FOR_nothing
;
7856 tree elttype
= TREE_TYPE (type
);
7857 int elt_size
= vector_element_bits (type
);
7858 machine_mode eltmode
= TYPE_MODE (elttype
);
7859 HOST_WIDE_INT bitsize
;
7860 HOST_WIDE_INT bitpos
;
7861 rtvec vector
= NULL
;
7863 unsigned HOST_WIDE_INT const_n_elts
;
7864 alias_set_type alias
;
7865 bool vec_vec_init_p
= false;
7866 machine_mode mode
= GET_MODE (target
);
7868 gcc_assert (eltmode
!= BLKmode
);
7870 /* Try using vec_duplicate_optab for uniform vectors. */
7871 if (!TREE_SIDE_EFFECTS (exp
)
7872 && VECTOR_MODE_P (mode
)
7873 && eltmode
== GET_MODE_INNER (mode
)
7874 && ((icode
= optab_handler (vec_duplicate_optab
, mode
))
7875 != CODE_FOR_nothing
)
7876 && (elt
= uniform_vector_p (exp
))
7877 && !VECTOR_TYPE_P (TREE_TYPE (elt
)))
7879 class expand_operand ops
[2];
7880 create_output_operand (&ops
[0], target
, mode
);
7881 create_input_operand (&ops
[1], expand_normal (elt
), eltmode
);
7882 expand_insn (icode
, 2, ops
);
7883 if (!rtx_equal_p (target
, ops
[0].value
))
7884 emit_move_insn (target
, ops
[0].value
);
7887 /* Use sign-extension for uniform boolean vectors with
7888 integer modes and single-bit mask entries.
7889 Effectively "vec_duplicate" for bitmasks. */
7891 && !TREE_SIDE_EFFECTS (exp
)
7892 && VECTOR_BOOLEAN_TYPE_P (type
)
7893 && SCALAR_INT_MODE_P (TYPE_MODE (type
))
7894 && (elt
= uniform_vector_p (exp
))
7895 && !VECTOR_TYPE_P (TREE_TYPE (elt
)))
7897 rtx op0
= force_reg (TYPE_MODE (TREE_TYPE (elt
)),
7898 expand_normal (elt
));
7899 rtx tmp
= gen_reg_rtx (mode
);
7900 convert_move (tmp
, op0
, 0);
7902 /* Ensure no excess bits are set.
7903 GCN needs this for nunits < 64.
7904 x86 needs this for nunits < 8. */
7905 auto nunits
= TYPE_VECTOR_SUBPARTS (type
).to_constant ();
7906 if (maybe_ne (GET_MODE_PRECISION (mode
), nunits
))
7907 tmp
= expand_binop (mode
, and_optab
, tmp
,
7908 GEN_INT ((HOST_WIDE_INT_1U
<< nunits
) - 1),
7909 target
, true, OPTAB_WIDEN
);
7911 emit_move_insn (target
, tmp
);
7915 n_elts
= TYPE_VECTOR_SUBPARTS (type
);
7917 && VECTOR_MODE_P (mode
)
7918 && n_elts
.is_constant (&const_n_elts
))
7920 machine_mode emode
= eltmode
;
7921 bool vector_typed_elts_p
= false;
7923 if (CONSTRUCTOR_NELTS (exp
)
7924 && (TREE_CODE (TREE_TYPE (CONSTRUCTOR_ELT (exp
, 0)->value
))
7927 tree etype
= TREE_TYPE (CONSTRUCTOR_ELT (exp
, 0)->value
);
7928 gcc_assert (known_eq (CONSTRUCTOR_NELTS (exp
)
7929 * TYPE_VECTOR_SUBPARTS (etype
),
7931 emode
= TYPE_MODE (etype
);
7932 vector_typed_elts_p
= true;
7934 icode
= convert_optab_handler (vec_init_optab
, mode
, emode
);
7935 if (icode
!= CODE_FOR_nothing
)
7937 unsigned int n
= const_n_elts
;
7939 if (vector_typed_elts_p
)
7941 n
= CONSTRUCTOR_NELTS (exp
);
7942 vec_vec_init_p
= true;
7944 vector
= rtvec_alloc (n
);
7945 for (unsigned int k
= 0; k
< n
; k
++)
7946 RTVEC_ELT (vector
, k
) = CONST0_RTX (emode
);
7950 /* Compute the size of the elements in the CTOR. It differs
7951 from the size of the vector type elements only when the
7952 CTOR elements are vectors themselves. */
7953 tree val_type
= (CONSTRUCTOR_NELTS (exp
) != 0
7954 ? TREE_TYPE (CONSTRUCTOR_ELT (exp
, 0)->value
)
7956 if (VECTOR_TYPE_P (val_type
))
7957 bitsize
= tree_to_uhwi (TYPE_SIZE (val_type
));
7961 /* If the constructor has fewer elements than the vector,
7962 clear the whole array first. Similarly if this is static
7963 constructor of a non-BLKmode object. */
7965 need_to_clear
= false;
7966 else if (REG_P (target
) && TREE_STATIC (exp
))
7967 need_to_clear
= true;
7970 unsigned HOST_WIDE_INT count
= 0, zero_count
= 0;
7973 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp
), idx
, value
)
7975 int n_elts_here
= bitsize
/ elt_size
;
7976 count
+= n_elts_here
;
7977 if (mostly_zeros_p (value
))
7978 zero_count
+= n_elts_here
;
7981 /* Clear the entire vector first if there are any missing elements,
7982 or if the incidence of zero elements is >= 75%. */
7983 need_to_clear
= (maybe_lt (count
, n_elts
)
7984 || 4 * zero_count
>= 3 * count
);
7987 if (need_to_clear
&& maybe_gt (size
, 0) && !vector
)
7990 emit_move_insn (target
, CONST0_RTX (mode
));
7992 clear_storage (target
, gen_int_mode (size
, Pmode
),
7997 /* Inform later passes that the old value is dead. */
7998 if (!cleared
&& !vector
&& REG_P (target
) && maybe_gt (n_elts
, 1u))
8000 emit_move_insn (target
, CONST0_RTX (mode
));
8005 alias
= MEM_ALIAS_SET (target
);
8007 alias
= get_alias_set (elttype
);
	/* Store each element of the constructor into the corresponding
	   element of TARGET, determined by counting the elements.  */
8011 for (idx
= 0, i
= 0;
8012 vec_safe_iterate (CONSTRUCTOR_ELTS (exp
), idx
, &ce
);
8013 idx
++, i
+= bitsize
/ elt_size
)
8015 HOST_WIDE_INT eltpos
;
8016 tree value
= ce
->value
;
8018 if (cleared
&& initializer_zerop (value
))
8022 eltpos
= tree_to_uhwi (ce
->index
);
8030 gcc_assert (ce
->index
== NULL_TREE
);
8031 gcc_assert (TREE_CODE (TREE_TYPE (value
)) == VECTOR_TYPE
);
8035 gcc_assert (TREE_CODE (TREE_TYPE (value
)) != VECTOR_TYPE
);
8036 RTVEC_ELT (vector
, eltpos
) = expand_normal (value
);
8040 machine_mode value_mode
8041 = (TREE_CODE (TREE_TYPE (value
)) == VECTOR_TYPE
8042 ? TYPE_MODE (TREE_TYPE (value
)) : eltmode
);
8043 bitpos
= eltpos
* elt_size
;
8044 store_constructor_field (target
, bitsize
, bitpos
, 0,
8045 bitregion_end
, value_mode
,
8046 value
, cleared
, alias
, reverse
);
8051 emit_insn (GEN_FCN (icode
) (target
,
8052 gen_rtx_PARALLEL (mode
, vector
)));
/* Store the value of EXP (an expression tree)
   into a subfield of TARGET which has mode MODE and occupies
   BITSIZE bits, starting BITPOS bits from the start of TARGET.
   If MODE is VOIDmode, it means that we are storing into a bit-field.

   BITREGION_START is bitpos of the first bitfield in this region.
   BITREGION_END is the bitpos of the ending bitfield in this region.
   These two fields are 0, if the C++ memory model does not apply,
   or we are not interested in keeping track of bitfield regions.

   Always return const0_rtx unless we have something particular to
   return.

   ALIAS_SET is the alias set for the destination.  This value will
   (in general) be different from that for TARGET, since TARGET is a
   reference to the containing structure.

   If NONTEMPORAL is true, try generating a nontemporal store.

   If REVERSE is true, the store is to be done in reverse order.  */
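/* Illustrative sketch (hypothetical values, not taken from this file):
   for  struct S { int a : 3; int b : 5; } s;  and the store  s.b = x;
   a typical little-endian target would reach store_field with
   BITSIZE == 5, BITPOS == 3, BITREGION_START == 0, BITREGION_END == 7
   (the byte holding both bit-fields), MODE == VOIDmode (a bit-field
   store) and EXP being the tree for  x.  */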
static rtx
store_field (rtx target, poly_int64 bitsize, poly_int64 bitpos,
	     poly_uint64 bitregion_start, poly_uint64 bitregion_end,
	     machine_mode mode, tree exp,
	     alias_set_type alias_set, bool nontemporal, bool reverse)
{
  if (TREE_CODE (exp) == ERROR_MARK)
    return const0_rtx;
  /* If we have nothing to store, do nothing unless the expression has
     side-effects.  Don't do that for zero sized addressable lhs of
     calls.  */
  if (known_eq (bitsize, 0)
      && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
	  || TREE_CODE (exp) != CALL_EXPR))
    return expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
  if (GET_CODE (target) == CONCAT)
    {
      /* We're storing into a struct containing a single __complex.  */
      gcc_assert (known_eq (bitpos, 0));
      return store_expr (exp, target, 0, nontemporal, reverse);
    }
  /* If the structure is in a register or if the component
     is a bit field, we cannot use addressing to access it.
     Use bit-field techniques or SUBREG to store in it.  */

  poly_int64 decl_bitsize;
8112 if (mode
== VOIDmode
8113 || (mode
!= BLKmode
&& ! direct_store
[(int) mode
]
8114 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_INT
8115 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_FLOAT
)
8117 || GET_CODE (target
) == SUBREG
8118 /* If the field isn't aligned enough to store as an ordinary memref,
8119 store it as a bit field. */
8121 && ((((MEM_ALIGN (target
) < GET_MODE_ALIGNMENT (mode
))
8122 || !multiple_p (bitpos
, GET_MODE_ALIGNMENT (mode
)))
8123 && targetm
.slow_unaligned_access (mode
, MEM_ALIGN (target
)))
8124 || !multiple_p (bitpos
, BITS_PER_UNIT
)))
8125 || (known_size_p (bitsize
)
8127 && maybe_gt (GET_MODE_BITSIZE (mode
), bitsize
))
8128 /* If the RHS and field are a constant size and the size of the
8129 RHS isn't the same size as the bitfield, we must use bitfield
8131 || (known_size_p (bitsize
)
8132 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp
)))
8133 && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp
))),
8135 /* Except for initialization of full bytes from a CONSTRUCTOR, which
8136 we will handle specially below. */
8137 && !(TREE_CODE (exp
) == CONSTRUCTOR
8138 && multiple_p (bitsize
, BITS_PER_UNIT
))
8139 /* And except for bitwise copying of TREE_ADDRESSABLE types,
8140 where the FIELD_DECL has the right bitsize, but TREE_TYPE (exp)
8141 includes some extra padding. store_expr / expand_expr will in
8142 that case call get_inner_reference that will have the bitsize
8143 we check here and thus the block move will not clobber the
8144 padding that shouldn't be clobbered. In the future we could
8145 replace the TREE_ADDRESSABLE check with a check that
8146 get_base_address needs to live in memory. */
8147 && (!TREE_ADDRESSABLE (TREE_TYPE (exp
))
8148 || TREE_CODE (exp
) != COMPONENT_REF
8149 || !multiple_p (bitsize
, BITS_PER_UNIT
)
8150 || !multiple_p (bitpos
, BITS_PER_UNIT
)
8151 || !poly_int_tree_p (DECL_SIZE (TREE_OPERAND (exp
, 1)),
8153 || maybe_ne (decl_bitsize
, bitsize
))
8154 /* A call with an addressable return type and return-slot
8155 optimization must not need bitfield operations but we must
8156 pass down the original target. */
8157 && (TREE_CODE (exp
) != CALL_EXPR
8158 || !TREE_ADDRESSABLE (TREE_TYPE (exp
))
8159 || !CALL_EXPR_RETURN_SLOT_OPT (exp
)))
8160 /* If we are expanding a MEM_REF of a non-BLKmode non-addressable
8161 decl we must use bitfield operations. */
8162 || (known_size_p (bitsize
)
8163 && TREE_CODE (exp
) == MEM_REF
8164 && TREE_CODE (TREE_OPERAND (exp
, 0)) == ADDR_EXPR
8165 && DECL_P (TREE_OPERAND (TREE_OPERAND (exp
, 0), 0))
8166 && !TREE_ADDRESSABLE (TREE_OPERAND (TREE_OPERAND (exp
, 0), 0))
8167 && DECL_MODE (TREE_OPERAND (TREE_OPERAND (exp
, 0), 0)) != BLKmode
))
  /* If EXP is a NOP_EXPR of precision less than its mode, then that
     implies a mask operation.  If the precision is the same size as
     the field we're storing into, that mask is redundant.  This is
     particularly common with bit field assignments generated by the
     C++ front end.  */
  nop_def = get_def_for_expr (exp, NOP_EXPR);
8180 tree type
= TREE_TYPE (exp
);
8181 if (INTEGRAL_TYPE_P (type
)
8182 && maybe_ne (TYPE_PRECISION (type
),
8183 GET_MODE_BITSIZE (TYPE_MODE (type
)))
8184 && known_eq (bitsize
, TYPE_PRECISION (type
)))
8186 tree op
= gimple_assign_rhs1 (nop_def
);
8187 type
= TREE_TYPE (op
);
8188 if (INTEGRAL_TYPE_P (type
)
8189 && known_ge (TYPE_PRECISION (type
), bitsize
))
8194 temp
= expand_normal (exp
);
  /* We don't support variable-sized BLKmode bitfields, since our
     handling of BLKmode is bound up with the ability to break
     things into words.  */
  gcc_assert (mode != BLKmode || bitsize.is_constant ());
  /* Handle calls that return values in multiple non-contiguous locations.
     The Irix 6 ABI has examples of this.  */
  if (GET_CODE (temp) == PARALLEL)
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
      machine_mode temp_mode = GET_MODE (temp);
      if (temp_mode == BLKmode || temp_mode == VOIDmode)
	temp_mode = smallest_int_mode_for_size (size * BITS_PER_UNIT);
      rtx temp_target = gen_reg_rtx (temp_mode);
      emit_group_store (temp_target, temp, TREE_TYPE (exp), size);
      temp = temp_target;
    }
  /* Handle calls that return BLKmode values in registers.  */
  else if (mode == BLKmode && REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
    {
      rtx temp_target = gen_reg_rtx (GET_MODE (temp));
      copy_blkmode_from_reg (temp_target, temp, TREE_TYPE (exp));
      temp = temp_target;
    }
  /* If the value has aggregate type and an integral mode then, if BITSIZE
     is narrower than this mode and this is for big-endian data, we first
     need to put the value into the low-order bits for store_bit_field,
     except when MODE is BLKmode and BITSIZE larger than the word size
     (see the handling of fields larger than a word in store_bit_field).
     Moreover, the field may be not aligned on a byte boundary; in this
     case, if it has reverse storage order, it needs to be accessed as a
     scalar field with reverse storage order and we must first put the
     value into target order.  */
8231 scalar_int_mode temp_mode
;
8232 if (AGGREGATE_TYPE_P (TREE_TYPE (exp
))
8233 && is_int_mode (GET_MODE (temp
), &temp_mode
))
8235 HOST_WIDE_INT size
= GET_MODE_BITSIZE (temp_mode
);
8237 reverse
= TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (exp
));
8240 temp
= flip_storage_order (temp_mode
, temp
);
8242 gcc_checking_assert (known_le (bitsize
, size
));
8243 if (maybe_lt (bitsize
, size
)
8244 && reverse
? !BYTES_BIG_ENDIAN
: BYTES_BIG_ENDIAN
8245 /* Use of to_constant for BLKmode was checked above. */
8246 && !(mode
== BLKmode
&& bitsize
.to_constant () > BITS_PER_WORD
))
8247 temp
= expand_shift (RSHIFT_EXPR
, temp_mode
, temp
,
8248 size
- bitsize
, NULL_RTX
, 1);
8251 /* Unless MODE is VOIDmode or BLKmode, convert TEMP to MODE. */
8252 if (mode
!= VOIDmode
&& mode
!= BLKmode
8253 && mode
!= TYPE_MODE (TREE_TYPE (exp
)))
8254 temp
= convert_modes (mode
, TYPE_MODE (TREE_TYPE (exp
)), temp
, 1);
8256 /* If the mode of TEMP and TARGET is BLKmode, both must be in memory
8257 and BITPOS must be aligned on a byte boundary. If so, we simply do
8258 a block copy. Likewise for a BLKmode-like TARGET. */
8259 if (GET_MODE (temp
) == BLKmode
8260 && (GET_MODE (target
) == BLKmode
8262 && GET_MODE_CLASS (GET_MODE (target
)) == MODE_INT
8263 && multiple_p (bitpos
, BITS_PER_UNIT
)
8264 && multiple_p (bitsize
, BITS_PER_UNIT
))))
8266 gcc_assert (MEM_P (target
) && MEM_P (temp
));
8267 poly_int64 bytepos
= exact_div (bitpos
, BITS_PER_UNIT
);
8268 poly_int64 bytesize
= bits_to_bytes_round_up (bitsize
);
8270 target
= adjust_address (target
, VOIDmode
, bytepos
);
8271 emit_block_move (target
, temp
,
8272 gen_int_mode (bytesize
, Pmode
),
8278 /* If the mode of TEMP is still BLKmode and BITSIZE not larger than the
8279 word size, we need to load the value (see again store_bit_field). */
8280 if (GET_MODE (temp
) == BLKmode
&& known_le (bitsize
, BITS_PER_WORD
))
8282 temp_mode
= smallest_int_mode_for_size (bitsize
);
8283 temp
= extract_bit_field (temp
, bitsize
, 0, 1, NULL_RTX
, temp_mode
,
8284 temp_mode
, false, NULL
);
8287 /* Store the value in the bitfield. */
8288 gcc_checking_assert (known_ge (bitpos
, 0));
8289 store_bit_field (target
, bitsize
, bitpos
,
8290 bitregion_start
, bitregion_end
,
8291 mode
, temp
, reverse
, false);
8297 /* Now build a reference to just the desired component. */
8298 rtx to_rtx
= adjust_address (target
, mode
,
8299 exact_div (bitpos
, BITS_PER_UNIT
));
8301 if (to_rtx
== target
)
8302 to_rtx
= copy_rtx (to_rtx
);
8304 if (!MEM_KEEP_ALIAS_SET_P (to_rtx
) && MEM_ALIAS_SET (to_rtx
) != 0)
8305 set_mem_alias_set (to_rtx
, alias_set
);
8307 /* Above we avoided using bitfield operations for storing a CONSTRUCTOR
8308 into a target smaller than its type; handle that case now. */
8309 if (TREE_CODE (exp
) == CONSTRUCTOR
&& known_size_p (bitsize
))
8311 poly_int64 bytesize
= exact_div (bitsize
, BITS_PER_UNIT
);
8312 store_constructor (exp
, to_rtx
, 0, bytesize
, reverse
);
8316 return store_expr (exp
, to_rtx
, 0, nontemporal
, reverse
);
/* Given an expression EXP that may be a COMPONENT_REF, a BIT_FIELD_REF,
   an ARRAY_REF, or an ARRAY_RANGE_REF, look for nested operations of these
   codes and find the ultimate containing object, which we return.

   We set *PBITSIZE to the size in bits that we want, *PBITPOS to the
   bit position, *PUNSIGNEDP to the signedness and *PREVERSEP to the
   storage order of the field.
   If the position of the field is variable, we store a tree
   giving the variable offset (in units) in *POFFSET.
   This offset is in addition to the bit position.
   If the position is not variable, we store 0 in *POFFSET.

   If any of the extraction expressions is volatile,
   we store 1 in *PVOLATILEP.  Otherwise we don't change that.

   If the field is a non-BLKmode bit-field, *PMODE is set to VOIDmode.
   Otherwise, it is a mode that can be used to access the field.

   If the field describes a variable-sized object, *PMODE is set to
   BLKmode and *PBITSIZE is set to -1.  An access cannot be made in
   this case, but the address of the object can be found.  */
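/* Illustrative sketch (hypothetical caller, shown only to document the
   interface): for a reference such as  s.y  with  struct S { int x; int y; }
   on a typical 32-bit-int target, the call

     poly_int64 bitsize, bitpos;
     tree offset;
     machine_mode mode;
     int unsignedp, reversep, volatilep = 0;
     tree base = get_inner_reference (ref, &bitsize, &bitpos, &offset,
				      &mode, &unsignedp, &reversep,
				      &volatilep);

   returns the VAR_DECL for  s  as the containing object, with
   bitsize == 32, bitpos == 32, offset == NULL_TREE and mode == SImode.  */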
tree
get_inner_reference (tree exp, poly_int64 *pbitsize,
		     poly_int64 *pbitpos, tree *poffset,
		     machine_mode *pmode, int *punsignedp,
		     int *preversep, int *pvolatilep)
{
8349 machine_mode mode
= VOIDmode
;
8350 bool blkmode_bitfield
= false;
8351 tree offset
= size_zero_node
;
8352 poly_offset_int bit_offset
= 0;
8354 /* First get the mode, signedness, storage order and size. We do this from
8355 just the outermost expression. */
8357 if (TREE_CODE (exp
) == COMPONENT_REF
)
8359 tree field
= TREE_OPERAND (exp
, 1);
8360 size_tree
= DECL_SIZE (field
);
8361 if (flag_strict_volatile_bitfields
> 0
8362 && TREE_THIS_VOLATILE (exp
)
8363 && DECL_BIT_FIELD_TYPE (field
)
8364 && DECL_MODE (field
) != BLKmode
)
8365 /* Volatile bitfields should be accessed in the mode of the
8366 field's type, not the mode computed based on the bit
8368 mode
= TYPE_MODE (DECL_BIT_FIELD_TYPE (field
));
8369 else if (!DECL_BIT_FIELD (field
))
8371 mode
= DECL_MODE (field
);
8372 /* For vector fields re-check the target flags, as DECL_MODE
8373 could have been set with different target flags than
8374 the current function has. */
8375 if (VECTOR_TYPE_P (TREE_TYPE (field
))
8376 && VECTOR_MODE_P (TYPE_MODE_RAW (TREE_TYPE (field
))))
8377 mode
= TYPE_MODE (TREE_TYPE (field
));
8379 else if (DECL_MODE (field
) == BLKmode
)
8380 blkmode_bitfield
= true;
8382 *punsignedp
= DECL_UNSIGNED (field
);
8384 else if (TREE_CODE (exp
) == BIT_FIELD_REF
)
8386 size_tree
= TREE_OPERAND (exp
, 1);
8387 *punsignedp
= (! INTEGRAL_TYPE_P (TREE_TYPE (exp
))
8388 || TYPE_UNSIGNED (TREE_TYPE (exp
)));
8390 /* For vector element types with the correct size of access or for
8391 vector typed accesses use the mode of the access type. */
8392 if ((TREE_CODE (TREE_TYPE (TREE_OPERAND (exp
, 0))) == VECTOR_TYPE
8393 && TREE_TYPE (exp
) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp
, 0)))
8394 && tree_int_cst_equal (size_tree
, TYPE_SIZE (TREE_TYPE (exp
))))
8395 || VECTOR_TYPE_P (TREE_TYPE (exp
)))
8396 mode
= TYPE_MODE (TREE_TYPE (exp
));
8400 mode
= TYPE_MODE (TREE_TYPE (exp
));
8401 *punsignedp
= TYPE_UNSIGNED (TREE_TYPE (exp
));
8403 if (mode
== BLKmode
)
8404 size_tree
= TYPE_SIZE (TREE_TYPE (exp
));
8406 *pbitsize
= GET_MODE_BITSIZE (mode
);
8411 if (! tree_fits_uhwi_p (size_tree
))
8412 mode
= BLKmode
, *pbitsize
= -1;
8414 *pbitsize
= tree_to_uhwi (size_tree
);
8417 *preversep
= reverse_storage_order_for_component_p (exp
);
8419 /* Compute cumulative bit-offset for nested component-refs and array-refs,
8420 and find the ultimate containing object. */
8423 switch (TREE_CODE (exp
))
8426 bit_offset
+= wi::to_poly_offset (TREE_OPERAND (exp
, 2));
8431 tree field
= TREE_OPERAND (exp
, 1);
8432 tree this_offset
= component_ref_field_offset (exp
);
8434 /* If this field hasn't been filled in yet, don't go past it.
8435 This should only happen when folding expressions made during
8436 type construction. */
8437 if (this_offset
== 0)
8440 offset
= size_binop (PLUS_EXPR
, offset
, this_offset
);
8441 bit_offset
+= wi::to_poly_offset (DECL_FIELD_BIT_OFFSET (field
));
8443 /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
8448 case ARRAY_RANGE_REF
:
8450 tree index
= TREE_OPERAND (exp
, 1);
8451 tree low_bound
= array_ref_low_bound (exp
);
8452 tree unit_size
= array_ref_element_size (exp
);
8454 /* We assume all arrays have sizes that are a multiple of a byte.
8455 First subtract the lower bound, if any, in the type of the
8456 index, then convert to sizetype and multiply by the size of
8457 the array element. */
8458 if (! integer_zerop (low_bound
))
8459 index
= fold_build2 (MINUS_EXPR
, TREE_TYPE (index
),
8462 offset
= size_binop (PLUS_EXPR
, offset
,
8463 size_binop (MULT_EXPR
,
8464 fold_convert (sizetype
, index
),
8473 bit_offset
+= *pbitsize
;
8476 case VIEW_CONVERT_EXPR
:
8480 /* Hand back the decl for MEM[&decl, off]. */
8481 if (TREE_CODE (TREE_OPERAND (exp
, 0)) == ADDR_EXPR
)
8483 tree off
= TREE_OPERAND (exp
, 1);
8484 if (!integer_zerop (off
))
8486 poly_offset_int boff
= mem_ref_offset (exp
);
8487 boff
<<= LOG2_BITS_PER_UNIT
;
8490 exp
= TREE_OPERAND (TREE_OPERAND (exp
, 0), 0);
8498 /* If any reference in the chain is volatile, the effect is volatile. */
8499 if (TREE_THIS_VOLATILE (exp
))
8502 exp
= TREE_OPERAND (exp
, 0);
8506 /* If OFFSET is constant, see if we can return the whole thing as a
8507 constant bit position. Make sure to handle overflow during
8509 if (poly_int_tree_p (offset
))
8511 poly_offset_int tem
= wi::sext (wi::to_poly_offset (offset
),
8512 TYPE_PRECISION (sizetype
));
8513 tem
<<= LOG2_BITS_PER_UNIT
;
8515 if (tem
.to_shwi (pbitpos
))
8516 *poffset
= offset
= NULL_TREE
;
8519 /* Otherwise, split it up. */
8522 /* Avoid returning a negative bitpos as this may wreak havoc later. */
8523 if (!bit_offset
.to_shwi (pbitpos
) || maybe_lt (*pbitpos
, 0))
8525 *pbitpos
= num_trailing_bits (bit_offset
.force_shwi ());
8526 poly_offset_int bytes
= bits_to_bytes_round_down (bit_offset
);
8527 offset
= size_binop (PLUS_EXPR
, offset
,
8528 build_int_cst (sizetype
, bytes
.force_shwi ()));
8534 /* We can use BLKmode for a byte-aligned BLKmode bitfield. */
8535 if (mode
== VOIDmode
8537 && multiple_p (*pbitpos
, BITS_PER_UNIT
)
8538 && multiple_p (*pbitsize
, BITS_PER_UNIT
))
/* Alignment in bits the TARGET of an assignment may be assumed to have.  */

static unsigned HOST_WIDE_INT
target_align (const_tree target)
{
  /* We might have a chain of nested references with intermediate misaligning
     bitfields components, so need to recurse to find out.  */

  unsigned HOST_WIDE_INT this_align, outer_align;

  switch (TREE_CODE (target))
    {
    case COMPONENT_REF:
      this_align = DECL_ALIGN (TREE_OPERAND (target, 1));
      outer_align = target_align (TREE_OPERAND (target, 0));
      return MIN (this_align, outer_align);

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      this_align = TYPE_ALIGN (TREE_TYPE (target));
      outer_align = target_align (TREE_OPERAND (target, 0));
      return MIN (this_align, outer_align);

    CASE_CONVERT:
    case NON_LVALUE_EXPR:
    case VIEW_CONVERT_EXPR:
      this_align = TYPE_ALIGN (TREE_TYPE (target));
      outer_align = target_align (TREE_OPERAND (target, 0));
      return MAX (this_align, outer_align);

    default:
      return TYPE_ALIGN (TREE_TYPE (target));
    }
}
/* Given an rtx VALUE that may contain additions and multiplications, return
   an equivalent value that just refers to a register, memory, or constant.
   This is done by generating instructions to perform the arithmetic and
   returning a pseudo-register containing the value.

   The returned value may be a REG, SUBREG, MEM or constant.  */

rtx
force_operand (rtx value, rtx target)
{
  rtx op1, op2;
  /* Use subtarget as the target for operand 0 of a binary operation.  */
  rtx subtarget = get_subtarget (target);
  enum rtx_code code = GET_CODE (value);
8600 /* Check for subreg applied to an expression produced by loop optimizer. */
8602 && !REG_P (SUBREG_REG (value
))
8603 && !MEM_P (SUBREG_REG (value
)))
8606 = simplify_gen_subreg (GET_MODE (value
),
8607 force_reg (GET_MODE (SUBREG_REG (value
)),
8608 force_operand (SUBREG_REG (value
),
8610 GET_MODE (SUBREG_REG (value
)),
8611 SUBREG_BYTE (value
));
8612 code
= GET_CODE (value
);
8615 /* Check for a PIC address load. */
8616 if ((code
== PLUS
|| code
== MINUS
)
8617 && XEXP (value
, 0) == pic_offset_table_rtx
8618 && (GET_CODE (XEXP (value
, 1)) == SYMBOL_REF
8619 || GET_CODE (XEXP (value
, 1)) == LABEL_REF
8620 || GET_CODE (XEXP (value
, 1)) == CONST
))
8623 subtarget
= gen_reg_rtx (GET_MODE (value
));
8624 emit_move_insn (subtarget
, value
);
8628 if (ARITHMETIC_P (value
))
8630 op2
= XEXP (value
, 1);
8631 if (!CONSTANT_P (op2
) && !(REG_P (op2
) && op2
!= subtarget
))
8633 if (code
== MINUS
&& CONST_INT_P (op2
))
8636 op2
= negate_rtx (GET_MODE (value
), op2
);
8639 /* Check for an addition with OP2 a constant integer and our first
8640 operand a PLUS of a virtual register and something else. In that
8641 case, we want to emit the sum of the virtual register and the
8642 constant first and then add the other value. This allows virtual
8643 register instantiation to simply modify the constant rather than
8644 creating another one around this addition. */
8645 if (code
== PLUS
&& CONST_INT_P (op2
)
8646 && GET_CODE (XEXP (value
, 0)) == PLUS
8647 && REG_P (XEXP (XEXP (value
, 0), 0))
8648 && VIRTUAL_REGISTER_P (XEXP (XEXP (value
, 0), 0)))
8650 rtx temp
= expand_simple_binop (GET_MODE (value
), code
,
8651 XEXP (XEXP (value
, 0), 0), op2
,
8652 subtarget
, 0, OPTAB_LIB_WIDEN
);
8653 return expand_simple_binop (GET_MODE (value
), code
, temp
,
8654 force_operand (XEXP (XEXP (value
,
8656 target
, 0, OPTAB_LIB_WIDEN
);
8659 op1
= force_operand (XEXP (value
, 0), subtarget
);
8660 op2
= force_operand (op2
, NULL_RTX
);
8664 return expand_mult (GET_MODE (value
), op1
, op2
, target
, 1);
8666 if (!INTEGRAL_MODE_P (GET_MODE (value
)))
8667 return expand_simple_binop (GET_MODE (value
), code
, op1
, op2
,
8668 target
, 1, OPTAB_LIB_WIDEN
);
8670 return expand_divmod (0,
8671 FLOAT_MODE_P (GET_MODE (value
))
8672 ? RDIV_EXPR
: TRUNC_DIV_EXPR
,
8673 GET_MODE (value
), op1
, op2
, target
, 0);
8675 return expand_divmod (1, TRUNC_MOD_EXPR
, GET_MODE (value
), op1
, op2
,
8678 return expand_divmod (0, TRUNC_DIV_EXPR
, GET_MODE (value
), op1
, op2
,
8681 return expand_divmod (1, TRUNC_MOD_EXPR
, GET_MODE (value
), op1
, op2
,
8684 return expand_simple_binop (GET_MODE (value
), code
, op1
, op2
,
8685 target
, 0, OPTAB_LIB_WIDEN
);
8687 return expand_simple_binop (GET_MODE (value
), code
, op1
, op2
,
8688 target
, 1, OPTAB_LIB_WIDEN
);
8691 if (UNARY_P (value
))
8694 target
= gen_reg_rtx (GET_MODE (value
));
8695 op1
= force_operand (XEXP (value
, 0), NULL_RTX
);
8702 case FLOAT_TRUNCATE
:
8703 convert_move (target
, op1
, code
== ZERO_EXTEND
);
8708 expand_fix (target
, op1
, code
== UNSIGNED_FIX
);
8712 case UNSIGNED_FLOAT
:
8713 expand_float (target
, op1
, code
== UNSIGNED_FLOAT
);
8717 return expand_simple_unop (GET_MODE (value
), code
, op1
, target
, 0);
8721 #ifdef INSN_SCHEDULING
8722 /* On machines that have insn scheduling, we want all memory reference to be
8723 explicit, so we need to deal with such paradoxical SUBREGs. */
8724 if (paradoxical_subreg_p (value
) && MEM_P (SUBREG_REG (value
)))
8726 = simplify_gen_subreg (GET_MODE (value
),
8727 force_reg (GET_MODE (SUBREG_REG (value
)),
8728 force_operand (SUBREG_REG (value
),
8730 GET_MODE (SUBREG_REG (value
)),
8731 SUBREG_BYTE (value
));
/* Subroutine of expand_expr: return true iff there is no way that
   EXP can reference X, which is being modified.  TOP_P is nonzero if this
   call is going to be used to determine whether we need a temporary
   for EXP, as opposed to a recursive call to this function.

   It is always safe for this routine to return false since it merely
   searches for optimization opportunities.  */
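/* Illustrative sketch (hypothetical use): when expanding something like
   x = f (x), a caller can ask  safe_from_p (DECL_RTL (x), call_expr, 1)
   to decide whether the call may be expanded directly into x's rtl or
   whether a temporary is needed first.  A false answer only costs an
   extra temporary; a wrong true answer would be a miscompilation, hence
   the conservative checks below.  */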
bool
safe_from_p (const_rtx x, tree exp, int top_p)
{
8752 /* If EXP has varying size, we MUST use a target since we currently
8753 have no way of allocating temporaries of variable size
8754 (except for arrays that have TYPE_ARRAY_MAX_SIZE set).
8755 So we assume here that something at a higher level has prevented a
8756 clash. This is somewhat bogus, but the best we can do. Only
8757 do this when X is BLKmode and when we are at the top level. */
8758 || (top_p
&& TREE_TYPE (exp
) != 0 && COMPLETE_TYPE_P (TREE_TYPE (exp
))
8759 && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp
))) != INTEGER_CST
8760 && (TREE_CODE (TREE_TYPE (exp
)) != ARRAY_TYPE
8761 || TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp
)) == NULL_TREE
8762 || TREE_CODE (TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp
)))
8764 && GET_MODE (x
) == BLKmode
)
8765 /* If X is in the outgoing argument area, it is always safe. */
8767 && (XEXP (x
, 0) == virtual_outgoing_args_rtx
8768 || (GET_CODE (XEXP (x
, 0)) == PLUS
8769 && XEXP (XEXP (x
, 0), 0) == virtual_outgoing_args_rtx
))))
8772 /* If this is a subreg of a hard register, declare it unsafe, otherwise,
8773 find the underlying pseudo. */
8774 if (GET_CODE (x
) == SUBREG
)
8777 if (REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
)
8781 /* Now look at our tree code and possibly recurse. */
8782 switch (TREE_CODE_CLASS (TREE_CODE (exp
)))
8784 case tcc_declaration
:
8785 exp_rtl
= DECL_RTL_IF_SET (exp
);
8791 case tcc_exceptional
:
8792 if (TREE_CODE (exp
) == TREE_LIST
)
8796 if (TREE_VALUE (exp
) && !safe_from_p (x
, TREE_VALUE (exp
), 0))
8798 exp
= TREE_CHAIN (exp
);
8801 if (TREE_CODE (exp
) != TREE_LIST
)
8802 return safe_from_p (x
, exp
, 0);
8805 else if (TREE_CODE (exp
) == CONSTRUCTOR
)
8807 constructor_elt
*ce
;
8808 unsigned HOST_WIDE_INT idx
;
8810 FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (exp
), idx
, ce
)
8811 if ((ce
->index
!= NULL_TREE
&& !safe_from_p (x
, ce
->index
, 0))
8812 || !safe_from_p (x
, ce
->value
, 0))
8816 else if (TREE_CODE (exp
) == ERROR_MARK
)
8817 return true; /* An already-visited SAVE_EXPR? */
8822 /* The only case we look at here is the DECL_INITIAL inside a
8824 return (TREE_CODE (exp
) != DECL_EXPR
8825 || TREE_CODE (DECL_EXPR_DECL (exp
)) != VAR_DECL
8826 || !DECL_INITIAL (DECL_EXPR_DECL (exp
))
8827 || safe_from_p (x
, DECL_INITIAL (DECL_EXPR_DECL (exp
)), 0));
8830 case tcc_comparison
:
8831 if (!safe_from_p (x
, TREE_OPERAND (exp
, 1), 0))
8836 return safe_from_p (x
, TREE_OPERAND (exp
, 0), 0);
8838 case tcc_expression
:
8841 /* Now do code-specific tests. EXP_RTL is set to any rtx we find in
8842 the expression. If it is set, we conflict iff we are that rtx or
8843 both are in memory. Otherwise, we check all operands of the
8844 expression recursively. */
8846 switch (TREE_CODE (exp
))
8849 /* If the operand is static or we are static, we can't conflict.
8850 Likewise if we don't conflict with the operand at all. */
8851 if (staticp (TREE_OPERAND (exp
, 0))
8852 || TREE_STATIC (exp
)
8853 || safe_from_p (x
, TREE_OPERAND (exp
, 0), 0))
8856 /* Otherwise, the only way this can conflict is if we are taking
8857 the address of a DECL a that address if part of X, which is
8859 exp
= TREE_OPERAND (exp
, 0);
8862 if (!DECL_RTL_SET_P (exp
)
8863 || !MEM_P (DECL_RTL (exp
)))
8866 exp_rtl
= XEXP (DECL_RTL (exp
), 0);
8872 && alias_sets_conflict_p (MEM_ALIAS_SET (x
),
8873 get_alias_set (exp
)))
8878 /* Assume that the call will clobber all hard registers and
8880 if ((REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
)
8885 case WITH_CLEANUP_EXPR
:
8886 case CLEANUP_POINT_EXPR
:
8887 /* Lowered by gimplify.cc. */
8891 return safe_from_p (x
, TREE_OPERAND (exp
, 0), 0);
8897 /* If we have an rtx, we do not need to scan our operands. */
8901 nops
= TREE_OPERAND_LENGTH (exp
);
8902 for (i
= 0; i
< nops
; i
++)
8903 if (TREE_OPERAND (exp
, i
) != 0
8904 && ! safe_from_p (x
, TREE_OPERAND (exp
, i
), 0))
8910 /* Should never get a type here. */
8914 /* If we have an rtl, find any enclosed object. Then see if we conflict
8918 if (GET_CODE (exp_rtl
) == SUBREG
)
8920 exp_rtl
= SUBREG_REG (exp_rtl
);
8922 && REGNO (exp_rtl
) < FIRST_PSEUDO_REGISTER
)
8926 /* If the rtl is X, then it is not safe. Otherwise, it is unless both
8927 are memory and they conflict. */
8928 return ! (rtx_equal_p (x
, exp_rtl
)
8929 || (MEM_P (x
) && MEM_P (exp_rtl
)
8930 && true_dependence (exp_rtl
, VOIDmode
, x
)));
8933 /* If we reach here, it is safe. */
/* Return the highest power of two that EXP is known to be a multiple of.
   This is used in updating alignment of MEMs in array references.  */

unsigned HOST_WIDE_INT
highest_pow2_factor (const_tree exp)
{
  unsigned HOST_WIDE_INT ret;
  int trailing_zeros = tree_ctz (exp);
  if (trailing_zeros >= HOST_BITS_PER_WIDE_INT)
    return BIGGEST_ALIGNMENT;
  ret = HOST_WIDE_INT_1U << trailing_zeros;
  if (ret > BIGGEST_ALIGNMENT)
    return BIGGEST_ALIGNMENT;
  return ret;
}
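/* Illustrative examples: for EXP equal to the constant 48 this returns 16
   (48 == 16 * 3); for an expression such as  i * 8  it returns 8; and for
   an expression with no known trailing zero bits it returns 1.  The result
   is capped at BIGGEST_ALIGNMENT.  */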
/* Similar, except that the alignment requirements of TARGET are
   taken into account.  Assume it is at least as aligned as its
   type, unless it is a COMPONENT_REF in which case the layout of
   the structure gives the alignment.  */

static unsigned HOST_WIDE_INT
highest_pow2_factor_for_target (const_tree target, const_tree exp)
{
  unsigned HOST_WIDE_INT talign = target_align (target) / BITS_PER_UNIT;
  unsigned HOST_WIDE_INT factor = highest_pow2_factor (exp);

  return MAX (factor, talign);
}
/* Convert the tree comparison code TCODE to the rtl one where the
   signedness is UNSIGNEDP.  */

static enum rtx_code
convert_tree_comp_to_rtx (enum tree_code tcode, int unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;
    case UNORDERED_EXPR:
/* Subroutine of expand_expr.  Expand the two operands of a binary
   expression EXP0 and EXP1 placing the results in OP0 and OP1.
   The value may be stored in TARGET if TARGET is nonzero.  The
   MODIFIER argument is as documented by expand_expr.  */

void
expand_operands (tree exp0, tree exp1, rtx target, rtx *op0, rtx *op1,
		 enum expand_modifier modifier)
{
  if (! safe_from_p (target, exp1, 1))
    target = 0;
  if (operand_equal_p (exp0, exp1, 0))
    {
      *op0 = expand_expr (exp0, target, VOIDmode, modifier);
      *op1 = copy_rtx (*op0);
    }
  else
    {
      *op0 = expand_expr (exp0, target, VOIDmode, modifier);
      *op1 = expand_expr (exp1, NULL_RTX, VOIDmode, modifier);
    }
}
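/* Illustrative sketch: a typical caller in the binary-operation paths of
   this file does

     rtx op0, op1;
     expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);

   after which OP0 and OP1 hold the expanded operands and can be fed to
   expand_binop or a similar routine.  */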
/* Return a MEM that contains constant EXP.  DEFER is as for
   output_constant_def and MODIFIER is as for expand_expr.  */

static rtx
expand_expr_constant (tree exp, int defer, enum expand_modifier modifier)
{
  rtx mem;

  mem = output_constant_def (exp, defer);
  if (modifier != EXPAND_INITIALIZER)
    mem = use_anchored_address (mem);

  return mem;
}
/* A subroutine of expand_expr_addr_expr.  Evaluate the address of EXP.
   The TARGET, TMODE and MODIFIER arguments are as for expand_expr.  */

static rtx
expand_expr_addr_expr_1 (tree exp, rtx target, scalar_int_mode tmode,
			 enum expand_modifier modifier, addr_space_t as)
{
9071 rtx result
, subtarget
;
9073 poly_int64 bitsize
, bitpos
;
9074 int unsignedp
, reversep
, volatilep
= 0;
9077 /* If we are taking the address of a constant and are at the top level,
9078 we have to use output_constant_def since we can't call force_const_mem
9080 /* ??? This should be considered a front-end bug. We should not be
9081 generating ADDR_EXPR of something that isn't an LVALUE. The only
9082 exception here is STRING_CST. */
9083 if (CONSTANT_CLASS_P (exp
))
9085 result
= XEXP (expand_expr_constant (exp
, 0, modifier
), 0);
9086 if (modifier
< EXPAND_SUM
)
9087 result
= force_operand (result
, target
);
9091 /* Everything must be something allowed by is_gimple_addressable. */
9092 switch (TREE_CODE (exp
))
9095 /* This case will happen via recursion for &a->b. */
9096 return expand_expr (TREE_OPERAND (exp
, 0), target
, tmode
, modifier
);
9100 tree tem
= TREE_OPERAND (exp
, 0);
9101 if (!integer_zerop (TREE_OPERAND (exp
, 1)))
9102 tem
= fold_build_pointer_plus (tem
, TREE_OPERAND (exp
, 1));
9103 return expand_expr (tem
, target
, tmode
, modifier
);
9106 case TARGET_MEM_REF
:
9107 return addr_for_mem_ref (exp
, as
, true);
9110 /* Expand the initializer like constants above. */
9111 result
= XEXP (expand_expr_constant (DECL_INITIAL (exp
),
9113 if (modifier
< EXPAND_SUM
)
9114 result
= force_operand (result
, target
);
9118 /* The real part of the complex number is always first, therefore
9119 the address is the same as the address of the parent object. */
9122 inner
= TREE_OPERAND (exp
, 0);
9126 /* The imaginary part of the complex number is always second.
9127 The expression is therefore always offset by the size of the
9130 bitpos
= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (exp
)));
9131 inner
= TREE_OPERAND (exp
, 0);
9134 case COMPOUND_LITERAL_EXPR
:
9135 /* Allow COMPOUND_LITERAL_EXPR in initializers or coming from
9136 initializers, if e.g. rtl_for_decl_init is called on DECL_INITIAL
9137 with COMPOUND_LITERAL_EXPRs in it, or ARRAY_REF on a const static
9138 array with address of COMPOUND_LITERAL_EXPR in DECL_INITIAL;
9139 the initializers aren't gimplified. */
9140 if (COMPOUND_LITERAL_EXPR_DECL (exp
)
9141 && is_global_var (COMPOUND_LITERAL_EXPR_DECL (exp
)))
9142 return expand_expr_addr_expr_1 (COMPOUND_LITERAL_EXPR_DECL (exp
),
9143 target
, tmode
, modifier
, as
);
9146 /* If the object is a DECL, then expand it for its rtl. Don't bypass
9147 expand_expr, as that can have various side effects; LABEL_DECLs for
9148 example, may not have their DECL_RTL set yet. Expand the rtl of
9149 CONSTRUCTORs too, which should yield a memory reference for the
9150 constructor's contents. Assume language specific tree nodes can
9151 be expanded in some interesting way. */
9152 gcc_assert (TREE_CODE (exp
) < LAST_AND_UNUSED_TREE_CODE
);
9154 || TREE_CODE (exp
) == CONSTRUCTOR
9155 || TREE_CODE (exp
) == COMPOUND_LITERAL_EXPR
)
9157 result
= expand_expr (exp
, target
, tmode
,
9158 modifier
== EXPAND_INITIALIZER
9159 ? EXPAND_INITIALIZER
: EXPAND_CONST_ADDRESS
);
9161 /* If the DECL isn't in memory, then the DECL wasn't properly
9162 marked TREE_ADDRESSABLE, which will be either a front-end
9163 or a tree optimizer bug. */
9165 gcc_assert (MEM_P (result
));
9166 result
= XEXP (result
, 0);
9168 /* ??? Is this needed anymore? */
9170 TREE_USED (exp
) = 1;
9172 if (modifier
!= EXPAND_INITIALIZER
9173 && modifier
!= EXPAND_CONST_ADDRESS
9174 && modifier
!= EXPAND_SUM
)
9175 result
= force_operand (result
, target
);
9179 /* Pass FALSE as the last argument to get_inner_reference although
9180 we are expanding to RTL. The rationale is that we know how to
9181 handle "aligning nodes" here: we can just bypass them because
9182 they won't change the final object whose address will be returned
9183 (they actually exist only for that purpose). */
9184 inner
= get_inner_reference (exp
, &bitsize
, &bitpos
, &offset
, &mode1
,
9185 &unsignedp
, &reversep
, &volatilep
);
9189 /* We must have made progress. */
9190 gcc_assert (inner
!= exp
);
9192 subtarget
= offset
|| maybe_ne (bitpos
, 0) ? NULL_RTX
: target
;
9193 /* For VIEW_CONVERT_EXPR, where the outer alignment is bigger than
9194 inner alignment, force the inner to be sufficiently aligned. */
9195 if (CONSTANT_CLASS_P (inner
)
9196 && TYPE_ALIGN (TREE_TYPE (inner
)) < TYPE_ALIGN (TREE_TYPE (exp
)))
9198 inner
= copy_node (inner
);
9199 TREE_TYPE (inner
) = copy_node (TREE_TYPE (inner
));
9200 SET_TYPE_ALIGN (TREE_TYPE (inner
), TYPE_ALIGN (TREE_TYPE (exp
)));
9201 TYPE_USER_ALIGN (TREE_TYPE (inner
)) = 1;
9203 result
= expand_expr_addr_expr_1 (inner
, subtarget
, tmode
, modifier
, as
);
9209 if (modifier
!= EXPAND_NORMAL
)
9210 result
= force_operand (result
, NULL
);
9211 tmp
= expand_expr (offset
, NULL_RTX
, tmode
,
9212 modifier
== EXPAND_INITIALIZER
9213 ? EXPAND_INITIALIZER
: EXPAND_NORMAL
);
9215 /* expand_expr is allowed to return an object in a mode other
9216 than TMODE. If it did, we need to convert. */
9217 if (GET_MODE (tmp
) != VOIDmode
&& tmode
!= GET_MODE (tmp
))
9218 tmp
= convert_modes (tmode
, GET_MODE (tmp
),
9219 tmp
, TYPE_UNSIGNED (TREE_TYPE (offset
)));
9220 result
= convert_memory_address_addr_space (tmode
, result
, as
);
9221 tmp
= convert_memory_address_addr_space (tmode
, tmp
, as
);
9223 if (modifier
== EXPAND_SUM
|| modifier
== EXPAND_INITIALIZER
)
9224 result
= simplify_gen_binary (PLUS
, tmode
, result
, tmp
);
9227 subtarget
= maybe_ne (bitpos
, 0) ? NULL_RTX
: target
;
9228 result
= expand_simple_binop (tmode
, PLUS
, result
, tmp
, subtarget
,
9229 1, OPTAB_LIB_WIDEN
);
9233 if (maybe_ne (bitpos
, 0))
9235 /* Someone beforehand should have rejected taking the address
9236 of an object that isn't byte-aligned. */
9237 poly_int64 bytepos
= exact_div (bitpos
, BITS_PER_UNIT
);
9238 result
= convert_memory_address_addr_space (tmode
, result
, as
);
9239 result
= plus_constant (tmode
, result
, bytepos
);
9240 if (modifier
< EXPAND_SUM
)
9241 result
= force_operand (result
, target
);
/* A subroutine of expand_expr.  Evaluate EXP, which is an ADDR_EXPR.
   The TARGET, TMODE and MODIFIER arguments are as for expand_expr.  */

static rtx
expand_expr_addr_expr (tree exp, rtx target, machine_mode tmode,
		       enum expand_modifier modifier)
{
  addr_space_t as = ADDR_SPACE_GENERIC;
  scalar_int_mode address_mode = Pmode;
  scalar_int_mode pointer_mode = ptr_mode;
9260 /* Target mode of VOIDmode says "whatever's natural". */
9261 if (tmode
== VOIDmode
)
9262 tmode
= TYPE_MODE (TREE_TYPE (exp
));
9264 if (POINTER_TYPE_P (TREE_TYPE (exp
)))
9266 as
= TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp
)));
9267 address_mode
= targetm
.addr_space
.address_mode (as
);
9268 pointer_mode
= targetm
.addr_space
.pointer_mode (as
);
9271 /* We can get called with some Weird Things if the user does silliness
9272 like "(short) &a". In that case, convert_memory_address won't do
9273 the right thing, so ignore the given target mode. */
9274 scalar_int_mode new_tmode
= (tmode
== pointer_mode
9278 result
= expand_expr_addr_expr_1 (TREE_OPERAND (exp
, 0), target
,
9279 new_tmode
, modifier
, as
);
9281 /* Despite expand_expr claims concerning ignoring TMODE when not
9282 strictly convenient, stuff breaks if we don't honor it. Note
9283 that combined with the above, we only do this for pointer modes. */
9284 rmode
= GET_MODE (result
);
9285 if (rmode
== VOIDmode
)
9287 if (rmode
!= new_tmode
)
9288 result
= convert_memory_address_addr_space (new_tmode
, result
, as
);
/* Generate code for computing CONSTRUCTOR EXP.
   An rtx for the computed value is returned.  If AVOID_TEMP_MEM
   is TRUE, instead of creating a temporary variable in memory
   NULL is returned and the caller needs to handle it differently.  */

static rtx
expand_constructor (tree exp, rtx target, enum expand_modifier modifier,
		    bool avoid_temp_mem)
{
  tree type = TREE_TYPE (exp);
  machine_mode mode = TYPE_MODE (type);
9305 /* Try to avoid creating a temporary at all. This is possible
9306 if all of the initializer is zero.
9307 FIXME: try to handle all [0..255] initializers we can handle
9309 if (TREE_STATIC (exp
)
9310 && !TREE_ADDRESSABLE (exp
)
9311 && target
!= 0 && mode
== BLKmode
9312 && all_zeros_p (exp
))
9314 clear_storage (target
, expr_size (exp
), BLOCK_OP_NORMAL
);
9318 /* All elts simple constants => refer to a constant in memory. But
9319 if this is a non-BLKmode mode, let it store a field at a time
9320 since that should make a CONST_INT, CONST_WIDE_INT or
9321 CONST_DOUBLE when we fold. Likewise, if we have a target we can
9322 use, it is best to store directly into the target unless the type
9323 is large enough that memcpy will be used. If we are making an
9324 initializer and all operands are constant, put it in memory as
9327 FIXME: Avoid trying to fill vector constructors piece-meal.
9328 Output them with output_constant_def below unless we're sure
9329 they're zeros. This should go away when vector initializers
9330 are treated like VECTOR_CST instead of arrays. */
9331 if ((TREE_STATIC (exp
)
9332 && ((mode
== BLKmode
9333 && ! (target
!= 0 && safe_from_p (target
, exp
, 1)))
9334 || TREE_ADDRESSABLE (exp
)
9335 || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
))
9336 && (! can_move_by_pieces
9337 (tree_to_uhwi (TYPE_SIZE_UNIT (type
)),
9339 && ! mostly_zeros_p (exp
))))
9340 || ((modifier
== EXPAND_INITIALIZER
|| modifier
== EXPAND_CONST_ADDRESS
)
9341 && TREE_CONSTANT (exp
)))
9348 constructor
= expand_expr_constant (exp
, 1, modifier
);
9350 if (modifier
!= EXPAND_CONST_ADDRESS
9351 && modifier
!= EXPAND_INITIALIZER
9352 && modifier
!= EXPAND_SUM
)
9353 constructor
= validize_mem (constructor
);
9358 /* If the CTOR is available in static storage and not mostly
9359 zeros and we can move it by pieces prefer to do so since
9360 that's usually more efficient than performing a series of
9361 stores from immediates. */
9363 && TREE_STATIC (exp
)
9364 && TREE_CONSTANT (exp
)
9365 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
))
9366 && can_move_by_pieces (tree_to_uhwi (TYPE_SIZE_UNIT (type
)),
9368 && ! mostly_zeros_p (exp
))
9371 /* Handle calls that pass values in multiple non-contiguous
9372 locations. The Irix 6 ABI has examples of this. */
9373 if (target
== 0 || ! safe_from_p (target
, exp
, 1)
9374 || GET_CODE (target
) == PARALLEL
|| modifier
== EXPAND_STACK_PARM
9375 /* Also make a temporary if the store is to volatile memory, to
9376 avoid individual accesses to aggregate members. */
9377 || (GET_CODE (target
) == MEM
9378 && MEM_VOLATILE_P (target
)
9379 && !TREE_ADDRESSABLE (TREE_TYPE (exp
))))
9384 target
= assign_temp (type
, TREE_ADDRESSABLE (exp
), 1);
9387 store_constructor (exp
, target
, 0, int_expr_size (exp
), false);
/* expand_expr: generate code for computing expression EXP.
   An rtx for the computed value is returned.  The value is never null.
   In the case of a void EXP, const0_rtx is returned.

   The value may be stored in TARGET if TARGET is nonzero.
   TARGET is just a suggestion; callers must assume that
   the rtx returned may not be the same as TARGET.

   If TARGET is CONST0_RTX, it means that the value will be ignored.

   If TMODE is not VOIDmode, it suggests generating the
   result in mode TMODE.  But this is done only when convenient.
   Otherwise, TMODE is ignored and the value generated in its natural mode.
   TMODE is just a suggestion; callers must assume that
   the rtx returned may not have mode TMODE.

   Note that TARGET may have neither TMODE nor MODE.  In that case, it
   probably will not be used.

   If MODIFIER is EXPAND_SUM then when EXP is an addition
   we can return an rtx of the form (MULT (REG ...) (CONST_INT ...))
   or a nest of (PLUS ...) and (MINUS ...) where the terms are
   products as above, or REG or MEM, or constant.
   Ordinarily in such cases we would output mul or add instructions
   and then return a pseudo reg containing the sum.

   EXPAND_INITIALIZER is much like EXPAND_SUM except that
   it also marks a label as absolutely required (it can't be dead).
   It also makes a ZERO_EXTEND or SIGN_EXTEND instead of emitting extend insns.
   This is used for outputting expressions used in initializers.

   EXPAND_CONST_ADDRESS says that it is okay to return a MEM
   with a constant address even if that address is not normally legitimate.
   EXPAND_INITIALIZER and EXPAND_SUM also have this effect.

   EXPAND_STACK_PARM is used when expanding to a TARGET on the stack for
   a call parameter.  Such targets require special care as we haven't yet
   marked TARGET so that it's safe from being trashed by libcalls.  We
   don't want to use TARGET for anything but the final result;
   intermediate values must go elsewhere.  Additionally, calls to
   emit_block_move will be flagged with BLOCK_OP_CALL_PARM.

   If EXP is a VAR_DECL whose DECL_RTL was a MEM with an invalid
   address, and ALT_RTL is non-NULL, then *ALT_RTL is set to the
   DECL_RTL of the VAR_DECL.  *ALT_RTL is also set if EXP is a
   COMPOUND_EXPR whose second argument is such a VAR_DECL, and so on
   recursively.

   If the result can be stored at TARGET, and ALT_RTL is non-NULL,
   then *ALT_RTL is set to TARGET (before legitimization).

   If INNER_REFERENCE_P is true, we are expanding an inner reference.
   In this case, we don't adjust a returned MEM rtx that wouldn't be
   sufficiently aligned for its mode; instead, it's up to the caller
   to deal with it afterwards.  This is used to make sure that unaligned
   base objects for which out-of-bounds accesses are supported, for
   example record types with trailing arrays, aren't realigned behind
   the back of the caller.
   The normal operating mode is to pass FALSE for this parameter.  */
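/* Illustrative sketch (wrappers assumed from expr.h, not defined here):
   most callers go through the inline helpers, e.g.

     rtx x = expand_expr (exp, target, mode, EXPAND_NORMAL);
     rtx y = expand_normal (exp);

   where expand_normal passes a null target, VOIDmode and EXPAND_NORMAL;
   both funnel into expand_expr_real below.  */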
rtx
expand_expr_real (tree exp, rtx target, machine_mode tmode,
		  enum expand_modifier modifier, rtx *alt_rtl,
		  bool inner_reference_p)
{
  rtx ret;
9458 /* Handle ERROR_MARK before anybody tries to access its type. */
9459 if (TREE_CODE (exp
) == ERROR_MARK
9460 || (TREE_CODE (TREE_TYPE (exp
)) == ERROR_MARK
))
9462 ret
= CONST0_RTX (tmode
);
9463 return ret
? ret
: const0_rtx
;
9466 ret
= expand_expr_real_1 (exp
, target
, tmode
, modifier
, alt_rtl
,
/* Try to expand the conditional expression which is represented by
   TREEOP0 ? TREEOP1 : TREEOP2 using conditional moves.  If it succeeds
   return the rtl reg which represents the result.  Otherwise return
   NULL_RTX.  */

static rtx
expand_cond_expr_using_cmove (tree treeop0 ATTRIBUTE_UNUSED,
			      tree treeop1 ATTRIBUTE_UNUSED,
			      tree treeop2 ATTRIBUTE_UNUSED)
9482 rtx op00
, op01
, op1
, op2
;
9483 enum rtx_code comparison_code
;
9484 machine_mode comparison_mode
;
9487 tree type
= TREE_TYPE (treeop1
);
9488 int unsignedp
= TYPE_UNSIGNED (type
);
9489 machine_mode mode
= TYPE_MODE (type
);
9490 machine_mode orig_mode
= mode
;
9491 static bool expanding_cond_expr_using_cmove
= false;
  /* Conditional move expansion can end up TERing two operands which,
     when recursively hitting conditional expressions, can result in
     exponential behavior if the cmove expansion ultimately fails.
     It's hardly profitable to TER a cmove into a cmove so avoid doing
     that by failing early if we end up recursing.  */
9498 if (expanding_cond_expr_using_cmove
)
9501 /* If we cannot do a conditional move on the mode, try doing it
9502 with the promoted mode. */
9503 if (!can_conditionally_move_p (mode
))
9505 mode
= promote_mode (type
, mode
, &unsignedp
);
9506 if (!can_conditionally_move_p (mode
))
9508 temp
= assign_temp (type
, 0, 0); /* Use promoted mode for temp. */
9511 temp
= assign_temp (type
, 0, 1);
9513 expanding_cond_expr_using_cmove
= true;
9515 expand_operands (treeop1
, treeop2
,
9516 mode
== orig_mode
? temp
: NULL_RTX
, &op1
, &op2
,
9519 if (TREE_CODE (treeop0
) == SSA_NAME
9520 && (srcstmt
= get_def_for_expr_class (treeop0
, tcc_comparison
)))
9522 type
= TREE_TYPE (gimple_assign_rhs1 (srcstmt
));
9523 enum tree_code cmpcode
= gimple_assign_rhs_code (srcstmt
);
9524 op00
= expand_normal (gimple_assign_rhs1 (srcstmt
));
9525 op01
= expand_normal (gimple_assign_rhs2 (srcstmt
));
9526 comparison_mode
= TYPE_MODE (type
);
9527 unsignedp
= TYPE_UNSIGNED (type
);
9528 comparison_code
= convert_tree_comp_to_rtx (cmpcode
, unsignedp
);
9530 else if (COMPARISON_CLASS_P (treeop0
))
9532 type
= TREE_TYPE (TREE_OPERAND (treeop0
, 0));
9533 enum tree_code cmpcode
= TREE_CODE (treeop0
);
9534 op00
= expand_normal (TREE_OPERAND (treeop0
, 0));
9535 op01
= expand_normal (TREE_OPERAND (treeop0
, 1));
9536 unsignedp
= TYPE_UNSIGNED (type
);
9537 comparison_mode
= TYPE_MODE (type
);
9538 comparison_code
= convert_tree_comp_to_rtx (cmpcode
, unsignedp
);
9542 op00
= expand_normal (treeop0
);
9544 comparison_code
= NE
;
9545 comparison_mode
= GET_MODE (op00
);
9546 if (comparison_mode
== VOIDmode
)
9547 comparison_mode
= TYPE_MODE (TREE_TYPE (treeop0
));
9549 expanding_cond_expr_using_cmove
= false;
9551 if (GET_MODE (op1
) != mode
)
9552 op1
= gen_lowpart (mode
, op1
);
9554 if (GET_MODE (op2
) != mode
)
9555 op2
= gen_lowpart (mode
, op2
);
9557 /* Try to emit the conditional move. */
9558 insn
= emit_conditional_move (temp
,
9559 { comparison_code
, op00
, op01
,
9564 /* If we could do the conditional move, emit the sequence,
9568 rtx_insn
*seq
= get_insns ();
9571 return convert_modes (orig_mode
, mode
, temp
, 0);
9574 /* Otherwise discard the sequence and fall back to code with
/* A helper function for expand_expr_real_2 to be used with a
   misaligned mem_ref TEMP.  Assume an unsigned type if UNSIGNEDP
   is nonzero, with alignment ALIGN in bits.
   Store the value at TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   If the result can be stored at TARGET, and ALT_RTL is non-NULL,
   then *ALT_RTL is set to TARGET (before legitimization).  */

static rtx
expand_misaligned_mem_ref (rtx temp, machine_mode mode, int unsignedp,
			   unsigned int align, rtx target, rtx *alt_rtl)
{
  enum insn_code icode;

  if ((icode = optab_handler (movmisalign_optab, mode))
      != CODE_FOR_nothing)
    {
      class expand_operand ops[2];

      /* We've already validated the memory, and we're creating a
	 new pseudo destination.  The predicates really can't fail,
	 nor can the generator.  */
      create_output_operand (&ops[0], NULL_RTX, mode);
      create_fixed_operand (&ops[1], temp);
      expand_insn (icode, 2, ops);
      temp = ops[0].value;
    }
  else if (targetm.slow_unaligned_access (mode, align))
    temp = extract_bit_field (temp, GET_MODE_BITSIZE (mode),
			      0, unsignedp, target,
			      mode, mode, false, alt_rtl);
  return temp;
}
/* Helper function of expand_expr_2, expand a division or modulo.
   op0 and op1 should be already expanded treeop0 and treeop1, using
   expand_operands.  */
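/* Illustrative example: for  x / 4  with  x  known non-negative, the
   unsigned expansion is a single logical shift while the signed one needs
   an extra fixup for negative dividends, so the cost comparison in this
   function normally picks the unsigned sequence.  */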
static rtx
expand_expr_divmod (tree_code code, machine_mode mode, tree treeop0,
		    tree treeop1, rtx op0, rtx op1, rtx target, int unsignedp)
{
9622 bool mod_p
= (code
== TRUNC_MOD_EXPR
|| code
== FLOOR_MOD_EXPR
9623 || code
== CEIL_MOD_EXPR
|| code
== ROUND_MOD_EXPR
);
9624 if (SCALAR_INT_MODE_P (mode
)
9626 && get_range_pos_neg (treeop0
) == 1
9627 && get_range_pos_neg (treeop1
) == 1)
9629 /* If both arguments are known to be positive when interpreted
9630 as signed, we can expand it as both signed and unsigned
9631 division or modulo. Choose the cheaper sequence in that case. */
9632 bool speed_p
= optimize_insn_for_speed_p ();
9633 do_pending_stack_adjust ();
9635 rtx uns_ret
= expand_divmod (mod_p
, code
, mode
, op0
, op1
, target
, 1);
9636 rtx_insn
*uns_insns
= get_insns ();
9639 rtx sgn_ret
= expand_divmod (mod_p
, code
, mode
, op0
, op1
, target
, 0);
9640 rtx_insn
*sgn_insns
= get_insns ();
9642 unsigned uns_cost
= seq_cost (uns_insns
, speed_p
);
9643 unsigned sgn_cost
= seq_cost (sgn_insns
, speed_p
);
      /* If costs are the same then use as tie breaker the other
	 factor.  */
      if (uns_cost == sgn_cost)
9649 uns_cost
= seq_cost (uns_insns
, !speed_p
);
9650 sgn_cost
= seq_cost (sgn_insns
, !speed_p
);
9653 if (uns_cost
< sgn_cost
|| (uns_cost
== sgn_cost
&& unsignedp
))
9655 emit_insn (uns_insns
);
9658 emit_insn (sgn_insns
);
9661 return expand_divmod (mod_p
, code
, mode
, op0
, op1
, target
, unsignedp
);
9665 expand_expr_real_2 (const_sepops ops
, rtx target
, machine_mode tmode
,
9666 enum expand_modifier modifier
)
9668 rtx op0
, op1
, op2
, temp
;
9669 rtx_code_label
*lab
;
9673 scalar_int_mode int_mode
;
9674 enum tree_code code
= ops
->code
;
9676 rtx subtarget
, original_target
;
9678 bool reduce_bit_field
;
9679 location_t loc
= ops
->location
;
9680 tree treeop0
, treeop1
, treeop2
;
9681 #define REDUCE_BIT_FIELD(expr) (reduce_bit_field \
9682 ? reduce_to_bit_field_precision ((expr), \
9688 mode
= TYPE_MODE (type
);
9689 unsignedp
= TYPE_UNSIGNED (type
);
9695 /* We should be called only on simple (binary or unary) expressions,
9696 exactly those that are valid in gimple expressions that aren't
9697 GIMPLE_SINGLE_RHS (or invalid). */
9698 gcc_assert (get_gimple_rhs_class (code
) == GIMPLE_UNARY_RHS
9699 || get_gimple_rhs_class (code
) == GIMPLE_BINARY_RHS
9700 || get_gimple_rhs_class (code
) == GIMPLE_TERNARY_RHS
);
9702 ignore
= (target
== const0_rtx
9703 || ((CONVERT_EXPR_CODE_P (code
)
9704 || code
== COND_EXPR
|| code
== VIEW_CONVERT_EXPR
)
9705 && TREE_CODE (type
) == VOID_TYPE
));
9707 /* We should be called only if we need the result. */
9708 gcc_assert (!ignore
);
9710 /* An operation in what may be a bit-field type needs the
9711 result to be reduced to the precision of the bit-field type,
9712 which is narrower than that of the type's mode. */
9713 reduce_bit_field
= (INTEGRAL_TYPE_P (type
)
9714 && !type_has_mode_precision_p (type
));
9716 if (reduce_bit_field
9717 && (modifier
== EXPAND_STACK_PARM
9718 || (target
&& GET_MODE (target
) != mode
)))
9721 /* Use subtarget as the target for operand 0 of a binary operation. */
9722 subtarget
= get_subtarget (target
);
9723 original_target
= target
;
9727 case NON_LVALUE_EXPR
:
9730 if (treeop0
== error_mark_node
)
9733 if (TREE_CODE (type
) == UNION_TYPE
)
9735 tree valtype
= TREE_TYPE (treeop0
);
9737 /* If both input and output are BLKmode, this conversion isn't doing
9738 anything except possibly changing memory attribute. */
9739 if (mode
== BLKmode
&& TYPE_MODE (valtype
) == BLKmode
)
9741 rtx result
= expand_expr (treeop0
, target
, tmode
,
9744 result
= copy_rtx (result
);
9745 set_mem_attributes (result
, type
, 0);
9751 if (TYPE_MODE (type
) != BLKmode
)
9752 target
= gen_reg_rtx (TYPE_MODE (type
));
9754 target
= assign_temp (type
, 1, 1);
9758 /* Store data into beginning of memory target. */
9759 store_expr (treeop0
,
9760 adjust_address (target
, TYPE_MODE (valtype
), 0),
9761 modifier
== EXPAND_STACK_PARM
,
9762 false, TYPE_REVERSE_STORAGE_ORDER (type
));
9766 gcc_assert (REG_P (target
)
9767 && !TYPE_REVERSE_STORAGE_ORDER (type
));
9769 /* Store this field into a union of the proper type. */
9770 poly_uint64 op0_size
9771 = tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (treeop0
)));
9772 poly_uint64 union_size
= GET_MODE_BITSIZE (mode
);
9773 store_field (target
,
9774 /* The conversion must be constructed so that
9775 we know at compile time how many bits
9777 ordered_min (op0_size
, union_size
),
9778 0, 0, 0, TYPE_MODE (valtype
), treeop0
, 0,
9782 /* Return the entire union. */
9786 if (mode
== TYPE_MODE (TREE_TYPE (treeop0
)))
9788 op0
= expand_expr (treeop0
, target
, VOIDmode
,
9791 return REDUCE_BIT_FIELD (op0
);
9794 op0
= expand_expr (treeop0
, NULL_RTX
, mode
,
9795 modifier
== EXPAND_SUM
? EXPAND_NORMAL
: modifier
);
9796 if (GET_MODE (op0
) == mode
)
9799 /* If OP0 is a constant, just convert it into the proper mode. */
9800 else if (CONSTANT_P (op0
))
9802 tree inner_type
= TREE_TYPE (treeop0
);
9803 machine_mode inner_mode
= GET_MODE (op0
);
9805 if (inner_mode
== VOIDmode
)
9806 inner_mode
= TYPE_MODE (inner_type
);
9808 if (modifier
== EXPAND_INITIALIZER
)
9809 op0
= force_lowpart_subreg (mode
, op0
, inner_mode
);
9811 op0
= convert_modes (mode
, inner_mode
, op0
,
9812 TYPE_UNSIGNED (inner_type
));
9815 else if (modifier
== EXPAND_INITIALIZER
)
9816 op0
= gen_rtx_fmt_e (TYPE_UNSIGNED (TREE_TYPE (treeop0
))
9817 ? ZERO_EXTEND
: SIGN_EXTEND
, mode
, op0
);
9819 else if (target
== 0)
9820 op0
= convert_to_mode (mode
, op0
,
9821 TYPE_UNSIGNED (TREE_TYPE
9825 convert_move (target
, op0
,
9826 TYPE_UNSIGNED (TREE_TYPE (treeop0
)));
9830 return REDUCE_BIT_FIELD (op0
);
9832 case ADDR_SPACE_CONVERT_EXPR
:
9834 tree treeop0_type
= TREE_TYPE (treeop0
);
9836 gcc_assert (POINTER_TYPE_P (type
));
9837 gcc_assert (POINTER_TYPE_P (treeop0_type
));
9839 addr_space_t as_to
= TYPE_ADDR_SPACE (TREE_TYPE (type
));
9840 addr_space_t as_from
= TYPE_ADDR_SPACE (TREE_TYPE (treeop0_type
));
9842 /* Conversions between pointers to the same address space should
9843 have been implemented via CONVERT_EXPR / NOP_EXPR. */
9844 gcc_assert (as_to
!= as_from
);
9846 op0
= expand_expr (treeop0
, NULL_RTX
, VOIDmode
, modifier
);
9848 /* Ask target code to handle conversion between pointers
9849 to overlapping address spaces. */
9850 if (targetm
.addr_space
.subset_p (as_to
, as_from
)
9851 || targetm
.addr_space
.subset_p (as_from
, as_to
))
9853 op0
= targetm
.addr_space
.convert (op0
, treeop0_type
, type
);
9857 /* For disjoint address spaces, converting anything but a null
9858 pointer invokes undefined behavior. We truncate or extend the
9859 value as if we'd converted via integers, which handles 0 as
9860 required, and all others as the programmer likely expects. */
9861 #ifndef POINTERS_EXTEND_UNSIGNED
9862 const int POINTERS_EXTEND_UNSIGNED
= 1;
9864 op0
= convert_modes (mode
, TYPE_MODE (treeop0_type
),
9865 op0
, POINTERS_EXTEND_UNSIGNED
);
    case POINTER_PLUS_EXPR:
      /* Even though the sizetype mode and the pointer's mode can be different
         expand is able to handle this correctly and get the correct result out
         of the PLUS_EXPR code.  */
      /* Make sure to sign-extend the sizetype offset in a POINTER_PLUS_EXPR
         if sizetype precision is smaller than pointer precision.  */
      if (TYPE_PRECISION (sizetype) < TYPE_PRECISION (type))
        treeop1 = fold_convert_loc (loc, type,
                                    fold_convert_loc (loc, ssizetype,
                                                      treeop1));
      /* If sizetype precision is larger than pointer precision, truncate the
         offset to have matching modes.  */
      else if (TYPE_PRECISION (sizetype) > TYPE_PRECISION (type))
        treeop1 = fold_convert_loc (loc, type, treeop1);
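      /* For instance, if sizetype is 32 bits and pointers are 64 bits,
         an offset of (sizetype) -4 must become -4 (sign-extended) in the
         pointer type rather than the zero-extended value 4294967292.  */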
      /* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
         something else, make sure we add the register to the constant and
         then to the other thing.  This case can occur during strength
         reduction and doing it this way will produce better code if the
         frame pointer or argument pointer is eliminated.

         fold-const.cc will ensure that the constant is always in the inner
         PLUS_EXPR, so the only case we need to do anything about is if
         sp, ap, or fp is our second argument, in which case we must swap
         the innermost first argument and our second argument.  */

      if (TREE_CODE (treeop0) == PLUS_EXPR
          && TREE_CODE (TREE_OPERAND (treeop0, 1)) == INTEGER_CST
          && VAR_P (treeop1)
          && (DECL_RTL (treeop1) == frame_pointer_rtx
              || DECL_RTL (treeop1) == stack_pointer_rtx
              || DECL_RTL (treeop1) == arg_pointer_rtx))
      /* If the result is to be ptr_mode and we are adding an integer to
         something, we might be forming a constant.  So try to use
         plus_constant.  If it produces a sum and we can't accept it,
         use force_operand.  This allows P = &ARR[const] to generate
         efficient code on machines where a SYMBOL_REF is not a valid
         address.

         If this is an EXPAND_SUM call, always return the sum.  */
      if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER
          || (mode == ptr_mode && (unsignedp || ! flag_trapv)))
        {
          if (modifier == EXPAND_STACK_PARM)
            target = 0;
          if (TREE_CODE (treeop0) == INTEGER_CST
              && HWI_COMPUTABLE_MODE_P (mode)
              && TREE_CONSTANT (treeop1))
            {
              rtx constant_part;
              HOST_WIDE_INT wc;
              machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop1));

              op1 = expand_expr (treeop1, subtarget, VOIDmode,
                                 EXPAND_SUM);
              /* Use wi::shwi to ensure that the constant is
                 truncated according to the mode of OP1, then sign extended
                 to a HOST_WIDE_INT.  Using the constant directly can result
                 in non-canonical RTL in a 64x32 cross compile.  */
              wc = TREE_INT_CST_LOW (treeop0);
              constant_part
                = immed_wide_int_const (wi::shwi (wc, wmode), wmode);
              op1 = plus_constant (mode, op1, INTVAL (constant_part));
              if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
                op1 = force_operand (op1, target);
              return REDUCE_BIT_FIELD (op1);
            }
          else if (TREE_CODE (treeop1) == INTEGER_CST
                   && HWI_COMPUTABLE_MODE_P (mode)
                   && TREE_CONSTANT (treeop0))
            {
              rtx constant_part;
              HOST_WIDE_INT wc;
              machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop0));

              op0 = expand_expr (treeop0, subtarget, VOIDmode,
                                 (modifier == EXPAND_INITIALIZER
                                  ? EXPAND_INITIALIZER : EXPAND_SUM));
              if (! CONSTANT_P (op0))
                {
                  op1 = expand_expr (treeop1, NULL_RTX,
                                     VOIDmode, modifier);
                  /* Return a PLUS if modifier says it's OK.  */
                  if (modifier == EXPAND_SUM
                      || modifier == EXPAND_INITIALIZER)
                    return simplify_gen_binary (PLUS, mode, op0, op1);
                  goto binop2;
                }
              /* Use wi::shwi to ensure that the constant is
                 truncated according to the mode of OP1, then sign extended
                 to a HOST_WIDE_INT.  Using the constant directly can result
                 in non-canonical RTL in a 64x32 cross compile.  */
              wc = TREE_INT_CST_LOW (treeop1);
              constant_part
                = immed_wide_int_const (wi::shwi (wc, wmode), wmode);
              op0 = plus_constant (mode, op0, INTVAL (constant_part));
              if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
                op0 = force_operand (op0, target);
              return REDUCE_BIT_FIELD (op0);
            }
        }
      /* Use TER to expand pointer addition of a negated value
         as pointer subtraction.  */
      if ((POINTER_TYPE_P (TREE_TYPE (treeop0))
           || (TREE_CODE (TREE_TYPE (treeop0)) == VECTOR_TYPE
               && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (treeop0)))))
          && TREE_CODE (treeop1) == SSA_NAME
          && TYPE_MODE (TREE_TYPE (treeop0))
             == TYPE_MODE (TREE_TYPE (treeop1)))
        {
          gimple *def = get_def_for_expr (treeop1, NEGATE_EXPR);
          if (def)
            {
              treeop1 = gimple_assign_rhs1 (def);
              code = MINUS_EXPR;
              goto do_minus;
            }
        }
      /* No sense saving up arithmetic to be done
         if it's all in the wrong mode to form part of an address.
         And force_operand won't know whether to sign-extend or
         zero-extend.  */
      if (modifier != EXPAND_INITIALIZER
          && (modifier != EXPAND_SUM || mode != ptr_mode))
        {
          expand_operands (treeop0, treeop1,
                           subtarget, &op0, &op1, modifier);
          if (op0 == const0_rtx)
            return op1;
          if (op1 == const0_rtx)
            return op0;
          goto binop2;
        }

      expand_operands (treeop0, treeop1,
                       subtarget, &op0, &op1, modifier);
      return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
    case POINTER_DIFF_EXPR:
    do_minus:
      /* For initializers, we are allowed to return a MINUS of two
         symbolic constants.  Here we handle all cases when both operands
         are constant.  */
      /* Handle difference of two symbolic constants,
         for the sake of an initializer.  */
      if ((modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
          && really_constant_p (treeop0)
          && really_constant_p (treeop1))
        {
          expand_operands (treeop0, treeop1,
                           NULL_RTX, &op0, &op1, modifier);
          return simplify_gen_binary (MINUS, mode, op0, op1);
        }

      /* No sense saving up arithmetic to be done
         if it's all in the wrong mode to form part of an address.
         And force_operand won't know whether to sign-extend or
         zero-extend.  */
      if (modifier != EXPAND_INITIALIZER
          && (modifier != EXPAND_SUM || mode != ptr_mode))
        goto binop;

      expand_operands (treeop0, treeop1,
                       subtarget, &op0, &op1, modifier);

      /* Convert A - const to A + (-const).  */
      if (CONST_INT_P (op1))
        {
          op1 = negate_rtx (mode, op1);
          return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
        }
    case WIDEN_MULT_PLUS_EXPR:
    case WIDEN_MULT_MINUS_EXPR:
      expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
      op2 = expand_normal (treeop2);
      target = expand_widen_pattern_expr (ops, op0, op1, op2,
                                          target, unsignedp);
      return target;
10063 case WIDEN_MULT_EXPR
:
10064 /* If first operand is constant, swap them.
10065 Thus the following special case checks need only
10066 check the second operand. */
10067 if (TREE_CODE (treeop0
) == INTEGER_CST
)
10068 std::swap (treeop0
, treeop1
);
10070 /* First, check if we have a multiplication of one signed and one
10071 unsigned operand. */
10072 if (TREE_CODE (treeop1
) != INTEGER_CST
10073 && (TYPE_UNSIGNED (TREE_TYPE (treeop0
))
10074 != TYPE_UNSIGNED (TREE_TYPE (treeop1
))))
10076 machine_mode innermode
= TYPE_MODE (TREE_TYPE (treeop0
));
10077 this_optab
= usmul_widen_optab
;
10078 if (find_widening_optab_handler (this_optab
, mode
, innermode
)
10079 != CODE_FOR_nothing
)
10081 if (TYPE_UNSIGNED (TREE_TYPE (treeop0
)))
10082 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
,
10085 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op1
, &op0
,
10087 /* op0 and op1 might still be constant, despite the above
10088 != INTEGER_CST check. Handle it. */
10089 if (GET_MODE (op0
) == VOIDmode
&& GET_MODE (op1
) == VOIDmode
)
10091 op0
= convert_modes (mode
, innermode
, op0
, true);
10092 op1
= convert_modes (mode
, innermode
, op1
, false);
10093 return REDUCE_BIT_FIELD (expand_mult (mode
, op0
, op1
,
10094 target
, unsignedp
));
10099 /* Check for a multiplication with matching signedness. */
10100 else if ((TREE_CODE (treeop1
) == INTEGER_CST
10101 && int_fits_type_p (treeop1
, TREE_TYPE (treeop0
)))
10102 || (TYPE_UNSIGNED (TREE_TYPE (treeop1
))
10103 == TYPE_UNSIGNED (TREE_TYPE (treeop0
))))
10105 tree op0type
= TREE_TYPE (treeop0
);
10106 machine_mode innermode
= TYPE_MODE (op0type
);
10107 bool zextend_p
= TYPE_UNSIGNED (op0type
);
10108 optab other_optab
= zextend_p
? smul_widen_optab
: umul_widen_optab
;
10109 this_optab
= zextend_p
? umul_widen_optab
: smul_widen_optab
;
10111 if (TREE_CODE (treeop0
) != INTEGER_CST
)
10113 if (find_widening_optab_handler (this_optab
, mode
, innermode
)
10114 != CODE_FOR_nothing
)
10116 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
,
10118 /* op0 and op1 might still be constant, despite the above
10119 != INTEGER_CST check. Handle it. */
10120 if (GET_MODE (op0
) == VOIDmode
&& GET_MODE (op1
) == VOIDmode
)
10123 op0
= convert_modes (mode
, innermode
, op0
, zextend_p
);
10125 = convert_modes (mode
, innermode
, op1
,
10126 TYPE_UNSIGNED (TREE_TYPE (treeop1
)));
10127 return REDUCE_BIT_FIELD (expand_mult (mode
, op0
, op1
,
10131 temp
= expand_widening_mult (mode
, op0
, op1
, target
,
10132 unsignedp
, this_optab
);
10133 return REDUCE_BIT_FIELD (temp
);
10135 if (find_widening_optab_handler (other_optab
, mode
, innermode
)
10136 != CODE_FOR_nothing
10137 && innermode
== word_mode
)
10140 op0
= expand_normal (treeop0
);
10141 op1
= expand_normal (treeop1
);
10142 /* op0 and op1 might be constants, despite the above
10143 != INTEGER_CST check. Handle it. */
10144 if (GET_MODE (op0
) == VOIDmode
&& GET_MODE (op1
) == VOIDmode
)
10145 goto widen_mult_const
;
10146 temp
= expand_binop (mode
, other_optab
, op0
, op1
, target
,
10147 unsignedp
, OPTAB_LIB_WIDEN
);
10148 hipart
= gen_highpart (word_mode
, temp
);
10149 htem
= expand_mult_highpart_adjust (word_mode
, hipart
,
10152 if (htem
!= hipart
)
10153 emit_move_insn (hipart
, htem
);
10154 return REDUCE_BIT_FIELD (temp
);
10158 treeop0
= fold_build1 (CONVERT_EXPR
, type
, treeop0
);
10159 treeop1
= fold_build1 (CONVERT_EXPR
, type
, treeop1
);
10160 expand_operands (treeop0
, treeop1
, subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
10161 return REDUCE_BIT_FIELD (expand_mult (mode
, op0
, op1
, target
, unsignedp
));
      /* If this is a fixed-point operation, then we cannot use the code
         below because "expand_mult" doesn't support sat/no-sat fixed-point
         multiplications.  */
      if (ALL_FIXED_POINT_MODE_P (mode))
        goto binop;

      /* If first operand is constant, swap them.
         Thus the following special case checks need only
         check the second operand.  */
      if (TREE_CODE (treeop0) == INTEGER_CST)
        std::swap (treeop0, treeop1);

      /* Attempt to return something suitable for generating an
         indexed address, for machines that support that.  */
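      /* Under EXPAND_SUM this can hand back a bare (mult reg const_int),
         which the caller may then fold into a base + index * scale
         address on targets with scaled addressing modes.  */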
      if (modifier == EXPAND_SUM && mode == ptr_mode
          && tree_fits_shwi_p (treeop1))
        {
          tree exp1 = treeop1;

          op0 = expand_expr (treeop0, subtarget, VOIDmode,
                             EXPAND_SUM);

          if (!REG_P (op0))
            op0 = force_operand (op0, NULL_RTX);
          if (!REG_P (op0))
            op0 = copy_to_mode_reg (mode, op0);

          op1 = gen_int_mode (tree_to_shwi (exp1),
                              TYPE_MODE (TREE_TYPE (exp1)));
          return REDUCE_BIT_FIELD (gen_rtx_MULT (mode, op0, op1));
        }
      if (modifier == EXPAND_STACK_PARM)
        target = 0;
      if (SCALAR_INT_MODE_P (mode) && optimize >= 2)
        {
          gimple *def_stmt0 = get_def_for_expr (treeop0, TRUNC_DIV_EXPR);
          gimple *def_stmt1 = get_def_for_expr (treeop1, TRUNC_DIV_EXPR);
          if (def_stmt0
              && !operand_equal_p (treeop1, gimple_assign_rhs2 (def_stmt0), 0))
            def_stmt0 = NULL;
          if (def_stmt1
              && !operand_equal_p (treeop0, gimple_assign_rhs2 (def_stmt1), 0))
            def_stmt1 = NULL;

          if (def_stmt0 || def_stmt1)
            {
              /* X / Y * Y can be expanded as X - X % Y too.
                 Choose the cheaper sequence of those two.  */
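              /* For example, (x / 16) * 16 can also be computed as
                 x - x % 16; both instruction sequences are generated
                 below and the cheaper one is emitted.  */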
10216 treeop0
= gimple_assign_rhs1 (def_stmt0
);
10220 treeop0
= gimple_assign_rhs1 (def_stmt1
);
10222 expand_operands (treeop0
, treeop1
, subtarget
, &op0
, &op1
,
10224 bool speed_p
= optimize_insn_for_speed_p ();
10225 do_pending_stack_adjust ();
10228 = expand_expr_divmod (TRUNC_DIV_EXPR
, mode
, treeop0
, treeop1
,
10229 op0
, op1
, NULL_RTX
, unsignedp
);
10230 divmul_ret
= expand_mult (mode
, divmul_ret
, op1
, target
,
10232 rtx_insn
*divmul_insns
= get_insns ();
10236 = expand_expr_divmod (TRUNC_MOD_EXPR
, mode
, treeop0
, treeop1
,
10237 op0
, op1
, NULL_RTX
, unsignedp
);
10238 this_optab
= optab_for_tree_code (MINUS_EXPR
, type
,
10240 modsub_ret
= expand_binop (mode
, this_optab
, op0
, modsub_ret
,
10241 target
, unsignedp
, OPTAB_LIB_WIDEN
);
10242 rtx_insn
*modsub_insns
= get_insns ();
10244 unsigned divmul_cost
= seq_cost (divmul_insns
, speed_p
);
10245 unsigned modsub_cost
= seq_cost (modsub_insns
, speed_p
);
10246 /* If costs are the same then use as tie breaker the other other
10248 if (divmul_cost
== modsub_cost
)
10250 divmul_cost
= seq_cost (divmul_insns
, !speed_p
);
10251 modsub_cost
= seq_cost (modsub_insns
, !speed_p
);
10254 if (divmul_cost
<= modsub_cost
)
10256 emit_insn (divmul_insns
);
10257 return REDUCE_BIT_FIELD (divmul_ret
);
10259 emit_insn (modsub_insns
);
10260 return REDUCE_BIT_FIELD (modsub_ret
);
      expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);

      /* Expand X*Y as X&-Y when Y must be zero or one.  */
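      /* When Y is known to be 0 or 1, -Y is either all-zero or all-one
         bits in MODE, so X & -Y yields 0 or X, exactly matching X * Y.
         Likewise X & Y matches X * Y when both operands are 0 or 1.  */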
      if (SCALAR_INT_MODE_P (mode))
        {
          bool gimple_zero_one_valued_p (tree, tree (*)(tree));
          bool bit0_p = gimple_zero_one_valued_p (treeop0, nullptr);
          bool bit1_p = gimple_zero_one_valued_p (treeop1, nullptr);

          /* Expand X*Y as X&Y when both X and Y must be zero or one.  */
          if (bit0_p && bit1_p)
            return REDUCE_BIT_FIELD (expand_and (mode, op0, op1, target));

          if (bit0_p || bit1_p)
            {
              bool speed = optimize_insn_for_speed_p ();
              int cost = add_cost (speed, mode) + neg_cost (speed, mode);
              struct algorithm algorithm;
              enum mult_variant variant;
              if (CONST_INT_P (op1)
                  ? !choose_mult_variant (mode, INTVAL (op1),
                                          &algorithm, &variant, cost)
                  : cost < mul_cost (speed, mode))
                {
                  temp = bit0_p ? expand_and (mode, negate_rtx (mode, op0),
                                              op1, target)
                                : expand_and (mode, op0,
                                              negate_rtx (mode, op1),
                                              target);
                  return REDUCE_BIT_FIELD (temp);
                }
            }
        }

      return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
    case TRUNC_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case ROUND_MOD_EXPR:

    case TRUNC_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      /* If this is a fixed-point operation, then we cannot use the code
         below because "expand_divmod" doesn't support sat/no-sat fixed-point
         divisions.  */
      if (ALL_FIXED_POINT_MODE_P (mode))
        goto binop;

      if (modifier == EXPAND_STACK_PARM)
        target = 0;
      /* Possible optimization: compute the dividend with EXPAND_SUM
         then if the divisor is constant can optimize the case
         where some terms of the dividend have coeffs divisible by it.  */
      expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
      return expand_expr_divmod (code, mode, treeop0, treeop1, op0, op1,
                                 target, unsignedp);
10328 case MULT_HIGHPART_EXPR
:
10329 expand_operands (treeop0
, treeop1
, subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
10330 temp
= expand_mult_highpart (mode
, op0
, op1
, target
, unsignedp
);
10334 case FIXED_CONVERT_EXPR
:
10335 op0
= expand_normal (treeop0
);
10336 if (target
== 0 || modifier
== EXPAND_STACK_PARM
)
10337 target
= gen_reg_rtx (mode
);
10339 if ((TREE_CODE (TREE_TYPE (treeop0
)) == INTEGER_TYPE
10340 && TYPE_UNSIGNED (TREE_TYPE (treeop0
)))
10341 || (TREE_CODE (type
) == INTEGER_TYPE
&& TYPE_UNSIGNED (type
)))
10342 expand_fixed_convert (target
, op0
, 1, TYPE_SATURATING (type
));
10344 expand_fixed_convert (target
, op0
, 0, TYPE_SATURATING (type
));
10347 case FIX_TRUNC_EXPR
:
10348 op0
= expand_normal (treeop0
);
10349 if (target
== 0 || modifier
== EXPAND_STACK_PARM
)
10350 target
= gen_reg_rtx (mode
);
10351 expand_fix (target
, op0
, unsignedp
);
10355 op0
= expand_normal (treeop0
);
10356 if (target
== 0 || modifier
== EXPAND_STACK_PARM
)
10357 target
= gen_reg_rtx (mode
);
10358 /* expand_float can't figure out what to do if FROM has VOIDmode.
10359 So give it the correct mode. With -O, cse will optimize this. */
10360 if (GET_MODE (op0
) == VOIDmode
)
10361 op0
= copy_to_mode_reg (TYPE_MODE (TREE_TYPE (treeop0
)),
10363 expand_float (target
, op0
,
10364 TYPE_UNSIGNED (TREE_TYPE (treeop0
)));
10368 op0
= expand_expr (treeop0
, subtarget
,
10369 VOIDmode
, EXPAND_NORMAL
);
10370 if (modifier
== EXPAND_STACK_PARM
)
10372 temp
= expand_unop (mode
,
10373 optab_for_tree_code (NEGATE_EXPR
, type
,
10377 return REDUCE_BIT_FIELD (temp
);
10381 op0
= expand_expr (treeop0
, subtarget
,
10382 VOIDmode
, EXPAND_NORMAL
);
10383 if (modifier
== EXPAND_STACK_PARM
)
10386 /* ABS_EXPR is not valid for complex arguments. */
10387 gcc_assert (GET_MODE_CLASS (mode
) != MODE_COMPLEX_INT
10388 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_FLOAT
);
10390 /* Unsigned abs is simply the operand. Testing here means we don't
10391 risk generating incorrect code below. */
10392 if (TYPE_UNSIGNED (TREE_TYPE (treeop0
)))
10395 return expand_abs (mode
, op0
, target
, unsignedp
,
10396 safe_from_p (target
, treeop0
, 1));
10400 target
= original_target
;
10402 || modifier
== EXPAND_STACK_PARM
10403 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
10404 || GET_MODE (target
) != mode
10406 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
10407 target
= gen_reg_rtx (mode
);
10408 expand_operands (treeop0
, treeop1
,
10409 target
, &op0
, &op1
, EXPAND_NORMAL
);
10411 /* First try to do it with a special MIN or MAX instruction.
10412 If that does not win, use a conditional jump to select the proper
10414 this_optab
= optab_for_tree_code (code
, type
, optab_default
);
10415 temp
= expand_binop (mode
, this_optab
, op0
, op1
, target
, unsignedp
,
10420 if (VECTOR_TYPE_P (type
))
10421 gcc_unreachable ();
10423 /* At this point, a MEM target is no longer useful; we will get better
10424 code without it. */
10426 if (! REG_P (target
))
10427 target
= gen_reg_rtx (mode
);
10429 /* If op1 was placed in target, swap op0 and op1. */
10430 if (target
!= op0
&& target
== op1
)
10431 std::swap (op0
, op1
);
10433 /* We generate better code and avoid problems with op1 mentioning
10434 target by forcing op1 into a pseudo if it isn't a constant. */
10435 if (! CONSTANT_P (op1
))
10436 op1
= force_reg (mode
, op1
);
10439 enum rtx_code comparison_code
;
10442 if (code
== MAX_EXPR
)
10443 comparison_code
= unsignedp
? GEU
: GE
;
10445 comparison_code
= unsignedp
? LEU
: LE
;
10447 /* Canonicalize to comparisons against 0. */
10448 if (op1
== const1_rtx
)
10450 /* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
10451 or (a != 0 ? a : 1) for unsigned.
10452 For MIN we are safe converting (a <= 1 ? a : 1)
10453 into (a <= 0 ? a : 1) */
10454 cmpop1
= const0_rtx
;
10455 if (code
== MAX_EXPR
)
10456 comparison_code
= unsignedp
? NE
: GT
;
10458 if (op1
== constm1_rtx
&& !unsignedp
)
10460 /* Converting (a >= -1 ? a : -1) into (a >= 0 ? a : -1)
10461 and (a <= -1 ? a : -1) into (a < 0 ? a : -1) */
10462 cmpop1
= const0_rtx
;
10463 if (code
== MIN_EXPR
)
10464 comparison_code
= LT
;
10467 /* Use a conditional move if possible. */
10468 if (can_conditionally_move_p (mode
))
10474 /* Try to emit the conditional move. */
10475 insn
= emit_conditional_move (target
,
10477 op0
, cmpop1
, mode
},
10481 /* If we could do the conditional move, emit the sequence,
10485 rtx_insn
*seq
= get_insns ();
10491 /* Otherwise discard the sequence and fall back to code with
10497 emit_move_insn (target
, op0
);
10499 lab
= gen_label_rtx ();
10500 do_compare_rtx_and_jump (target
, cmpop1
, comparison_code
,
10501 unsignedp
, mode
, NULL_RTX
, NULL
, lab
,
10502 profile_probability::uninitialized ());
10504 emit_move_insn (target
, op1
);
10509 op0
= expand_expr (treeop0
, subtarget
,
10510 VOIDmode
, EXPAND_NORMAL
);
10511 if (modifier
== EXPAND_STACK_PARM
)
10513 /* In case we have to reduce the result to bitfield precision
10514 for unsigned bitfield expand this as XOR with a proper constant
10516 if (reduce_bit_field
&& TYPE_UNSIGNED (type
))
10518 int_mode
= SCALAR_INT_TYPE_MODE (type
);
10519 wide_int mask
= wi::mask (TYPE_PRECISION (type
),
10520 false, GET_MODE_PRECISION (int_mode
));
10522 temp
= expand_binop (int_mode
, xor_optab
, op0
,
10523 immed_wide_int_const (mask
, int_mode
),
10524 target
, 1, OPTAB_LIB_WIDEN
);
10527 temp
= expand_unop (mode
, one_cmpl_optab
, op0
, target
, 1);
10531 /* ??? Can optimize bitwise operations with one arg constant.
10532 Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b)
10533 and (a bitwise1 b) bitwise2 b (etc)
10534 but that is probably not worth while. */
10543 gcc_assert (VECTOR_MODE_P (TYPE_MODE (type
))
10544 || type_has_mode_precision_p (type
));
10550 /* If this is a fixed-point operation, then we cannot use the code
10551 below because "expand_shift" doesn't support sat/no-sat fixed-point
10553 if (ALL_FIXED_POINT_MODE_P (mode
))
10556 if (! safe_from_p (subtarget
, treeop1
, 1))
10558 if (modifier
== EXPAND_STACK_PARM
)
10560 op0
= expand_expr (treeop0
, subtarget
,
10561 VOIDmode
, EXPAND_NORMAL
);
      /* Left shift optimization when shifting across word_size boundary.

         If mode == GET_MODE_WIDER_MODE (word_mode), then normally
         there isn't native instruction to support this wide mode
         left shift.  Given below scenario:

                Type A = (Type) B  << C

                | dest_high  |  dest_low |

         If the shift amount C caused we shift B to across the word
         size boundary, i.e part of B shifted into high half of
         destination register, and part of B remains in the low
         half, then GCC will use the following left shift expand
         logic:

         1. Initialize dest_low to B.
         2. Initialize every bit of dest_high to the sign bit of B.
         3. Logic left shift dest_low by C bit to finalize dest_low.
            The value of dest_low before this shift is kept in a temp D.
         4. Logic left shift dest_high by C.
         5. Logic right shift D by (word_size - C).
         6. Or the result of 4 and 5 to finalize dest_high.

         While, by checking gimple statements, if operand B is
         coming from signed extension, then we can simplify above
         expand logic into:

              1. dest_high = src_low >> (word_size - C).
              2. dest_low = src_low << C.

         We can use one arithmetic right shift to finish all the
         purpose of steps 2, 4, 5, 6, thus we reduce the steps
         needed from 6 into 2.

         The case is similar for zero extension, except that we
         initialize dest_high to zero rather than copies of the sign
         bit from B.  Furthermore, we need to use a logical right shift
         in step 1.

         The choice of sign-extension versus zero-extension is
         determined entirely by whether or not B is signed and is
         independent of the current setting of unsignedp.  */
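      /* For example, with 64-bit words, a 128-bit destination and a
         32-bit source sign-extended into the low word, a shift count
         C of 40 reduces to dest_low = src_low << 40 and
         dest_high = src_low >> 24 (arithmetic), instead of the
         six-step generic sequence above.  */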
10611 if (code
== LSHIFT_EXPR
10614 && GET_MODE_2XWIDER_MODE (word_mode
).exists (&int_mode
)
10615 && mode
== int_mode
10616 && TREE_CONSTANT (treeop1
)
10617 && TREE_CODE (treeop0
) == SSA_NAME
)
10619 gimple
*def
= SSA_NAME_DEF_STMT (treeop0
);
10620 if (is_gimple_assign (def
)
10621 && gimple_assign_rhs_code (def
) == NOP_EXPR
)
10623 scalar_int_mode rmode
= SCALAR_INT_TYPE_MODE
10624 (TREE_TYPE (gimple_assign_rhs1 (def
)));
10626 if (GET_MODE_SIZE (rmode
) < GET_MODE_SIZE (int_mode
)
10627 && TREE_INT_CST_LOW (treeop1
) < GET_MODE_BITSIZE (word_mode
)
10628 && ((TREE_INT_CST_LOW (treeop1
) + GET_MODE_BITSIZE (rmode
))
10629 >= GET_MODE_BITSIZE (word_mode
)))
10631 rtx_insn
*seq
, *seq_old
;
10632 poly_uint64 high_off
= subreg_highpart_offset (word_mode
,
10634 bool extend_unsigned
10635 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def
)));
10636 rtx low
= lowpart_subreg (word_mode
, op0
, int_mode
);
10637 rtx dest_low
= lowpart_subreg (word_mode
, target
, int_mode
);
10638 rtx dest_high
= simplify_gen_subreg (word_mode
, target
,
10639 int_mode
, high_off
);
10640 HOST_WIDE_INT ramount
= (BITS_PER_WORD
10641 - TREE_INT_CST_LOW (treeop1
));
10642 tree rshift
= build_int_cst (TREE_TYPE (treeop1
), ramount
);
10645 /* dest_high = src_low >> (word_size - C). */
10646 temp
= expand_variable_shift (RSHIFT_EXPR
, word_mode
, low
,
10649 if (temp
!= dest_high
)
10650 emit_move_insn (dest_high
, temp
);
10652 /* dest_low = src_low << C. */
10653 temp
= expand_variable_shift (LSHIFT_EXPR
, word_mode
, low
,
10654 treeop1
, dest_low
, unsignedp
);
10655 if (temp
!= dest_low
)
10656 emit_move_insn (dest_low
, temp
);
10658 seq
= get_insns ();
10662 if (have_insn_for (ASHIFT
, int_mode
))
10664 bool speed_p
= optimize_insn_for_speed_p ();
10666 rtx ret_old
= expand_variable_shift (code
, int_mode
,
10671 seq_old
= get_insns ();
10673 if (seq_cost (seq
, speed_p
)
10674 >= seq_cost (seq_old
, speed_p
))
10685 if (temp
== NULL_RTX
)
10686 temp
= expand_variable_shift (code
, mode
, op0
, treeop1
, target
,
10688 if (code
== LSHIFT_EXPR
)
10689 temp
= REDUCE_BIT_FIELD (temp
);
      /* Could determine the answer when only additive constants differ.  Also,
         the addition of one can be handled by changing the condition.  */
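      /* For instance, X < Y + 1 is equivalent to X <= Y as long as the
         addition cannot overflow.  */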
10701 case UNORDERED_EXPR
:
10710 temp
= do_store_flag (ops
,
10711 modifier
!= EXPAND_STACK_PARM
? target
: NULL_RTX
,
10712 tmode
!= VOIDmode
? tmode
: mode
);
10716 /* Use a compare and a jump for BLKmode comparisons, or for function
10717 type comparisons is have_canonicalize_funcptr_for_compare. */
10720 || modifier
== EXPAND_STACK_PARM
10721 || ! safe_from_p (target
, treeop0
, 1)
10722 || ! safe_from_p (target
, treeop1
, 1)
10723 /* Make sure we don't have a hard reg (such as function's return
10724 value) live across basic blocks, if not optimizing. */
10725 || (!optimize
&& REG_P (target
)
10726 && REGNO (target
) < FIRST_PSEUDO_REGISTER
)))
10727 target
= gen_reg_rtx (tmode
!= VOIDmode
? tmode
: mode
);
10729 emit_move_insn (target
, const0_rtx
);
10731 rtx_code_label
*lab1
= gen_label_rtx ();
10732 jumpifnot_1 (code
, treeop0
, treeop1
, lab1
,
10733 profile_probability::uninitialized ());
10735 if (TYPE_PRECISION (type
) == 1 && !TYPE_UNSIGNED (type
))
10736 emit_move_insn (target
, constm1_rtx
);
10738 emit_move_insn (target
, const1_rtx
);
10744 /* Get the rtx code of the operands. */
10745 op0
= expand_normal (treeop0
);
10746 op1
= expand_normal (treeop1
);
10749 target
= gen_reg_rtx (TYPE_MODE (type
));
10751 /* If target overlaps with op1, then either we need to force
10752 op1 into a pseudo (if target also overlaps with op0),
10753 or write the complex parts in reverse order. */
10754 switch (GET_CODE (target
))
10757 if (reg_overlap_mentioned_p (XEXP (target
, 0), op1
))
10759 if (reg_overlap_mentioned_p (XEXP (target
, 1), op0
))
10761 complex_expr_force_op1
:
10762 temp
= gen_reg_rtx (GET_MODE_INNER (GET_MODE (target
)));
10763 emit_move_insn (temp
, op1
);
10767 complex_expr_swap_order
:
10768 /* Move the imaginary (op1) and real (op0) parts to their
10770 write_complex_part (target
, op1
, true, true);
10771 write_complex_part (target
, op0
, false, false);
10777 temp
= adjust_address_nv (target
,
10778 GET_MODE_INNER (GET_MODE (target
)), 0);
10779 if (reg_overlap_mentioned_p (temp
, op1
))
10781 scalar_mode imode
= GET_MODE_INNER (GET_MODE (target
));
10782 temp
= adjust_address_nv (target
, imode
,
10783 GET_MODE_SIZE (imode
));
10784 if (reg_overlap_mentioned_p (temp
, op0
))
10785 goto complex_expr_force_op1
;
10786 goto complex_expr_swap_order
;
10790 if (reg_overlap_mentioned_p (target
, op1
))
10792 if (reg_overlap_mentioned_p (target
, op0
))
10793 goto complex_expr_force_op1
;
10794 goto complex_expr_swap_order
;
10799 /* Move the real (op0) and imaginary (op1) parts to their location. */
10800 write_complex_part (target
, op0
, false, true);
10801 write_complex_part (target
, op1
, true, false);
10805 case WIDEN_SUM_EXPR
:
10807 tree oprnd0
= treeop0
;
10808 tree oprnd1
= treeop1
;
10810 expand_operands (oprnd0
, oprnd1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
10811 target
= expand_widen_pattern_expr (ops
, op0
, NULL_RTX
, op1
,
10812 target
, unsignedp
);
10816 case VEC_UNPACK_HI_EXPR
:
10817 case VEC_UNPACK_LO_EXPR
:
10818 case VEC_UNPACK_FIX_TRUNC_HI_EXPR
:
10819 case VEC_UNPACK_FIX_TRUNC_LO_EXPR
:
10821 op0
= expand_normal (treeop0
);
10822 temp
= expand_widen_pattern_expr (ops
, op0
, NULL_RTX
, NULL_RTX
,
10823 target
, unsignedp
);
10828 case VEC_UNPACK_FLOAT_HI_EXPR
:
10829 case VEC_UNPACK_FLOAT_LO_EXPR
:
10831 op0
= expand_normal (treeop0
);
10832 /* The signedness is determined from input operand. */
10833 temp
= expand_widen_pattern_expr
10834 (ops
, op0
, NULL_RTX
, NULL_RTX
,
10835 target
, TYPE_UNSIGNED (TREE_TYPE (treeop0
)));
10841 case VEC_WIDEN_MULT_HI_EXPR
:
10842 case VEC_WIDEN_MULT_LO_EXPR
:
10843 case VEC_WIDEN_MULT_EVEN_EXPR
:
10844 case VEC_WIDEN_MULT_ODD_EXPR
:
10845 case VEC_WIDEN_LSHIFT_HI_EXPR
:
10846 case VEC_WIDEN_LSHIFT_LO_EXPR
:
10847 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
10848 target
= expand_widen_pattern_expr (ops
, op0
, op1
, NULL_RTX
,
10849 target
, unsignedp
);
10850 gcc_assert (target
);
10853 case VEC_PACK_SAT_EXPR
:
10854 case VEC_PACK_FIX_TRUNC_EXPR
:
10855 mode
= TYPE_MODE (TREE_TYPE (treeop0
));
10856 subtarget
= NULL_RTX
;
10859 case VEC_PACK_TRUNC_EXPR
:
10860 if (VECTOR_BOOLEAN_TYPE_P (type
)
10861 && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (treeop0
))
10862 && mode
== TYPE_MODE (TREE_TYPE (treeop0
))
10863 && SCALAR_INT_MODE_P (mode
))
10865 class expand_operand eops
[4];
10866 machine_mode imode
= TYPE_MODE (TREE_TYPE (treeop0
));
10867 expand_operands (treeop0
, treeop1
,
10868 subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
10869 this_optab
= vec_pack_sbool_trunc_optab
;
10870 enum insn_code icode
= optab_handler (this_optab
, imode
);
10871 create_output_operand (&eops
[0], target
, mode
);
10872 create_convert_operand_from (&eops
[1], op0
, imode
, false);
10873 create_convert_operand_from (&eops
[2], op1
, imode
, false);
10874 temp
= GEN_INT (TYPE_VECTOR_SUBPARTS (type
).to_constant ());
10875 create_input_operand (&eops
[3], temp
, imode
);
10876 expand_insn (icode
, 4, eops
);
10877 return eops
[0].value
;
10879 mode
= TYPE_MODE (TREE_TYPE (treeop0
));
10880 subtarget
= NULL_RTX
;
10883 case VEC_PACK_FLOAT_EXPR
:
10884 mode
= TYPE_MODE (TREE_TYPE (treeop0
));
10885 expand_operands (treeop0
, treeop1
,
10886 subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
10887 this_optab
= optab_for_tree_code (code
, TREE_TYPE (treeop0
),
10889 target
= expand_binop (mode
, this_optab
, op0
, op1
, target
,
10890 TYPE_UNSIGNED (TREE_TYPE (treeop0
)),
10892 gcc_assert (target
);
10895 case VEC_PERM_EXPR
:
10897 expand_operands (treeop0
, treeop1
, target
, &op0
, &op1
, EXPAND_NORMAL
);
10898 vec_perm_builder sel
;
10899 if (TREE_CODE (treeop2
) == VECTOR_CST
10900 && tree_to_vec_perm_builder (&sel
, treeop2
))
10902 machine_mode sel_mode
= TYPE_MODE (TREE_TYPE (treeop2
));
10903 temp
= expand_vec_perm_const (mode
, op0
, op1
, sel
,
10908 op2
= expand_normal (treeop2
);
10909 temp
= expand_vec_perm_var (mode
, op0
, op1
, op2
, target
);
10915 case DOT_PROD_EXPR
:
10917 tree oprnd0
= treeop0
;
10918 tree oprnd1
= treeop1
;
10919 tree oprnd2
= treeop2
;
10921 expand_operands (oprnd0
, oprnd1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
10922 op2
= expand_normal (oprnd2
);
10923 target
= expand_widen_pattern_expr (ops
, op0
, op1
, op2
,
10924 target
, unsignedp
);
10930 tree oprnd0
= treeop0
;
10931 tree oprnd1
= treeop1
;
10932 tree oprnd2
= treeop2
;
10934 expand_operands (oprnd0
, oprnd1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
10935 op2
= expand_normal (oprnd2
);
10936 target
= expand_widen_pattern_expr (ops
, op0
, op1
, op2
,
10937 target
, unsignedp
);
10941 case REALIGN_LOAD_EXPR
:
10943 tree oprnd0
= treeop0
;
10944 tree oprnd1
= treeop1
;
10945 tree oprnd2
= treeop2
;
10947 this_optab
= optab_for_tree_code (code
, type
, optab_default
);
10948 expand_operands (oprnd0
, oprnd1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
10949 op2
= expand_normal (oprnd2
);
10950 temp
= expand_ternary_op (mode
, this_optab
, op0
, op1
, op2
,
10951 target
, unsignedp
);
10958 /* A COND_EXPR with its type being VOID_TYPE represents a
10959 conditional jump and is handled in
10960 expand_gimple_cond_expr. */
10961 gcc_assert (!VOID_TYPE_P (type
));
10963 /* Note that COND_EXPRs whose type is a structure or union
10964 are required to be constructed to contain assignments of
10965 a temporary variable, so that we can evaluate them here
10966 for side effect only. If type is void, we must do likewise. */
10968 gcc_assert (!TREE_ADDRESSABLE (type
)
10970 && TREE_TYPE (treeop1
) != void_type_node
10971 && TREE_TYPE (treeop2
) != void_type_node
);
10973 temp
= expand_cond_expr_using_cmove (treeop0
, treeop1
, treeop2
);
10977 /* If we are not to produce a result, we have no target. Otherwise,
10978 if a target was specified use it; it will not be used as an
10979 intermediate target unless it is safe. If no target, use a
10982 if (modifier
!= EXPAND_STACK_PARM
10984 && safe_from_p (original_target
, treeop0
, 1)
10985 && GET_MODE (original_target
) == mode
10986 && !MEM_P (original_target
))
10987 temp
= original_target
;
10989 temp
= assign_temp (type
, 0, 1);
10991 do_pending_stack_adjust ();
10993 rtx_code_label
*lab0
= gen_label_rtx ();
10994 rtx_code_label
*lab1
= gen_label_rtx ();
10995 jumpifnot (treeop0
, lab0
,
10996 profile_probability::uninitialized ());
10997 store_expr (treeop1
, temp
,
10998 modifier
== EXPAND_STACK_PARM
,
11001 emit_jump_insn (targetm
.gen_jump (lab1
));
11004 store_expr (treeop2
, temp
,
11005 modifier
== EXPAND_STACK_PARM
,
11013 case VEC_DUPLICATE_EXPR
:
11014 op0
= expand_expr (treeop0
, NULL_RTX
, VOIDmode
, modifier
);
11015 target
= expand_vector_broadcast (mode
, op0
);
11016 gcc_assert (target
);
11019 case VEC_SERIES_EXPR
:
11020 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
, modifier
);
11021 return expand_vec_series_expr (mode
, op0
, op1
, target
);
11023 case BIT_INSERT_EXPR
:
11025 unsigned bitpos
= tree_to_uhwi (treeop2
);
11027 if (INTEGRAL_TYPE_P (TREE_TYPE (treeop1
)))
11028 bitsize
= TYPE_PRECISION (TREE_TYPE (treeop1
));
11030 bitsize
= tree_to_uhwi (TYPE_SIZE (TREE_TYPE (treeop1
)));
11031 op0
= expand_normal (treeop0
);
11032 op1
= expand_normal (treeop1
);
11033 rtx dst
= gen_reg_rtx (mode
);
11034 emit_move_insn (dst
, op0
);
11035 store_bit_field (dst
, bitsize
, bitpos
, 0, 0,
11036 TYPE_MODE (TREE_TYPE (treeop1
)), op1
, false, false);
11041 gcc_unreachable ();
11044 /* Here to do an ordinary binary operator. */
11046 expand_operands (treeop0
, treeop1
,
11047 subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
11049 this_optab
= optab_for_tree_code (code
, type
, optab_default
);
11051 if (modifier
== EXPAND_STACK_PARM
)
11053 temp
= expand_binop (mode
, this_optab
, op0
, op1
, target
,
11054 unsignedp
, OPTAB_LIB_WIDEN
);
11056 /* Bitwise operations do not need bitfield reduction as we expect their
11057 operands being properly truncated. */
11058 if (code
== BIT_XOR_EXPR
11059 || code
== BIT_AND_EXPR
11060 || code
== BIT_IOR_EXPR
)
11062 return REDUCE_BIT_FIELD (temp
);
11064 #undef REDUCE_BIT_FIELD
11067 /* Return TRUE if expression STMT is suitable for replacement.
11068 Never consider memory loads as replaceable, because those don't ever lead
11069 into constant expressions. */
11072 stmt_is_replaceable_p (gimple
*stmt
)
11074 if (ssa_is_replaceable_p (stmt
))
11076 /* Don't move around loads. */
11077 if (!gimple_assign_single_p (stmt
)
11078 || is_gimple_val (gimple_assign_rhs1 (stmt
)))
11084 /* A subroutine of expand_expr_real_1. Expand gimple assignment G,
11085 which is known to set an SSA_NAME result. The other arguments are
11086 as for expand_expr_real_1. */
11089 expand_expr_real_gassign (gassign
*g
, rtx target
, machine_mode tmode
,
11090 enum expand_modifier modifier
, rtx
*alt_rtl
,
11091 bool inner_reference_p
)
11095 location_t saved_loc
= curr_insn_location ();
11096 auto loc
= gimple_location (g
);
11097 if (loc
!= UNKNOWN_LOCATION
)
11098 set_curr_insn_location (loc
);
11099 tree lhs
= gimple_assign_lhs (g
);
11100 ops
.code
= gimple_assign_rhs_code (g
);
11101 ops
.type
= TREE_TYPE (lhs
);
11102 switch (get_gimple_rhs_class (ops
.code
))
11104 case GIMPLE_TERNARY_RHS
:
11105 ops
.op2
= gimple_assign_rhs3 (g
);
11107 case GIMPLE_BINARY_RHS
:
11108 ops
.op1
= gimple_assign_rhs2 (g
);
11110 /* Try to expand conditonal compare. */
11111 if (targetm
.have_ccmp ())
11113 gcc_checking_assert (targetm
.gen_ccmp_next
!= NULL
);
11114 r
= expand_ccmp_expr (g
, TYPE_MODE (ops
.type
));
11119 case GIMPLE_UNARY_RHS
:
11120 ops
.op0
= gimple_assign_rhs1 (g
);
11121 ops
.location
= loc
;
11122 r
= expand_expr_real_2 (&ops
, target
, tmode
, modifier
);
11124 case GIMPLE_SINGLE_RHS
:
11126 r
= expand_expr_real (gimple_assign_rhs1 (g
), target
,
11127 tmode
, modifier
, alt_rtl
,
11128 inner_reference_p
);
11132 gcc_unreachable ();
11134 set_curr_insn_location (saved_loc
);
11135 if (REG_P (r
) && !REG_EXPR (r
))
11136 set_reg_attrs_for_decl_rtl (lhs
, r
);
11141 expand_expr_real_1 (tree exp
, rtx target
, machine_mode tmode
,
11142 enum expand_modifier modifier
, rtx
*alt_rtl
,
11143 bool inner_reference_p
)
11145 rtx op0
, op1
, temp
, decl_rtl
;
11148 machine_mode mode
, dmode
;
11149 enum tree_code code
= TREE_CODE (exp
);
11150 rtx subtarget
, original_target
;
11152 bool reduce_bit_field
;
11153 location_t loc
= EXPR_LOCATION (exp
);
11154 struct separate_ops ops
;
11155 tree treeop0
, treeop1
, treeop2
;
11156 tree ssa_name
= NULL_TREE
;
  /* Some ABIs define padding bits in _BitInt uninitialized.  Normally, RTL
     expansion sign/zero extends integral types with less than mode precision
     when reading from bit-fields and after arithmetic operations (see
     REDUCE_BIT_FIELD in expand_expr_real_2) and on subsequent loads relies
     on those extensions to have been already performed, but because of the
     above for _BitInt they need to be sign/zero extended when reading from
     locations that could be exposed to ABI boundaries (when loading from
     objects in memory, or function arguments, return value).  Because we
     internally extend after arithmetic operations, we can avoid doing that
     when reading from SSA_NAMEs of vars.  */
#define EXTEND_BITINT(expr) \
  ((TREE_CODE (type) == BITINT_TYPE \
    && reduce_bit_field \
    && mode != BLKmode \
    && modifier != EXPAND_MEMORY \
    && modifier != EXPAND_WRITE \
    && modifier != EXPAND_INITIALIZER \
    && modifier != EXPAND_CONST_ADDRESS) \
   ? reduce_to_bit_field_precision ((expr), NULL_RTX, type) : (expr))
11179 type
= TREE_TYPE (exp
);
11180 mode
= TYPE_MODE (type
);
11181 unsignedp
= TYPE_UNSIGNED (type
);
11183 treeop0
= treeop1
= treeop2
= NULL_TREE
;
11184 if (!VL_EXP_CLASS_P (exp
))
11185 switch (TREE_CODE_LENGTH (code
))
11188 case 3: treeop2
= TREE_OPERAND (exp
, 2); /* FALLTHRU */
11189 case 2: treeop1
= TREE_OPERAND (exp
, 1); /* FALLTHRU */
11190 case 1: treeop0
= TREE_OPERAND (exp
, 0); /* FALLTHRU */
11198 ops
.location
= loc
;
11200 ignore
= (target
== const0_rtx
11201 || ((CONVERT_EXPR_CODE_P (code
)
11202 || code
== COND_EXPR
|| code
== VIEW_CONVERT_EXPR
)
11203 && TREE_CODE (type
) == VOID_TYPE
));
  /* An operation in what may be a bit-field type needs the
     result to be reduced to the precision of the bit-field type,
     which is narrower than that of the type's mode.  */
  reduce_bit_field = (!ignore
                      && INTEGRAL_TYPE_P (type)
                      && !type_has_mode_precision_p (type));
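  /* For example, a 3-bit unsigned bit-field type carried in QImode must
     have results of arithmetic masked back down to 3 bits.  */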
11212 /* If we are going to ignore this result, we need only do something
11213 if there is a side-effect somewhere in the expression. If there
11214 is, short-circuit the most common cases here. Note that we must
11215 not call expand_expr with anything but const0_rtx in case this
11216 is an initial expansion of a size that contains a PLACEHOLDER_EXPR. */
11220 if (! TREE_SIDE_EFFECTS (exp
))
11223 /* Ensure we reference a volatile object even if value is ignored, but
11224 don't do this if all we are doing is taking its address. */
11225 if (TREE_THIS_VOLATILE (exp
)
11226 && TREE_CODE (exp
) != FUNCTION_DECL
11227 && mode
!= VOIDmode
&& mode
!= BLKmode
11228 && modifier
!= EXPAND_CONST_ADDRESS
)
11230 temp
= expand_expr (exp
, NULL_RTX
, VOIDmode
, modifier
);
11232 copy_to_reg (temp
);
11236 if (TREE_CODE_CLASS (code
) == tcc_unary
11237 || code
== BIT_FIELD_REF
11238 || code
== COMPONENT_REF
11239 || code
== INDIRECT_REF
)
11240 return expand_expr (treeop0
, const0_rtx
, VOIDmode
,
11243 else if (TREE_CODE_CLASS (code
) == tcc_binary
11244 || TREE_CODE_CLASS (code
) == tcc_comparison
11245 || code
== ARRAY_REF
|| code
== ARRAY_RANGE_REF
)
11247 expand_expr (treeop0
, const0_rtx
, VOIDmode
, modifier
);
11248 expand_expr (treeop1
, const0_rtx
, VOIDmode
, modifier
);
11255 if (reduce_bit_field
&& modifier
== EXPAND_STACK_PARM
)
11258 /* Use subtarget as the target for operand 0 of a binary operation. */
11259 subtarget
= get_subtarget (target
);
11260 original_target
= target
;
11266 tree function
= decl_function_context (exp
);
11268 temp
= label_rtx (exp
);
11269 temp
= gen_rtx_LABEL_REF (Pmode
, temp
);
11271 if (function
!= current_function_decl
11273 LABEL_REF_NONLOCAL_P (temp
) = 1;
11275 temp
= gen_rtx_MEM (FUNCTION_MODE
, temp
);
11280 /* ??? ivopts calls expander, without any preparation from
11281 out-of-ssa. So fake instructions as if this was an access to the
11282 base variable. This unnecessarily allocates a pseudo, see how we can
11283 reuse it, if partition base vars have it set already. */
11284 if (!currently_expanding_to_rtl
)
11286 tree var
= SSA_NAME_VAR (exp
);
11287 if (var
&& DECL_RTL_SET_P (var
))
11288 return DECL_RTL (var
);
11289 return gen_raw_REG (TYPE_MODE (TREE_TYPE (exp
)),
11290 LAST_VIRTUAL_REGISTER
+ 1);
11293 g
= get_gimple_for_ssa_name (exp
);
11294 /* For EXPAND_INITIALIZER try harder to get something simpler. */
11296 && modifier
== EXPAND_INITIALIZER
11297 && !SSA_NAME_IS_DEFAULT_DEF (exp
)
11298 && (optimize
|| !SSA_NAME_VAR (exp
)
11299 || DECL_IGNORED_P (SSA_NAME_VAR (exp
)))
11300 && stmt_is_replaceable_p (SSA_NAME_DEF_STMT (exp
)))
11301 g
= SSA_NAME_DEF_STMT (exp
);
11303 return expand_expr_real_gassign (as_a
<gassign
*> (g
), target
, tmode
,
11304 modifier
, alt_rtl
, inner_reference_p
);
11307 decl_rtl
= get_rtx_for_ssa_name (ssa_name
);
11308 exp
= SSA_NAME_VAR (ssa_name
);
11309 /* Optimize and avoid to EXTEND_BITINIT doing anything if it is an
11310 SSA_NAME computed within the current function. In such case the
11311 value have been already extended before. While if it is a function
11312 parameter, result or some memory location, we need to be prepared
11313 for some other compiler leaving the bits uninitialized. */
11314 if (!exp
|| VAR_P (exp
))
11315 reduce_bit_field
= false;
11316 goto expand_decl_rtl
;
11319 /* Allow accel compiler to handle variables that require special
11320 treatment, e.g. if they have been modified in some way earlier in
11321 compilation by the adjust_private_decl OpenACC hook. */
11322 if (flag_openacc
&& targetm
.goacc
.expand_var_decl
)
11324 temp
= targetm
.goacc
.expand_var_decl (exp
);
11328 /* Expand const VAR_DECLs with CONSTRUCTOR initializers that
11329 have scalar integer modes to a reg via store_constructor. */
11330 if (TREE_READONLY (exp
)
11331 && !TREE_SIDE_EFFECTS (exp
)
11332 && (modifier
== EXPAND_NORMAL
|| modifier
== EXPAND_STACK_PARM
)
11333 && immediate_const_ctor_p (DECL_INITIAL (exp
))
11334 && SCALAR_INT_MODE_P (TYPE_MODE (TREE_TYPE (exp
)))
11335 && crtl
->emit
.regno_pointer_align_length
11338 target
= gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp
)));
11339 store_constructor (DECL_INITIAL (exp
), target
, 0,
11340 int_expr_size (DECL_INITIAL (exp
)), false);
11343 /* ... fall through ... */
11346 /* If a static var's type was incomplete when the decl was written,
11347 but the type is complete now, lay out the decl now. */
11348 if (DECL_SIZE (exp
) == 0
11349 && COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (TREE_TYPE (exp
))
11350 && (TREE_STATIC (exp
) || DECL_EXTERNAL (exp
)))
11351 layout_decl (exp
, 0);
11355 case FUNCTION_DECL
:
11357 decl_rtl
= DECL_RTL (exp
);
11359 gcc_assert (decl_rtl
);
11361 /* DECL_MODE might change when TYPE_MODE depends on attribute target
11362 settings for VECTOR_TYPE_P that might switch for the function. */
11363 if (currently_expanding_to_rtl
11364 && code
== VAR_DECL
&& MEM_P (decl_rtl
)
11365 && VECTOR_TYPE_P (type
) && exp
&& DECL_MODE (exp
) != mode
)
11366 decl_rtl
= change_address (decl_rtl
, TYPE_MODE (type
), 0);
11368 decl_rtl
= copy_rtx (decl_rtl
);
11370 /* Record writes to register variables. */
11371 if (modifier
== EXPAND_WRITE
11372 && REG_P (decl_rtl
)
11373 && HARD_REGISTER_P (decl_rtl
))
11374 add_to_hard_reg_set (&crtl
->asm_clobbers
,
11375 GET_MODE (decl_rtl
), REGNO (decl_rtl
));
11377 /* Ensure variable marked as used even if it doesn't go through
11378 a parser. If it hasn't be used yet, write out an external
11381 TREE_USED (exp
) = 1;
11383 /* Show we haven't gotten RTL for this yet. */
11386 /* Variables inherited from containing functions should have
11387 been lowered by this point. */
11390 tree context
= decl_function_context (exp
);
11391 gcc_assert (SCOPE_FILE_SCOPE_P (context
)
11392 || context
== current_function_decl
11393 || TREE_STATIC (exp
)
11394 || DECL_EXTERNAL (exp
)
11395 /* ??? C++ creates functions that are not
11397 || TREE_CODE (exp
) == FUNCTION_DECL
);
11400 /* This is the case of an array whose size is to be determined
11401 from its initializer, while the initializer is still being parsed.
11402 ??? We aren't parsing while expanding anymore. */
11404 if (MEM_P (decl_rtl
) && REG_P (XEXP (decl_rtl
, 0)))
11405 temp
= validize_mem (decl_rtl
);
11407 /* If DECL_RTL is memory, we are in the normal case and the
11408 address is not valid, get the address into a register. */
11410 else if (MEM_P (decl_rtl
) && modifier
!= EXPAND_INITIALIZER
)
11413 *alt_rtl
= decl_rtl
;
11414 decl_rtl
= use_anchored_address (decl_rtl
);
11415 if (modifier
!= EXPAND_CONST_ADDRESS
11416 && modifier
!= EXPAND_SUM
11417 && !memory_address_addr_space_p (exp
? DECL_MODE (exp
)
11418 : GET_MODE (decl_rtl
),
11419 XEXP (decl_rtl
, 0),
11420 MEM_ADDR_SPACE (decl_rtl
)))
11421 temp
= replace_equiv_address (decl_rtl
,
11422 copy_rtx (XEXP (decl_rtl
, 0)));
11425 /* If we got something, return it. But first, set the alignment
11426 if the address is a register. */
11429 if (exp
&& MEM_P (temp
) && REG_P (XEXP (temp
, 0)))
11430 mark_reg_pointer (XEXP (temp
, 0), DECL_ALIGN (exp
));
11432 else if (MEM_P (decl_rtl
))
11438 && modifier
!= EXPAND_WRITE
11439 && modifier
!= EXPAND_MEMORY
11440 && modifier
!= EXPAND_INITIALIZER
11441 && modifier
!= EXPAND_CONST_ADDRESS
11442 && modifier
!= EXPAND_SUM
11443 && !inner_reference_p
11445 && MEM_ALIGN (temp
) < GET_MODE_ALIGNMENT (mode
))
11446 temp
= expand_misaligned_mem_ref (temp
, mode
, unsignedp
,
11447 MEM_ALIGN (temp
), NULL_RTX
, NULL
);
11449 return EXTEND_BITINT (temp
);
11453 dmode
= DECL_MODE (exp
);
11455 dmode
= TYPE_MODE (TREE_TYPE (ssa_name
));
11457 /* If the mode of DECL_RTL does not match that of the decl,
11458 there are two cases: we are dealing with a BLKmode value
11459 that is returned in a register, or we are dealing with
11460 a promoted value. In the latter case, return a SUBREG
11461 of the wanted mode, but mark it so that we know that it
11462 was already extended. */
11463 if (REG_P (decl_rtl
)
11464 && dmode
!= BLKmode
11465 && GET_MODE (decl_rtl
) != dmode
)
11467 machine_mode pmode
;
11469 /* Get the signedness to be used for this variable. Ensure we get
11470 the same mode we got when the variable was declared. */
11471 if (code
!= SSA_NAME
)
11472 pmode
= promote_decl_mode (exp
, &unsignedp
);
11473 else if ((g
= SSA_NAME_DEF_STMT (ssa_name
))
11474 && gimple_code (g
) == GIMPLE_CALL
11475 && !gimple_call_internal_p (g
))
11476 pmode
= promote_function_mode (type
, mode
, &unsignedp
,
11477 gimple_call_fntype (g
),
11480 pmode
= promote_ssa_mode (ssa_name
, &unsignedp
);
11481 gcc_assert (GET_MODE (decl_rtl
) == pmode
);
11483 /* Some ABIs require scalar floating point modes to be passed
11484 in a wider scalar integer mode. We need to explicitly
11485 truncate to an integer mode of the correct precision before
11486 using a SUBREG to reinterpret as a floating point value. */
11487 if (SCALAR_FLOAT_MODE_P (mode
)
11488 && SCALAR_INT_MODE_P (pmode
)
11489 && known_lt (GET_MODE_SIZE (mode
), GET_MODE_SIZE (pmode
)))
11490 return convert_wider_int_to_float (mode
, pmode
, decl_rtl
);
11492 temp
= gen_lowpart_SUBREG (mode
, decl_rtl
);
11493 SUBREG_PROMOTED_VAR_P (temp
) = 1;
11494 SUBREG_PROMOTED_SET (temp
, unsignedp
);
11495 return EXTEND_BITINT (temp
);
11498 return EXTEND_BITINT (decl_rtl
);
11502 if (TREE_CODE (type
) == BITINT_TYPE
)
11504 unsigned int prec
= TYPE_PRECISION (type
);
11505 struct bitint_info info
;
11506 bool ok
= targetm
.c
.bitint_type_info (prec
, &info
);
11508 scalar_int_mode limb_mode
11509 = as_a
<scalar_int_mode
> (info
.limb_mode
);
11510 unsigned int limb_prec
= GET_MODE_PRECISION (limb_mode
);
11511 if (prec
> limb_prec
&& prec
> MAX_FIXED_MODE_SIZE
)
11513 /* Emit large/huge _BitInt INTEGER_CSTs into memory. */
11514 exp
= tree_output_constant_def (exp
);
11515 return expand_expr (exp
, target
, VOIDmode
, modifier
);
11519 /* Given that TYPE_PRECISION (type) is not always equal to
11520 GET_MODE_PRECISION (TYPE_MODE (type)), we need to extend from
11521 the former to the latter according to the signedness of the
11523 scalar_int_mode int_mode
= SCALAR_INT_TYPE_MODE (type
);
11524 temp
= immed_wide_int_const
11525 (wi::to_wide (exp
, GET_MODE_PRECISION (int_mode
)), int_mode
);
11531 tree tmp
= NULL_TREE
;
11532 if (VECTOR_MODE_P (mode
))
11533 return const_vector_from_tree (exp
);
11534 scalar_int_mode int_mode
;
11535 if (is_int_mode (mode
, &int_mode
))
11537 tree type_for_mode
= lang_hooks
.types
.type_for_mode (int_mode
, 1);
11539 tmp
= fold_unary_loc (loc
, VIEW_CONVERT_EXPR
,
11540 type_for_mode
, exp
);
11544 vec
<constructor_elt
, va_gc
> *v
;
11545 /* Constructors need to be fixed-length. FIXME. */
11546 unsigned int nunits
= VECTOR_CST_NELTS (exp
).to_constant ();
11547 vec_alloc (v
, nunits
);
11548 for (unsigned int i
= 0; i
< nunits
; ++i
)
11549 CONSTRUCTOR_APPEND_ELT (v
, NULL_TREE
, VECTOR_CST_ELT (exp
, i
));
11550 tmp
= build_constructor (type
, v
);
11552 return expand_expr (tmp
, ignore
? const0_rtx
: target
,
11557 if (modifier
== EXPAND_WRITE
)
11559 /* Writing into CONST_DECL is always invalid, but handle it
11561 addr_space_t as
= TYPE_ADDR_SPACE (TREE_TYPE (exp
));
11562 scalar_int_mode address_mode
= targetm
.addr_space
.address_mode (as
);
11563 op0
= expand_expr_addr_expr_1 (exp
, NULL_RTX
, address_mode
,
11564 EXPAND_NORMAL
, as
);
11565 op0
= memory_address_addr_space (mode
, op0
, as
);
11566 temp
= gen_rtx_MEM (mode
, op0
);
11567 set_mem_addr_space (temp
, as
);
11570 return expand_expr (DECL_INITIAL (exp
), target
, VOIDmode
, modifier
);
11573 /* If optimized, generate immediate CONST_DOUBLE
11574 which will be turned into memory by reload if necessary.
11576 We used to force a register so that loop.c could see it. But
11577 this does not allow gen_* patterns to perform optimizations with
11578 the constants. It also produces two insns in cases like "x = 1.0;".
11579 On most machines, floating-point constants are not permitted in
11580 many insns, so we'd end up copying it to a register in any case.
11582 Now, we do the copying in expand_binop, if appropriate. */
11583 return const_double_from_real_value (TREE_REAL_CST (exp
),
11584 TYPE_MODE (TREE_TYPE (exp
)));
11587 return CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (exp
),
11588 TYPE_MODE (TREE_TYPE (exp
)));
11591 /* Handle evaluating a complex constant in a CONCAT target. */
11592 if (original_target
&& GET_CODE (original_target
) == CONCAT
)
11596 mode
= TYPE_MODE (TREE_TYPE (TREE_TYPE (exp
)));
11597 rtarg
= XEXP (original_target
, 0);
11598 itarg
= XEXP (original_target
, 1);
11600 /* Move the real and imaginary parts separately. */
11601 op0
= expand_expr (TREE_REALPART (exp
), rtarg
, mode
, EXPAND_NORMAL
);
11602 op1
= expand_expr (TREE_IMAGPART (exp
), itarg
, mode
, EXPAND_NORMAL
);
11605 emit_move_insn (rtarg
, op0
);
11607 emit_move_insn (itarg
, op1
);
11609 return original_target
;
11615 temp
= expand_expr_constant (exp
, 1, modifier
);
11617 /* temp contains a constant address.
11618 On RISC machines where a constant address isn't valid,
11619 make some insns to get that address into a register. */
11620 if (modifier
!= EXPAND_CONST_ADDRESS
11621 && modifier
!= EXPAND_INITIALIZER
11622 && modifier
!= EXPAND_SUM
11623 && ! memory_address_addr_space_p (mode
, XEXP (temp
, 0),
11624 MEM_ADDR_SPACE (temp
)))
11625 return replace_equiv_address (temp
,
11626 copy_rtx (XEXP (temp
, 0)));
11630 return immed_wide_int_const (poly_int_cst_value (exp
), mode
);
11634 tree val
= treeop0
;
11635 rtx ret
= expand_expr_real_1 (val
, target
, tmode
, modifier
, alt_rtl
,
11636 inner_reference_p
);
11638 if (!SAVE_EXPR_RESOLVED_P (exp
))
11640 /* We can indeed still hit this case, typically via builtin
11641 expanders calling save_expr immediately before expanding
11642 something. Assume this means that we only have to deal
11643 with non-BLKmode values. */
11644 gcc_assert (GET_MODE (ret
) != BLKmode
);
11646 val
= build_decl (curr_insn_location (),
11647 VAR_DECL
, NULL
, TREE_TYPE (exp
));
11648 DECL_ARTIFICIAL (val
) = 1;
11649 DECL_IGNORED_P (val
) = 1;
11651 TREE_OPERAND (exp
, 0) = treeop0
;
11652 SAVE_EXPR_RESOLVED_P (exp
) = 1;
11654 if (!CONSTANT_P (ret
))
11655 ret
= copy_to_reg (ret
);
11656 SET_DECL_RTL (val
, ret
);
11664 /* If we don't need the result, just ensure we evaluate any
11668 unsigned HOST_WIDE_INT idx
;
11671 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp
), idx
, value
)
11672 expand_expr (value
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
11677 return expand_constructor (exp
, target
, modifier
, false);
11679 case TARGET_MEM_REF
:
11682 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp
, 0))));
11683 unsigned int align
;
11685 op0
= addr_for_mem_ref (exp
, as
, true);
11686 op0
= memory_address_addr_space (mode
, op0
, as
);
11687 temp
= gen_rtx_MEM (mode
, op0
);
11688 set_mem_attributes (temp
, exp
, 0);
11689 set_mem_addr_space (temp
, as
);
11690 align
= get_object_alignment (exp
);
11691 if (modifier
!= EXPAND_WRITE
11692 && modifier
!= EXPAND_MEMORY
11694 && align
< GET_MODE_ALIGNMENT (mode
))
11695 temp
= expand_misaligned_mem_ref (temp
, mode
, unsignedp
,
11696 align
, NULL_RTX
, NULL
);
11697 return EXTEND_BITINT (temp
);
    case MEM_REF:
      {
	const bool reverse = REF_REVERSE_STORAGE_ORDER (exp);
	addr_space_t as
	  = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
	machine_mode address_mode;
	tree base = TREE_OPERAND (exp, 0);
	gimple *def_stmt;
	unsigned int align;

	/* Handle expansion of non-aliased memory with non-BLKmode.  That
	   might end up in a register.  */
	if (mem_ref_refers_to_non_mem_p (exp))
	  {
	    poly_int64 offset = mem_ref_offset (exp).force_shwi ();
	    base = TREE_OPERAND (base, 0);
	    poly_uint64 type_size;
	    if (known_eq (offset, 0)
		&& poly_int_tree_p (TYPE_SIZE (type), &type_size)
		&& known_eq (GET_MODE_BITSIZE (DECL_MODE (base)), type_size))
	      return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base),
				  target, tmode, modifier);
	    if (TYPE_MODE (type) == BLKmode)
	      {
		temp = assign_stack_temp (DECL_MODE (base),
					  GET_MODE_SIZE (DECL_MODE (base)));
		store_expr (base, temp, 0, false, false);
		temp = adjust_address (temp, BLKmode, offset);
		set_mem_size (temp, int_size_in_bytes (type));
		return temp;
	      }
	    exp = build3 (BIT_FIELD_REF, type, base, TYPE_SIZE (type),
			  bitsize_int (offset * BITS_PER_UNIT));
	    REF_REVERSE_STORAGE_ORDER (exp) = reverse;
	    return expand_expr (exp, target, tmode, modifier);
	  }
	address_mode = targetm.addr_space.address_mode (as);
	if ((def_stmt = get_def_for_expr (base, BIT_AND_EXPR)))
	  {
	    tree mask = gimple_assign_rhs2 (def_stmt);
	    base = build2 (BIT_AND_EXPR, TREE_TYPE (base),
			   gimple_assign_rhs1 (def_stmt), mask);
	    TREE_OPERAND (exp, 0) = base;
	  }
	align = get_object_alignment (exp);
	op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_SUM);
	op0 = memory_address_addr_space (mode, op0, as);
	if (!integer_zerop (TREE_OPERAND (exp, 1)))
	  {
	    rtx off = immed_wide_int_const (mem_ref_offset (exp),
					    address_mode);
	    op0 = simplify_gen_binary (PLUS, address_mode, op0, off);
	    op0 = memory_address_addr_space (mode, op0, as);
	  }
	temp = gen_rtx_MEM (mode, op0);
	set_mem_attributes (temp, exp, 0);
	set_mem_addr_space (temp, as);
	if (TREE_THIS_VOLATILE (exp))
	  MEM_VOLATILE_P (temp) = 1;
	if (modifier == EXPAND_WRITE || modifier == EXPAND_MEMORY)
	  return temp;
	if (!inner_reference_p
	    && align < GET_MODE_ALIGNMENT (mode))
	  temp = expand_misaligned_mem_ref (temp, mode, unsignedp, align,
					    modifier == EXPAND_STACK_PARM
					    ? NULL_RTX : target, alt_rtl);
	if (reverse)
	  temp = flip_storage_order (mode, temp);
	return EXTEND_BITINT (temp);
      }
11774 tree array
= treeop0
;
11775 tree index
= treeop1
;
	/* Fold an expression like: "foo"[2].
	   This is not done in fold so it won't happen inside &.
	   Don't fold if this is for wide characters since it's too
	   difficult to do correctly and this is a very rare case.  */
11783 if (modifier
!= EXPAND_CONST_ADDRESS
11784 && modifier
!= EXPAND_INITIALIZER
11785 && modifier
!= EXPAND_MEMORY
)
11787 tree t
= fold_read_from_constant_string (exp
);
11790 return expand_expr (t
, target
, tmode
, modifier
);
	/* If this is a constant index into a constant array,
	   just get the value from the array.  Handle both the cases when
	   we have an explicit constructor and when our operand is a variable
	   that was declared const.  */
11798 if (modifier
!= EXPAND_CONST_ADDRESS
11799 && modifier
!= EXPAND_INITIALIZER
11800 && modifier
!= EXPAND_MEMORY
11801 && TREE_CODE (array
) == CONSTRUCTOR
11802 && ! TREE_SIDE_EFFECTS (array
)
11803 && TREE_CODE (index
) == INTEGER_CST
)
11805 unsigned HOST_WIDE_INT ix
;
11808 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (array
), ix
,
11810 if (tree_int_cst_equal (field
, index
))
11812 if (!TREE_SIDE_EFFECTS (value
))
11813 return expand_expr (fold (value
), target
, tmode
, modifier
);
11818 else if (optimize
>= 1
11819 && modifier
!= EXPAND_CONST_ADDRESS
11820 && modifier
!= EXPAND_INITIALIZER
11821 && modifier
!= EXPAND_MEMORY
11822 && TREE_READONLY (array
) && ! TREE_SIDE_EFFECTS (array
)
11823 && TREE_CODE (index
) == INTEGER_CST
11824 && (VAR_P (array
) || TREE_CODE (array
) == CONST_DECL
)
11825 && (init
= ctor_for_folding (array
)) != error_mark_node
)
11827 if (init
== NULL_TREE
)
11829 tree value
= build_zero_cst (type
);
11830 if (TREE_CODE (value
) == CONSTRUCTOR
)
11832 /* If VALUE is a CONSTRUCTOR, this optimization is only
11833 useful if this doesn't store the CONSTRUCTOR into
11834 memory. If it does, it is more efficient to just
11835 load the data from the array directly. */
11836 rtx ret
= expand_constructor (value
, target
,
11838 if (ret
== NULL_RTX
)
11843 return expand_expr (value
, target
, tmode
, modifier
);
11845 else if (TREE_CODE (init
) == CONSTRUCTOR
)
11847 unsigned HOST_WIDE_INT ix
;
11850 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init
), ix
,
11852 if (tree_int_cst_equal (field
, index
))
11854 if (TREE_SIDE_EFFECTS (value
))
11857 if (TREE_CODE (value
) == CONSTRUCTOR
)
11859 /* If VALUE is a CONSTRUCTOR, this
11860 optimization is only useful if
11861 this doesn't store the CONSTRUCTOR
11862 into memory. If it does, it is more
11863 efficient to just load the data from
11864 the array directly. */
11865 rtx ret
= expand_constructor (value
, target
,
11867 if (ret
== NULL_RTX
)
11872 expand_expr (fold (value
), target
, tmode
, modifier
);
11875 else if (TREE_CODE (init
) == STRING_CST
)
11877 tree low_bound
= array_ref_low_bound (exp
);
11878 tree index1
= fold_convert_loc (loc
, sizetype
, treeop1
);
	    /* Optimize the special case of a zero lower bound.

	       We convert the lower bound to sizetype to avoid problems
	       with constant folding.  E.g. suppose the lower bound is
	       1 and its mode is QI.  Without the conversion
		  (ARRAY + (INDEX - (unsigned char)1))
	       becomes
		  (ARRAY + (-(unsigned char)1) + INDEX)
	       which becomes
		  (ARRAY + 255 + INDEX).  Oops!  */
11890 if (!integer_zerop (low_bound
))
11891 index1
= size_diffop_loc (loc
, index1
,
11892 fold_convert_loc (loc
, sizetype
,
11895 if (tree_fits_uhwi_p (index1
)
11896 && compare_tree_int (index1
, TREE_STRING_LENGTH (init
)) < 0)
11898 tree char_type
= TREE_TYPE (TREE_TYPE (init
));
11899 scalar_int_mode char_mode
;
11901 if (is_int_mode (TYPE_MODE (char_type
), &char_mode
)
11902 && GET_MODE_SIZE (char_mode
) == 1)
11903 return gen_int_mode (TREE_STRING_POINTER (init
)
11904 [TREE_INT_CST_LOW (index1
)],
11910 goto normal_inner_ref
;
11912 case COMPONENT_REF
:
11913 gcc_assert (TREE_CODE (treeop0
) != CONSTRUCTOR
);
11914 /* Fall through. */
11915 case BIT_FIELD_REF
:
11916 case ARRAY_RANGE_REF
:
11919 machine_mode mode1
, mode2
;
11920 poly_int64 bitsize
, bitpos
, bytepos
;
11922 int reversep
, volatilep
= 0;
11924 = get_inner_reference (exp
, &bitsize
, &bitpos
, &offset
, &mode1
,
11925 &unsignedp
, &reversep
, &volatilep
);
11926 rtx orig_op0
, memloc
;
11927 bool clear_mem_expr
= false;
11928 bool must_force_mem
;
11930 /* If we got back the original object, something is wrong. Perhaps
11931 we are evaluating an expression too early. In any event, don't
11932 infinitely recurse. */
11933 gcc_assert (tem
!= exp
);
11935 /* Make sure bitpos is not negative, this can wreak havoc later. */
11936 if (maybe_lt (bitpos
, 0))
11938 gcc_checking_assert (offset
== NULL_TREE
);
11939 offset
= size_int (bits_to_bytes_round_down (bitpos
));
11940 bitpos
= num_trailing_bits (bitpos
);
	/* If we have either an offset, a BLKmode result, or a reference
	   outside the underlying object, we must force it to memory.
	   Such a case can occur in Ada if we have unchecked conversion
	   of an expression from a scalar type to an aggregate type or
	   for an ARRAY_RANGE_REF whose type is BLKmode, or if we were
	   passed a partially uninitialized object or a view-conversion
	   to a larger size.  */
11950 must_force_mem
= offset
!= NULL_TREE
11951 || mode1
== BLKmode
11952 || (mode
== BLKmode
11953 && !int_mode_for_size (bitsize
, 1).exists ());
11955 const enum expand_modifier tem_modifier
11958 : modifier
== EXPAND_SUM
? EXPAND_NORMAL
: modifier
;
11960 /* If TEM's type is a union of variable size, pass TARGET to the inner
11961 computation, since it will need a temporary and TARGET is known
11962 to have to do. This occurs in unchecked conversion in Ada. */
11963 const rtx tem_target
11964 = TREE_CODE (TREE_TYPE (tem
)) == UNION_TYPE
11965 && COMPLETE_TYPE_P (TREE_TYPE (tem
))
11966 && TREE_CODE (TYPE_SIZE (TREE_TYPE (tem
))) != INTEGER_CST
11967 && modifier
!= EXPAND_STACK_PARM
11972 = expand_expr_real (tem
, tem_target
, VOIDmode
, tem_modifier
, NULL
,
11975 /* If the field has a mode, we want to access it in the
11976 field's mode, not the computed mode.
11977 If a MEM has VOIDmode (external with incomplete type),
11978 use BLKmode for it instead. */
11981 if (mode1
!= VOIDmode
)
11982 op0
= adjust_address (op0
, mode1
, 0);
11983 else if (GET_MODE (op0
) == VOIDmode
)
11984 op0
= adjust_address (op0
, BLKmode
, 0);
11988 = CONSTANT_P (op0
) ? TYPE_MODE (TREE_TYPE (tem
)) : GET_MODE (op0
);
11990 /* See above for the rationale. */
11991 if (maybe_gt (bitpos
+ bitsize
, GET_MODE_BITSIZE (mode2
)))
11992 must_force_mem
= true;
11994 /* Handle CONCAT first. */
11995 if (GET_CODE (op0
) == CONCAT
&& !must_force_mem
)
11997 if (known_eq (bitpos
, 0)
11998 && known_eq (bitsize
, GET_MODE_BITSIZE (GET_MODE (op0
)))
11999 && COMPLEX_MODE_P (mode1
)
12000 && COMPLEX_MODE_P (GET_MODE (op0
))
12001 && (GET_MODE_PRECISION (GET_MODE_INNER (mode1
))
12002 == GET_MODE_PRECISION (GET_MODE_INNER (GET_MODE (op0
)))))
12005 op0
= flip_storage_order (GET_MODE (op0
), op0
);
12006 if (mode1
!= GET_MODE (op0
))
12009 for (int i
= 0; i
< 2; i
++)
12011 rtx op
= read_complex_part (op0
, i
!= 0);
12012 if (GET_CODE (op
) == SUBREG
)
12013 op
= force_reg (GET_MODE (op
), op
);
12014 temp
= gen_lowpart_common (GET_MODE_INNER (mode1
), op
);
12019 if (!REG_P (op
) && !MEM_P (op
))
12020 op
= force_reg (GET_MODE (op
), op
);
12021 op
= gen_lowpart (GET_MODE_INNER (mode1
), op
);
12025 op0
= gen_rtx_CONCAT (mode1
, parts
[0], parts
[1]);
12029 if (known_eq (bitpos
, 0)
12030 && known_eq (bitsize
,
12031 GET_MODE_BITSIZE (GET_MODE (XEXP (op0
, 0))))
12032 && maybe_ne (bitsize
, 0))
12034 op0
= XEXP (op0
, 0);
12035 mode2
= GET_MODE (op0
);
12037 else if (known_eq (bitpos
,
12038 GET_MODE_BITSIZE (GET_MODE (XEXP (op0
, 0))))
12039 && known_eq (bitsize
,
12040 GET_MODE_BITSIZE (GET_MODE (XEXP (op0
, 1))))
12041 && maybe_ne (bitpos
, 0)
12042 && maybe_ne (bitsize
, 0))
12044 op0
= XEXP (op0
, 1);
12046 mode2
= GET_MODE (op0
);
12049 /* Otherwise force into memory. */
12050 must_force_mem
= true;
12053 /* If this is a constant, put it in a register if it is a legitimate
12054 constant and we don't need a memory reference. */
12055 if (CONSTANT_P (op0
)
12056 && mode2
!= BLKmode
12057 && targetm
.legitimate_constant_p (mode2
, op0
)
12058 && !must_force_mem
)
12059 op0
= force_reg (mode2
, op0
);
12061 /* Otherwise, if this is a constant, try to force it to the constant
12062 pool. Note that back-ends, e.g. MIPS, may refuse to do so if it
12063 is a legitimate constant. */
12064 else if (CONSTANT_P (op0
) && (memloc
= force_const_mem (mode2
, op0
)))
12065 op0
= validize_mem (memloc
);
	/* Otherwise, if this is a constant or the object is not in memory
	   and needs to be, put it there.  */
12069 else if (CONSTANT_P (op0
) || (!MEM_P (op0
) && must_force_mem
))
12071 memloc
= assign_temp (TREE_TYPE (tem
), 1, 1);
12072 emit_move_insn (memloc
, op0
);
12074 clear_mem_expr
= true;
12079 machine_mode address_mode
;
12080 rtx offset_rtx
= expand_expr (offset
, NULL_RTX
, VOIDmode
,
12083 gcc_assert (MEM_P (op0
));
12085 address_mode
= get_address_mode (op0
);
12086 if (GET_MODE (offset_rtx
) != address_mode
)
12088 /* We cannot be sure that the RTL in offset_rtx is valid outside
12089 of a memory address context, so force it into a register
12090 before attempting to convert it to the desired mode. */
12091 offset_rtx
= force_operand (offset_rtx
, NULL_RTX
);
12092 offset_rtx
= convert_to_mode (address_mode
, offset_rtx
, 0);
12095 /* See the comment in expand_assignment for the rationale. */
12096 if (mode1
!= VOIDmode
12097 && maybe_ne (bitpos
, 0)
12098 && maybe_gt (bitsize
, 0)
12099 && multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
12100 && multiple_p (bitpos
, bitsize
)
12101 && multiple_p (bitsize
, GET_MODE_ALIGNMENT (mode1
))
12102 && MEM_ALIGN (op0
) >= GET_MODE_ALIGNMENT (mode1
))
12104 op0
= adjust_address (op0
, mode1
, bytepos
);
12108 op0
= offset_address (op0
, offset_rtx
,
12109 highest_pow2_factor (offset
));
12112 /* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
12113 record its alignment as BIGGEST_ALIGNMENT. */
12115 && known_eq (bitpos
, 0)
12117 && is_aligning_offset (offset
, tem
))
12118 set_mem_align (op0
, BIGGEST_ALIGNMENT
);
12120 /* Don't forget about volatility even if this is a bitfield. */
12121 if (MEM_P (op0
) && volatilep
&& ! MEM_VOLATILE_P (op0
))
12123 if (op0
== orig_op0
)
12124 op0
= copy_rtx (op0
);
12126 MEM_VOLATILE_P (op0
) = 1;
12129 if (MEM_P (op0
) && TREE_CODE (tem
) == FUNCTION_DECL
)
12131 if (op0
== orig_op0
)
12132 op0
= copy_rtx (op0
);
12134 set_mem_align (op0
, BITS_PER_UNIT
);
12137 /* In cases where an aligned union has an unaligned object
12138 as a field, we might be extracting a BLKmode value from
12139 an integer-mode (e.g., SImode) object. Handle this case
12140 by doing the extract into an object as wide as the field
12141 (which we know to be the width of a basic mode), then
12142 storing into memory, and changing the mode to BLKmode. */
12143 if (mode1
== VOIDmode
12144 || REG_P (op0
) || GET_CODE (op0
) == SUBREG
12145 || (mode1
!= BLKmode
&& ! direct_load
[(int) mode1
]
12146 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_INT
12147 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_FLOAT
12148 && modifier
!= EXPAND_CONST_ADDRESS
12149 && modifier
!= EXPAND_INITIALIZER
12150 && modifier
!= EXPAND_MEMORY
)
12151 /* If the bitfield is volatile and the bitsize
12152 is narrower than the access size of the bitfield,
12153 we need to extract bitfields from the access. */
12154 || (volatilep
&& TREE_CODE (exp
) == COMPONENT_REF
12155 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (exp
, 1))
12156 && mode1
!= BLKmode
12157 && maybe_lt (bitsize
, GET_MODE_SIZE (mode1
) * BITS_PER_UNIT
))
12158 /* If the field isn't aligned enough to fetch as a memref,
12159 fetch it as a bit field. */
12160 || (mode1
!= BLKmode
12162 ? MEM_ALIGN (op0
) < GET_MODE_ALIGNMENT (mode1
)
12163 || !multiple_p (bitpos
, GET_MODE_ALIGNMENT (mode1
))
12164 : TYPE_ALIGN (TREE_TYPE (tem
)) < GET_MODE_ALIGNMENT (mode
)
12165 || !multiple_p (bitpos
, GET_MODE_ALIGNMENT (mode
)))
12166 && modifier
!= EXPAND_MEMORY
12167 && ((modifier
== EXPAND_CONST_ADDRESS
12168 || modifier
== EXPAND_INITIALIZER
)
12170 : targetm
.slow_unaligned_access (mode1
,
12172 || !multiple_p (bitpos
, BITS_PER_UNIT
)))
12173 /* If the type and the field are a constant size and the
12174 size of the type isn't the same size as the bitfield,
12175 we must use bitfield operations. */
12176 || (known_size_p (bitsize
)
12177 && TYPE_SIZE (TREE_TYPE (exp
))
12178 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp
)))
12179 && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp
))),
12182 machine_mode ext_mode
= mode
;
12184 if (ext_mode
== BLKmode
12185 && ! (target
!= 0 && MEM_P (op0
)
12187 && multiple_p (bitpos
, BITS_PER_UNIT
)))
12188 ext_mode
= int_mode_for_size (bitsize
, 1).else_blk ();
12190 if (ext_mode
== BLKmode
)
12193 target
= assign_temp (type
, 1, 1);
12195 /* ??? Unlike the similar test a few lines below, this one is
12196 very likely obsolete. */
12197 if (known_eq (bitsize
, 0))
12200 /* In this case, BITPOS must start at a byte boundary and
12201 TARGET, if specified, must be a MEM. */
12202 gcc_assert (MEM_P (op0
)
12203 && (!target
|| MEM_P (target
)));
12205 bytepos
= exact_div (bitpos
, BITS_PER_UNIT
);
12206 poly_int64 bytesize
= bits_to_bytes_round_up (bitsize
);
12207 emit_block_move (target
,
12208 adjust_address (op0
, VOIDmode
, bytepos
),
12209 gen_int_mode (bytesize
, Pmode
),
12210 (modifier
== EXPAND_STACK_PARM
12211 ? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL
));
	    /* If we have nothing to extract, the result will be 0 for targets
	       with SHIFT_COUNT_TRUNCATED == 0 and garbage otherwise.  Always
	       return 0 for the sake of consistency, as reading a zero-sized
	       bitfield is valid in Ada and the value is fully specified.  */
12220 if (known_eq (bitsize
, 0))
12223 op0
= validize_mem (op0
);
12225 if (MEM_P (op0
) && REG_P (XEXP (op0
, 0)))
12226 mark_reg_pointer (XEXP (op0
, 0), MEM_ALIGN (op0
));
12228 /* If the result has aggregate type and the extraction is done in
12229 an integral mode, then the field may be not aligned on a byte
12230 boundary; in this case, if it has reverse storage order, it
12231 needs to be extracted as a scalar field with reverse storage
12232 order and put back into memory order afterwards. */
12233 if (AGGREGATE_TYPE_P (type
)
12234 && GET_MODE_CLASS (ext_mode
) == MODE_INT
)
12235 reversep
= TYPE_REVERSE_STORAGE_ORDER (type
);
12237 gcc_checking_assert (known_ge (bitpos
, 0));
12238 op0
= extract_bit_field (op0
, bitsize
, bitpos
, unsignedp
,
12239 (modifier
== EXPAND_STACK_PARM
12240 ? NULL_RTX
: target
),
12241 ext_mode
, ext_mode
, reversep
, alt_rtl
);
12243 /* If the result has aggregate type and the mode of OP0 is an
12244 integral mode then, if BITSIZE is narrower than this mode
12245 and this is for big-endian data, we must put the field
12246 into the high-order bits. And we must also put it back
12247 into memory order if it has been previously reversed. */
12248 scalar_int_mode op0_mode
;
12249 if (AGGREGATE_TYPE_P (type
)
12250 && is_int_mode (GET_MODE (op0
), &op0_mode
))
12252 HOST_WIDE_INT size
= GET_MODE_BITSIZE (op0_mode
);
12254 gcc_checking_assert (known_le (bitsize
, size
));
12255 if (maybe_lt (bitsize
, size
)
12256 && reversep
? !BYTES_BIG_ENDIAN
: BYTES_BIG_ENDIAN
)
12257 op0
= expand_shift (LSHIFT_EXPR
, op0_mode
, op0
,
12258 size
- bitsize
, op0
, 1);
12261 op0
= flip_storage_order (op0_mode
, op0
);
12264 /* If the result type is BLKmode, store the data into a temporary
12265 of the appropriate type, but with the mode corresponding to the
12266 mode for the data we have (op0's mode). */
12267 if (mode
== BLKmode
)
12270 = assign_stack_temp_for_type (ext_mode
,
12271 GET_MODE_BITSIZE (ext_mode
),
12273 emit_move_insn (new_rtx
, op0
);
12274 op0
= copy_rtx (new_rtx
);
12275 PUT_MODE (op0
, BLKmode
);
12281 /* If the result is BLKmode, use that to access the object
12283 if (mode
== BLKmode
)
12286 /* Get a reference to just this component. */
12287 bytepos
= bits_to_bytes_round_down (bitpos
);
12288 if (modifier
== EXPAND_CONST_ADDRESS
12289 || modifier
== EXPAND_SUM
|| modifier
== EXPAND_INITIALIZER
)
12290 op0
= adjust_address_nv (op0
, mode1
, bytepos
);
12292 op0
= adjust_address (op0
, mode1
, bytepos
);
12294 if (op0
== orig_op0
)
12295 op0
= copy_rtx (op0
);
12297 /* Don't set memory attributes if the base expression is
12298 SSA_NAME that got expanded as a MEM or a CONSTANT. In that case,
12299 we should just honor its original memory attributes. */
12300 if (!(TREE_CODE (tem
) == SSA_NAME
12301 && (MEM_P (orig_op0
) || CONSTANT_P (orig_op0
))))
12302 set_mem_attributes (op0
, exp
, 0);
12304 if (REG_P (XEXP (op0
, 0)))
12305 mark_reg_pointer (XEXP (op0
, 0), MEM_ALIGN (op0
));
	  /* If op0 is a temporary because the original expression was forced
	     to memory, clear MEM_EXPR so that the original expression cannot
	     be marked as addressable through MEM_EXPR of the temporary.  */
12310 if (clear_mem_expr
)
12311 set_mem_expr (op0
, NULL_TREE
);
12313 MEM_VOLATILE_P (op0
) |= volatilep
;
12316 && modifier
!= EXPAND_MEMORY
12317 && modifier
!= EXPAND_WRITE
)
12318 op0
= flip_storage_order (mode1
, op0
);
12320 op0
= EXTEND_BITINT (op0
);
12322 if (mode
== mode1
|| mode1
== BLKmode
|| mode1
== tmode
12323 || modifier
== EXPAND_CONST_ADDRESS
12324 || modifier
== EXPAND_INITIALIZER
)
12328 target
= gen_reg_rtx (tmode
!= VOIDmode
? tmode
: mode
);
12330 convert_move (target
, op0
, unsignedp
);
12335 return expand_expr (OBJ_TYPE_REF_EXPR (exp
), target
, tmode
, modifier
);
      /* All valid uses of __builtin_va_arg_pack () are removed during
	 inlining.  */
      if (CALL_EXPR_VA_ARG_PACK (exp))
	error ("invalid use of %<__builtin_va_arg_pack ()%>");
      {
	tree fndecl = get_callee_fndecl (exp), attr;

	if (fndecl
	    /* Don't diagnose the error attribute in thunks, those are
	       artificially created.  */
	    && !CALL_FROM_THUNK_P (exp)
	    && (attr = lookup_attribute ("error",
					 DECL_ATTRIBUTES (fndecl))) != NULL)
	  {
	    const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
	    error ("call to %qs declared with attribute error: %s",
		   identifier_to_locale (ident),
		   TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
	  }
	if (fndecl
	    /* Don't diagnose the warning attribute in thunks, those are
	       artificially created.  */
	    && !CALL_FROM_THUNK_P (exp)
	    && (attr = lookup_attribute ("warning",
					 DECL_ATTRIBUTES (fndecl))) != NULL)
	  {
	    const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
	    warning_at (EXPR_LOCATION (exp),
			OPT_Wattribute_warning,
			"call to %qs declared with attribute warning: %s",
			identifier_to_locale (ident),
			TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
	  }

	/* Check for a built-in function.  */
	if (fndecl && fndecl_built_in_p (fndecl))
	  {
	    gcc_assert (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_FRONTEND);
	    return expand_builtin (exp, target, subtarget, tmode, ignore);
	  }
      }
      temp = expand_call (exp, target, ignore);
      return EXTEND_BITINT (temp);
12382 case VIEW_CONVERT_EXPR
:
12385 /* If we are converting to BLKmode, try to avoid an intermediate
12386 temporary by fetching an inner memory reference. */
12387 if (mode
== BLKmode
12388 && poly_int_tree_p (TYPE_SIZE (type
))
12389 && TYPE_MODE (TREE_TYPE (treeop0
)) != BLKmode
12390 && handled_component_p (treeop0
))
12392 machine_mode mode1
;
12393 poly_int64 bitsize
, bitpos
, bytepos
;
12395 int reversep
, volatilep
= 0;
12397 = get_inner_reference (treeop0
, &bitsize
, &bitpos
, &offset
, &mode1
,
12398 &unsignedp
, &reversep
, &volatilep
);
12400 /* ??? We should work harder and deal with non-zero offsets. */
12402 && multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
12404 && known_size_p (bitsize
)
12405 && known_eq (wi::to_poly_offset (TYPE_SIZE (type
)), bitsize
))
12407 /* See the normal_inner_ref case for the rationale. */
12409 = expand_expr_real (tem
,
12410 (TREE_CODE (TREE_TYPE (tem
)) == UNION_TYPE
12411 && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem
)))
12413 && modifier
!= EXPAND_STACK_PARM
12414 ? target
: NULL_RTX
),
12416 modifier
== EXPAND_SUM
? EXPAND_NORMAL
: modifier
,
12419 if (MEM_P (orig_op0
))
12423 /* Get a reference to just this component. */
12424 if (modifier
== EXPAND_CONST_ADDRESS
12425 || modifier
== EXPAND_SUM
12426 || modifier
== EXPAND_INITIALIZER
)
12427 op0
= adjust_address_nv (op0
, mode
, bytepos
);
12429 op0
= adjust_address (op0
, mode
, bytepos
);
12431 if (op0
== orig_op0
)
12432 op0
= copy_rtx (op0
);
12434 set_mem_attributes (op0
, treeop0
, 0);
12435 if (REG_P (XEXP (op0
, 0)))
12436 mark_reg_pointer (XEXP (op0
, 0), MEM_ALIGN (op0
));
12438 MEM_VOLATILE_P (op0
) |= volatilep
;
12444 op0
= expand_expr_real (treeop0
, NULL_RTX
, VOIDmode
, modifier
,
12445 NULL
, inner_reference_p
);
12447 /* If the input and output modes are both the same, we are done. */
12448 if (mode
== GET_MODE (op0
))
12450 /* Similarly if the output mode is BLKmode and input is a MEM,
12451 adjust_address done below is all we need. */
12452 else if (mode
== BLKmode
&& MEM_P (op0
))
12454 /* If neither mode is BLKmode, and both modes are the same size
12455 then we can use gen_lowpart. */
12456 else if (mode
!= BLKmode
12457 && GET_MODE (op0
) != BLKmode
12458 && known_eq (GET_MODE_PRECISION (mode
),
12459 GET_MODE_PRECISION (GET_MODE (op0
)))
12460 && !COMPLEX_MODE_P (GET_MODE (op0
)))
12462 if (GET_CODE (op0
) == SUBREG
)
12463 op0
= force_reg (GET_MODE (op0
), op0
);
12464 temp
= gen_lowpart_common (mode
, op0
);
12469 if (!REG_P (op0
) && !MEM_P (op0
))
12470 op0
= force_reg (GET_MODE (op0
), op0
);
12471 op0
= gen_lowpart (mode
, op0
);
12474 /* If both types are integral, convert from one mode to the other. */
12475 else if (INTEGRAL_TYPE_P (type
)
12476 && INTEGRAL_TYPE_P (TREE_TYPE (treeop0
))
12478 && GET_MODE (op0
) != BLKmode
)
12479 op0
= convert_modes (mode
, GET_MODE (op0
), op0
,
12480 TYPE_UNSIGNED (TREE_TYPE (treeop0
)));
12481 /* If the output type is a bit-field type, do an extraction. */
12482 else if (reduce_bit_field
)
12483 return extract_bit_field (op0
, TYPE_PRECISION (type
), 0,
12484 TYPE_UNSIGNED (type
), NULL_RTX
,
12485 mode
, mode
, false, NULL
);
12486 /* As a last resort, spill op0 to memory, and reload it in a
12488 else if (!MEM_P (op0
))
12490 /* If the operand is not a MEM, force it into memory. Since we
12491 are going to be changing the mode of the MEM, don't call
12492 force_const_mem for constants because we don't allow pool
12493 constants to change mode. */
12494 tree inner_type
= TREE_TYPE (treeop0
);
12496 gcc_assert (!TREE_ADDRESSABLE (exp
));
12498 if (target
== 0 || GET_MODE (target
) != TYPE_MODE (inner_type
))
12500 = assign_stack_temp_for_type
12501 (TYPE_MODE (inner_type
),
12502 GET_MODE_SIZE (TYPE_MODE (inner_type
)), inner_type
);
12504 emit_move_insn (target
, op0
);
12508 /* If OP0 is (now) a MEM, we need to deal with alignment issues. If the
12509 output type is such that the operand is known to be aligned, indicate
12510 that it is. Otherwise, we need only be concerned about alignment for
12511 non-BLKmode results. */
12514 enum insn_code icode
;
12516 if (modifier
!= EXPAND_WRITE
12517 && modifier
!= EXPAND_MEMORY
12518 && !inner_reference_p
12520 && MEM_ALIGN (op0
) < GET_MODE_ALIGNMENT (mode
))
12522 /* If the target does have special handling for unaligned
12523 loads of mode then use them. */
12524 if ((icode
= optab_handler (movmisalign_optab
, mode
))
12525 != CODE_FOR_nothing
)
12529 op0
= adjust_address (op0
, mode
, 0);
12530 /* We've already validated the memory, and we're creating a
12531 new pseudo destination. The predicates really can't
12533 reg
= gen_reg_rtx (mode
);
12535 /* Nor can the insn generator. */
12536 rtx_insn
*insn
= GEN_FCN (icode
) (reg
, op0
);
12540 else if (STRICT_ALIGNMENT
)
12542 poly_uint64 mode_size
= GET_MODE_SIZE (mode
);
12543 poly_uint64 temp_size
= mode_size
;
12544 if (GET_MODE (op0
) != BLKmode
)
12545 temp_size
= upper_bound (temp_size
,
12546 GET_MODE_SIZE (GET_MODE (op0
)));
12548 = assign_stack_temp_for_type (mode
, temp_size
, type
);
12549 rtx new_with_op0_mode
12550 = adjust_address (new_rtx
, GET_MODE (op0
), 0);
12552 gcc_assert (!TREE_ADDRESSABLE (exp
));
12554 if (GET_MODE (op0
) == BLKmode
)
12556 rtx size_rtx
= gen_int_mode (mode_size
, Pmode
);
12557 emit_block_move (new_with_op0_mode
, op0
, size_rtx
,
12558 (modifier
== EXPAND_STACK_PARM
12559 ? BLOCK_OP_CALL_PARM
12560 : BLOCK_OP_NORMAL
));
12563 emit_move_insn (new_with_op0_mode
, op0
);
12569 op0
= adjust_address (op0
, mode
, 0);
	tree lhs = treeop0;
	tree rhs = treeop1;
	gcc_assert (ignore);

	/* Check for |= or &= of a bitfield of size one into another bitfield
	   of size 1.  In this case, (unless we need the result of the
	   assignment) we can do this more efficiently with a
	   test followed by an assignment, if necessary.

	   ??? At this point, we can't get a BIT_FIELD_REF here.  But if
	   things change so we do, this code should be enhanced to
	   support it.  */
	if (TREE_CODE (lhs) == COMPONENT_REF
	    && (TREE_CODE (rhs) == BIT_IOR_EXPR
		|| TREE_CODE (rhs) == BIT_AND_EXPR)
	    && TREE_OPERAND (rhs, 0) == lhs
	    && TREE_CODE (TREE_OPERAND (rhs, 1)) == COMPONENT_REF
	    && integer_onep (DECL_SIZE (TREE_OPERAND (lhs, 1)))
	    && integer_onep (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1),
						      1))))
	  {
	    rtx_code_label *label = gen_label_rtx ();
	    int value = TREE_CODE (rhs) == BIT_IOR_EXPR;
	    profile_probability prob = profile_probability::uninitialized ();
	    if (value)
	      jumpifnot (TREE_OPERAND (rhs, 1), label, prob);
	    else
	      jumpif (TREE_OPERAND (rhs, 1), label, prob);
	    expand_assignment (lhs, build_int_cst (TREE_TYPE (rhs), value),
			       false);
	    do_pending_stack_adjust ();
	    emit_label (label);
	    return const0_rtx;
	  }

	expand_assignment (lhs, rhs, false);
	return const0_rtx;
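	/* Illustrative note, not part of GCC: the source-level shape the
	   test above looks for, with hypothetical one-bit bitfields

	     struct s { unsigned a : 1, b : 1; };
	     void f (struct s *p) { p->a |= p->b; }

	   Since the result of the assignment is not needed, the store of
	   the constant 1 into p->a only has to happen when p->b is set, so
	   the expansion jumps around the assignment instead of
	   materializing the full bitwise OR.  */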
      return expand_expr_addr_expr (exp, target, tmode, modifier);

    case REALPART_EXPR:
      op0 = expand_normal (treeop0);
      return read_complex_part (op0, false);

    case IMAGPART_EXPR:
      op0 = expand_normal (treeop0);
      return read_complex_part (op0, true);

      /* Expanded in cfgexpand.cc.  */
      gcc_unreachable ();

    case TRY_CATCH_EXPR:
    case EH_FILTER_EXPR:
    case TRY_FINALLY_EXPR:
      /* Lowered by tree-eh.cc.  */
      gcc_unreachable ();

    case WITH_CLEANUP_EXPR:
    case CLEANUP_POINT_EXPR:
    case CASE_LABEL_EXPR:
    case COMPOUND_EXPR:
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
    case COMPOUND_LITERAL_EXPR:
      /* Lowered by gimplify.cc.  */
      gcc_unreachable ();

      /* Function descriptors are not valid except for as
	 initialization constants, and should not be expanded.  */
      gcc_unreachable ();

    case WITH_SIZE_EXPR:
      /* WITH_SIZE_EXPR expands to its first argument.  The caller should
	 have pulled out the size to use in whatever context it needed.  */
      return expand_expr_real (treeop0, original_target, tmode,
			       modifier, alt_rtl, inner_reference_p);

      return expand_expr_real_2 (&ops, target, tmode, modifier);

#undef EXTEND_BITINT
/* Subroutine of above: reduce EXP to the precision of TYPE (in the
   signedness of TYPE), possibly returning the result in TARGET.
   TYPE is known to be a partial integer type.  */

static rtx
reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
{
  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
  HOST_WIDE_INT prec = TYPE_PRECISION (type);
  gcc_assert ((GET_MODE (exp) == VOIDmode || GET_MODE (exp) == mode)
	      && (!target || GET_MODE (target) == mode));

  /* For constant values, reduce using wide_int_to_tree.  */
  if (poly_int_rtx_p (exp))
    {
      auto value = wi::to_poly_wide (exp, mode);
      tree t = wide_int_to_tree (type, value);
      return expand_expr (t, target, VOIDmode, EXPAND_NORMAL);
    }
  else if (TYPE_UNSIGNED (type))
    {
      rtx mask = immed_wide_int_const
	(wi::mask (prec, false, GET_MODE_PRECISION (mode)), mode);
      return expand_and (mode, exp, mask, target);
    }
  else
    {
      int count = GET_MODE_PRECISION (mode) - prec;
      exp = expand_shift (LSHIFT_EXPR, mode, exp, count, target, 0);
      return expand_shift (RSHIFT_EXPR, mode, exp, count, target, 0);
    }
}
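/* Illustrative sketch, not part of GCC: the signed branch above expressed
   on plain 32-bit integers.  Shifting left by (32 - PREC) and then doing an
   arithmetic shift right by the same amount sign-extends bit PREC-1 through
   the upper bits, which is what reducing to a PREC-bit signed bit-field
   means.  Assumes the usual arithmetic right shift of negative values.  */

static inline int
example_reduce_to_signed_prec (int x, int prec)
{
  int count = 32 - prec;
  /* Push the field to the top of the word, then sign-extend it back down.  */
  return (int) ((unsigned int) x << count) >> count;
}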
/* Subroutine of above: returns true if OFFSET corresponds to an offset that
   when applied to the address of EXP produces an address known to be
   aligned more than BIGGEST_ALIGNMENT.  */

static bool
is_aligning_offset (const_tree offset, const_tree exp)
{
  /* Strip off any conversions.  */
  while (CONVERT_EXPR_P (offset))
    offset = TREE_OPERAND (offset, 0);

  /* We must now have a BIT_AND_EXPR with a constant that is one less than
     a power of 2 and which is larger than BIGGEST_ALIGNMENT.  */
  if (TREE_CODE (offset) != BIT_AND_EXPR
      || !tree_fits_uhwi_p (TREE_OPERAND (offset, 1))
      || compare_tree_int (TREE_OPERAND (offset, 1),
			   BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0
      || !pow2p_hwi (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1))
    return false;

  /* Look at the first operand of BIT_AND_EXPR and strip any conversion.
     It must be NEGATE_EXPR.  Then strip any more conversions.  */
  offset = TREE_OPERAND (offset, 0);
  while (CONVERT_EXPR_P (offset))
    offset = TREE_OPERAND (offset, 0);

  if (TREE_CODE (offset) != NEGATE_EXPR)
    return false;

  offset = TREE_OPERAND (offset, 0);
  while (CONVERT_EXPR_P (offset))
    offset = TREE_OPERAND (offset, 0);

  /* This must now be the address of EXP.  */
  return TREE_CODE (offset) == ADDR_EXPR && TREE_OPERAND (offset, 0) == exp;
}
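/* Illustrative note, not part of GCC: the kind of source idiom whose offset
   tree matches the shape tested above.  Aligning a hypothetical buffer to a
   power-of-two boundary ALIGN (which must exceed BIGGEST_ALIGNMENT in bytes
   for the predicate to fire):

     char *p = buf + ((- (uintptr_t) buf) & (ALIGN - 1));

   i.e. a BIT_AND_EXPR whose first operand is a NEGATE_EXPR of the object's
   address and whose second operand is ALIGN - 1, one less than a power of
   two.  */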
/* Return a STRING_CST corresponding to ARG's constant initializer either
   if it's a string constant, or, when VALREP is set, any other constant.

   On success, set *PTR_OFFSET to the (possibly non-constant) byte offset
   within the byte string that ARG references.  If nonnull set *MEM_SIZE
   to the size of the byte string.  If nonnull, set *DECL to the constant
   declaration ARG refers to.  */
12755 constant_byte_string (tree arg
, tree
*ptr_offset
, tree
*mem_size
, tree
*decl
,
12756 bool valrep
= false)
12758 tree dummy
= NULL_TREE
;
12762 /* Store the type of the original expression before conversions
12763 via NOP_EXPR or POINTER_PLUS_EXPR to other types have been
12765 tree argtype
= TREE_TYPE (arg
);
12770 /* Non-constant index into the character array in an ARRAY_REF
12771 expression or null. */
12772 tree varidx
= NULL_TREE
;
12774 poly_int64 base_off
= 0;
12776 if (TREE_CODE (arg
) == ADDR_EXPR
)
12778 arg
= TREE_OPERAND (arg
, 0);
12780 if (TREE_CODE (arg
) == ARRAY_REF
)
12782 tree idx
= TREE_OPERAND (arg
, 1);
12783 if (TREE_CODE (idx
) != INTEGER_CST
)
12785 /* From a pointer (but not array) argument extract the variable
12786 index to prevent get_addr_base_and_unit_offset() from failing
12787 due to it. Use it later to compute the non-constant offset
12788 into the string and return it to the caller. */
12790 ref
= TREE_OPERAND (arg
, 0);
12792 if (TREE_CODE (TREE_TYPE (arg
)) == ARRAY_TYPE
)
12795 if (!integer_zerop (array_ref_low_bound (arg
)))
12798 if (!integer_onep (array_ref_element_size (arg
)))
12802 array
= get_addr_base_and_unit_offset (ref
, &base_off
);
12804 || (TREE_CODE (array
) != VAR_DECL
12805 && TREE_CODE (array
) != CONST_DECL
12806 && TREE_CODE (array
) != STRING_CST
))
12809 else if (TREE_CODE (arg
) == PLUS_EXPR
|| TREE_CODE (arg
) == POINTER_PLUS_EXPR
)
12811 tree arg0
= TREE_OPERAND (arg
, 0);
12812 tree arg1
= TREE_OPERAND (arg
, 1);
12815 tree str
= string_constant (arg0
, &offset
, mem_size
, decl
);
12818 str
= string_constant (arg1
, &offset
, mem_size
, decl
);
12824 /* Avoid pointers to arrays (see bug 86622). */
12825 if (POINTER_TYPE_P (TREE_TYPE (arg
))
12826 && TREE_CODE (TREE_TYPE (TREE_TYPE (arg
))) == ARRAY_TYPE
12827 && !(decl
&& !*decl
)
12828 && !(decl
&& tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl
))
12829 && tree_fits_uhwi_p (*mem_size
)
12830 && tree_int_cst_equal (*mem_size
, DECL_SIZE_UNIT (*decl
))))
12833 tree type
= TREE_TYPE (offset
);
12834 arg1
= fold_convert (type
, arg1
);
12835 *ptr_offset
= fold_build2 (PLUS_EXPR
, type
, offset
, arg1
);
12840 else if (TREE_CODE (arg
) == SSA_NAME
)
12842 gimple
*stmt
= SSA_NAME_DEF_STMT (arg
);
12843 if (!is_gimple_assign (stmt
))
12846 tree rhs1
= gimple_assign_rhs1 (stmt
);
12847 tree_code code
= gimple_assign_rhs_code (stmt
);
12848 if (code
== ADDR_EXPR
)
12849 return string_constant (rhs1
, ptr_offset
, mem_size
, decl
);
12850 else if (code
!= POINTER_PLUS_EXPR
)
12854 if (tree str
= string_constant (rhs1
, &offset
, mem_size
, decl
))
12856 /* Avoid pointers to arrays (see bug 86622). */
12857 if (POINTER_TYPE_P (TREE_TYPE (rhs1
))
12858 && TREE_CODE (TREE_TYPE (TREE_TYPE (rhs1
))) == ARRAY_TYPE
12859 && !(decl
&& !*decl
)
12860 && !(decl
&& tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl
))
12861 && tree_fits_uhwi_p (*mem_size
)
12862 && tree_int_cst_equal (*mem_size
, DECL_SIZE_UNIT (*decl
))))
12865 tree rhs2
= gimple_assign_rhs2 (stmt
);
12866 tree type
= TREE_TYPE (offset
);
12867 rhs2
= fold_convert (type
, rhs2
);
12868 *ptr_offset
= fold_build2 (PLUS_EXPR
, type
, offset
, rhs2
);
12873 else if (DECL_P (arg
))
12878 tree offset
= wide_int_to_tree (sizetype
, base_off
);
12881 if (TREE_CODE (TREE_TYPE (array
)) != ARRAY_TYPE
)
12884 gcc_assert (TREE_CODE (arg
) == ARRAY_REF
);
12885 tree chartype
= TREE_TYPE (TREE_TYPE (TREE_OPERAND (arg
, 0)));
12886 if (TREE_CODE (chartype
) != INTEGER_TYPE
)
12889 offset
= fold_convert (sizetype
, varidx
);
12892 if (TREE_CODE (array
) == STRING_CST
)
12894 *ptr_offset
= fold_convert (sizetype
, offset
);
12895 *mem_size
= TYPE_SIZE_UNIT (TREE_TYPE (array
));
12898 gcc_checking_assert (tree_to_shwi (TYPE_SIZE_UNIT (TREE_TYPE (array
)))
12899 >= TREE_STRING_LENGTH (array
));
12903 tree init
= ctor_for_folding (array
);
12904 if (!init
|| init
== error_mark_node
)
12909 HOST_WIDE_INT cstoff
;
12910 if (!base_off
.is_constant (&cstoff
))
12913 /* Check that the host and target are sane. */
12914 if (CHAR_BIT
!= 8 || BITS_PER_UNIT
!= 8)
12917 HOST_WIDE_INT typesz
= int_size_in_bytes (TREE_TYPE (init
));
12918 if (typesz
<= 0 || (int) typesz
!= typesz
)
12921 HOST_WIDE_INT size
= typesz
;
12923 && DECL_SIZE_UNIT (array
)
12924 && tree_fits_shwi_p (DECL_SIZE_UNIT (array
)))
12926 size
= tree_to_shwi (DECL_SIZE_UNIT (array
));
12927 gcc_checking_assert (size
>= typesz
);
12930 /* If value representation was requested convert the initializer
12931 for the whole array or object into a string of bytes forming
12932 its value representation and return it. */
12933 unsigned char *bytes
= XNEWVEC (unsigned char, size
);
12934 int r
= native_encode_initializer (init
, bytes
, size
);
12937 XDELETEVEC (bytes
);
12942 memset (bytes
+ r
, '\0', size
- r
);
12944 const char *p
= reinterpret_cast<const char *>(bytes
);
12945 init
= build_string_literal (size
, p
, char_type_node
);
12946 init
= TREE_OPERAND (init
, 0);
12947 init
= TREE_OPERAND (init
, 0);
12950 *mem_size
= size_int (TREE_STRING_LENGTH (init
));
12951 *ptr_offset
= wide_int_to_tree (ssizetype
, base_off
);
12959 if (TREE_CODE (init
) == CONSTRUCTOR
)
12961 /* Convert the 64-bit constant offset to a wider type to avoid
12962 overflow and use it to obtain the initializer for the subobject
12965 if (!base_off
.is_constant (&wioff
))
12968 wioff
*= BITS_PER_UNIT
;
12969 if (!wi::fits_uhwi_p (wioff
))
12972 base_off
= wioff
.to_uhwi ();
12973 unsigned HOST_WIDE_INT fieldoff
= 0;
12974 init
= fold_ctor_reference (TREE_TYPE (arg
), init
, base_off
, 0, array
,
12976 if (!init
|| init
== error_mark_node
)
12979 HOST_WIDE_INT cstoff
;
12980 if (!base_off
.is_constant (&cstoff
))
12983 cstoff
= (cstoff
- fieldoff
) / BITS_PER_UNIT
;
12984 tree off
= build_int_cst (sizetype
, cstoff
);
12986 offset
= fold_build2 (PLUS_EXPR
, TREE_TYPE (offset
), offset
, off
);
12991 *ptr_offset
= offset
;
12993 tree inittype
= TREE_TYPE (init
);
12995 if (TREE_CODE (init
) == INTEGER_CST
12996 && (TREE_CODE (TREE_TYPE (array
)) == INTEGER_TYPE
12997 || TYPE_MAIN_VARIANT (inittype
) == char_type_node
))
12999 /* Check that the host and target are sane. */
13000 if (CHAR_BIT
!= 8 || BITS_PER_UNIT
!= 8)
13003 /* For a reference to (address of) a single constant character,
13004 store the native representation of the character in CHARBUF.
13005 If the reference is to an element of an array or a member
13006 of a struct, only consider narrow characters until ctors
13007 for wide character arrays are transformed to STRING_CSTs
13008 like those for narrow arrays. */
13009 unsigned char charbuf
[MAX_BITSIZE_MODE_ANY_MODE
/ BITS_PER_UNIT
];
13010 int len
= native_encode_expr (init
, charbuf
, sizeof charbuf
, 0);
13013 /* Construct a string literal with elements of INITTYPE and
13014 the representation above. Then strip
13015 the ADDR_EXPR (ARRAY_REF (...)) around the STRING_CST. */
13016 init
= build_string_literal (len
, (char *)charbuf
, inittype
);
13017 init
= TREE_OPERAND (TREE_OPERAND (init
, 0), 0);
13021 tree initsize
= TYPE_SIZE_UNIT (inittype
);
13023 if (TREE_CODE (init
) == CONSTRUCTOR
&& initializer_zerop (init
))
13025 /* Fold an empty/zero constructor for an implicitly initialized
13026 object or subobject into the empty string. */
13028 /* Determine the character type from that of the original
13030 tree chartype
= argtype
;
13031 if (POINTER_TYPE_P (chartype
))
13032 chartype
= TREE_TYPE (chartype
);
13033 while (TREE_CODE (chartype
) == ARRAY_TYPE
)
13034 chartype
= TREE_TYPE (chartype
);
13036 if (INTEGRAL_TYPE_P (chartype
)
13037 && TYPE_PRECISION (chartype
) == TYPE_PRECISION (char_type_node
))
13039 /* Convert a char array to an empty STRING_CST having an array
13040 of the expected type and size. */
13042 initsize
= integer_zero_node
;
13044 unsigned HOST_WIDE_INT size
= tree_to_uhwi (initsize
);
13045 if (size
> (unsigned HOST_WIDE_INT
) INT_MAX
)
13048 init
= build_string_literal (size
, NULL
, chartype
, size
);
13049 init
= TREE_OPERAND (init
, 0);
13050 init
= TREE_OPERAND (init
, 0);
13052 *ptr_offset
= integer_zero_node
;
13059 if (TREE_CODE (init
) != STRING_CST
)
13062 *mem_size
= initsize
;
13064 gcc_checking_assert (tree_to_shwi (initsize
) >= TREE_STRING_LENGTH (init
));
/* Return STRING_CST if an ARG corresponds to a string constant or zero
   if it doesn't.  If we return nonzero, set *PTR_OFFSET to the (possibly
   non-constant) offset in bytes within the string that ARG is accessing.
   If MEM_SIZE is non-zero the storage size of the memory is returned.
   If DECL is non-zero the constant declaration is returned if available.  */

tree
string_constant (tree arg, tree *ptr_offset, tree *mem_size, tree *decl)
{
  return constant_byte_string (arg, ptr_offset, mem_size, decl, false);
}

/* Similar to string_constant, return a STRING_CST corresponding
   to the value representation of the first argument if it's
   a constant.  */

tree
byte_representation (tree arg, tree *ptr_offset, tree *mem_size, tree *decl)
{
  return constant_byte_string (arg, ptr_offset, mem_size, decl, true);
}
/* Optimize x % C1 == C2 for signed modulo if C1 is a power of two and C2
   is non-zero and C3 ((1<<(prec-1)) | (C1 - 1)):
   for C2 > 0 to x & C3 == C2
   for C2 < 0 to x & C3 == (C2 & C3).  */
13096 maybe_optimize_pow2p_mod_cmp (enum tree_code code
, tree
*arg0
, tree
*arg1
)
13098 gimple
*stmt
= get_def_for_expr (*arg0
, TRUNC_MOD_EXPR
);
13099 tree treeop0
= gimple_assign_rhs1 (stmt
);
13100 tree treeop1
= gimple_assign_rhs2 (stmt
);
13101 tree type
= TREE_TYPE (*arg0
);
13102 scalar_int_mode mode
;
13103 if (!is_a
<scalar_int_mode
> (TYPE_MODE (type
), &mode
))
13105 if (GET_MODE_BITSIZE (mode
) != TYPE_PRECISION (type
)
13106 || TYPE_PRECISION (type
) <= 1
13107 || TYPE_UNSIGNED (type
)
13108 /* Signed x % c == 0 should have been optimized into unsigned modulo
13110 || integer_zerop (*arg1
)
13111 /* If c is known to be non-negative, modulo will be expanded as unsigned
13113 || get_range_pos_neg (treeop0
) == 1)
13116 /* x % c == d where d < 0 && d <= -c should be always false. */
13117 if (tree_int_cst_sgn (*arg1
) == -1
13118 && -wi::to_widest (treeop1
) >= wi::to_widest (*arg1
))
13121 int prec
= TYPE_PRECISION (type
);
13122 wide_int w
= wi::to_wide (treeop1
) - 1;
13123 w
|= wi::shifted_mask (0, prec
- 1, true, prec
);
13124 tree c3
= wide_int_to_tree (type
, w
);
13126 if (tree_int_cst_sgn (*arg1
) == -1)
13127 c4
= wide_int_to_tree (type
, w
& wi::to_wide (*arg1
));
13129 rtx op0
= expand_normal (treeop0
);
13130 treeop0
= make_tree (TREE_TYPE (treeop0
), op0
);
13132 bool speed_p
= optimize_insn_for_speed_p ();
13134 do_pending_stack_adjust ();
13136 location_t loc
= gimple_location (stmt
);
13137 struct separate_ops ops
;
13138 ops
.code
= TRUNC_MOD_EXPR
;
13139 ops
.location
= loc
;
13140 ops
.type
= TREE_TYPE (treeop0
);
13143 ops
.op2
= NULL_TREE
;
13145 rtx mor
= expand_expr_real_2 (&ops
, NULL_RTX
, TYPE_MODE (ops
.type
),
13147 rtx_insn
*moinsns
= get_insns ();
13150 unsigned mocost
= seq_cost (moinsns
, speed_p
);
13151 mocost
+= rtx_cost (mor
, mode
, EQ
, 0, speed_p
);
13152 mocost
+= rtx_cost (expand_normal (*arg1
), mode
, EQ
, 1, speed_p
);
13154 ops
.code
= BIT_AND_EXPR
;
13155 ops
.location
= loc
;
13156 ops
.type
= TREE_TYPE (treeop0
);
13159 ops
.op2
= NULL_TREE
;
13161 rtx mur
= expand_expr_real_2 (&ops
, NULL_RTX
, TYPE_MODE (ops
.type
),
13163 rtx_insn
*muinsns
= get_insns ();
13166 unsigned mucost
= seq_cost (muinsns
, speed_p
);
13167 mucost
+= rtx_cost (mur
, mode
, EQ
, 0, speed_p
);
13168 mucost
+= rtx_cost (expand_normal (c4
), mode
, EQ
, 1, speed_p
);
13170 if (mocost
<= mucost
)
13172 emit_insn (moinsns
);
13173 *arg0
= make_tree (TREE_TYPE (*arg0
), mor
);
13177 emit_insn (muinsns
);
13178 *arg0
= make_tree (TREE_TYPE (*arg0
), mur
);
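/* Illustrative sketch, not part of GCC: the transformation documented above
   maybe_optimize_pow2p_mod_cmp, specialized to the hypothetical case
   C1 == 16 and C2 == 3 with 32-bit int.  C3 is (1 << 31) | 15, i.e.
   0x8000000f: the sign bit must be clear, because a negative X cannot give
   a positive remainder under truncating division, and the low four bits
   must equal 3.  */

static inline int
example_signed_mod16_eq_3 (int x)
{
  /* Equivalent to x % 16 == 3 for 32-bit two's complement int.  */
  return (x & 0x8000000f) == 3;
}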
/* Attempt to optimize unsigned (X % C1) == C2 (or (X % C1) != C2).
   If C1 is odd to:
   (X - C2) * C3 <= C4 (or >), where
   C3 is modular multiplicative inverse of C1 and 1<<prec and
   C4 is ((1<<prec) - 1) / C1 or ((1<<prec) - 1) / C1 - 1 (the latter
   if C2 > ((1<<prec) - 1) % C1).
   If C1 is even, S = ctz (C1) and C2 is 0, use
   ((X * C3) r>> S) <= C4, where C3 is modular multiplicative
   inverse of C1>>S and 1<<prec and C4 is (((1<<prec) - 1) / (C1>>S)) >> S.

   For signed (X % C1) == 0 if C1 is odd to (all operations in it
   unsigned):
   (X * C3) + C4 <= 2 * C4, where
   C3 is modular multiplicative inverse of (unsigned) C1 and 1<<prec and
   C4 is ((1<<(prec - 1) - 1) / C1).
   If C1 is even, S = ctz(C1), use
   ((X * C3) + C4) r>> S <= (C4 >> (S - 1))
   where C3 is modular multiplicative inverse of (unsigned)(C1>>S) and 1<<prec
   and C4 is ((1<<(prec - 1) - 1) / (C1>>S)) & (-1<<S).

   See the Hacker's Delight book, section 10-17.  */
13205 maybe_optimize_mod_cmp (enum tree_code code
, tree
*arg0
, tree
*arg1
)
13207 gcc_checking_assert (code
== EQ_EXPR
|| code
== NE_EXPR
);
13208 gcc_checking_assert (TREE_CODE (*arg1
) == INTEGER_CST
);
13213 gimple
*stmt
= get_def_for_expr (*arg0
, TRUNC_MOD_EXPR
);
13217 tree treeop0
= gimple_assign_rhs1 (stmt
);
13218 tree treeop1
= gimple_assign_rhs2 (stmt
);
13219 if (TREE_CODE (treeop0
) != SSA_NAME
13220 || TREE_CODE (treeop1
) != INTEGER_CST
13221 /* Don't optimize the undefined behavior case x % 0;
13222 x % 1 should have been optimized into zero, punt if
13223 it makes it here for whatever reason;
13224 x % -c should have been optimized into x % c. */
13225 || compare_tree_int (treeop1
, 2) <= 0
13226 /* Likewise x % c == d where d >= c should be always false. */
13227 || tree_int_cst_le (treeop1
, *arg1
))
13230 /* Unsigned x % pow2 is handled right already, for signed
13231 modulo handle it in maybe_optimize_pow2p_mod_cmp. */
13232 if (integer_pow2p (treeop1
))
13233 return maybe_optimize_pow2p_mod_cmp (code
, arg0
, arg1
);
13235 tree type
= TREE_TYPE (*arg0
);
13236 scalar_int_mode mode
;
13237 if (!is_a
<scalar_int_mode
> (TYPE_MODE (type
), &mode
))
13239 if (GET_MODE_BITSIZE (mode
) != TYPE_PRECISION (type
)
13240 || TYPE_PRECISION (type
) <= 1)
13243 signop sgn
= UNSIGNED
;
13244 /* If both operands are known to have the sign bit clear, handle
13245 even the signed modulo case as unsigned. treeop1 is always
13246 positive >= 2, checked above. */
13247 if (!TYPE_UNSIGNED (type
) && get_range_pos_neg (treeop0
) != 1)
13250 if (!TYPE_UNSIGNED (type
))
13252 if (tree_int_cst_sgn (*arg1
) == -1)
13254 type
= unsigned_type_for (type
);
13255 if (!type
|| TYPE_MODE (type
) != TYPE_MODE (TREE_TYPE (*arg0
)))
13259 int prec
= TYPE_PRECISION (type
);
13260 wide_int w
= wi::to_wide (treeop1
);
13261 int shift
= wi::ctz (w
);
13262 /* Unsigned (X % C1) == C2 is equivalent to (X - C2) % C1 == 0 if
13263 C2 <= -1U % C1, because for any Z >= 0U - C2 in that case (Z % C1) != 0.
13264 If C1 is odd, we can handle all cases by subtracting
13265 C4 below. We could handle even the even C1 and C2 > -1U % C1 cases
13266 e.g. by testing for overflow on the subtraction, punt on that for now
13268 if ((sgn
== SIGNED
|| shift
) && !integer_zerop (*arg1
))
13272 wide_int x
= wi::umod_trunc (wi::mask (prec
, false, prec
), w
);
13273 if (wi::gtu_p (wi::to_wide (*arg1
), x
))
13277 imm_use_iterator imm_iter
;
13278 use_operand_p use_p
;
13279 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, treeop0
)
13281 gimple
*use_stmt
= USE_STMT (use_p
);
13282 /* Punt if treeop0 is used in the same bb in a division
13283 or another modulo with the same divisor. We should expect
13284 the division and modulo combined together. */
13285 if (use_stmt
== stmt
13286 || gimple_bb (use_stmt
) != gimple_bb (stmt
))
13288 if (!is_gimple_assign (use_stmt
)
13289 || (gimple_assign_rhs_code (use_stmt
) != TRUNC_DIV_EXPR
13290 && gimple_assign_rhs_code (use_stmt
) != TRUNC_MOD_EXPR
))
13292 if (gimple_assign_rhs1 (use_stmt
) != treeop0
13293 || !operand_equal_p (gimple_assign_rhs2 (use_stmt
), treeop1
, 0))
13298 w
= wi::lrshift (w
, shift
);
13299 wide_int a
= wide_int::from (w
, prec
+ 1, UNSIGNED
);
13300 wide_int b
= wi::shifted_mask (prec
, 1, false, prec
+ 1);
13301 wide_int m
= wide_int::from (wi::mod_inv (a
, b
), prec
, UNSIGNED
);
13302 tree c3
= wide_int_to_tree (type
, m
);
13303 tree c5
= NULL_TREE
;
13305 if (sgn
== UNSIGNED
)
13307 d
= wi::divmod_trunc (wi::mask (prec
, false, prec
), w
, UNSIGNED
, &e
);
13308 /* Use <= floor ((1<<prec) - 1) / C1 only if C2 <= ((1<<prec) - 1) % C1,
13309 otherwise use < or subtract one from C4. E.g. for
13310 x % 3U == 0 we transform this into x * 0xaaaaaaab <= 0x55555555, but
13311 x % 3U == 1 already needs to be
13312 (x - 1) * 0xaaaaaaabU <= 0x55555554. */
13313 if (!shift
&& wi::gtu_p (wi::to_wide (*arg1
), e
))
13316 d
= wi::lrshift (d
, shift
);
13320 e
= wi::udiv_trunc (wi::mask (prec
- 1, false, prec
), w
);
13322 d
= wi::lshift (e
, 1);
13325 e
= wi::bit_and (e
, wi::mask (shift
, true, prec
));
13326 d
= wi::lrshift (e
, shift
- 1);
13328 c5
= wide_int_to_tree (type
, e
);
13330 tree c4
= wide_int_to_tree (type
, d
);
13332 rtx op0
= expand_normal (treeop0
);
13333 treeop0
= make_tree (TREE_TYPE (treeop0
), op0
);
13335 bool speed_p
= optimize_insn_for_speed_p ();
13337 do_pending_stack_adjust ();
13339 location_t loc
= gimple_location (stmt
);
13340 struct separate_ops ops
;
13341 ops
.code
= TRUNC_MOD_EXPR
;
13342 ops
.location
= loc
;
13343 ops
.type
= TREE_TYPE (treeop0
);
13346 ops
.op2
= NULL_TREE
;
13348 rtx mor
= expand_expr_real_2 (&ops
, NULL_RTX
, TYPE_MODE (ops
.type
),
13350 rtx_insn
*moinsns
= get_insns ();
13353 unsigned mocost
= seq_cost (moinsns
, speed_p
);
13354 mocost
+= rtx_cost (mor
, mode
, EQ
, 0, speed_p
);
13355 mocost
+= rtx_cost (expand_normal (*arg1
), mode
, EQ
, 1, speed_p
);
13357 tree t
= fold_convert_loc (loc
, type
, treeop0
);
13358 if (!integer_zerop (*arg1
))
13359 t
= fold_build2_loc (loc
, MINUS_EXPR
, type
, t
, fold_convert (type
, *arg1
));
13360 t
= fold_build2_loc (loc
, MULT_EXPR
, type
, t
, c3
);
13362 t
= fold_build2_loc (loc
, PLUS_EXPR
, type
, t
, c5
);
13365 tree s
= build_int_cst (NULL_TREE
, shift
);
13366 t
= fold_build2_loc (loc
, RROTATE_EXPR
, type
, t
, s
);
13370 rtx mur
= expand_normal (t
);
13371 rtx_insn
*muinsns
= get_insns ();
13374 unsigned mucost
= seq_cost (muinsns
, speed_p
);
13375 mucost
+= rtx_cost (mur
, mode
, LE
, 0, speed_p
);
13376 mucost
+= rtx_cost (expand_normal (c4
), mode
, LE
, 1, speed_p
);
13378 if (mocost
<= mucost
)
13380 emit_insn (moinsns
);
13381 *arg0
= make_tree (TREE_TYPE (*arg0
), mor
);
13385 emit_insn (muinsns
);
13386 *arg0
= make_tree (type
, mur
);
13388 return code
== EQ_EXPR
? LE_EXPR
: GT_EXPR
;
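/* Illustrative sketch, not part of GCC: the unsigned odd-divisor case of
   the transformation documented above maybe_optimize_mod_cmp, specialized
   to C1 == 3 and C2 == 0, assuming 32-bit unsigned int.  0xaaaaaaabu is the
   modular multiplicative inverse of 3 modulo 1<<32 and 0x55555555u is
   0xffffffffu / 3, so x % 3u == 0 iff x * 0xaaaaaaabu <= 0x55555555u: for a
   multiple of 3 the wrapped product is exactly x / 3, for any other x it
   falls above the bound.  */

static inline int
example_unsigned_mod3_eq_0 (unsigned int x)
{
  return x * 0xaaaaaaabu <= 0x55555555u;
}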
/* Optimize x - y < 0 into x < y if x - y has undefined overflow.  */

void
maybe_optimize_sub_cmp_0 (enum tree_code code, tree *arg0, tree *arg1)
{
  gcc_checking_assert (code == GT_EXPR || code == GE_EXPR
		       || code == LT_EXPR || code == LE_EXPR);
  gcc_checking_assert (integer_zerop (*arg1));

  gimple *stmt = get_def_for_expr (*arg0, MINUS_EXPR);

  tree treeop0 = gimple_assign_rhs1 (stmt);
  tree treeop1 = gimple_assign_rhs2 (stmt);
  if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (treeop0)))
    return;

  if (issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_COMPARISON))
    warning_at (gimple_location (stmt), OPT_Wstrict_overflow,
		"assuming signed overflow does not occur when "
		"simplifying %<X - Y %s 0%> to %<X %s Y%>",
		op_symbol_code (code), op_symbol_code (code));

  *arg0 = treeop0;
  *arg1 = treeop1;
}
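/* Illustrative note, not part of GCC: the rewrite above is only valid
   because signed overflow is undefined.  With wrapping arithmetic it would
   change results; e.g. for x == INT_MAX and y == -1 the subtraction wraps
   to INT_MIN, so "x - y < 0" would be true even though "x < y" is false.  */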
/* Expand CODE with arguments INNER & (1<<BITNUM) and 0 that represents
   a single bit equality/inequality test, returns where the result is
   located.  */

static rtx
expand_single_bit_test (location_t loc, enum tree_code code,
			tree inner, int bitnum,
			tree result_type, rtx target,
			machine_mode mode)
{
  gcc_assert (code == NE_EXPR || code == EQ_EXPR);
13434 tree type
= TREE_TYPE (inner
);
13435 scalar_int_mode operand_mode
= SCALAR_INT_TYPE_MODE (type
);
13437 tree signed_type
, unsigned_type
, intermediate_type
;
13440 /* First, see if we can fold the single bit test into a sign-bit
13442 if (bitnum
== TYPE_PRECISION (type
) - 1
13443 && type_has_mode_precision_p (type
))
13445 tree stype
= signed_type_for (type
);
13446 tree tmp
= fold_build2_loc (loc
, code
== EQ_EXPR
? GE_EXPR
: LT_EXPR
,
13448 fold_convert_loc (loc
, stype
, inner
),
13449 build_int_cst (stype
, 0));
13450 return expand_expr (tmp
, target
, VOIDmode
, EXPAND_NORMAL
);
  /* Otherwise we have (A & C) != 0 where C is a single bit,
     convert that into ((A >> C2) & 1), where C2 = log2(C).
     Similarly for (A & C) == 0.  */
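  /* Illustrative note, not part of GCC: for a hypothetical test of bit 3,
     (a & 8) != 0 becomes ((a >> 3) & 1), and (a & 8) == 0 additionally
     xors the extracted bit with 1, as done further below.  */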
13457 /* If INNER is a right shift of a constant and it plus BITNUM does
13458 not overflow, adjust BITNUM and INNER. */
13459 if ((inner_def
= get_def_for_expr (inner
, RSHIFT_EXPR
))
13460 && TREE_CODE (gimple_assign_rhs2 (inner_def
)) == INTEGER_CST
13461 && bitnum
< TYPE_PRECISION (type
)
13462 && wi::ltu_p (wi::to_wide (gimple_assign_rhs2 (inner_def
)),
13463 TYPE_PRECISION (type
) - bitnum
))
13465 bitnum
+= tree_to_uhwi (gimple_assign_rhs2 (inner_def
));
13466 inner
= gimple_assign_rhs1 (inner_def
);
13469 /* If we are going to be able to omit the AND below, we must do our
13470 operations as unsigned. If we must use the AND, we have a choice.
13471 Normally unsigned is faster, but for some machines signed is. */
13472 ops_unsigned
= (load_extend_op (operand_mode
) == SIGN_EXTEND
13473 && !flag_syntax_only
) ? 0 : 1;
13475 signed_type
= lang_hooks
.types
.type_for_mode (operand_mode
, 0);
13476 unsigned_type
= lang_hooks
.types
.type_for_mode (operand_mode
, 1);
13477 intermediate_type
= ops_unsigned
? unsigned_type
: signed_type
;
13478 inner
= fold_convert_loc (loc
, intermediate_type
, inner
);
13480 rtx inner0
= expand_expr (inner
, NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
13482 if (CONST_SCALAR_INT_P (inner0
))
13484 wide_int t
= rtx_mode_t (inner0
, operand_mode
);
13485 bool setp
= (wi::lrshift (t
, bitnum
) & 1) != 0;
13486 return (setp
^ (code
== EQ_EXPR
)) ? const1_rtx
: const0_rtx
;
13488 int bitpos
= bitnum
;
13490 if (BYTES_BIG_ENDIAN
)
13491 bitpos
= GET_MODE_BITSIZE (operand_mode
) - 1 - bitpos
;
13493 inner0
= extract_bit_field (inner0
, 1, bitpos
, 1, target
,
13494 operand_mode
, mode
, 0, NULL
);
13496 if (code
== EQ_EXPR
)
13497 inner0
= expand_binop (GET_MODE (inner0
), xor_optab
, inner0
, const1_rtx
,
13498 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
13499 if (GET_MODE (inner0
) != mode
)
13501 rtx t
= gen_reg_rtx (mode
);
13502 convert_move (t
, inner0
, 0);
/* Generate code to calculate OPS, an exploded expression,
   using a store-flag instruction and return an rtx for the result.
   OPS reflects a comparison.

   If TARGET is nonzero, store the result there if convenient.

   Return zero if there is no suitable set-flag instruction
   available on this machine.

   Once expand_expr has been called on the arguments of the comparison,
   we are committed to doing the store flag, since it is not safe to
   re-evaluate the expression.  We emit the store-flag insn by calling
   emit_store_flag, but only expand the arguments if we have a reason
   to believe that emit_store_flag will be successful.  If we think that
   it will, but it isn't, we have to simulate the store-flag with a
   set/jump/set sequence.  */
13526 do_store_flag (const_sepops ops
, rtx target
, machine_mode mode
)
13528 enum rtx_code code
;
13529 tree arg0
, arg1
, type
;
13530 machine_mode operand_mode
;
13533 rtx subtarget
= target
;
13534 location_t loc
= ops
->location
;
13535 unsigned HOST_WIDE_INT nunits
;
13540 /* Don't crash if the comparison was erroneous. */
13541 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
13544 type
= TREE_TYPE (arg0
);
13545 operand_mode
= TYPE_MODE (type
);
13546 unsignedp
= TYPE_UNSIGNED (type
);
13548 /* We won't bother with BLKmode store-flag operations because it would mean
13549 passing a lot of information to emit_store_flag. */
13550 if (operand_mode
== BLKmode
)
13553 /* We won't bother with store-flag operations involving function pointers
13554 when function pointers must be canonicalized before comparisons. */
13555 if (targetm
.have_canonicalize_funcptr_for_compare ()
13556 && ((POINTER_TYPE_P (TREE_TYPE (arg0
))
13557 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg0
))))
13558 || (POINTER_TYPE_P (TREE_TYPE (arg1
))
13559 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg1
))))))
13565 /* For vector typed comparisons emit code to generate the desired
13566 all-ones or all-zeros mask. */
13567 if (VECTOR_TYPE_P (ops
->type
))
13569 tree ifexp
= build2 (ops
->code
, ops
->type
, arg0
, arg1
);
13570 if (VECTOR_BOOLEAN_TYPE_P (ops
->type
)
13571 && expand_vec_cmp_expr_p (TREE_TYPE (arg0
), ops
->type
, ops
->code
))
13572 return expand_vec_cmp_expr (ops
->type
, ifexp
, target
);
13574 gcc_unreachable ();
13577 /* Optimize (x % C1) == C2 or (x % C1) != C2 if it is beneficial
13578 into (x - C2) * C3 < C4. */
13579 if ((ops
->code
== EQ_EXPR
|| ops
->code
== NE_EXPR
)
13580 && TREE_CODE (arg0
) == SSA_NAME
13581 && TREE_CODE (arg1
) == INTEGER_CST
)
13583 enum tree_code new_code
= maybe_optimize_mod_cmp (ops
->code
,
13585 if (new_code
!= ops
->code
)
13587 struct separate_ops nops
= *ops
;
13588 nops
.code
= new_code
;
13591 nops
.type
= TREE_TYPE (arg0
);
13592 return do_store_flag (&nops
, target
, mode
);
13596 /* Optimize (x - y) < 0 into x < y if x - y has undefined overflow. */
13598 && (ops
->code
== LT_EXPR
|| ops
->code
== LE_EXPR
13599 || ops
->code
== GT_EXPR
|| ops
->code
== GE_EXPR
)
13600 && integer_zerop (arg1
)
13601 && TREE_CODE (arg0
) == SSA_NAME
)
13602 maybe_optimize_sub_cmp_0 (ops
->code
, &arg0
, &arg1
);
13604 /* Get the rtx comparison code to use. We know that EXP is a comparison
13605 operation of some type. Some comparisons against 1 and -1 can be
13606 converted to comparisons with zero. Do so here so that the tests
13607 below will be aware that we have a comparison with zero. These
13608 tests will not catch constants in the first operand, but constants
13609 are rarely passed as the first operand. */
  switch (ops->code)
    {
    case EQ_EXPR:
      code = EQ;
      break;

    case NE_EXPR:
      code = NE;
      break;

    case LT_EXPR:
      if (integer_onep (arg1))
	arg1 = integer_zero_node, code = unsignedp ? LEU : LE;
      else
	code = unsignedp ? LTU : LT;
      break;

    case LE_EXPR:
      if (! unsignedp && integer_all_onesp (arg1))
	arg1 = integer_zero_node, code = LT;
      else
	code = unsignedp ? LEU : LE;
      break;

    case GT_EXPR:
      if (! unsignedp && integer_all_onesp (arg1))
	arg1 = integer_zero_node, code = GE;
      else
	code = unsignedp ? GTU : GT;
      break;

    case GE_EXPR:
      if (integer_onep (arg1))
	arg1 = integer_zero_node, code = unsignedp ? GTU : GT;
      else
	code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;

    case ORDERED_EXPR:
      code = ORDERED;
      break;

    case UNLT_EXPR:
      code = UNLT;
      break;

    case UNLE_EXPR:
      code = UNLE;
      break;

    case UNGT_EXPR:
      code = UNGT;
      break;

    case UNGE_EXPR:
      code = UNGE;
      break;

    case UNEQ_EXPR:
      code = UNEQ;
      break;

    case LTGT_EXPR:
      code = LTGT;
      break;

    default:
      gcc_unreachable ();
    }
  /* Put a constant second.  */
  if (TREE_CODE (arg0) == REAL_CST || TREE_CODE (arg0) == INTEGER_CST
      || TREE_CODE (arg0) == FIXED_CST)
    {
      std::swap (arg0, arg1);
      code = swap_condition (code);
    }
  /* If this is an equality or inequality test of a single bit, we can
     do this by shifting the bit being tested to the low-order bit and
     masking the result with the constant 1.  If the condition was EQ,
     we xor it with 1.  This does not require an scc insn and is faster
     than an scc insn even if we have it.  */
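  /* (For instance, "(x & 8) != 0" can be computed as "(x >> 3) & 1", and
     "(x & 8) == 0" as "((x >> 3) & 1) ^ 1", without any branch or scc
     instruction.)  */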
  if ((code == NE || code == EQ)
      && (integer_zerop (arg1)
	  || integer_pow2p (arg1))
      /* vector types are not handled here. */
      && TREE_CODE (TREE_TYPE (arg1)) != VECTOR_TYPE
      && (TYPE_PRECISION (ops->type) != 1 || TYPE_UNSIGNED (ops->type)))
    {
      tree narg0 = arg0;
      wide_int nz = tree_nonzero_bits (narg0);
      gimple *srcstmt = get_def_for_expr (narg0, BIT_AND_EXPR);
      /* If the defining statement was (x & POW2), then use that instead of
	 the non-zero bits.  */
      if (srcstmt && integer_pow2p (gimple_assign_rhs2 (srcstmt)))
	{
	  nz = wi::to_wide (gimple_assign_rhs2 (srcstmt));
	  narg0 = gimple_assign_rhs1 (srcstmt);
	}

      if (wi::popcount (nz) == 1
	  && (integer_zerop (arg1)
	      || wi::to_wide (arg1) == nz))
	{
	  int bitnum = wi::exact_log2 (nz);
	  enum tree_code tcode = EQ_EXPR;
	  if ((code == NE) ^ !integer_zerop (arg1))
	    tcode = NE_EXPR;

	  type = lang_hooks.types.type_for_mode (mode, unsignedp);
	  return expand_single_bit_test (loc, tcode, narg0,
					 bitnum, type, target, mode);
	}
    }
  if (! get_subtarget (target)
      || GET_MODE (subtarget) != operand_mode)
    subtarget = 0;

  expand_operands (arg0, arg1, subtarget, &op0, &op1, EXPAND_NORMAL);
  /* For boolean vectors with less than mode precision
     make sure to fill padding with consistent values.  */
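  /* (E.g. a 4-element boolean vector held in an 8-bit scalar integer mode
     has four meaningful bits and four padding bits; masking both operands
     with (1 << 4) - 1 below makes the padding bits compare equal.)  */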
  if (VECTOR_BOOLEAN_TYPE_P (type)
      && SCALAR_INT_MODE_P (operand_mode)
      && TYPE_VECTOR_SUBPARTS (type).is_constant (&nunits)
      && maybe_ne (GET_MODE_PRECISION (operand_mode), nunits))
    {
      gcc_assert (code == EQ || code == NE);
      op0 = expand_binop (mode, and_optab, op0,
			  GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1),
			  NULL_RTX, true, OPTAB_WIDEN);
      op1 = expand_binop (mode, and_optab, op1,
			  GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1),
			  NULL_RTX, true, OPTAB_WIDEN);
    }
  if (target == 0)
    target = gen_reg_rtx (mode);

  /* Try a cstore if possible.  */
  return emit_store_flag_force (target, code, op0, op1,
				operand_mode, unsignedp,
				(TYPE_PRECISION (ops->type) == 1
				 && !TYPE_UNSIGNED (ops->type)) ? -1 : 1);
}
/* Attempt to generate a casesi instruction.  Returns true if successful,
   false otherwise (i.e. if there is no casesi instruction).

   DEFAULT_PROBABILITY is the probability of jumping to the default
   label.  */
bool
try_casesi (tree index_type, tree index_expr, tree minval, tree range,
	    rtx table_label, rtx default_label, rtx fallback_label,
	    profile_probability default_probability)
{
  class expand_operand ops[5];
  scalar_int_mode index_mode = SImode;
  rtx op1, op2, index;

  if (! targetm.have_casesi ())
    return false;
  /* The index must be some form of integer.  Convert it to SImode.  */
  scalar_int_mode omode = SCALAR_INT_TYPE_MODE (index_type);
  if (GET_MODE_BITSIZE (omode) > GET_MODE_BITSIZE (index_mode))
    {
      rtx rangertx = expand_normal (range);

      /* We must handle the endpoints in the original mode.  */
      index_expr = build2 (MINUS_EXPR, index_type,
			   index_expr, minval);
      minval = integer_zero_node;
      index = expand_normal (index_expr);
      if (default_label)
	emit_cmp_and_jump_insns (rangertx, index, LTU, NULL_RTX,
				 omode, 1, default_label,
				 default_probability);
      /* Now we can safely truncate.  */
      index = convert_to_mode (index_mode, index, 0);
    }
  else
    {
      if (omode != index_mode)
	{
	  index_type = lang_hooks.types.type_for_mode (index_mode, 0);
	  index_expr = fold_convert (index_type, index_expr);
	}

      index = expand_normal (index_expr);
    }

  do_pending_stack_adjust ();

  op1 = expand_normal (minval);
  op2 = expand_normal (range);

  create_input_operand (&ops[0], index, index_mode);
  create_convert_operand_from_type (&ops[1], op1, TREE_TYPE (minval));
  create_convert_operand_from_type (&ops[2], op2, TREE_TYPE (range));
  create_fixed_operand (&ops[3], table_label);
  create_fixed_operand (&ops[4], (default_label
				  ? default_label
				  : fallback_label));
  expand_jump_insn (targetm.code_for_casesi, 5, ops);
  return true;
}
/* Attempt to generate a tablejump instruction; same concept.  */
/* Subroutine of the next function.

   INDEX is the value being switched on, with the lowest value
   in the table already subtracted.
   MODE is its expected mode (needed if INDEX is constant).
   RANGE is the length of the jump table.
   TABLE_LABEL is a CODE_LABEL rtx for the table itself.

   DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the
   index value is out of range.
   DEFAULT_PROBABILITY is the probability of jumping to
   the default label.  */
static void
do_tablejump (rtx index, machine_mode mode, rtx range, rtx table_label,
	      rtx default_label, profile_probability default_probability)
{
  rtx temp, vector;

  if (INTVAL (range) > cfun->cfg->max_jumptable_ents)
    cfun->cfg->max_jumptable_ents = INTVAL (range);
  /* Do an unsigned comparison (in the proper mode) between the index
     expression and the value which represents the length of the range.
     Since we just finished subtracting the lower bound of the range
     from the index expression, this comparison allows us to simultaneously
     check that the original index expression value is both greater than
     or equal to the minimum value of the range and less than or equal to
     the maximum value of the range.  */
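  /* (Worked example: for case values 5 .. 9 the caller passes index - 5 and
     a range of 4; treating the difference as unsigned, a single
     "index - 5 > 4" test fires both when the original index was below 5,
     because the subtraction wraps around to a huge unsigned value, and when
     it was above 9.)  */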
  if (default_label)
    emit_cmp_and_jump_insns (index, range, GTU, NULL_RTX, mode, 1,
			     default_label, default_probability);
  /* If index is in range, it must fit in Pmode.
     Convert to Pmode so we can index with it.  */
  if (mode != Pmode)
    {
      unsigned int width;
      /* We know the value of INDEX is between 0 and RANGE.  If we have a
	 sign-extended subreg, and RANGE does not have the sign bit set, then
	 we have a value that is valid for both sign and zero extension.  In
	 this case, we get better code if we sign extend.  */
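      /* (E.g. if RANGE is 100 its sign bit is clear in the narrow mode, so a
	 value known to lie in [0, 100] gives the same Pmode result whether it
	 is sign- or zero-extended; sign extension is preferred because the
	 promoted subreg already holds a sign-extended value.)  */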
      if (GET_CODE (index) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (index)
	  && SUBREG_PROMOTED_SIGNED_P (index)
	  && ((width = GET_MODE_PRECISION (as_a <scalar_int_mode> (mode)))
	      <= HOST_BITS_PER_WIDE_INT)
	  && ! (UINTVAL (range) & (HOST_WIDE_INT_1U << (width - 1))))
	index = convert_to_mode (Pmode, index, 0);
      else
	index = convert_to_mode (Pmode, index, 1);
    }
  /* Don't let a MEM slip through, because then INDEX that comes
     out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
     and break_out_memory_refs will go to work on it and mess it up.  */
#ifdef PIC_CASE_VECTOR_ADDRESS
  if (flag_pic && !REG_P (index))
    index = copy_to_mode_reg (Pmode, index);
#endif
  /* ??? The only correct use of CASE_VECTOR_MODE is the one inside the
     GET_MODE_SIZE, because this indicates how large insns are.  The other
     uses should all be Pmode, because they are addresses.  This code
     could fail if addresses and insns are not the same size.  */
  index = simplify_gen_binary (MULT, Pmode, index,
			       gen_int_mode (GET_MODE_SIZE (CASE_VECTOR_MODE),
					     Pmode));
  index = simplify_gen_binary (PLUS, Pmode, index,
			       gen_rtx_LABEL_REF (Pmode, table_label));
#ifdef PIC_CASE_VECTOR_ADDRESS
  if (flag_pic)
    index = PIC_CASE_VECTOR_ADDRESS (index);
  else
#endif
    index = memory_address (CASE_VECTOR_MODE, index);
  temp = gen_reg_rtx (CASE_VECTOR_MODE);
  vector = gen_const_mem (CASE_VECTOR_MODE, index);
  convert_move (temp, vector, 0);

  emit_jump_insn (targetm.gen_tablejump (temp, table_label));
  /* If we are generating PIC code or if the table is PC-relative, the
     table and JUMP_INSN must be adjacent, so don't output a BARRIER.  */
  if (! CASE_VECTOR_PC_RELATIVE && ! flag_pic)
    emit_barrier ();
}
bool
try_tablejump (tree index_type, tree index_expr, tree minval, tree range,
	       rtx table_label, rtx default_label,
	       profile_probability default_probability)
{
  rtx index;

  if (! targetm.have_tablejump ())
    return false;

  index_expr = fold_build2 (MINUS_EXPR, index_type,
			    fold_convert (index_type, index_expr),
			    fold_convert (index_type, minval));
  index = expand_normal (index_expr);
  do_pending_stack_adjust ();

  do_tablejump (index, TYPE_MODE (index_type),
		convert_modes (TYPE_MODE (index_type),
			       TYPE_MODE (TREE_TYPE (range)),
			       expand_normal (range),
			       TYPE_UNSIGNED (TREE_TYPE (range))),
		table_label, default_label, default_probability);
  return true;
}
/* Return a CONST_VECTOR rtx representing vector mask for
   a VECTOR_CST of booleans.  */
static rtx
const_vector_mask_from_tree (tree exp)
{
  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
  machine_mode inner = GET_MODE_INNER (mode);

  rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
			      VECTOR_CST_NELTS_PER_PATTERN (exp));
  unsigned int count = builder.encoded_nelts ();
  for (unsigned int i = 0; i < count; ++i)
    {
      tree elt = VECTOR_CST_ELT (exp, i);
      gcc_assert (TREE_CODE (elt) == INTEGER_CST);
      if (integer_zerop (elt))
	builder.quick_push (CONST0_RTX (inner));
      else if (integer_onep (elt)
	       || integer_minus_onep (elt))
	builder.quick_push (CONSTM1_RTX (inner));
      else
	gcc_unreachable ();
    }

  return builder.build ();
}
/* Return a CONST_VECTOR rtx for a VECTOR_CST tree.  */
static rtx
const_vector_from_tree (tree exp)
{
  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));

  if (initializer_zerop (exp))
    return CONST0_RTX (mode);

  if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
    return const_vector_mask_from_tree (exp);

  machine_mode inner = GET_MODE_INNER (mode);

  rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
			      VECTOR_CST_NELTS_PER_PATTERN (exp));
  unsigned int count = builder.encoded_nelts ();
  for (unsigned int i = 0; i < count; ++i)
    {
      tree elt = VECTOR_CST_ELT (exp, i);
      if (TREE_CODE (elt) == REAL_CST)
	builder.quick_push (const_double_from_real_value (TREE_REAL_CST (elt),
							  inner));
      else if (TREE_CODE (elt) == FIXED_CST)
	builder.quick_push (CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
							  inner));
      else
	builder.quick_push (immed_wide_int_const (wi::to_poly_wide (elt),
						  inner));
    }

  return builder.build ();
}
/* Build a decl for a personality function given a language prefix.  */
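/* (For example, with LANG == "gxx" and DWARF2 unwind info this creates a
   decl for "__gxx_personality_v0"; with SJLJ exceptions the suffix selected
   below would make it "__gxx_personality_sj0".)  */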
tree
build_personality_function (const char *lang)
{
  const char *unwind_and_version;
  tree decl, type;
  char *name;

  switch (targetm_common.except_unwind_info (&global_options))
    {
    case UI_NONE:
      return NULL;
    case UI_SJLJ:
      unwind_and_version = "_sj0";
      break;
    case UI_DWARF2:
    case UI_TARGET:
      unwind_and_version = "_v0";
      break;
    case UI_SEH:
      unwind_and_version = "_seh0";
      break;
    default:
      gcc_unreachable ();
    }

  name = ACONCAT (("__", lang, "_personality", unwind_and_version, NULL));

  type = build_function_type_list (unsigned_type_node,
				   integer_type_node, integer_type_node,
				   long_long_unsigned_type_node,
				   ptr_type_node, ptr_type_node, NULL_TREE);
  decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
		     get_identifier (name), type);
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);

  return decl;
}
/* Extracts the personality function of DECL and returns the corresponding
   libfunc.  */

rtx
get_personality_function (tree decl)
{
  tree personality = DECL_FUNCTION_PERSONALITY (decl);
  enum eh_personality_kind pk;

  pk = function_needs_eh_personality (DECL_STRUCT_FUNCTION (decl));
  if (pk == eh_personality_none)
    return NULL;

  if (!personality
      && pk == eh_personality_any)
    personality = lang_hooks.eh_personality ();

  if (pk == eh_personality_lang)
    gcc_assert (personality != NULL_TREE);

  return XEXP (DECL_RTL (personality), 0);
}
/* Returns a tree for the size of EXP in bytes.  */

static tree
tree_expr_size (const_tree exp)
{
  if (DECL_P (exp)
      && DECL_SIZE_UNIT (exp) != 0)
    return DECL_SIZE_UNIT (exp);
  else
    return size_in_bytes (TREE_TYPE (exp));
}
/* Return an rtx for the size in bytes of the value of EXP.  */

rtx
expr_size (tree exp)
{
  tree size;

  if (TREE_CODE (exp) == WITH_SIZE_EXPR)
    size = TREE_OPERAND (exp, 1);
  else
    {
      size = tree_expr_size (exp);
      gcc_assert (size == SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, exp));
    }

  return expand_expr (size, NULL_RTX, TYPE_MODE (sizetype), EXPAND_NORMAL);
}
/* Return a wide integer for the size in bytes of the value of EXP, or -1
   if the size can vary or is larger than an integer.  */

HOST_WIDE_INT
int_expr_size (const_tree exp)
{
  tree size;

  if (TREE_CODE (exp) == WITH_SIZE_EXPR)
    size = TREE_OPERAND (exp, 1);
  else
    size = tree_expr_size (exp);

  if (size == 0 || !tree_fits_shwi_p (size))
    return -1;

  return tree_to_shwi (size);
}