1 /* Convert tree expression to rtl instructions, for GNU compiler.
2 Copyright (C) 1988-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
22 #include "coretypes.h"
38 #include "diagnostic.h"
40 #include "fold-const.h"
41 #include "stor-layout.h"
45 #include "insn-attr.h"
50 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
52 #include "optabs-tree.h"
55 #include "langhooks.h"
56 #include "common/common-target.h"
57 #include "tree-ssa-live.h"
58 #include "tree-outof-ssa.h"
59 #include "tree-ssa-address.h"
61 #include "tree-chkp.h"
66 /* If this is nonzero, we do not bother generating VOLATILE
67 around volatile memory references, and we are willing to
68 output indirect addresses. If cse is to follow, we reject
69 indirect addresses so a useful potential cse is generated;
70 if it is used only once, instruction combination will produce
71 the same indirect address eventually. */
74 static bool block_move_libcall_safe_for_call_parm (void);
75 static bool emit_block_move_via_movmem (rtx
, rtx
, rtx
, unsigned, unsigned, HOST_WIDE_INT
,
76 unsigned HOST_WIDE_INT
, unsigned HOST_WIDE_INT
,
77 unsigned HOST_WIDE_INT
);
78 static void emit_block_move_via_loop (rtx
, rtx
, rtx
, unsigned);
79 static void clear_by_pieces (rtx
, unsigned HOST_WIDE_INT
, unsigned int);
80 static rtx_insn
*compress_float_constant (rtx
, rtx
);
81 static rtx
get_subtarget (rtx
);
82 static void store_constructor (tree
, rtx
, int, poly_int64
, bool);
83 static rtx
store_field (rtx
, poly_int64
, poly_int64
, poly_uint64
, poly_uint64
,
84 machine_mode
, tree
, alias_set_type
, bool, bool);
86 static unsigned HOST_WIDE_INT
highest_pow2_factor_for_target (const_tree
, const_tree
);
88 static int is_aligning_offset (const_tree
, const_tree
);
89 static rtx
reduce_to_bit_field_precision (rtx
, rtx
, tree
);
90 static rtx
do_store_flag (sepops
, rtx
, machine_mode
);
92 static void emit_single_push_insn (machine_mode
, rtx
, tree
);
94 static void do_tablejump (rtx
, machine_mode
, rtx
, rtx
, rtx
,
96 static rtx
const_vector_from_tree (tree
);
97 static rtx
const_scalar_mask_from_tree (scalar_int_mode
, tree
);
98 static tree
tree_expr_size (const_tree
);
99 static HOST_WIDE_INT
int_expr_size (tree
);
100 static void convert_mode_scalar (rtx
, rtx
, int);
103 /* This is run to set up which modes can be used
104 directly in memory and to initialize the block move optab. It is run
105 at the beginning of compilation and when the target is reinitialized. */
108 init_expr_target (void)
115 /* Try indexing by frame ptr and try by stack ptr.
116 It is known that on the Convex the stack ptr isn't a valid index.
117 With luck, one or the other is valid on any machine. */
118 mem
= gen_rtx_MEM (word_mode
, stack_pointer_rtx
);
119 mem1
= gen_rtx_MEM (word_mode
, frame_pointer_rtx
);
121 /* A scratch register we can modify in-place below to avoid
122 useless RTL allocations. */
123 reg
= gen_rtx_REG (word_mode
, LAST_VIRTUAL_REGISTER
+ 1);
125 rtx_insn
*insn
= as_a
<rtx_insn
*> (rtx_alloc (INSN
));
126 pat
= gen_rtx_SET (NULL_RTX
, NULL_RTX
);
127 PATTERN (insn
) = pat
;
129 for (machine_mode mode
= VOIDmode
; (int) mode
< NUM_MACHINE_MODES
;
130 mode
= (machine_mode
) ((int) mode
+ 1))
134 direct_load
[(int) mode
] = direct_store
[(int) mode
] = 0;
135 PUT_MODE (mem
, mode
);
136 PUT_MODE (mem1
, mode
);
138 /* See if there is some register that can be used in this mode and
139 directly loaded or stored from memory. */
141 if (mode
!= VOIDmode
&& mode
!= BLKmode
)
142 for (regno
= 0; regno
< FIRST_PSEUDO_REGISTER
143 && (direct_load
[(int) mode
] == 0 || direct_store
[(int) mode
] == 0);
146 if (!targetm
.hard_regno_mode_ok (regno
, mode
))
149 set_mode_and_regno (reg
, mode
, regno
);
152 SET_DEST (pat
) = reg
;
153 if (recog (pat
, insn
, &num_clobbers
) >= 0)
154 direct_load
[(int) mode
] = 1;
156 SET_SRC (pat
) = mem1
;
157 SET_DEST (pat
) = reg
;
158 if (recog (pat
, insn
, &num_clobbers
) >= 0)
159 direct_load
[(int) mode
] = 1;
162 SET_DEST (pat
) = mem
;
163 if (recog (pat
, insn
, &num_clobbers
) >= 0)
164 direct_store
[(int) mode
] = 1;
167 SET_DEST (pat
) = mem1
;
168 if (recog (pat
, insn
, &num_clobbers
) >= 0)
169 direct_store
[(int) mode
] = 1;
173 mem
= gen_rtx_MEM (VOIDmode
, gen_raw_REG (Pmode
, LAST_VIRTUAL_REGISTER
+ 1));
175 opt_scalar_float_mode mode_iter
;
176 FOR_EACH_MODE_IN_CLASS (mode_iter
, MODE_FLOAT
)
178 scalar_float_mode mode
= mode_iter
.require ();
179 scalar_float_mode srcmode
;
180 FOR_EACH_MODE_UNTIL (srcmode
, mode
)
184 ic
= can_extend_p (mode
, srcmode
, 0);
185 if (ic
== CODE_FOR_nothing
)
188 PUT_MODE (mem
, srcmode
);
190 if (insn_operand_matches (ic
, 1, mem
))
191 float_extend_from_mem
[mode
][srcmode
] = true;
196 /* This is run at the start of compiling a function. */
201 memset (&crtl
->expr
, 0, sizeof (crtl
->expr
));
204 /* Copy data from FROM to TO, where the machine modes are not the same.
205 Both modes may be integer, or both may be floating, or both may be
207 UNSIGNEDP should be nonzero if FROM is an unsigned type.
208 This causes zero-extension instead of sign-extension. */
211 convert_move (rtx to
, rtx from
, int unsignedp
)
213 machine_mode to_mode
= GET_MODE (to
);
214 machine_mode from_mode
= GET_MODE (from
);
216 gcc_assert (to_mode
!= BLKmode
);
217 gcc_assert (from_mode
!= BLKmode
);
219 /* If the source and destination are already the same, then there's
224 /* If FROM is a SUBREG that indicates that we have already done at least
225 the required extension, strip it. We don't handle such SUBREGs as
228 scalar_int_mode to_int_mode
;
229 if (GET_CODE (from
) == SUBREG
230 && SUBREG_PROMOTED_VAR_P (from
)
231 && is_a
<scalar_int_mode
> (to_mode
, &to_int_mode
)
232 && (GET_MODE_PRECISION (subreg_promoted_mode (from
))
233 >= GET_MODE_PRECISION (to_int_mode
))
234 && SUBREG_CHECK_PROMOTED_SIGN (from
, unsignedp
))
235 from
= gen_lowpart (to_int_mode
, from
), from_mode
= to_int_mode
;
237 gcc_assert (GET_CODE (to
) != SUBREG
|| !SUBREG_PROMOTED_VAR_P (to
));
239 if (to_mode
== from_mode
240 || (from_mode
== VOIDmode
&& CONSTANT_P (from
)))
242 emit_move_insn (to
, from
);
246 if (VECTOR_MODE_P (to_mode
) || VECTOR_MODE_P (from_mode
))
248 gcc_assert (GET_MODE_BITSIZE (from_mode
) == GET_MODE_BITSIZE (to_mode
));
250 if (VECTOR_MODE_P (to_mode
))
251 from
= simplify_gen_subreg (to_mode
, from
, GET_MODE (from
), 0);
253 to
= simplify_gen_subreg (from_mode
, to
, GET_MODE (to
), 0);
255 emit_move_insn (to
, from
);
259 if (GET_CODE (to
) == CONCAT
&& GET_CODE (from
) == CONCAT
)
261 convert_move (XEXP (to
, 0), XEXP (from
, 0), unsignedp
);
262 convert_move (XEXP (to
, 1), XEXP (from
, 1), unsignedp
);
266 convert_mode_scalar (to
, from
, unsignedp
);
269 /* Like convert_move, but deals only with scalar modes. */
272 convert_mode_scalar (rtx to
, rtx from
, int unsignedp
)
274 /* Both modes should be scalar types. */
275 scalar_mode from_mode
= as_a
<scalar_mode
> (GET_MODE (from
));
276 scalar_mode to_mode
= as_a
<scalar_mode
> (GET_MODE (to
));
277 bool to_real
= SCALAR_FLOAT_MODE_P (to_mode
);
278 bool from_real
= SCALAR_FLOAT_MODE_P (from_mode
);
282 gcc_assert (to_real
== from_real
);
284 /* rtx code for making an equivalent value. */
285 enum rtx_code equiv_code
= (unsignedp
< 0 ? UNKNOWN
286 : (unsignedp
? ZERO_EXTEND
: SIGN_EXTEND
));
294 gcc_assert ((GET_MODE_PRECISION (from_mode
)
295 != GET_MODE_PRECISION (to_mode
))
296 || (DECIMAL_FLOAT_MODE_P (from_mode
)
297 != DECIMAL_FLOAT_MODE_P (to_mode
)));
299 if (GET_MODE_PRECISION (from_mode
) == GET_MODE_PRECISION (to_mode
))
300 /* Conversion between decimal float and binary float, same size. */
301 tab
= DECIMAL_FLOAT_MODE_P (from_mode
) ? trunc_optab
: sext_optab
;
302 else if (GET_MODE_PRECISION (from_mode
) < GET_MODE_PRECISION (to_mode
))
307 /* Try converting directly if the insn is supported. */
309 code
= convert_optab_handler (tab
, to_mode
, from_mode
);
310 if (code
!= CODE_FOR_nothing
)
312 emit_unop_insn (code
, to
, from
,
313 tab
== sext_optab
? FLOAT_EXTEND
: FLOAT_TRUNCATE
);
317 /* Otherwise use a libcall. */
318 libcall
= convert_optab_libfunc (tab
, to_mode
, from_mode
);
320 /* Is this conversion implemented yet? */
321 gcc_assert (libcall
);
324 value
= emit_library_call_value (libcall
, NULL_RTX
, LCT_CONST
, to_mode
,
326 insns
= get_insns ();
328 emit_libcall_block (insns
, to
, value
,
329 tab
== trunc_optab
? gen_rtx_FLOAT_TRUNCATE (to_mode
,
331 : gen_rtx_FLOAT_EXTEND (to_mode
, from
));
335 /* Handle pointer conversion. */ /* SPEE 900220. */
336 /* If the target has a converter from FROM_MODE to TO_MODE, use it. */
340 if (GET_MODE_PRECISION (from_mode
) > GET_MODE_PRECISION (to_mode
))
347 if (convert_optab_handler (ctab
, to_mode
, from_mode
)
350 emit_unop_insn (convert_optab_handler (ctab
, to_mode
, from_mode
),
356 /* Targets are expected to provide conversion insns between PxImode and
357 xImode for all MODE_PARTIAL_INT modes they use, but no others. */
358 if (GET_MODE_CLASS (to_mode
) == MODE_PARTIAL_INT
)
360 scalar_int_mode full_mode
361 = smallest_int_mode_for_size (GET_MODE_BITSIZE (to_mode
));
363 gcc_assert (convert_optab_handler (trunc_optab
, to_mode
, full_mode
)
364 != CODE_FOR_nothing
);
366 if (full_mode
!= from_mode
)
367 from
= convert_to_mode (full_mode
, from
, unsignedp
);
368 emit_unop_insn (convert_optab_handler (trunc_optab
, to_mode
, full_mode
),
372 if (GET_MODE_CLASS (from_mode
) == MODE_PARTIAL_INT
)
375 scalar_int_mode full_mode
376 = smallest_int_mode_for_size (GET_MODE_BITSIZE (from_mode
));
377 convert_optab ctab
= unsignedp
? zext_optab
: sext_optab
;
378 enum insn_code icode
;
380 icode
= convert_optab_handler (ctab
, full_mode
, from_mode
);
381 gcc_assert (icode
!= CODE_FOR_nothing
);
383 if (to_mode
== full_mode
)
385 emit_unop_insn (icode
, to
, from
, UNKNOWN
);
389 new_from
= gen_reg_rtx (full_mode
);
390 emit_unop_insn (icode
, new_from
, from
, UNKNOWN
);
392 /* else proceed to integer conversions below. */
393 from_mode
= full_mode
;
397 /* Make sure both are fixed-point modes or both are not. */
398 gcc_assert (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode
) ==
399 ALL_SCALAR_FIXED_POINT_MODE_P (to_mode
));
400 if (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode
))
402 /* If we widen from_mode to to_mode and they are in the same class,
403 we won't saturate the result.
404 Otherwise, always saturate the result to play safe. */
405 if (GET_MODE_CLASS (from_mode
) == GET_MODE_CLASS (to_mode
)
406 && GET_MODE_SIZE (from_mode
) < GET_MODE_SIZE (to_mode
))
407 expand_fixed_convert (to
, from
, 0, 0);
409 expand_fixed_convert (to
, from
, 0, 1);
413 /* Now both modes are integers. */
415 /* Handle expanding beyond a word. */
416 if (GET_MODE_PRECISION (from_mode
) < GET_MODE_PRECISION (to_mode
)
417 && GET_MODE_PRECISION (to_mode
) > BITS_PER_WORD
)
424 scalar_mode lowpart_mode
;
425 int nwords
= CEIL (GET_MODE_SIZE (to_mode
), UNITS_PER_WORD
);
427 /* Try converting directly if the insn is supported. */
428 if ((code
= can_extend_p (to_mode
, from_mode
, unsignedp
))
431 /* If FROM is a SUBREG, put it into a register. Do this
432 so that we always generate the same set of insns for
433 better cse'ing; if an intermediate assignment occurred,
434 we won't be doing the operation directly on the SUBREG. */
435 if (optimize
> 0 && GET_CODE (from
) == SUBREG
)
436 from
= force_reg (from_mode
, from
);
437 emit_unop_insn (code
, to
, from
, equiv_code
);
440 /* Next, try converting via full word. */
441 else if (GET_MODE_PRECISION (from_mode
) < BITS_PER_WORD
442 && ((code
= can_extend_p (to_mode
, word_mode
, unsignedp
))
443 != CODE_FOR_nothing
))
445 rtx word_to
= gen_reg_rtx (word_mode
);
448 if (reg_overlap_mentioned_p (to
, from
))
449 from
= force_reg (from_mode
, from
);
452 convert_move (word_to
, from
, unsignedp
);
453 emit_unop_insn (code
, to
, word_to
, equiv_code
);
457 /* No special multiword conversion insn; do it by hand. */
460 /* Since we will turn this into a no conflict block, we must ensure
461 the source does not overlap the target so force it into an isolated
462 register when maybe so. Likewise for any MEM input, since the
463 conversion sequence might require several references to it and we
464 must ensure we're getting the same value every time. */
466 if (MEM_P (from
) || reg_overlap_mentioned_p (to
, from
))
467 from
= force_reg (from_mode
, from
);
469 /* Get a copy of FROM widened to a word, if necessary. */
470 if (GET_MODE_PRECISION (from_mode
) < BITS_PER_WORD
)
471 lowpart_mode
= word_mode
;
473 lowpart_mode
= from_mode
;
475 lowfrom
= convert_to_mode (lowpart_mode
, from
, unsignedp
);
477 lowpart
= gen_lowpart (lowpart_mode
, to
);
478 emit_move_insn (lowpart
, lowfrom
);
480 /* Compute the value to put in each remaining word. */
482 fill_value
= const0_rtx
;
484 fill_value
= emit_store_flag_force (gen_reg_rtx (word_mode
),
485 LT
, lowfrom
, const0_rtx
,
486 lowpart_mode
, 0, -1);
488 /* Fill the remaining words. */
489 for (i
= GET_MODE_SIZE (lowpart_mode
) / UNITS_PER_WORD
; i
< nwords
; i
++)
491 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
492 rtx subword
= operand_subword (to
, index
, 1, to_mode
);
494 gcc_assert (subword
);
496 if (fill_value
!= subword
)
497 emit_move_insn (subword
, fill_value
);
500 insns
= get_insns ();
507 /* Truncating multi-word to a word or less. */
508 if (GET_MODE_PRECISION (from_mode
) > BITS_PER_WORD
509 && GET_MODE_PRECISION (to_mode
) <= BITS_PER_WORD
)
512 && ! MEM_VOLATILE_P (from
)
513 && direct_load
[(int) to_mode
]
514 && ! mode_dependent_address_p (XEXP (from
, 0),
515 MEM_ADDR_SPACE (from
)))
517 || GET_CODE (from
) == SUBREG
))
518 from
= force_reg (from_mode
, from
);
519 convert_move (to
, gen_lowpart (word_mode
, from
), 0);
523 /* Now follow all the conversions between integers
524 no more than a word long. */
526 /* For truncation, usually we can just refer to FROM in a narrower mode. */
527 if (GET_MODE_BITSIZE (to_mode
) < GET_MODE_BITSIZE (from_mode
)
528 && TRULY_NOOP_TRUNCATION_MODES_P (to_mode
, from_mode
))
531 && ! MEM_VOLATILE_P (from
)
532 && direct_load
[(int) to_mode
]
533 && ! mode_dependent_address_p (XEXP (from
, 0),
534 MEM_ADDR_SPACE (from
)))
536 || GET_CODE (from
) == SUBREG
))
537 from
= force_reg (from_mode
, from
);
538 if (REG_P (from
) && REGNO (from
) < FIRST_PSEUDO_REGISTER
539 && !targetm
.hard_regno_mode_ok (REGNO (from
), to_mode
))
540 from
= copy_to_reg (from
);
541 emit_move_insn (to
, gen_lowpart (to_mode
, from
));
545 /* Handle extension. */
546 if (GET_MODE_PRECISION (to_mode
) > GET_MODE_PRECISION (from_mode
))
548 /* Convert directly if that works. */
549 if ((code
= can_extend_p (to_mode
, from_mode
, unsignedp
))
552 emit_unop_insn (code
, to
, from
, equiv_code
);
557 scalar_mode intermediate
;
561 /* Search for a mode to convert via. */
562 opt_scalar_mode intermediate_iter
;
563 FOR_EACH_MODE_FROM (intermediate_iter
, from_mode
)
565 scalar_mode intermediate
= intermediate_iter
.require ();
566 if (((can_extend_p (to_mode
, intermediate
, unsignedp
)
568 || (GET_MODE_SIZE (to_mode
) < GET_MODE_SIZE (intermediate
)
569 && TRULY_NOOP_TRUNCATION_MODES_P (to_mode
,
571 && (can_extend_p (intermediate
, from_mode
, unsignedp
)
572 != CODE_FOR_nothing
))
574 convert_move (to
, convert_to_mode (intermediate
, from
,
575 unsignedp
), unsignedp
);
580 /* No suitable intermediate mode.
581 Generate what we need with shifts. */
582 shift_amount
= (GET_MODE_PRECISION (to_mode
)
583 - GET_MODE_PRECISION (from_mode
));
584 from
= gen_lowpart (to_mode
, force_reg (from_mode
, from
));
585 tmp
= expand_shift (LSHIFT_EXPR
, to_mode
, from
, shift_amount
,
587 tmp
= expand_shift (RSHIFT_EXPR
, to_mode
, tmp
, shift_amount
,
590 emit_move_insn (to
, tmp
);
595 /* Support special truncate insns for certain modes. */
596 if (convert_optab_handler (trunc_optab
, to_mode
,
597 from_mode
) != CODE_FOR_nothing
)
599 emit_unop_insn (convert_optab_handler (trunc_optab
, to_mode
, from_mode
),
604 /* Handle truncation of volatile memrefs, and so on;
605 the things that couldn't be truncated directly,
606 and for which there was no special instruction.
608 ??? Code above formerly short-circuited this, for most integer
609 mode pairs, with a force_reg in from_mode followed by a recursive
610 call to this routine. Appears always to have been wrong. */
611 if (GET_MODE_PRECISION (to_mode
) < GET_MODE_PRECISION (from_mode
))
613 rtx temp
= force_reg (to_mode
, gen_lowpart (to_mode
, from
));
614 emit_move_insn (to
, temp
);
618 /* Mode combination is not recognized. */
622 /* Return an rtx for a value that would result
623 from converting X to mode MODE.
624 Both X and MODE may be floating, or both integer.
625 UNSIGNEDP is nonzero if X is an unsigned value.
626 This can be done by referring to a part of X in place
627 or by copying to a new temporary with conversion. */
630 convert_to_mode (machine_mode mode
, rtx x
, int unsignedp
)
632 return convert_modes (mode
, VOIDmode
, x
, unsignedp
);
635 /* Return an rtx for a value that would result
636 from converting X from mode OLDMODE to mode MODE.
637 Both modes may be floating, or both integer.
638 UNSIGNEDP is nonzero if X is an unsigned value.
640 This can be done by referring to a part of X in place
641 or by copying to a new temporary with conversion.
643 You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode. */
646 convert_modes (machine_mode mode
, machine_mode oldmode
, rtx x
, int unsignedp
)
649 scalar_int_mode int_mode
;
651 /* If FROM is a SUBREG that indicates that we have already done at least
652 the required extension, strip it. */
654 if (GET_CODE (x
) == SUBREG
655 && SUBREG_PROMOTED_VAR_P (x
)
656 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
657 && (GET_MODE_PRECISION (subreg_promoted_mode (x
))
658 >= GET_MODE_PRECISION (int_mode
))
659 && SUBREG_CHECK_PROMOTED_SIGN (x
, unsignedp
))
660 x
= gen_lowpart (int_mode
, SUBREG_REG (x
));
662 if (GET_MODE (x
) != VOIDmode
)
663 oldmode
= GET_MODE (x
);
668 if (CONST_SCALAR_INT_P (x
)
669 && is_int_mode (mode
, &int_mode
))
671 /* If the caller did not tell us the old mode, then there is not
672 much to do with respect to canonicalization. We have to
673 assume that all the bits are significant. */
674 if (GET_MODE_CLASS (oldmode
) != MODE_INT
)
675 oldmode
= MAX_MODE_INT
;
676 wide_int w
= wide_int::from (rtx_mode_t (x
, oldmode
),
677 GET_MODE_PRECISION (int_mode
),
678 unsignedp
? UNSIGNED
: SIGNED
);
679 return immed_wide_int_const (w
, int_mode
);
682 /* We can do this with a gen_lowpart if both desired and current modes
683 are integer, and this is either a constant integer, a register, or a
685 scalar_int_mode int_oldmode
;
686 if (is_int_mode (mode
, &int_mode
)
687 && is_int_mode (oldmode
, &int_oldmode
)
688 && GET_MODE_PRECISION (int_mode
) <= GET_MODE_PRECISION (int_oldmode
)
689 && ((MEM_P (x
) && !MEM_VOLATILE_P (x
) && direct_load
[(int) int_mode
])
690 || CONST_POLY_INT_P (x
)
692 && (!HARD_REGISTER_P (x
)
693 || targetm
.hard_regno_mode_ok (REGNO (x
), int_mode
))
694 && TRULY_NOOP_TRUNCATION_MODES_P (int_mode
, GET_MODE (x
)))))
695 return gen_lowpart (int_mode
, x
);
697 /* Converting from integer constant into mode is always equivalent to an
699 if (VECTOR_MODE_P (mode
) && GET_MODE (x
) == VOIDmode
)
701 gcc_assert (GET_MODE_BITSIZE (mode
) == GET_MODE_BITSIZE (oldmode
));
702 return simplify_gen_subreg (mode
, x
, oldmode
, 0);
705 temp
= gen_reg_rtx (mode
);
706 convert_move (temp
, x
, unsignedp
);
710 /* Return the largest alignment we can use for doing a move (or store)
711 of MAX_PIECES. ALIGN is the largest alignment we could use. */
714 alignment_for_piecewise_move (unsigned int max_pieces
, unsigned int align
)
716 scalar_int_mode tmode
717 = int_mode_for_size (max_pieces
* BITS_PER_UNIT
, 1).require ();
719 if (align
>= GET_MODE_ALIGNMENT (tmode
))
720 align
= GET_MODE_ALIGNMENT (tmode
);
723 scalar_int_mode xmode
= NARROWEST_INT_MODE
;
724 opt_scalar_int_mode mode_iter
;
725 FOR_EACH_MODE_IN_CLASS (mode_iter
, MODE_INT
)
727 tmode
= mode_iter
.require ();
728 if (GET_MODE_SIZE (tmode
) > max_pieces
729 || targetm
.slow_unaligned_access (tmode
, align
))
734 align
= MAX (align
, GET_MODE_ALIGNMENT (xmode
));
740 /* Return the widest integer mode that is narrower than SIZE bytes. */
742 static scalar_int_mode
743 widest_int_mode_for_size (unsigned int size
)
745 scalar_int_mode result
= NARROWEST_INT_MODE
;
747 gcc_checking_assert (size
> 1);
749 opt_scalar_int_mode tmode
;
750 FOR_EACH_MODE_IN_CLASS (tmode
, MODE_INT
)
751 if (GET_MODE_SIZE (tmode
.require ()) < size
)
752 result
= tmode
.require ();
757 /* Determine whether an operation OP on LEN bytes with alignment ALIGN can
758 and should be performed piecewise. */
761 can_do_by_pieces (unsigned HOST_WIDE_INT len
, unsigned int align
,
762 enum by_pieces_operation op
)
764 return targetm
.use_by_pieces_infrastructure_p (len
, align
, op
,
765 optimize_insn_for_speed_p ());
768 /* Determine whether the LEN bytes can be moved by using several move
769 instructions. Return nonzero if a call to move_by_pieces should
773 can_move_by_pieces (unsigned HOST_WIDE_INT len
, unsigned int align
)
775 return can_do_by_pieces (len
, align
, MOVE_BY_PIECES
);
778 /* Return number of insns required to perform operation OP by pieces
779 for L bytes. ALIGN (in bits) is maximum alignment we can assume. */
781 unsigned HOST_WIDE_INT
782 by_pieces_ninsns (unsigned HOST_WIDE_INT l
, unsigned int align
,
783 unsigned int max_size
, by_pieces_operation op
)
785 unsigned HOST_WIDE_INT n_insns
= 0;
787 align
= alignment_for_piecewise_move (MOVE_MAX_PIECES
, align
);
789 while (max_size
> 1 && l
> 0)
791 scalar_int_mode mode
= widest_int_mode_for_size (max_size
);
792 enum insn_code icode
;
794 unsigned int modesize
= GET_MODE_SIZE (mode
);
796 icode
= optab_handler (mov_optab
, mode
);
797 if (icode
!= CODE_FOR_nothing
&& align
>= GET_MODE_ALIGNMENT (mode
))
799 unsigned HOST_WIDE_INT n_pieces
= l
/ modesize
;
807 case COMPARE_BY_PIECES
:
808 int batch
= targetm
.compare_by_pieces_branch_ratio (mode
);
809 int batch_ops
= 4 * batch
- 1;
810 unsigned HOST_WIDE_INT full
= n_pieces
/ batch
;
811 n_insns
+= full
* batch_ops
;
812 if (n_pieces
% batch
!= 0)
825 /* Used when performing piecewise block operations, holds information
826 about one of the memory objects involved. The member functions
827 can be used to generate code for loading from the object and
828 updating the address when iterating. */
832 /* The object being referenced, a MEM. Can be NULL_RTX to indicate
835 /* The address of the object. Can differ from that seen in the
836 MEM rtx if we copied the address to a register. */
838 /* Nonzero if the address on the object has an autoincrement already,
839 signifies whether that was an increment or decrement. */
840 signed char m_addr_inc
;
841 /* Nonzero if we intend to use autoinc without the address already
842 having autoinc form. We will insert add insns around each memory
843 reference, expecting later passes to form autoinc addressing modes.
844 The only supported options are predecrement and postincrement. */
845 signed char m_explicit_inc
;
846 /* True if we have either of the two possible cases of using
849 /* True if this is an address to be used for load operations rather
853 /* Optionally, a function to obtain constants for any given offset into
854 the objects, and data associated with it. */
855 by_pieces_constfn m_constfn
;
858 pieces_addr (rtx
, bool, by_pieces_constfn
, void *);
859 rtx
adjust (scalar_int_mode
, HOST_WIDE_INT
);
860 void increment_address (HOST_WIDE_INT
);
861 void maybe_predec (HOST_WIDE_INT
);
862 void maybe_postinc (HOST_WIDE_INT
);
863 void decide_autoinc (machine_mode
, bool, HOST_WIDE_INT
);
870 /* Initialize a pieces_addr structure from an object OBJ. IS_LOAD is
871 true if the operation to be performed on this object is a load
872 rather than a store. For stores, OBJ can be NULL, in which case we
873 assume the operation is a stack push. For loads, the optional
874 CONSTFN and its associated CFNDATA can be used in place of the
877 pieces_addr::pieces_addr (rtx obj
, bool is_load
, by_pieces_constfn constfn
,
879 : m_obj (obj
), m_is_load (is_load
), m_constfn (constfn
), m_cfndata (cfndata
)
885 rtx addr
= XEXP (obj
, 0);
886 rtx_code code
= GET_CODE (addr
);
888 bool dec
= code
== PRE_DEC
|| code
== POST_DEC
;
889 bool inc
= code
== PRE_INC
|| code
== POST_INC
;
892 m_addr_inc
= dec
? -1 : 1;
894 /* While we have always looked for these codes here, the code
895 implementing the memory operation has never handled them.
896 Support could be added later if necessary or beneficial. */
897 gcc_assert (code
!= PRE_INC
&& code
!= POST_DEC
);
905 if (STACK_GROWS_DOWNWARD
)
911 gcc_assert (constfn
!= NULL
);
915 gcc_assert (is_load
);
918 /* Decide whether to use autoinc for an address involved in a memory op.
919 MODE is the mode of the accesses, REVERSE is true if we've decided to
920 perform the operation starting from the end, and LEN is the length of
921 the operation. Don't override an earlier decision to set m_auto. */
924 pieces_addr::decide_autoinc (machine_mode
ARG_UNUSED (mode
), bool reverse
,
927 if (m_auto
|| m_obj
== NULL_RTX
)
930 bool use_predec
= (m_is_load
931 ? USE_LOAD_PRE_DECREMENT (mode
)
932 : USE_STORE_PRE_DECREMENT (mode
));
933 bool use_postinc
= (m_is_load
934 ? USE_LOAD_POST_INCREMENT (mode
)
935 : USE_STORE_POST_INCREMENT (mode
));
936 machine_mode addr_mode
= get_address_mode (m_obj
);
938 if (use_predec
&& reverse
)
940 m_addr
= copy_to_mode_reg (addr_mode
,
941 plus_constant (addr_mode
,
946 else if (use_postinc
&& !reverse
)
948 m_addr
= copy_to_mode_reg (addr_mode
, m_addr
);
952 else if (CONSTANT_P (m_addr
))
953 m_addr
= copy_to_mode_reg (addr_mode
, m_addr
);
956 /* Adjust the address to refer to the data at OFFSET in MODE. If we
957 are using autoincrement for this address, we don't add the offset,
958 but we still modify the MEM's properties. */
961 pieces_addr::adjust (scalar_int_mode mode
, HOST_WIDE_INT offset
)
964 return m_constfn (m_cfndata
, offset
, mode
);
965 if (m_obj
== NULL_RTX
)
968 return adjust_automodify_address (m_obj
, mode
, m_addr
, offset
);
970 return adjust_address (m_obj
, mode
, offset
);
973 /* Emit an add instruction to increment the address by SIZE. */
976 pieces_addr::increment_address (HOST_WIDE_INT size
)
978 rtx amount
= gen_int_mode (size
, GET_MODE (m_addr
));
979 emit_insn (gen_add2_insn (m_addr
, amount
));
982 /* If we are supposed to decrement the address after each access, emit code
983 to do so now. Increment by SIZE (which has should have the correct sign
987 pieces_addr::maybe_predec (HOST_WIDE_INT size
)
989 if (m_explicit_inc
>= 0)
991 gcc_assert (HAVE_PRE_DECREMENT
);
992 increment_address (size
);
995 /* If we are supposed to decrement the address after each access, emit code
996 to do so now. Increment by SIZE. */
999 pieces_addr::maybe_postinc (HOST_WIDE_INT size
)
1001 if (m_explicit_inc
<= 0)
1003 gcc_assert (HAVE_POST_INCREMENT
);
1004 increment_address (size
);
1007 /* This structure is used by do_op_by_pieces to describe the operation
1010 class op_by_pieces_d
1013 pieces_addr m_to
, m_from
;
1014 unsigned HOST_WIDE_INT m_len
;
1015 HOST_WIDE_INT m_offset
;
1016 unsigned int m_align
;
1017 unsigned int m_max_size
;
1020 /* Virtual functions, overriden by derived classes for the specific
1022 virtual void generate (rtx
, rtx
, machine_mode
) = 0;
1023 virtual bool prepare_mode (machine_mode
, unsigned int) = 0;
1024 virtual void finish_mode (machine_mode
)
1029 op_by_pieces_d (rtx
, bool, rtx
, bool, by_pieces_constfn
, void *,
1030 unsigned HOST_WIDE_INT
, unsigned int);
1034 /* The constructor for an op_by_pieces_d structure. We require two
1035 objects named TO and FROM, which are identified as loads or stores
1036 by TO_LOAD and FROM_LOAD. If FROM is a load, the optional FROM_CFN
1037 and its associated FROM_CFN_DATA can be used to replace loads with
1038 constant values. LEN describes the length of the operation. */
1040 op_by_pieces_d::op_by_pieces_d (rtx to
, bool to_load
,
1041 rtx from
, bool from_load
,
1042 by_pieces_constfn from_cfn
,
1043 void *from_cfn_data
,
1044 unsigned HOST_WIDE_INT len
,
1046 : m_to (to
, to_load
, NULL
, NULL
),
1047 m_from (from
, from_load
, from_cfn
, from_cfn_data
),
1048 m_len (len
), m_max_size (MOVE_MAX_PIECES
+ 1)
1050 int toi
= m_to
.get_addr_inc ();
1051 int fromi
= m_from
.get_addr_inc ();
1052 if (toi
>= 0 && fromi
>= 0)
1054 else if (toi
<= 0 && fromi
<= 0)
1059 m_offset
= m_reverse
? len
: 0;
1060 align
= MIN (to
? MEM_ALIGN (to
) : align
,
1061 from
? MEM_ALIGN (from
) : align
);
1063 /* If copying requires more than two move insns,
1064 copy addresses to registers (to make displacements shorter)
1065 and use post-increment if available. */
1066 if (by_pieces_ninsns (len
, align
, m_max_size
, MOVE_BY_PIECES
) > 2)
1068 /* Find the mode of the largest comparison. */
1069 scalar_int_mode mode
= widest_int_mode_for_size (m_max_size
);
1071 m_from
.decide_autoinc (mode
, m_reverse
, len
);
1072 m_to
.decide_autoinc (mode
, m_reverse
, len
);
1075 align
= alignment_for_piecewise_move (MOVE_MAX_PIECES
, align
);
1079 /* This function contains the main loop used for expanding a block
1080 operation. First move what we can in the largest integer mode,
1081 then go to successively smaller modes. For every access, call
1082 GENFUN with the two operands and the EXTRA_DATA. */
1085 op_by_pieces_d::run ()
1087 while (m_max_size
> 1 && m_len
> 0)
1089 scalar_int_mode mode
= widest_int_mode_for_size (m_max_size
);
1091 if (prepare_mode (mode
, m_align
))
1093 unsigned int size
= GET_MODE_SIZE (mode
);
1094 rtx to1
= NULL_RTX
, from1
;
1096 while (m_len
>= size
)
1101 to1
= m_to
.adjust (mode
, m_offset
);
1102 from1
= m_from
.adjust (mode
, m_offset
);
1104 m_to
.maybe_predec (-(HOST_WIDE_INT
)size
);
1105 m_from
.maybe_predec (-(HOST_WIDE_INT
)size
);
1107 generate (to1
, from1
, mode
);
1109 m_to
.maybe_postinc (size
);
1110 m_from
.maybe_postinc (size
);
1121 m_max_size
= GET_MODE_SIZE (mode
);
1124 /* The code above should have handled everything. */
1125 gcc_assert (!m_len
);
1128 /* Derived class from op_by_pieces_d, providing support for block move
1131 class move_by_pieces_d
: public op_by_pieces_d
1133 insn_gen_fn m_gen_fun
;
1134 void generate (rtx
, rtx
, machine_mode
);
1135 bool prepare_mode (machine_mode
, unsigned int);
1138 move_by_pieces_d (rtx to
, rtx from
, unsigned HOST_WIDE_INT len
,
1140 : op_by_pieces_d (to
, false, from
, true, NULL
, NULL
, len
, align
)
1143 rtx
finish_endp (int);
1146 /* Return true if MODE can be used for a set of copies, given an
1147 alignment ALIGN. Prepare whatever data is necessary for later
1148 calls to generate. */
1151 move_by_pieces_d::prepare_mode (machine_mode mode
, unsigned int align
)
1153 insn_code icode
= optab_handler (mov_optab
, mode
);
1154 m_gen_fun
= GEN_FCN (icode
);
1155 return icode
!= CODE_FOR_nothing
&& align
>= GET_MODE_ALIGNMENT (mode
);
1158 /* A callback used when iterating for a compare_by_pieces_operation.
1159 OP0 and OP1 are the values that have been loaded and should be
1160 compared in MODE. If OP0 is NULL, this means we should generate a
1161 push; otherwise EXTRA_DATA holds a pointer to a pointer to the insn
1162 gen function that should be used to generate the mode. */
1165 move_by_pieces_d::generate (rtx op0
, rtx op1
,
1166 machine_mode mode ATTRIBUTE_UNUSED
)
1168 #ifdef PUSH_ROUNDING
1169 if (op0
== NULL_RTX
)
1171 emit_single_push_insn (mode
, op1
, NULL
);
1175 emit_insn (m_gen_fun (op0
, op1
));
1178 /* Perform the final adjustment at the end of a string to obtain the
1179 correct return value for the block operation. If ENDP is 1 return
1180 memory at the end ala mempcpy, and if ENDP is 2 return memory the
1181 end minus one byte ala stpcpy. */
1184 move_by_pieces_d::finish_endp (int endp
)
1186 gcc_assert (!m_reverse
);
1189 m_to
.maybe_postinc (-1);
1192 return m_to
.adjust (QImode
, m_offset
);
1195 /* Generate several move instructions to copy LEN bytes from block FROM to
1196 block TO. (These are MEM rtx's with BLKmode).
1198 If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
1199 used to push FROM to the stack.
1201 ALIGN is maximum stack alignment we can assume.
1203 If ENDP is 0 return to, if ENDP is 1 return memory at the end ala
1204 mempcpy, and if ENDP is 2 return memory the end minus one byte ala
1208 move_by_pieces (rtx to
, rtx from
, unsigned HOST_WIDE_INT len
,
1209 unsigned int align
, int endp
)
1211 #ifndef PUSH_ROUNDING
1216 move_by_pieces_d
data (to
, from
, len
, align
);
1221 return data
.finish_endp (endp
);
1226 /* Derived class from op_by_pieces_d, providing support for block move
1229 class store_by_pieces_d
: public op_by_pieces_d
1231 insn_gen_fn m_gen_fun
;
1232 void generate (rtx
, rtx
, machine_mode
);
1233 bool prepare_mode (machine_mode
, unsigned int);
1236 store_by_pieces_d (rtx to
, by_pieces_constfn cfn
, void *cfn_data
,
1237 unsigned HOST_WIDE_INT len
, unsigned int align
)
1238 : op_by_pieces_d (to
, false, NULL_RTX
, true, cfn
, cfn_data
, len
, align
)
1241 rtx
finish_endp (int);
1244 /* Return true if MODE can be used for a set of stores, given an
1245 alignment ALIGN. Prepare whatever data is necessary for later
1246 calls to generate. */
1249 store_by_pieces_d::prepare_mode (machine_mode mode
, unsigned int align
)
1251 insn_code icode
= optab_handler (mov_optab
, mode
);
1252 m_gen_fun
= GEN_FCN (icode
);
1253 return icode
!= CODE_FOR_nothing
&& align
>= GET_MODE_ALIGNMENT (mode
);
1256 /* A callback used when iterating for a store_by_pieces_operation.
1257 OP0 and OP1 are the values that have been loaded and should be
1258 compared in MODE. If OP0 is NULL, this means we should generate a
1259 push; otherwise EXTRA_DATA holds a pointer to a pointer to the insn
1260 gen function that should be used to generate the mode. */
1263 store_by_pieces_d::generate (rtx op0
, rtx op1
, machine_mode
)
1265 emit_insn (m_gen_fun (op0
, op1
));
1268 /* Perform the final adjustment at the end of a string to obtain the
1269 correct return value for the block operation. If ENDP is 1 return
1270 memory at the end ala mempcpy, and if ENDP is 2 return memory the
1271 end minus one byte ala stpcpy. */
1274 store_by_pieces_d::finish_endp (int endp
)
1276 gcc_assert (!m_reverse
);
1279 m_to
.maybe_postinc (-1);
1282 return m_to
.adjust (QImode
, m_offset
);
1285 /* Determine whether the LEN bytes generated by CONSTFUN can be
1286 stored to memory using several move instructions. CONSTFUNDATA is
1287 a pointer which will be passed as argument in every CONSTFUN call.
1288 ALIGN is maximum alignment we can assume. MEMSETP is true if this is
1289 a memset operation and false if it's a copy of a constant string.
1290 Return nonzero if a call to store_by_pieces should succeed. */
1293 can_store_by_pieces (unsigned HOST_WIDE_INT len
,
1294 rtx (*constfun
) (void *, HOST_WIDE_INT
, scalar_int_mode
),
1295 void *constfundata
, unsigned int align
, bool memsetp
)
1297 unsigned HOST_WIDE_INT l
;
1298 unsigned int max_size
;
1299 HOST_WIDE_INT offset
= 0;
1300 enum insn_code icode
;
1302 /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it. */
1303 rtx cst ATTRIBUTE_UNUSED
;
1308 if (!targetm
.use_by_pieces_infrastructure_p (len
, align
,
1312 optimize_insn_for_speed_p ()))
1315 align
= alignment_for_piecewise_move (STORE_MAX_PIECES
, align
);
1317 /* We would first store what we can in the largest integer mode, then go to
1318 successively smaller modes. */
1321 reverse
<= (HAVE_PRE_DECREMENT
|| HAVE_POST_DECREMENT
);
1325 max_size
= STORE_MAX_PIECES
+ 1;
1326 while (max_size
> 1 && l
> 0)
1328 scalar_int_mode mode
= widest_int_mode_for_size (max_size
);
1330 icode
= optab_handler (mov_optab
, mode
);
1331 if (icode
!= CODE_FOR_nothing
1332 && align
>= GET_MODE_ALIGNMENT (mode
))
1334 unsigned int size
= GET_MODE_SIZE (mode
);
1341 cst
= (*constfun
) (constfundata
, offset
, mode
);
1342 if (!targetm
.legitimate_constant_p (mode
, cst
))
1352 max_size
= GET_MODE_SIZE (mode
);
1355 /* The code above should have handled everything. */
1362 /* Generate several move instructions to store LEN bytes generated by
1363 CONSTFUN to block TO. (A MEM rtx with BLKmode). CONSTFUNDATA is a
1364 pointer which will be passed as argument in every CONSTFUN call.
1365 ALIGN is maximum alignment we can assume. MEMSETP is true if this is
1366 a memset operation and false if it's a copy of a constant string.
1367 If ENDP is 0 return to, if ENDP is 1 return memory at the end ala
1368 mempcpy, and if ENDP is 2 return memory the end minus one byte ala
1372 store_by_pieces (rtx to
, unsigned HOST_WIDE_INT len
,
1373 rtx (*constfun
) (void *, HOST_WIDE_INT
, scalar_int_mode
),
1374 void *constfundata
, unsigned int align
, bool memsetp
, int endp
)
1378 gcc_assert (endp
!= 2);
1382 gcc_assert (targetm
.use_by_pieces_infrastructure_p
1384 memsetp
? SET_BY_PIECES
: STORE_BY_PIECES
,
1385 optimize_insn_for_speed_p ()));
1387 store_by_pieces_d
data (to
, constfun
, constfundata
, len
, align
);
1391 return data
.finish_endp (endp
);
1396 /* Callback routine for clear_by_pieces.
1397 Return const0_rtx unconditionally. */
1400 clear_by_pieces_1 (void *, HOST_WIDE_INT
, scalar_int_mode
)
1405 /* Generate several move instructions to clear LEN bytes of block TO. (A MEM
1406 rtx with BLKmode). ALIGN is maximum alignment we can assume. */
1409 clear_by_pieces (rtx to
, unsigned HOST_WIDE_INT len
, unsigned int align
)
1414 store_by_pieces_d
data (to
, clear_by_pieces_1
, NULL
, len
, align
);
1418 /* Context used by compare_by_pieces_genfn. It stores the fail label
1419 to jump to in case of miscomparison, and for branch ratios greater than 1,
1420 it stores an accumulator and the current and maximum counts before
1421 emitting another branch. */
1423 class compare_by_pieces_d
: public op_by_pieces_d
1425 rtx_code_label
*m_fail_label
;
1427 int m_count
, m_batch
;
1429 void generate (rtx
, rtx
, machine_mode
);
1430 bool prepare_mode (machine_mode
, unsigned int);
1431 void finish_mode (machine_mode
);
1433 compare_by_pieces_d (rtx op0
, rtx op1
, by_pieces_constfn op1_cfn
,
1434 void *op1_cfn_data
, HOST_WIDE_INT len
, int align
,
1435 rtx_code_label
*fail_label
)
1436 : op_by_pieces_d (op0
, true, op1
, true, op1_cfn
, op1_cfn_data
, len
, align
)
1438 m_fail_label
= fail_label
;
1442 /* A callback used when iterating for a compare_by_pieces_operation.
1443 OP0 and OP1 are the values that have been loaded and should be
1444 compared in MODE. DATA holds a pointer to the compare_by_pieces_data
1445 context structure. */
1448 compare_by_pieces_d::generate (rtx op0
, rtx op1
, machine_mode mode
)
1452 rtx temp
= expand_binop (mode
, sub_optab
, op0
, op1
, NULL_RTX
,
1453 true, OPTAB_LIB_WIDEN
);
1455 temp
= expand_binop (mode
, ior_optab
, m_accumulator
, temp
, temp
,
1456 true, OPTAB_LIB_WIDEN
);
1457 m_accumulator
= temp
;
1459 if (++m_count
< m_batch
)
1463 op0
= m_accumulator
;
1465 m_accumulator
= NULL_RTX
;
1467 do_compare_rtx_and_jump (op0
, op1
, NE
, true, mode
, NULL_RTX
, NULL
,
1468 m_fail_label
, profile_probability::uninitialized ());
1471 /* Return true if MODE can be used for a set of moves and comparisons,
1472 given an alignment ALIGN. Prepare whatever data is necessary for
1473 later calls to generate. */
1476 compare_by_pieces_d::prepare_mode (machine_mode mode
, unsigned int align
)
1478 insn_code icode
= optab_handler (mov_optab
, mode
);
1479 if (icode
== CODE_FOR_nothing
1480 || align
< GET_MODE_ALIGNMENT (mode
)
1481 || !can_compare_p (EQ
, mode
, ccp_jump
))
1483 m_batch
= targetm
.compare_by_pieces_branch_ratio (mode
);
1486 m_accumulator
= NULL_RTX
;
1491 /* Called after expanding a series of comparisons in MODE. If we have
1492 accumulated results for which we haven't emitted a branch yet, do
1496 compare_by_pieces_d::finish_mode (machine_mode mode
)
1498 if (m_accumulator
!= NULL_RTX
)
1499 do_compare_rtx_and_jump (m_accumulator
, const0_rtx
, NE
, true, mode
,
1500 NULL_RTX
, NULL
, m_fail_label
,
1501 profile_probability::uninitialized ());
1504 /* Generate several move instructions to compare LEN bytes from blocks
1505 ARG0 and ARG1. (These are MEM rtx's with BLKmode).
1507 If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
1508 used to push FROM to the stack.
1510 ALIGN is maximum stack alignment we can assume.
1512 Optionally, the caller can pass a constfn and associated data in A1_CFN
1513 and A1_CFN_DATA. describing that the second operand being compared is a
1514 known constant and how to obtain its data. */
1517 compare_by_pieces (rtx arg0
, rtx arg1
, unsigned HOST_WIDE_INT len
,
1518 rtx target
, unsigned int align
,
1519 by_pieces_constfn a1_cfn
, void *a1_cfn_data
)
1521 rtx_code_label
*fail_label
= gen_label_rtx ();
1522 rtx_code_label
*end_label
= gen_label_rtx ();
1524 if (target
== NULL_RTX
1525 || !REG_P (target
) || REGNO (target
) < FIRST_PSEUDO_REGISTER
)
1526 target
= gen_reg_rtx (TYPE_MODE (integer_type_node
));
1528 compare_by_pieces_d
data (arg0
, arg1
, a1_cfn
, a1_cfn_data
, len
, align
,
1533 emit_move_insn (target
, const0_rtx
);
1534 emit_jump (end_label
);
1536 emit_label (fail_label
);
1537 emit_move_insn (target
, const1_rtx
);
1538 emit_label (end_label
);
1543 /* Emit code to move a block Y to a block X. This may be done with
1544 string-move instructions, with multiple scalar move instructions,
1545 or with a library call.
1547 Both X and Y must be MEM rtx's (perhaps inside VOLATILE) with mode BLKmode.
1548 SIZE is an rtx that says how long they are.
1549 ALIGN is the maximum alignment we can assume they have.
1550 METHOD describes what kind of copy this is, and what mechanisms may be used.
1551 MIN_SIZE is the minimal size of block to move
1552 MAX_SIZE is the maximal size of block to move, if it can not be represented
1553 in unsigned HOST_WIDE_INT, than it is mask of all ones.
1555 Return the address of the new block, if memcpy is called and returns it,
1559 emit_block_move_hints (rtx x
, rtx y
, rtx size
, enum block_op_methods method
,
1560 unsigned int expected_align
, HOST_WIDE_INT expected_size
,
1561 unsigned HOST_WIDE_INT min_size
,
1562 unsigned HOST_WIDE_INT max_size
,
1563 unsigned HOST_WIDE_INT probable_max_size
)
1570 if (CONST_INT_P (size
) && INTVAL (size
) == 0)
1575 case BLOCK_OP_NORMAL
:
1576 case BLOCK_OP_TAILCALL
:
1577 may_use_call
= true;
1580 case BLOCK_OP_CALL_PARM
:
1581 may_use_call
= block_move_libcall_safe_for_call_parm ();
1583 /* Make inhibit_defer_pop nonzero around the library call
1584 to force it to pop the arguments right away. */
1588 case BLOCK_OP_NO_LIBCALL
:
1589 may_use_call
= false;
1596 gcc_assert (MEM_P (x
) && MEM_P (y
));
1597 align
= MIN (MEM_ALIGN (x
), MEM_ALIGN (y
));
1598 gcc_assert (align
>= BITS_PER_UNIT
);
1600 /* Make sure we've got BLKmode addresses; store_one_arg can decide that
1601 block copy is more efficient for other large modes, e.g. DCmode. */
1602 x
= adjust_address (x
, BLKmode
, 0);
1603 y
= adjust_address (y
, BLKmode
, 0);
1605 /* Set MEM_SIZE as appropriate for this block copy. The main place this
1606 can be incorrect is coming from __builtin_memcpy. */
1607 if (CONST_INT_P (size
))
1609 x
= shallow_copy_rtx (x
);
1610 y
= shallow_copy_rtx (y
);
1611 set_mem_size (x
, INTVAL (size
));
1612 set_mem_size (y
, INTVAL (size
));
1615 if (CONST_INT_P (size
) && can_move_by_pieces (INTVAL (size
), align
))
1616 move_by_pieces (x
, y
, INTVAL (size
), align
, 0);
1617 else if (emit_block_move_via_movmem (x
, y
, size
, align
,
1618 expected_align
, expected_size
,
1619 min_size
, max_size
, probable_max_size
))
1621 else if (may_use_call
1622 && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x
))
1623 && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (y
)))
1625 /* Since x and y are passed to a libcall, mark the corresponding
1626 tree EXPR as addressable. */
1627 tree y_expr
= MEM_EXPR (y
);
1628 tree x_expr
= MEM_EXPR (x
);
1630 mark_addressable (y_expr
);
1632 mark_addressable (x_expr
);
1633 retval
= emit_block_copy_via_libcall (x
, y
, size
,
1634 method
== BLOCK_OP_TAILCALL
);
1638 emit_block_move_via_loop (x
, y
, size
, align
);
1640 if (method
== BLOCK_OP_CALL_PARM
)
1647 emit_block_move (rtx x
, rtx y
, rtx size
, enum block_op_methods method
)
1649 unsigned HOST_WIDE_INT max
, min
= 0;
1650 if (GET_CODE (size
) == CONST_INT
)
1651 min
= max
= UINTVAL (size
);
1653 max
= GET_MODE_MASK (GET_MODE (size
));
1654 return emit_block_move_hints (x
, y
, size
, method
, 0, -1,
1658 /* A subroutine of emit_block_move. Returns true if calling the
1659 block move libcall will not clobber any parameters which may have
1660 already been placed on the stack. */
1663 block_move_libcall_safe_for_call_parm (void)
1665 #if defined (REG_PARM_STACK_SPACE)
1669 /* If arguments are pushed on the stack, then they're safe. */
1673 /* If registers go on the stack anyway, any argument is sure to clobber
1674 an outgoing argument. */
1675 #if defined (REG_PARM_STACK_SPACE)
1676 fn
= builtin_decl_implicit (BUILT_IN_MEMCPY
);
1677 /* Avoid set but not used warning if *REG_PARM_STACK_SPACE doesn't
1678 depend on its argument. */
1680 if (OUTGOING_REG_PARM_STACK_SPACE ((!fn
? NULL_TREE
: TREE_TYPE (fn
)))
1681 && REG_PARM_STACK_SPACE (fn
) != 0)
1685 /* If any argument goes in memory, then it might clobber an outgoing
1688 CUMULATIVE_ARGS args_so_far_v
;
1689 cumulative_args_t args_so_far
;
1692 fn
= builtin_decl_implicit (BUILT_IN_MEMCPY
);
1693 INIT_CUMULATIVE_ARGS (args_so_far_v
, TREE_TYPE (fn
), NULL_RTX
, 0, 3);
1694 args_so_far
= pack_cumulative_args (&args_so_far_v
);
1696 arg
= TYPE_ARG_TYPES (TREE_TYPE (fn
));
1697 for ( ; arg
!= void_list_node
; arg
= TREE_CHAIN (arg
))
1699 machine_mode mode
= TYPE_MODE (TREE_VALUE (arg
));
1700 rtx tmp
= targetm
.calls
.function_arg (args_so_far
, mode
,
1702 if (!tmp
|| !REG_P (tmp
))
1704 if (targetm
.calls
.arg_partial_bytes (args_so_far
, mode
, NULL
, 1))
1706 targetm
.calls
.function_arg_advance (args_so_far
, mode
,
1713 /* A subroutine of emit_block_move. Expand a movmem pattern;
1714 return true if successful. */
1717 emit_block_move_via_movmem (rtx x
, rtx y
, rtx size
, unsigned int align
,
1718 unsigned int expected_align
, HOST_WIDE_INT expected_size
,
1719 unsigned HOST_WIDE_INT min_size
,
1720 unsigned HOST_WIDE_INT max_size
,
1721 unsigned HOST_WIDE_INT probable_max_size
)
1723 int save_volatile_ok
= volatile_ok
;
1725 if (expected_align
< align
)
1726 expected_align
= align
;
1727 if (expected_size
!= -1)
1729 if ((unsigned HOST_WIDE_INT
)expected_size
> probable_max_size
)
1730 expected_size
= probable_max_size
;
1731 if ((unsigned HOST_WIDE_INT
)expected_size
< min_size
)
1732 expected_size
= min_size
;
1735 /* Since this is a move insn, we don't care about volatility. */
1738 /* Try the most limited insn first, because there's no point
1739 including more than one in the machine description unless
1740 the more limited one has some advantage. */
1742 opt_scalar_int_mode mode_iter
;
1743 FOR_EACH_MODE_IN_CLASS (mode_iter
, MODE_INT
)
1745 scalar_int_mode mode
= mode_iter
.require ();
1746 enum insn_code code
= direct_optab_handler (movmem_optab
, mode
);
1748 if (code
!= CODE_FOR_nothing
1749 /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
1750 here because if SIZE is less than the mode mask, as it is
1751 returned by the macro, it will definitely be less than the
1752 actual mode mask. Since SIZE is within the Pmode address
1753 space, we limit MODE to Pmode. */
1754 && ((CONST_INT_P (size
)
1755 && ((unsigned HOST_WIDE_INT
) INTVAL (size
)
1756 <= (GET_MODE_MASK (mode
) >> 1)))
1757 || max_size
<= (GET_MODE_MASK (mode
) >> 1)
1758 || GET_MODE_BITSIZE (mode
) >= GET_MODE_BITSIZE (Pmode
)))
1760 struct expand_operand ops
[9];
1763 /* ??? When called via emit_block_move_for_call, it'd be
1764 nice if there were some way to inform the backend, so
1765 that it doesn't fail the expansion because it thinks
1766 emitting the libcall would be more efficient. */
1767 nops
= insn_data
[(int) code
].n_generator_args
;
1768 gcc_assert (nops
== 4 || nops
== 6 || nops
== 8 || nops
== 9);
1770 create_fixed_operand (&ops
[0], x
);
1771 create_fixed_operand (&ops
[1], y
);
1772 /* The check above guarantees that this size conversion is valid. */
1773 create_convert_operand_to (&ops
[2], size
, mode
, true);
1774 create_integer_operand (&ops
[3], align
/ BITS_PER_UNIT
);
1777 create_integer_operand (&ops
[4], expected_align
/ BITS_PER_UNIT
);
1778 create_integer_operand (&ops
[5], expected_size
);
1782 create_integer_operand (&ops
[6], min_size
);
1783 /* If we can not represent the maximal size,
1784 make parameter NULL. */
1785 if ((HOST_WIDE_INT
) max_size
!= -1)
1786 create_integer_operand (&ops
[7], max_size
);
1788 create_fixed_operand (&ops
[7], NULL
);
1792 /* If we can not represent the maximal size,
1793 make parameter NULL. */
1794 if ((HOST_WIDE_INT
) probable_max_size
!= -1)
1795 create_integer_operand (&ops
[8], probable_max_size
);
1797 create_fixed_operand (&ops
[8], NULL
);
1799 if (maybe_expand_insn (code
, nops
, ops
))
1801 volatile_ok
= save_volatile_ok
;
1807 volatile_ok
= save_volatile_ok
;
1811 /* A subroutine of emit_block_move. Copy the data via an explicit
1812 loop. This is used only when libcalls are forbidden. */
1813 /* ??? It'd be nice to copy in hunks larger than QImode. */
1816 emit_block_move_via_loop (rtx x
, rtx y
, rtx size
,
1817 unsigned int align ATTRIBUTE_UNUSED
)
1819 rtx_code_label
*cmp_label
, *top_label
;
1820 rtx iter
, x_addr
, y_addr
, tmp
;
1821 machine_mode x_addr_mode
= get_address_mode (x
);
1822 machine_mode y_addr_mode
= get_address_mode (y
);
1823 machine_mode iter_mode
;
1825 iter_mode
= GET_MODE (size
);
1826 if (iter_mode
== VOIDmode
)
1827 iter_mode
= word_mode
;
1829 top_label
= gen_label_rtx ();
1830 cmp_label
= gen_label_rtx ();
1831 iter
= gen_reg_rtx (iter_mode
);
1833 emit_move_insn (iter
, const0_rtx
);
1835 x_addr
= force_operand (XEXP (x
, 0), NULL_RTX
);
1836 y_addr
= force_operand (XEXP (y
, 0), NULL_RTX
);
1837 do_pending_stack_adjust ();
1839 emit_jump (cmp_label
);
1840 emit_label (top_label
);
1842 tmp
= convert_modes (x_addr_mode
, iter_mode
, iter
, true);
1843 x_addr
= simplify_gen_binary (PLUS
, x_addr_mode
, x_addr
, tmp
);
1845 if (x_addr_mode
!= y_addr_mode
)
1846 tmp
= convert_modes (y_addr_mode
, iter_mode
, iter
, true);
1847 y_addr
= simplify_gen_binary (PLUS
, y_addr_mode
, y_addr
, tmp
);
1849 x
= change_address (x
, QImode
, x_addr
);
1850 y
= change_address (y
, QImode
, y_addr
);
1852 emit_move_insn (x
, y
);
1854 tmp
= expand_simple_binop (iter_mode
, PLUS
, iter
, const1_rtx
, iter
,
1855 true, OPTAB_LIB_WIDEN
);
1857 emit_move_insn (iter
, tmp
);
1859 emit_label (cmp_label
);
1861 emit_cmp_and_jump_insns (iter
, size
, LT
, NULL_RTX
, iter_mode
,
1863 profile_probability::guessed_always ()
1864 .apply_scale (9, 10));
1867 /* Expand a call to memcpy or memmove or memcmp, and return the result.
1868 TAILCALL is true if this is a tail call. */
1871 emit_block_op_via_libcall (enum built_in_function fncode
, rtx dst
, rtx src
,
1872 rtx size
, bool tailcall
)
1874 rtx dst_addr
, src_addr
;
1875 tree call_expr
, dst_tree
, src_tree
, size_tree
;
1876 machine_mode size_mode
;
1878 dst_addr
= copy_addr_to_reg (XEXP (dst
, 0));
1879 dst_addr
= convert_memory_address (ptr_mode
, dst_addr
);
1880 dst_tree
= make_tree (ptr_type_node
, dst_addr
);
1882 src_addr
= copy_addr_to_reg (XEXP (src
, 0));
1883 src_addr
= convert_memory_address (ptr_mode
, src_addr
);
1884 src_tree
= make_tree (ptr_type_node
, src_addr
);
1886 size_mode
= TYPE_MODE (sizetype
);
1887 size
= convert_to_mode (size_mode
, size
, 1);
1888 size
= copy_to_mode_reg (size_mode
, size
);
1889 size_tree
= make_tree (sizetype
, size
);
1891 /* It is incorrect to use the libcall calling conventions for calls to
1892 memcpy/memmove/memcmp because they can be provided by the user. */
1893 tree fn
= builtin_decl_implicit (fncode
);
1894 call_expr
= build_call_expr (fn
, 3, dst_tree
, src_tree
, size_tree
);
1895 CALL_EXPR_TAILCALL (call_expr
) = tailcall
;
1897 return expand_call (call_expr
, NULL_RTX
, false);
1900 /* Try to expand cmpstrn or cmpmem operation ICODE with the given operands.
1901 ARG3_TYPE is the type of ARG3_RTX. Return the result rtx on success,
1902 otherwise return null. */
1905 expand_cmpstrn_or_cmpmem (insn_code icode
, rtx target
, rtx arg1_rtx
,
1906 rtx arg2_rtx
, tree arg3_type
, rtx arg3_rtx
,
1907 HOST_WIDE_INT align
)
1909 machine_mode insn_mode
= insn_data
[icode
].operand
[0].mode
;
1911 if (target
&& (!REG_P (target
) || HARD_REGISTER_P (target
)))
1914 struct expand_operand ops
[5];
1915 create_output_operand (&ops
[0], target
, insn_mode
);
1916 create_fixed_operand (&ops
[1], arg1_rtx
);
1917 create_fixed_operand (&ops
[2], arg2_rtx
);
1918 create_convert_operand_from (&ops
[3], arg3_rtx
, TYPE_MODE (arg3_type
),
1919 TYPE_UNSIGNED (arg3_type
));
1920 create_integer_operand (&ops
[4], align
);
1921 if (maybe_expand_insn (icode
, 5, ops
))
1922 return ops
[0].value
;
1926 /* Expand a block compare between X and Y with length LEN using the
1927 cmpmem optab, placing the result in TARGET. LEN_TYPE is the type
1928 of the expression that was used to calculate the length. ALIGN
1929 gives the known minimum common alignment. */
1932 emit_block_cmp_via_cmpmem (rtx x
, rtx y
, rtx len
, tree len_type
, rtx target
,
1935 /* Note: The cmpstrnsi pattern, if it exists, is not suitable for
1936 implementing memcmp because it will stop if it encounters two
1938 insn_code icode
= direct_optab_handler (cmpmem_optab
, SImode
);
1940 if (icode
== CODE_FOR_nothing
)
1943 return expand_cmpstrn_or_cmpmem (icode
, target
, x
, y
, len_type
, len
, align
);
1946 /* Emit code to compare a block Y to a block X. This may be done with
1947 string-compare instructions, with multiple scalar instructions,
1948 or with a library call.
1950 Both X and Y must be MEM rtx's. LEN is an rtx that says how long
1951 they are. LEN_TYPE is the type of the expression that was used to
1954 If EQUALITY_ONLY is true, it means we don't have to return the tri-state
1955 value of a normal memcmp call, instead we can just compare for equality.
1956 If FORCE_LIBCALL is true, we should emit a call to memcmp rather than
1959 Optionally, the caller can pass a constfn and associated data in Y_CFN
1960 and Y_CFN_DATA. describing that the second operand being compared is a
1961 known constant and how to obtain its data.
1962 Return the result of the comparison, or NULL_RTX if we failed to
1963 perform the operation. */
1966 emit_block_cmp_hints (rtx x
, rtx y
, rtx len
, tree len_type
, rtx target
,
1967 bool equality_only
, by_pieces_constfn y_cfn
,
1972 if (CONST_INT_P (len
) && INTVAL (len
) == 0)
1975 gcc_assert (MEM_P (x
) && MEM_P (y
));
1976 unsigned int align
= MIN (MEM_ALIGN (x
), MEM_ALIGN (y
));
1977 gcc_assert (align
>= BITS_PER_UNIT
);
1979 x
= adjust_address (x
, BLKmode
, 0);
1980 y
= adjust_address (y
, BLKmode
, 0);
1983 && CONST_INT_P (len
)
1984 && can_do_by_pieces (INTVAL (len
), align
, COMPARE_BY_PIECES
))
1985 result
= compare_by_pieces (x
, y
, INTVAL (len
), target
, align
,
1988 result
= emit_block_cmp_via_cmpmem (x
, y
, len
, len_type
, target
, align
);
1993 /* Copy all or part of a value X into registers starting at REGNO.
1994 The number of registers to be filled is NREGS. */
1997 move_block_to_reg (int regno
, rtx x
, int nregs
, machine_mode mode
)
2002 if (CONSTANT_P (x
) && !targetm
.legitimate_constant_p (mode
, x
))
2003 x
= validize_mem (force_const_mem (mode
, x
));
2005 /* See if the machine can do this with a load multiple insn. */
2006 if (targetm
.have_load_multiple ())
2008 rtx_insn
*last
= get_last_insn ();
2009 rtx first
= gen_rtx_REG (word_mode
, regno
);
2010 if (rtx_insn
*pat
= targetm
.gen_load_multiple (first
, x
,
2017 delete_insns_since (last
);
2020 for (int i
= 0; i
< nregs
; i
++)
2021 emit_move_insn (gen_rtx_REG (word_mode
, regno
+ i
),
2022 operand_subword_force (x
, i
, mode
));
/* Copy all or part of a BLKmode value X out of registers starting at REGNO.
   The number of registers to be filled is NREGS.  */

void
move_block_from_reg (int regno, rtx x, int nregs)
{
  if (nregs == 0)
    return;

  /* See if the machine can do this with a store multiple insn.  */
  if (targetm.have_store_multiple ())
    {
      rtx_insn *last = get_last_insn ();
      rtx first = gen_rtx_REG (word_mode, regno);
      if (rtx_insn *pat = targetm.gen_store_multiple (x, first,
                                                      GEN_INT (nregs)))
        {
          emit_insn (pat);
          return;
        }
      else
        delete_insns_since (last);
    }

  for (int i = 0; i < nregs; i++)
    {
      rtx tem = operand_subword (x, i, 1, BLKmode);

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (word_mode, regno + i));
    }
}
/* Generate a PARALLEL rtx for a new non-consecutive group of registers from
   ORIG, where ORIG is a non-consecutive group of registers represented by
   a PARALLEL.  The clone is identical to the original except in that the
   original set of registers is replaced by a new set of pseudo registers.
   The new set has the same modes as the original set.  */

rtx
gen_group_rtx (rtx orig)
{
  int i, length;
  rtx *tmps;

  gcc_assert (GET_CODE (orig) == PARALLEL);

  length = XVECLEN (orig, 0);
  tmps = XALLOCAVEC (rtx, length);

  /* Skip a NULL entry in first slot.  */
  i = XEXP (XVECEXP (orig, 0, 0), 0) ? 0 : 1;

  if (i)
    tmps[0] = 0;

  for (; i < length; i++)
    {
      machine_mode mode = GET_MODE (XEXP (XVECEXP (orig, 0, i), 0));
      rtx offset = XEXP (XVECEXP (orig, 0, i), 1);

      tmps[i] = gen_rtx_EXPR_LIST (VOIDmode, gen_reg_rtx (mode), offset);
    }

  return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps));
}
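/* For illustration only (not from the original sources): a two-register
   group might look like

     (parallel [(expr_list (reg:DI 3) (const_int 0))
                (expr_list (reg:DI 4) (const_int 8))])

   where each EXPR_LIST pairs a register with its byte offset into the
   containing value.  gen_group_rtx would replace each register above
   with a fresh pseudo of the same mode, keeping the offsets.  */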
2093 /* A subroutine of emit_group_load. Arguments as for emit_group_load,
2094 except that values are placed in TMPS[i], and must later be moved
2095 into corresponding XEXP (XVECEXP (DST, 0, i), 0) element. */
2098 emit_group_load_1 (rtx
*tmps
, rtx dst
, rtx orig_src
, tree type
,
2103 machine_mode m
= GET_MODE (orig_src
);
2105 gcc_assert (GET_CODE (dst
) == PARALLEL
);
2108 && !SCALAR_INT_MODE_P (m
)
2109 && !MEM_P (orig_src
)
2110 && GET_CODE (orig_src
) != CONCAT
)
2112 scalar_int_mode imode
;
2113 if (int_mode_for_mode (GET_MODE (orig_src
)).exists (&imode
))
2115 src
= gen_reg_rtx (imode
);
2116 emit_move_insn (gen_lowpart (GET_MODE (orig_src
), src
), orig_src
);
2120 src
= assign_stack_temp (GET_MODE (orig_src
), ssize
);
2121 emit_move_insn (src
, orig_src
);
2123 emit_group_load_1 (tmps
, dst
, src
, type
, ssize
);
2127 /* Check for a NULL entry, used to indicate that the parameter goes
2128 both on the stack and in registers. */
2129 if (XEXP (XVECEXP (dst
, 0, 0), 0))
2134 /* Process the pieces. */
2135 for (i
= start
; i
< XVECLEN (dst
, 0); i
++)
2137 machine_mode mode
= GET_MODE (XEXP (XVECEXP (dst
, 0, i
), 0));
2138 poly_int64 bytepos
= INTVAL (XEXP (XVECEXP (dst
, 0, i
), 1));
2139 poly_int64 bytelen
= GET_MODE_SIZE (mode
);
2140 poly_int64 shift
= 0;
2142 /* Handle trailing fragments that run over the size of the struct.
2143 It's the target's responsibility to make sure that the fragment
2144 cannot be strictly smaller in some cases and strictly larger
2146 gcc_checking_assert (ordered_p (bytepos
+ bytelen
, ssize
));
2147 if (known_size_p (ssize
) && maybe_gt (bytepos
+ bytelen
, ssize
))
2149 /* Arrange to shift the fragment to where it belongs.
2150 extract_bit_field loads to the lsb of the reg. */
2152 #ifdef BLOCK_REG_PADDING
2153 BLOCK_REG_PADDING (GET_MODE (orig_src
), type
, i
== start
)
2154 == (BYTES_BIG_ENDIAN
? PAD_UPWARD
: PAD_DOWNWARD
)
2159 shift
= (bytelen
- (ssize
- bytepos
)) * BITS_PER_UNIT
;
2160 bytelen
= ssize
- bytepos
;
2161 gcc_assert (maybe_gt (bytelen
, 0));
2164 /* If we won't be loading directly from memory, protect the real source
2165 from strange tricks we might play; but make sure that the source can
2166 be loaded directly into the destination. */
2168 if (!MEM_P (orig_src
)
2169 && (!CONSTANT_P (orig_src
)
2170 || (GET_MODE (orig_src
) != mode
2171 && GET_MODE (orig_src
) != VOIDmode
)))
2173 if (GET_MODE (orig_src
) == VOIDmode
)
2174 src
= gen_reg_rtx (mode
);
2176 src
= gen_reg_rtx (GET_MODE (orig_src
));
2178 emit_move_insn (src
, orig_src
);
2181 /* Optimize the access just a bit. */
2183 && (! targetm
.slow_unaligned_access (mode
, MEM_ALIGN (src
))
2184 || MEM_ALIGN (src
) >= GET_MODE_ALIGNMENT (mode
))
2185 && multiple_p (bytepos
* BITS_PER_UNIT
, GET_MODE_ALIGNMENT (mode
))
2186 && known_eq (bytelen
, GET_MODE_SIZE (mode
)))
2188 tmps
[i
] = gen_reg_rtx (mode
);
2189 emit_move_insn (tmps
[i
], adjust_address (src
, mode
, bytepos
));
2191 else if (COMPLEX_MODE_P (mode
)
2192 && GET_MODE (src
) == mode
2193 && known_eq (bytelen
, GET_MODE_SIZE (mode
)))
2194 /* Let emit_move_complex do the bulk of the work. */
2196 else if (GET_CODE (src
) == CONCAT
)
2198 poly_int64 slen
= GET_MODE_SIZE (GET_MODE (src
));
2199 poly_int64 slen0
= GET_MODE_SIZE (GET_MODE (XEXP (src
, 0)));
2203 if (can_div_trunc_p (bytepos
, slen0
, &elt
, &subpos
)
2204 && known_le (subpos
+ bytelen
, slen0
))
2206 /* The following assumes that the concatenated objects all
2207 have the same size. In this case, a simple calculation
2208 can be used to determine the object and the bit field
2210 tmps
[i
] = XEXP (src
, elt
);
2211 if (maybe_ne (subpos
, 0)
2212 || maybe_ne (subpos
+ bytelen
, slen0
)
2213 || (!CONSTANT_P (tmps
[i
])
2214 && (!REG_P (tmps
[i
]) || GET_MODE (tmps
[i
]) != mode
)))
2215 tmps
[i
] = extract_bit_field (tmps
[i
], bytelen
* BITS_PER_UNIT
,
2216 subpos
* BITS_PER_UNIT
,
2217 1, NULL_RTX
, mode
, mode
, false,
2224 gcc_assert (known_eq (bytepos
, 0));
2225 mem
= assign_stack_temp (GET_MODE (src
), slen
);
2226 emit_move_insn (mem
, src
);
2227 tmps
[i
] = extract_bit_field (mem
, bytelen
* BITS_PER_UNIT
,
2228 0, 1, NULL_RTX
, mode
, mode
, false,
2232 /* FIXME: A SIMD parallel will eventually lead to a subreg of a
2233 SIMD register, which is currently broken. While we get GCC
2234 to emit proper RTL for these cases, let's dump to memory. */
2235 else if (VECTOR_MODE_P (GET_MODE (dst
))
2238 int slen
= GET_MODE_SIZE (GET_MODE (src
));
2241 mem
= assign_stack_temp (GET_MODE (src
), slen
);
2242 emit_move_insn (mem
, src
);
2243 tmps
[i
] = adjust_address (mem
, mode
, bytepos
);
2245 else if (CONSTANT_P (src
) && GET_MODE (dst
) != BLKmode
2246 && XVECLEN (dst
, 0) > 1)
2247 tmps
[i
] = simplify_gen_subreg (mode
, src
, GET_MODE (dst
), bytepos
);
2248 else if (CONSTANT_P (src
))
2250 if (known_eq (bytelen
, ssize
))
2256 /* TODO: const_wide_int can have sizes other than this... */
2257 gcc_assert (known_eq (2 * bytelen
, ssize
));
2258 split_double (src
, &first
, &second
);
2265 else if (REG_P (src
) && GET_MODE (src
) == mode
)
2268 tmps
[i
] = extract_bit_field (src
, bytelen
* BITS_PER_UNIT
,
2269 bytepos
* BITS_PER_UNIT
, 1, NULL_RTX
,
2270 mode
, mode
, false, NULL
);
2272 if (maybe_ne (shift
, 0))
2273 tmps
[i
] = expand_shift (LSHIFT_EXPR
, mode
, tmps
[i
],
/* Emit code to move a block SRC of type TYPE to a block DST,
   where DST is non-consecutive registers represented by a PARALLEL.
   SSIZE represents the total size of block ORIG_SRC in bytes, or -1
   if not known.  */

void
emit_group_load (rtx dst, rtx src, tree type, poly_int64 ssize)
{
  rtx *tmps;
  int i;

  tmps = XALLOCAVEC (rtx, XVECLEN (dst, 0));
  emit_group_load_1 (tmps, dst, src, type, ssize);

  /* Copy the extracted pieces into the proper (probable) hard regs.  */
  for (i = 0; i < XVECLEN (dst, 0); i++)
    {
      rtx d = XEXP (XVECEXP (dst, 0, i), 0);
      if (d == NULL)
        continue;
      emit_move_insn (d, tmps[i]);
    }
}
/* Similar, but load SRC into new pseudos in a format that looks like
   PARALLEL.  This can later be fed to emit_group_move to get things
   in the right place.  */

rtx
emit_group_load_into_temps (rtx parallel, rtx src, tree type, poly_int64 ssize)
{
  rtvec vec;
  int i;

  vec = rtvec_alloc (XVECLEN (parallel, 0));
  emit_group_load_1 (&RTVEC_ELT (vec, 0), parallel, src, type, ssize);

  /* Convert the vector to look just like the original PARALLEL, except
     with the computed values.  */
  for (i = 0; i < XVECLEN (parallel, 0); i++)
    {
      rtx e = XVECEXP (parallel, 0, i);
      rtx d = XEXP (e, 0);

      if (d)
        {
          d = force_reg (GET_MODE (d), RTVEC_ELT (vec, i));
          e = alloc_EXPR_LIST (REG_NOTE_KIND (e), d, XEXP (e, 1));
        }
      RTVEC_ELT (vec, i) = e;
    }

  return gen_rtx_PARALLEL (GET_MODE (parallel), vec);
}
/* Emit code to move a block SRC to block DST, where SRC and DST are
   non-consecutive groups of registers, each represented by a PARALLEL.  */

void
emit_group_move (rtx dst, rtx src)
{
  int i;

  gcc_assert (GET_CODE (src) == PARALLEL
              && GET_CODE (dst) == PARALLEL
              && XVECLEN (src, 0) == XVECLEN (dst, 0));

  /* Skip first entry if NULL.  */
  for (i = XEXP (XVECEXP (src, 0, 0), 0) ? 0 : 1; i < XVECLEN (src, 0); i++)
    emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0),
                    XEXP (XVECEXP (src, 0, i), 0));
}
/* Move a group of registers represented by a PARALLEL into pseudos.  */

rtx
emit_group_move_into_temps (rtx src)
{
  rtvec vec = rtvec_alloc (XVECLEN (src, 0));
  int i;

  for (i = 0; i < XVECLEN (src, 0); i++)
    {
      rtx e = XVECEXP (src, 0, i);
      rtx d = XEXP (e, 0);

      if (d)
        e = alloc_EXPR_LIST (REG_NOTE_KIND (e), copy_to_reg (d), XEXP (e, 1));
      RTVEC_ELT (vec, i) = e;
    }

  return gen_rtx_PARALLEL (GET_MODE (src), vec);
}
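/* Illustrative sketch, not part of the original sources: a hypothetical
   helper that loads SRC into fresh pseudos shaped like the PARALLEL DST
   and then moves them into place, combining emit_group_load_into_temps
   with emit_group_move.  The helper name is made up for illustration.  */

static void
load_group_via_temps_sketch (rtx dst, rtx src, tree type, poly_int64 ssize)
{
  rtx temps = emit_group_load_into_temps (dst, src, type, ssize);
  emit_group_move (dst, temps);
}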
2372 /* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
2373 where SRC is non-consecutive registers represented by a PARALLEL.
2374 SSIZE represents the total size of block ORIG_DST, or -1 if not
2378 emit_group_store (rtx orig_dst
, rtx src
, tree type ATTRIBUTE_UNUSED
,
2382 int start
, finish
, i
;
2383 machine_mode m
= GET_MODE (orig_dst
);
2385 gcc_assert (GET_CODE (src
) == PARALLEL
);
2387 if (!SCALAR_INT_MODE_P (m
)
2388 && !MEM_P (orig_dst
) && GET_CODE (orig_dst
) != CONCAT
)
2390 scalar_int_mode imode
;
2391 if (int_mode_for_mode (GET_MODE (orig_dst
)).exists (&imode
))
2393 dst
= gen_reg_rtx (imode
);
2394 emit_group_store (dst
, src
, type
, ssize
);
2395 dst
= gen_lowpart (GET_MODE (orig_dst
), dst
);
2399 dst
= assign_stack_temp (GET_MODE (orig_dst
), ssize
);
2400 emit_group_store (dst
, src
, type
, ssize
);
2402 emit_move_insn (orig_dst
, dst
);
2406 /* Check for a NULL entry, used to indicate that the parameter goes
2407 both on the stack and in registers. */
2408 if (XEXP (XVECEXP (src
, 0, 0), 0))
2412 finish
= XVECLEN (src
, 0);
2414 tmps
= XALLOCAVEC (rtx
, finish
);
2416 /* Copy the (probable) hard regs into pseudos. */
2417 for (i
= start
; i
< finish
; i
++)
2419 rtx reg
= XEXP (XVECEXP (src
, 0, i
), 0);
2420 if (!REG_P (reg
) || REGNO (reg
) < FIRST_PSEUDO_REGISTER
)
2422 tmps
[i
] = gen_reg_rtx (GET_MODE (reg
));
2423 emit_move_insn (tmps
[i
], reg
);
2429 /* If we won't be storing directly into memory, protect the real destination
2430 from strange tricks we might play. */
2432 if (GET_CODE (dst
) == PARALLEL
)
2436 /* We can get a PARALLEL dst if there is a conditional expression in
2437 a return statement. In that case, the dst and src are the same,
2438 so no action is necessary. */
2439 if (rtx_equal_p (dst
, src
))
2442 /* It is unclear if we can ever reach here, but we may as well handle
2443 it. Allocate a temporary, and split this into a store/load to/from
2445 temp
= assign_stack_temp (GET_MODE (dst
), ssize
);
2446 emit_group_store (temp
, src
, type
, ssize
);
2447 emit_group_load (dst
, temp
, type
, ssize
);
2450 else if (!MEM_P (dst
) && GET_CODE (dst
) != CONCAT
)
2452 machine_mode outer
= GET_MODE (dst
);
2458 if (!REG_P (dst
) || REGNO (dst
) < FIRST_PSEUDO_REGISTER
)
2459 dst
= gen_reg_rtx (outer
);
2461 /* Make life a bit easier for combine. */
2462 /* If the first element of the vector is the low part
2463 of the destination mode, use a paradoxical subreg to
2464 initialize the destination. */
2467 inner
= GET_MODE (tmps
[start
]);
2468 bytepos
= subreg_lowpart_offset (inner
, outer
);
2469 if (known_eq (INTVAL (XEXP (XVECEXP (src
, 0, start
), 1)), bytepos
))
2471 temp
= simplify_gen_subreg (outer
, tmps
[start
],
2475 emit_move_insn (dst
, temp
);
2482 /* If the first element wasn't the low part, try the last. */
2484 && start
< finish
- 1)
2486 inner
= GET_MODE (tmps
[finish
- 1]);
2487 bytepos
= subreg_lowpart_offset (inner
, outer
);
2488 if (known_eq (INTVAL (XEXP (XVECEXP (src
, 0, finish
- 1), 1)),
2491 temp
= simplify_gen_subreg (outer
, tmps
[finish
- 1],
2495 emit_move_insn (dst
, temp
);
2502 /* Otherwise, simply initialize the result to zero. */
2504 emit_move_insn (dst
, CONST0_RTX (outer
));
2507 /* Process the pieces. */
2508 for (i
= start
; i
< finish
; i
++)
2510 poly_int64 bytepos
= INTVAL (XEXP (XVECEXP (src
, 0, i
), 1));
2511 machine_mode mode
= GET_MODE (tmps
[i
]);
2512 poly_int64 bytelen
= GET_MODE_SIZE (mode
);
2513 poly_uint64 adj_bytelen
;
2516 /* Handle trailing fragments that run over the size of the struct.
2517 It's the target's responsibility to make sure that the fragment
2518 cannot be strictly smaller in some cases and strictly larger
2520 gcc_checking_assert (ordered_p (bytepos
+ bytelen
, ssize
));
2521 if (known_size_p (ssize
) && maybe_gt (bytepos
+ bytelen
, ssize
))
2522 adj_bytelen
= ssize
- bytepos
;
2524 adj_bytelen
= bytelen
;
2526 if (GET_CODE (dst
) == CONCAT
)
2528 if (known_le (bytepos
+ adj_bytelen
,
2529 GET_MODE_SIZE (GET_MODE (XEXP (dst
, 0)))))
2530 dest
= XEXP (dst
, 0);
2531 else if (known_ge (bytepos
, GET_MODE_SIZE (GET_MODE (XEXP (dst
, 0)))))
2533 bytepos
-= GET_MODE_SIZE (GET_MODE (XEXP (dst
, 0)));
2534 dest
= XEXP (dst
, 1);
2538 machine_mode dest_mode
= GET_MODE (dest
);
2539 machine_mode tmp_mode
= GET_MODE (tmps
[i
]);
2541 gcc_assert (known_eq (bytepos
, 0) && XVECLEN (src
, 0));
2543 if (GET_MODE_ALIGNMENT (dest_mode
)
2544 >= GET_MODE_ALIGNMENT (tmp_mode
))
2546 dest
= assign_stack_temp (dest_mode
,
2547 GET_MODE_SIZE (dest_mode
));
2548 emit_move_insn (adjust_address (dest
,
2556 dest
= assign_stack_temp (tmp_mode
,
2557 GET_MODE_SIZE (tmp_mode
));
2558 emit_move_insn (dest
, tmps
[i
]);
2559 dst
= adjust_address (dest
, dest_mode
, bytepos
);
2565 /* Handle trailing fragments that run over the size of the struct. */
2566 if (known_size_p (ssize
) && maybe_gt (bytepos
+ bytelen
, ssize
))
2568 /* store_bit_field always takes its value from the lsb.
2569 Move the fragment to the lsb if it's not already there. */
2571 #ifdef BLOCK_REG_PADDING
2572 BLOCK_REG_PADDING (GET_MODE (orig_dst
), type
, i
== start
)
2573 == (BYTES_BIG_ENDIAN
? PAD_UPWARD
: PAD_DOWNWARD
)
2579 poly_int64 shift
= (bytelen
- (ssize
- bytepos
)) * BITS_PER_UNIT
;
2580 tmps
[i
] = expand_shift (RSHIFT_EXPR
, mode
, tmps
[i
],
2584 /* Make sure not to write past the end of the struct. */
2585 store_bit_field (dest
,
2586 adj_bytelen
* BITS_PER_UNIT
, bytepos
* BITS_PER_UNIT
,
2587 bytepos
* BITS_PER_UNIT
, ssize
* BITS_PER_UNIT
- 1,
2588 VOIDmode
, tmps
[i
], false);
2591 /* Optimize the access just a bit. */
2592 else if (MEM_P (dest
)
2593 && (!targetm
.slow_unaligned_access (mode
, MEM_ALIGN (dest
))
2594 || MEM_ALIGN (dest
) >= GET_MODE_ALIGNMENT (mode
))
2595 && multiple_p (bytepos
* BITS_PER_UNIT
,
2596 GET_MODE_ALIGNMENT (mode
))
2597 && known_eq (bytelen
, GET_MODE_SIZE (mode
)))
2598 emit_move_insn (adjust_address (dest
, mode
, bytepos
), tmps
[i
]);
2601 store_bit_field (dest
, bytelen
* BITS_PER_UNIT
, bytepos
* BITS_PER_UNIT
,
2602 0, 0, mode
, tmps
[i
], false);
2605 /* Copy from the pseudo into the (probable) hard reg. */
2606 if (orig_dst
!= dst
)
2607 emit_move_insn (orig_dst
, dst
);
/* Return a form of X that does not use a PARALLEL.  TYPE is the type
   of the value stored in X.  */

rtx
maybe_emit_group_store (rtx x, tree type)
{
  machine_mode mode = TYPE_MODE (type);
  gcc_checking_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode);
  if (GET_CODE (x) == PARALLEL)
    {
      rtx result = gen_reg_rtx (mode);
      emit_group_store (result, x, type, int_size_in_bytes (type));
      return result;
    }
  return x;
}
2627 /* Copy a BLKmode object of TYPE out of a register SRCREG into TARGET.
2629 This is used on targets that return BLKmode values in registers. */
2632 copy_blkmode_from_reg (rtx target
, rtx srcreg
, tree type
)
2634 unsigned HOST_WIDE_INT bytes
= int_size_in_bytes (type
);
2635 rtx src
= NULL
, dst
= NULL
;
2636 unsigned HOST_WIDE_INT bitsize
= MIN (TYPE_ALIGN (type
), BITS_PER_WORD
);
2637 unsigned HOST_WIDE_INT bitpos
, xbitpos
, padding_correction
= 0;
2638 /* No current ABI uses variable-sized modes to pass a BLKmode type. */
2639 fixed_size_mode mode
= as_a
<fixed_size_mode
> (GET_MODE (srcreg
));
2640 fixed_size_mode tmode
= as_a
<fixed_size_mode
> (GET_MODE (target
));
2641 fixed_size_mode copy_mode
;
2643 /* BLKmode registers created in the back-end shouldn't have survived. */
2644 gcc_assert (mode
!= BLKmode
);
2646 /* If the structure doesn't take up a whole number of words, see whether
2647 SRCREG is padded on the left or on the right. If it's on the left,
2648 set PADDING_CORRECTION to the number of bits to skip.
2650 In most ABIs, the structure will be returned at the least end of
2651 the register, which translates to right padding on little-endian
2652 targets and left padding on big-endian targets. The opposite
2653 holds if the structure is returned at the most significant
2654 end of the register. */
2655 if (bytes
% UNITS_PER_WORD
!= 0
2656 && (targetm
.calls
.return_in_msb (type
)
2658 : BYTES_BIG_ENDIAN
))
2660 = (BITS_PER_WORD
- ((bytes
% UNITS_PER_WORD
) * BITS_PER_UNIT
));
2662 /* We can use a single move if we have an exact mode for the size. */
2663 else if (MEM_P (target
)
2664 && (!targetm
.slow_unaligned_access (mode
, MEM_ALIGN (target
))
2665 || MEM_ALIGN (target
) >= GET_MODE_ALIGNMENT (mode
))
2666 && bytes
== GET_MODE_SIZE (mode
))
2668 emit_move_insn (adjust_address (target
, mode
, 0), srcreg
);
2672 /* And if we additionally have the same mode for a register. */
2673 else if (REG_P (target
)
2674 && GET_MODE (target
) == mode
2675 && bytes
== GET_MODE_SIZE (mode
))
2677 emit_move_insn (target
, srcreg
);
2681 /* This code assumes srcreg is at least a full word. If it isn't, copy it
2682 into a new pseudo which is a full word. */
2683 if (GET_MODE_SIZE (mode
) < UNITS_PER_WORD
)
2685 srcreg
= convert_to_mode (word_mode
, srcreg
, TYPE_UNSIGNED (type
));
2689 /* Copy the structure BITSIZE bits at a time. If the target lives in
2690 memory, take care of not reading/writing past its end by selecting
2691 a copy mode suited to BITSIZE. This should always be possible given
2694 If the target lives in register, make sure not to select a copy mode
2695 larger than the mode of the register.
2697 We could probably emit more efficient code for machines which do not use
2698 strict alignment, but it doesn't seem worth the effort at the current
2701 copy_mode
= word_mode
;
2704 opt_scalar_int_mode mem_mode
= int_mode_for_size (bitsize
, 1);
2705 if (mem_mode
.exists ())
2706 copy_mode
= mem_mode
.require ();
2708 else if (REG_P (target
) && GET_MODE_BITSIZE (tmode
) < BITS_PER_WORD
)
2711 for (bitpos
= 0, xbitpos
= padding_correction
;
2712 bitpos
< bytes
* BITS_PER_UNIT
;
2713 bitpos
+= bitsize
, xbitpos
+= bitsize
)
2715 /* We need a new source operand each time xbitpos is on a
2716 word boundary and when xbitpos == padding_correction
2717 (the first time through). */
2718 if (xbitpos
% BITS_PER_WORD
== 0 || xbitpos
== padding_correction
)
2719 src
= operand_subword_force (srcreg
, xbitpos
/ BITS_PER_WORD
, mode
);
2721 /* We need a new destination operand each time bitpos is on
2723 if (REG_P (target
) && GET_MODE_BITSIZE (tmode
) < BITS_PER_WORD
)
2725 else if (bitpos
% BITS_PER_WORD
== 0)
2726 dst
= operand_subword (target
, bitpos
/ BITS_PER_WORD
, 1, tmode
);
2728 /* Use xbitpos for the source extraction (right justified) and
2729 bitpos for the destination store (left justified). */
2730 store_bit_field (dst
, bitsize
, bitpos
% BITS_PER_WORD
, 0, 0, copy_mode
,
2731 extract_bit_field (src
, bitsize
,
2732 xbitpos
% BITS_PER_WORD
, 1,
2733 NULL_RTX
, copy_mode
, copy_mode
,
2739 /* Copy BLKmode value SRC into a register of mode MODE_IN. Return the
2740 register if it contains any data, otherwise return null.
2742 This is used on targets that return BLKmode values in registers. */
2745 copy_blkmode_to_reg (machine_mode mode_in
, tree src
)
2748 unsigned HOST_WIDE_INT bitpos
, xbitpos
, padding_correction
= 0, bytes
;
2749 unsigned int bitsize
;
2750 rtx
*dst_words
, dst
, x
, src_word
= NULL_RTX
, dst_word
= NULL_RTX
;
2751 /* No current ABI uses variable-sized modes to pass a BLKmode type. */
2752 fixed_size_mode mode
= as_a
<fixed_size_mode
> (mode_in
);
2753 fixed_size_mode dst_mode
;
2755 gcc_assert (TYPE_MODE (TREE_TYPE (src
)) == BLKmode
);
2757 x
= expand_normal (src
);
2759 bytes
= arg_int_size_in_bytes (TREE_TYPE (src
));
2763 /* If the structure doesn't take up a whole number of words, see
2764 whether the register value should be padded on the left or on
2765 the right. Set PADDING_CORRECTION to the number of padding
2766 bits needed on the left side.
2768 In most ABIs, the structure will be returned at the least end of
2769 the register, which translates to right padding on little-endian
2770 targets and left padding on big-endian targets. The opposite
2771 holds if the structure is returned at the most significant
2772 end of the register. */
2773 if (bytes
% UNITS_PER_WORD
!= 0
2774 && (targetm
.calls
.return_in_msb (TREE_TYPE (src
))
2776 : BYTES_BIG_ENDIAN
))
2777 padding_correction
= (BITS_PER_WORD
- ((bytes
% UNITS_PER_WORD
)
2780 n_regs
= (bytes
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
2781 dst_words
= XALLOCAVEC (rtx
, n_regs
);
2782 bitsize
= BITS_PER_WORD
;
2783 if (targetm
.slow_unaligned_access (word_mode
, TYPE_ALIGN (TREE_TYPE (src
))))
2784 bitsize
= MIN (TYPE_ALIGN (TREE_TYPE (src
)), BITS_PER_WORD
);
2786 /* Copy the structure BITSIZE bits at a time. */
2787 for (bitpos
= 0, xbitpos
= padding_correction
;
2788 bitpos
< bytes
* BITS_PER_UNIT
;
2789 bitpos
+= bitsize
, xbitpos
+= bitsize
)
2791 /* We need a new destination pseudo each time xbitpos is
2792 on a word boundary and when xbitpos == padding_correction
2793 (the first time through). */
2794 if (xbitpos
% BITS_PER_WORD
== 0
2795 || xbitpos
== padding_correction
)
2797 /* Generate an appropriate register. */
2798 dst_word
= gen_reg_rtx (word_mode
);
2799 dst_words
[xbitpos
/ BITS_PER_WORD
] = dst_word
;
2801 /* Clear the destination before we move anything into it. */
2802 emit_move_insn (dst_word
, CONST0_RTX (word_mode
));
2805 /* We need a new source operand each time bitpos is on a word
2807 if (bitpos
% BITS_PER_WORD
== 0)
2808 src_word
= operand_subword_force (x
, bitpos
/ BITS_PER_WORD
, BLKmode
);
2810 /* Use bitpos for the source extraction (left justified) and
2811 xbitpos for the destination store (right justified). */
2812 store_bit_field (dst_word
, bitsize
, xbitpos
% BITS_PER_WORD
,
2814 extract_bit_field (src_word
, bitsize
,
2815 bitpos
% BITS_PER_WORD
, 1,
2816 NULL_RTX
, word_mode
, word_mode
,
2821 if (mode
== BLKmode
)
2823 /* Find the smallest integer mode large enough to hold the
2824 entire structure. */
2825 opt_scalar_int_mode mode_iter
;
2826 FOR_EACH_MODE_IN_CLASS (mode_iter
, MODE_INT
)
2827 if (GET_MODE_SIZE (mode_iter
.require ()) >= bytes
)
2830 /* A suitable mode should have been found. */
2831 mode
= mode_iter
.require ();
2834 if (GET_MODE_SIZE (mode
) < GET_MODE_SIZE (word_mode
))
2835 dst_mode
= word_mode
;
2838 dst
= gen_reg_rtx (dst_mode
);
2840 for (i
= 0; i
< n_regs
; i
++)
2841 emit_move_insn (operand_subword (dst
, i
, 0, dst_mode
), dst_words
[i
]);
2843 if (mode
!= dst_mode
)
2844 dst
= gen_lowpart (mode
, dst
);
/* Add a USE expression for REG to the (possibly empty) list pointed
   to by CALL_FUSAGE.  REG must denote a hard register.  */

void
use_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
{
  gcc_assert (REG_P (reg));

  if (!HARD_REGISTER_P (reg))
    return;

  *call_fusage
    = gen_rtx_EXPR_LIST (mode, gen_rtx_USE (VOIDmode, reg), *call_fusage);
}

/* Add a CLOBBER expression for REG to the (possibly empty) list pointed
   to by CALL_FUSAGE.  REG must denote a hard register.  */

void
clobber_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
{
  gcc_assert (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER);

  *call_fusage
    = gen_rtx_EXPR_LIST (mode, gen_rtx_CLOBBER (VOIDmode, reg), *call_fusage);
}

/* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs,
   starting at REGNO.  All of these registers must be hard registers.  */

void
use_regs (rtx *call_fusage, int regno, int nregs)
{
  int i;

  gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER);

  for (i = 0; i < nregs; i++)
    use_reg (call_fusage, regno_reg_rtx[regno + i]);
}

/* Add USE expressions to *CALL_FUSAGE for each REG contained in the
   PARALLEL REGS.  This is for calls that pass values in multiple
   non-contiguous locations.  The Irix 6 ABI has examples of this.  */

void
use_group_regs (rtx *call_fusage, rtx regs)
{
  int i;

  for (i = 0; i < XVECLEN (regs, 0); i++)
    {
      rtx reg = XEXP (XVECEXP (regs, 0, i), 0);

      /* A NULL entry means the parameter goes both on the stack and in
         registers.  This can also be a MEM for targets that pass values
         partially on the stack and partially in registers.  */
      if (reg != 0 && REG_P (reg))
        use_reg (call_fusage, reg);
    }
}
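/* Illustrative sketch, not part of the original sources: building up a
   call-usage list for a call whose first argument lives in hard registers
   REGNO through REGNO + NREGS - 1.  A real caller would attach the result
   to the CALL_INSN via CALL_INSN_FUNCTION_USAGE; the helper name here is
   made up for illustration.  */

static rtx
build_call_fusage_sketch (int regno, int nregs)
{
  rtx call_fusage = NULL_RTX;
  use_regs (&call_fusage, regno, nregs);
  return call_fusage;
}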
/* Return the defining gimple statement for SSA_NAME NAME if it is an
   assignment and the code of the expression on the RHS is CODE.  Return
   NULL otherwise.  */

static gimple *
get_def_for_expr (tree name, enum tree_code code)
{
  gimple *def_stmt;

  if (TREE_CODE (name) != SSA_NAME)
    return NULL;

  def_stmt = get_gimple_for_ssa_name (name);
  if (!def_stmt
      || gimple_assign_rhs_code (def_stmt) != code)
    return NULL;

  return def_stmt;
}

/* Return the defining gimple statement for SSA_NAME NAME if it is an
   assignment and the class of the expression on the RHS is CLASS.  Return
   NULL otherwise.  */

static gimple *
get_def_for_expr_class (tree name, enum tree_code_class tclass)
{
  gimple *def_stmt;

  if (TREE_CODE (name) != SSA_NAME)
    return NULL;

  def_stmt = get_gimple_for_ssa_name (name);
  if (!def_stmt
      || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) != tclass)
    return NULL;

  return def_stmt;
}
2951 /* Write zeros through the storage of OBJECT. If OBJECT has BLKmode, SIZE is
2952 its length in bytes. */
2955 clear_storage_hints (rtx object
, rtx size
, enum block_op_methods method
,
2956 unsigned int expected_align
, HOST_WIDE_INT expected_size
,
2957 unsigned HOST_WIDE_INT min_size
,
2958 unsigned HOST_WIDE_INT max_size
,
2959 unsigned HOST_WIDE_INT probable_max_size
)
2961 machine_mode mode
= GET_MODE (object
);
2964 gcc_assert (method
== BLOCK_OP_NORMAL
|| method
== BLOCK_OP_TAILCALL
);
2966 /* If OBJECT is not BLKmode and SIZE is the same size as its mode,
2967 just move a zero. Otherwise, do this a piece at a time. */
2969 && CONST_INT_P (size
)
2970 && INTVAL (size
) == (HOST_WIDE_INT
) GET_MODE_SIZE (mode
))
2972 rtx zero
= CONST0_RTX (mode
);
2975 emit_move_insn (object
, zero
);
2979 if (COMPLEX_MODE_P (mode
))
2981 zero
= CONST0_RTX (GET_MODE_INNER (mode
));
2984 write_complex_part (object
, zero
, 0);
2985 write_complex_part (object
, zero
, 1);
2991 if (size
== const0_rtx
)
2994 align
= MEM_ALIGN (object
);
2996 if (CONST_INT_P (size
)
2997 && targetm
.use_by_pieces_infrastructure_p (INTVAL (size
), align
,
2999 optimize_insn_for_speed_p ()))
3000 clear_by_pieces (object
, INTVAL (size
), align
);
3001 else if (set_storage_via_setmem (object
, size
, const0_rtx
, align
,
3002 expected_align
, expected_size
,
3003 min_size
, max_size
, probable_max_size
))
  else if (ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (object)))
    return set_storage_via_libcall (object, size, const0_rtx,
                                    method == BLOCK_OP_TAILCALL);
  else
    gcc_unreachable ();

  return NULL_RTX;
}

rtx
clear_storage (rtx object, rtx size, enum block_op_methods method)
{
  unsigned HOST_WIDE_INT max, min = 0;
  if (GET_CODE (size) == CONST_INT)
    min = max = UINTVAL (size);
  else
    max = GET_MODE_MASK (GET_MODE (size));
  return clear_storage_hints (object, size, method, 0, -1, min, max, max);
}
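/* Illustrative sketch, not part of the original sources: zeroing a
   BLKmode MEM OBJECT of constant size BYTES, as a typical caller of
   clear_storage might do.  The helper name is made up for illustration.  */

static void
clear_block_sketch (rtx object, HOST_WIDE_INT bytes)
{
  clear_storage (object, gen_int_mode (bytes, Pmode), BLOCK_OP_NORMAL);
}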
/* A subroutine of clear_storage.  Expand a call to memset.
   Return the return value of memset, 0 otherwise.  */

rtx
set_storage_via_libcall (rtx object, rtx size, rtx val, bool tailcall)
{
  tree call_expr, fn, object_tree, size_tree, val_tree;
  machine_mode size_mode;

  object = copy_addr_to_reg (XEXP (object, 0));
  object_tree = make_tree (ptr_type_node, object);

  if (!CONST_INT_P (val))
    val = convert_to_mode (TYPE_MODE (integer_type_node), val, 1);
  val_tree = make_tree (integer_type_node, val);

  size_mode = TYPE_MODE (sizetype);
  size = convert_to_mode (size_mode, size, 1);
  size = copy_to_mode_reg (size_mode, size);
  size_tree = make_tree (sizetype, size);

  /* It is incorrect to use the libcall calling conventions for calls to
     memset because it can be provided by the user.  */
  fn = builtin_decl_implicit (BUILT_IN_MEMSET);
  call_expr = build_call_expr (fn, 3, object_tree, val_tree, size_tree);
  CALL_EXPR_TAILCALL (call_expr) = tailcall;

  return expand_call (call_expr, NULL_RTX, false);
}
3056 /* Expand a setmem pattern; return true if successful. */
3059 set_storage_via_setmem (rtx object
, rtx size
, rtx val
, unsigned int align
,
3060 unsigned int expected_align
, HOST_WIDE_INT expected_size
,
3061 unsigned HOST_WIDE_INT min_size
,
3062 unsigned HOST_WIDE_INT max_size
,
3063 unsigned HOST_WIDE_INT probable_max_size
)
3065 /* Try the most limited insn first, because there's no point
3066 including more than one in the machine description unless
3067 the more limited one has some advantage. */
3069 if (expected_align
< align
)
3070 expected_align
= align
;
3071 if (expected_size
!= -1)
3073 if ((unsigned HOST_WIDE_INT
)expected_size
> max_size
)
3074 expected_size
= max_size
;
3075 if ((unsigned HOST_WIDE_INT
)expected_size
< min_size
)
3076 expected_size
= min_size
;
3079 opt_scalar_int_mode mode_iter
;
3080 FOR_EACH_MODE_IN_CLASS (mode_iter
, MODE_INT
)
3082 scalar_int_mode mode
= mode_iter
.require ();
3083 enum insn_code code
= direct_optab_handler (setmem_optab
, mode
);
3085 if (code
!= CODE_FOR_nothing
3086 /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
3087 here because if SIZE is less than the mode mask, as it is
3088 returned by the macro, it will definitely be less than the
3089 actual mode mask. Since SIZE is within the Pmode address
3090 space, we limit MODE to Pmode. */
3091 && ((CONST_INT_P (size
)
3092 && ((unsigned HOST_WIDE_INT
) INTVAL (size
)
3093 <= (GET_MODE_MASK (mode
) >> 1)))
3094 || max_size
<= (GET_MODE_MASK (mode
) >> 1)
3095 || GET_MODE_BITSIZE (mode
) >= GET_MODE_BITSIZE (Pmode
)))
3097 struct expand_operand ops
[9];
3100 nops
= insn_data
[(int) code
].n_generator_args
;
3101 gcc_assert (nops
== 4 || nops
== 6 || nops
== 8 || nops
== 9);
3103 create_fixed_operand (&ops
[0], object
);
3104 /* The check above guarantees that this size conversion is valid. */
3105 create_convert_operand_to (&ops
[1], size
, mode
, true);
3106 create_convert_operand_from (&ops
[2], val
, byte_mode
, true);
3107 create_integer_operand (&ops
[3], align
/ BITS_PER_UNIT
);
3110 create_integer_operand (&ops
[4], expected_align
/ BITS_PER_UNIT
);
3111 create_integer_operand (&ops
[5], expected_size
);
3115 create_integer_operand (&ops
[6], min_size
);
3116 /* If we can not represent the maximal size,
3117 make parameter NULL. */
3118 if ((HOST_WIDE_INT
) max_size
!= -1)
3119 create_integer_operand (&ops
[7], max_size
);
3121 create_fixed_operand (&ops
[7], NULL
);
3125 /* If we can not represent the maximal size,
3126 make parameter NULL. */
3127 if ((HOST_WIDE_INT
) probable_max_size
!= -1)
3128 create_integer_operand (&ops
[8], probable_max_size
);
3130 create_fixed_operand (&ops
[8], NULL
);
3132 if (maybe_expand_insn (code
, nops
, ops
))
/* Write to one of the components of the complex value CPLX.  Write VAL to
   the real part if IMAG_P is false, and the imaginary part if it's true.  */

void
write_complex_part (rtx cplx, rtx val, bool imag_p)
{
  machine_mode cmode;
  scalar_mode imode;
  unsigned ibitsize;

  if (GET_CODE (cplx) == CONCAT)
    {
      emit_move_insn (XEXP (cplx, imag_p), val);
      return;
    }

  cmode = GET_MODE (cplx);
  imode = GET_MODE_INNER (cmode);
  ibitsize = GET_MODE_BITSIZE (imode);

  /* For MEMs simplify_gen_subreg may generate an invalid new address
     because, e.g., the original address is considered mode-dependent
     by the target, which restricts simplify_subreg from invoking
     adjust_address_nv.  Instead of preparing fallback support for an
     invalid address, we call adjust_address_nv directly.  */
  if (MEM_P (cplx))
    {
      emit_move_insn (adjust_address_nv (cplx, imode,
                                         imag_p ? GET_MODE_SIZE (imode) : 0),
                      val);
      return;
    }

  /* If the sub-object is at least word sized, then we know that subregging
     will work.  This special case is important, since store_bit_field
     wants to operate on integer modes, and there's rarely an OImode to
     correspond to TCmode.  */
  if (ibitsize >= BITS_PER_WORD
      /* For hard regs we have exact predicates.  Assume we can split
         the original object if it spans an even number of hard regs.
         This special case is important for SCmode on 64-bit platforms
         where the natural size of floating-point regs is 32-bit.  */
      || (REG_P (cplx)
          && REGNO (cplx) < FIRST_PSEUDO_REGISTER
          && REG_NREGS (cplx) % 2 == 0))
    {
      rtx part = simplify_gen_subreg (imode, cplx, cmode,
                                      imag_p ? GET_MODE_SIZE (imode) : 0);
      if (part)
        {
          emit_move_insn (part, val);
          return;
        }
      else
        /* simplify_gen_subreg may fail for sub-word MEMs.  */
        gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
    }

  store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, 0, 0, imode, val,
                   false);
}
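/* Illustrative sketch, not part of the original sources: initializing a
   complex pseudo of mode CMODE from two scalar values RE and IM using the
   part accessors above.  The helper name is made up for illustration.  */

static rtx
build_complex_value_sketch (machine_mode cmode, rtx re, rtx im)
{
  rtx c = gen_reg_rtx (cmode);
  write_complex_part (c, re, false);   /* real part */
  write_complex_part (c, im, true);    /* imaginary part */
  return c;
}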
/* Extract one of the components of the complex value CPLX.  Extract the
   real part if IMAG_P is false, and the imaginary part if it's true.  */

rtx
read_complex_part (rtx cplx, bool imag_p)
{
  machine_mode cmode;
  scalar_mode imode;
  unsigned ibitsize;

  if (GET_CODE (cplx) == CONCAT)
    return XEXP (cplx, imag_p);

  cmode = GET_MODE (cplx);
  imode = GET_MODE_INNER (cmode);
  ibitsize = GET_MODE_BITSIZE (imode);

  /* Special case reads from complex constants that got spilled to memory.  */
  if (MEM_P (cplx) && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
    {
      tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
      if (decl && TREE_CODE (decl) == COMPLEX_CST)
        {
          tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
          if (CONSTANT_CLASS_P (part))
            return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
        }
    }

  /* For MEMs simplify_gen_subreg may generate an invalid new address
     because, e.g., the original address is considered mode-dependent
     by the target, which restricts simplify_subreg from invoking
     adjust_address_nv.  Instead of preparing fallback support for an
     invalid address, we call adjust_address_nv directly.  */
  if (MEM_P (cplx))
    return adjust_address_nv (cplx, imode,
                              imag_p ? GET_MODE_SIZE (imode) : 0);

  /* If the sub-object is at least word sized, then we know that subregging
     will work.  This special case is important, since extract_bit_field
     wants to operate on integer modes, and there's rarely an OImode to
     correspond to TCmode.  */
  if (ibitsize >= BITS_PER_WORD
      /* For hard regs we have exact predicates.  Assume we can split
         the original object if it spans an even number of hard regs.
         This special case is important for SCmode on 64-bit platforms
         where the natural size of floating-point regs is 32-bit.  */
      || (REG_P (cplx)
          && REGNO (cplx) < FIRST_PSEUDO_REGISTER
          && REG_NREGS (cplx) % 2 == 0))
    {
      rtx ret = simplify_gen_subreg (imode, cplx, cmode,
                                     imag_p ? GET_MODE_SIZE (imode) : 0);
      if (ret)
        return ret;
      else
        /* simplify_gen_subreg may fail for sub-word MEMs.  */
        gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
    }

  return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
                            true, NULL_RTX, imode, imode, false, NULL);
}
/* A subroutine of emit_move_insn_1.  Yet another lowpart generator.
   NEW_MODE and OLD_MODE are the same size.  Return NULL if X cannot be
   represented in NEW_MODE.  If FORCE is true, this will never happen, as
   we'll force-create a SUBREG if needed.  */

static rtx
emit_move_change_mode (machine_mode new_mode,
                       machine_mode old_mode, rtx x, bool force)
{
  rtx ret;

  if (push_operand (x, GET_MODE (x)))
    {
      ret = gen_rtx_MEM (new_mode, XEXP (x, 0));
      MEM_COPY_ATTRIBUTES (ret, x);
    }
  else if (MEM_P (x))
    {
      /* We don't have to worry about changing the address since the
         size in bytes is supposed to be the same.  */
      if (reload_in_progress)
        {
          /* Copy the MEM to change the mode and move any
             substitutions from the old MEM to the new one.  */
          ret = adjust_address_nv (x, new_mode, 0);
          copy_replacements (x, ret);
        }
      else
        ret = adjust_address (x, new_mode, 0);
    }
  else
    {
      /* Note that we do want simplify_subreg's behavior of validating
         that the new mode is ok for a hard register.  If we were to use
         simplify_gen_subreg, we would create the subreg, but would
         probably run into the target not being able to implement it.  */
      /* Except, of course, when FORCE is true, when this is exactly what
         we want.  Which is needed for CCmodes on some targets.  */
      if (force)
        ret = simplify_gen_subreg (new_mode, x, old_mode, 0);
      else
        ret = simplify_subreg (new_mode, x, old_mode, 0);
    }

  return ret;
}
/* A subroutine of emit_move_insn_1.  Generate a move from Y into X using
   an integer mode of the same size as MODE.  Returns the instruction
   emitted, or NULL if such a move could not be generated.  */

static rtx_insn *
emit_move_via_integer (machine_mode mode, rtx x, rtx y, bool force)
{
  scalar_int_mode imode;
  enum insn_code code;

  /* There must exist a mode of the exact size we require.  */
  if (!int_mode_for_mode (mode).exists (&imode))
    return NULL;

  /* The target must support moves in this mode.  */
  code = optab_handler (mov_optab, imode);
  if (code == CODE_FOR_nothing)
    return NULL;

  x = emit_move_change_mode (imode, mode, x, force);
  if (x == NULL_RTX)
    return NULL;
  y = emit_move_change_mode (imode, mode, y, force);
  if (y == NULL_RTX)
    return NULL;
  return emit_insn (GEN_FCN (code) (x, y));
}
3342 /* A subroutine of emit_move_insn_1. X is a push_operand in MODE.
3343 Return an equivalent MEM that does not use an auto-increment. */
3346 emit_move_resolve_push (machine_mode mode
, rtx x
)
3348 enum rtx_code code
= GET_CODE (XEXP (x
, 0));
3349 HOST_WIDE_INT adjust
;
3352 adjust
= GET_MODE_SIZE (mode
);
3353 #ifdef PUSH_ROUNDING
3354 adjust
= PUSH_ROUNDING (adjust
);
3356 if (code
== PRE_DEC
|| code
== POST_DEC
)
3358 else if (code
== PRE_MODIFY
|| code
== POST_MODIFY
)
3360 rtx expr
= XEXP (XEXP (x
, 0), 1);
3363 gcc_assert (GET_CODE (expr
) == PLUS
|| GET_CODE (expr
) == MINUS
);
3364 gcc_assert (CONST_INT_P (XEXP (expr
, 1)));
3365 val
= INTVAL (XEXP (expr
, 1));
3366 if (GET_CODE (expr
) == MINUS
)
3368 gcc_assert (adjust
== val
|| adjust
== -val
);
3372 /* Do not use anti_adjust_stack, since we don't want to update
3373 stack_pointer_delta. */
3374 temp
= expand_simple_binop (Pmode
, PLUS
, stack_pointer_rtx
,
3375 gen_int_mode (adjust
, Pmode
), stack_pointer_rtx
,
3376 0, OPTAB_LIB_WIDEN
);
3377 if (temp
!= stack_pointer_rtx
)
3378 emit_move_insn (stack_pointer_rtx
, temp
);
3385 temp
= stack_pointer_rtx
;
3390 temp
= plus_constant (Pmode
, stack_pointer_rtx
, -adjust
);
3396 return replace_equiv_address (x
, temp
);
3399 /* A subroutine of emit_move_complex. Generate a move from Y into X.
3400 X is known to satisfy push_operand, and MODE is known to be complex.
3401 Returns the last instruction emitted. */
3404 emit_move_complex_push (machine_mode mode
, rtx x
, rtx y
)
3406 scalar_mode submode
= GET_MODE_INNER (mode
);
3409 #ifdef PUSH_ROUNDING
3410 unsigned int submodesize
= GET_MODE_SIZE (submode
);
3412 /* In case we output to the stack, but the size is smaller than the
3413 machine can push exactly, we need to use move instructions. */
3414 if (PUSH_ROUNDING (submodesize
) != submodesize
)
3416 x
= emit_move_resolve_push (mode
, x
);
3417 return emit_move_insn (x
, y
);
3421 /* Note that the real part always precedes the imag part in memory
3422 regardless of machine's endianness. */
3423 switch (GET_CODE (XEXP (x
, 0)))
3437 emit_move_insn (gen_rtx_MEM (submode
, XEXP (x
, 0)),
3438 read_complex_part (y
, imag_first
));
3439 return emit_move_insn (gen_rtx_MEM (submode
, XEXP (x
, 0)),
3440 read_complex_part (y
, !imag_first
));
/* A subroutine of emit_move_complex.  Perform the move from Y to X
   via two moves of the parts.  Returns the last instruction emitted.  */

rtx_insn *
emit_move_complex_parts (rtx x, rtx y)
{
  /* Show the output dies here.  This is necessary for SUBREGs
     of pseudos since we cannot track their lifetimes correctly;
     hard regs shouldn't appear here except as return values.  */
  if (!reload_completed && !reload_in_progress
      && REG_P (x) && !reg_overlap_mentioned_p (x, y))
    emit_clobber (x);

  write_complex_part (x, read_complex_part (y, false), false);
  write_complex_part (x, read_complex_part (y, true), true);

  return get_last_insn ();
}
3462 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3463 MODE is known to be complex. Returns the last instruction emitted. */
3466 emit_move_complex (machine_mode mode
, rtx x
, rtx y
)
3470 /* Need to take special care for pushes, to maintain proper ordering
3471 of the data, and possibly extra padding. */
3472 if (push_operand (x
, mode
))
3473 return emit_move_complex_push (mode
, x
, y
);
3475 /* See if we can coerce the target into moving both values at once, except
3476 for floating point where we favor moving as parts if this is easy. */
3477 if (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
3478 && optab_handler (mov_optab
, GET_MODE_INNER (mode
)) != CODE_FOR_nothing
3480 && HARD_REGISTER_P (x
)
3481 && REG_NREGS (x
) == 1)
3483 && HARD_REGISTER_P (y
)
3484 && REG_NREGS (y
) == 1))
3486 /* Not possible if the values are inherently not adjacent. */
3487 else if (GET_CODE (x
) == CONCAT
|| GET_CODE (y
) == CONCAT
)
3489 /* Is possible if both are registers (or subregs of registers). */
3490 else if (register_operand (x
, mode
) && register_operand (y
, mode
))
3492 /* If one of the operands is a memory, and alignment constraints
3493 are friendly enough, we may be able to do combined memory operations.
3494 We do not attempt this if Y is a constant because that combination is
3495 usually better with the by-parts thing below. */
3496 else if ((MEM_P (x
) ? !CONSTANT_P (y
) : MEM_P (y
))
3497 && (!STRICT_ALIGNMENT
3498 || get_mode_alignment (mode
) == BIGGEST_ALIGNMENT
))
3507 /* For memory to memory moves, optimal behavior can be had with the
3508 existing block move logic. */
3509 if (MEM_P (x
) && MEM_P (y
))
3511 emit_block_move (x
, y
, GEN_INT (GET_MODE_SIZE (mode
)),
3512 BLOCK_OP_NO_LIBCALL
);
3513 return get_last_insn ();
3516 ret
= emit_move_via_integer (mode
, x
, y
, true);
3521 return emit_move_complex_parts (x
, y
);
/* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
   MODE is known to be MODE_CC.  Returns the last instruction emitted.  */

static rtx_insn *
emit_move_ccmode (machine_mode mode, rtx x, rtx y)
{
  rtx_insn *ret;

  /* Assume all MODE_CC modes are equivalent; if we have movcc, use it.  */
  if (mode != CCmode)
    {
      enum insn_code code = optab_handler (mov_optab, CCmode);
      if (code != CODE_FOR_nothing)
        {
          x = emit_move_change_mode (CCmode, mode, x, true);
          y = emit_move_change_mode (CCmode, mode, y, true);
          return emit_insn (GEN_FCN (code) (x, y));
        }
    }

  /* Otherwise, find the MODE_INT mode of the same width.  */
  ret = emit_move_via_integer (mode, x, y, false);
  gcc_assert (ret != NULL);
  return ret;
}
/* Return true if word I of OP lies entirely in the
   undefined bits of a paradoxical subreg.  */

static bool
undefined_operand_subword_p (const_rtx op, int i)
{
  if (GET_CODE (op) != SUBREG)
    return false;
  machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
  poly_int64 offset = i * UNITS_PER_WORD + subreg_memory_offset (op);
  return (known_ge (offset, GET_MODE_SIZE (innermostmode))
          || known_le (offset, -UNITS_PER_WORD));
}
3564 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3565 MODE is any multi-word or full-word mode that lacks a move_insn
3566 pattern. Note that you will get better code if you define such
3567 patterns, even if they must turn into multiple assembler instructions. */
3570 emit_move_multi_word (machine_mode mode
, rtx x
, rtx y
)
3572 rtx_insn
*last_insn
= 0;
3578 gcc_assert (GET_MODE_SIZE (mode
) >= UNITS_PER_WORD
);
3580 /* If X is a push on the stack, do the push now and replace
3581 X with a reference to the stack pointer. */
3582 if (push_operand (x
, mode
))
3583 x
= emit_move_resolve_push (mode
, x
);
3585 /* If we are in reload, see if either operand is a MEM whose address
3586 is scheduled for replacement. */
3587 if (reload_in_progress
&& MEM_P (x
)
3588 && (inner
= find_replacement (&XEXP (x
, 0))) != XEXP (x
, 0))
3589 x
= replace_equiv_address_nv (x
, inner
);
3590 if (reload_in_progress
&& MEM_P (y
)
3591 && (inner
= find_replacement (&XEXP (y
, 0))) != XEXP (y
, 0))
3592 y
= replace_equiv_address_nv (y
, inner
);
3596 need_clobber
= false;
3598 i
< (GET_MODE_SIZE (mode
) + (UNITS_PER_WORD
- 1)) / UNITS_PER_WORD
;
3601 rtx xpart
= operand_subword (x
, i
, 1, mode
);
3604 /* Do not generate code for a move if it would come entirely
3605 from the undefined bits of a paradoxical subreg. */
3606 if (undefined_operand_subword_p (y
, i
))
3609 ypart
= operand_subword (y
, i
, 1, mode
);
3611 /* If we can't get a part of Y, put Y into memory if it is a
3612 constant. Otherwise, force it into a register. Then we must
3613 be able to get a part of Y. */
3614 if (ypart
== 0 && CONSTANT_P (y
))
3616 y
= use_anchored_address (force_const_mem (mode
, y
));
3617 ypart
= operand_subword (y
, i
, 1, mode
);
3619 else if (ypart
== 0)
3620 ypart
= operand_subword_force (y
, i
, mode
);
3622 gcc_assert (xpart
&& ypart
);
3624 need_clobber
|= (GET_CODE (xpart
) == SUBREG
);
3626 last_insn
= emit_move_insn (xpart
, ypart
);
3632 /* Show the output dies here. This is necessary for SUBREGs
3633 of pseudos since we cannot track their lifetimes correctly;
3634 hard regs shouldn't appear here except as return values.
3635 We never want to emit such a clobber after reload. */
3637 && ! (reload_in_progress
|| reload_completed
)
3638 && need_clobber
!= 0)
/* Low level part of emit_move_insn.
   Called just like emit_move_insn, but assumes X and Y
   are basically valid.  */

rtx_insn *
emit_move_insn_1 (rtx x, rtx y)
{
  machine_mode mode = GET_MODE (x);
  enum insn_code code;

  gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);

  code = optab_handler (mov_optab, mode);
  if (code != CODE_FOR_nothing)
    return emit_insn (GEN_FCN (code) (x, y));

  /* Expand complex moves by moving real part and imag part.  */
  if (COMPLEX_MODE_P (mode))
    return emit_move_complex (mode, x, y);

  if (GET_MODE_CLASS (mode) == MODE_DECIMAL_FLOAT
      || ALL_FIXED_POINT_MODE_P (mode))
    {
      rtx_insn *result = emit_move_via_integer (mode, x, y, true);

      /* If we can't find an integer mode, use multi words.  */
      if (result)
        return result;
      else
        return emit_move_multi_word (mode, x, y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    return emit_move_ccmode (mode, x, y);

  /* Try using a move pattern for the corresponding integer mode.  This is
     only safe when simplify_subreg can convert MODE constants into integer
     constants.  At present, it can only do this reliably if the value
     fits within a HOST_WIDE_INT.  */
  if (!CONSTANT_P (y) || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      rtx_insn *ret = emit_move_via_integer (mode, x, y, lra_in_progress);

      if (ret)
        {
          if (! lra_in_progress || recog (PATTERN (ret), ret, 0) >= 0)
            return ret;
        }
    }

  return emit_move_multi_word (mode, x, y);
}
3699 /* Generate code to copy Y into X.
3700 Both Y and X must have the same mode, except that
3701 Y can be a constant with VOIDmode.
3702 This mode cannot be BLKmode; use emit_block_move for that.
3704 Return the last instruction emitted. */
3707 emit_move_insn (rtx x
, rtx y
)
3709 machine_mode mode
= GET_MODE (x
);
3710 rtx y_cst
= NULL_RTX
;
3711 rtx_insn
*last_insn
;
3714 gcc_assert (mode
!= BLKmode
3715 && (GET_MODE (y
) == mode
|| GET_MODE (y
) == VOIDmode
));
3720 && SCALAR_FLOAT_MODE_P (GET_MODE (x
))
3721 && (last_insn
= compress_float_constant (x
, y
)))
3726 if (!targetm
.legitimate_constant_p (mode
, y
))
3728 y
= force_const_mem (mode
, y
);
3730 /* If the target's cannot_force_const_mem prevented the spill,
3731 assume that the target's move expanders will also take care
3732 of the non-legitimate constant. */
3736 y
= use_anchored_address (y
);
3740 /* If X or Y are memory references, verify that their addresses are valid
3743 && (! memory_address_addr_space_p (GET_MODE (x
), XEXP (x
, 0),
3745 && ! push_operand (x
, GET_MODE (x
))))
3746 x
= validize_mem (x
);
3749 && ! memory_address_addr_space_p (GET_MODE (y
), XEXP (y
, 0),
3750 MEM_ADDR_SPACE (y
)))
3751 y
= validize_mem (y
);
3753 gcc_assert (mode
!= BLKmode
);
3755 last_insn
= emit_move_insn_1 (x
, y
);
3757 if (y_cst
&& REG_P (x
)
3758 && (set
= single_set (last_insn
)) != NULL_RTX
3759 && SET_DEST (set
) == x
3760 && ! rtx_equal_p (y_cst
, SET_SRC (set
)))
3761 set_unique_reg_note (last_insn
, REG_EQUAL
, copy_rtx (y_cst
));
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx_insn *
gen_move_insn (rtx x, rtx y)
{
  rtx_insn *seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}
3781 /* If Y is representable exactly in a narrower mode, and the target can
3782 perform the extension directly from constant or memory, then emit the
3783 move as an extension. */
3786 compress_float_constant (rtx x
, rtx y
)
3788 machine_mode dstmode
= GET_MODE (x
);
3789 machine_mode orig_srcmode
= GET_MODE (y
);
3790 machine_mode srcmode
;
3791 const REAL_VALUE_TYPE
*r
;
3792 int oldcost
, newcost
;
3793 bool speed
= optimize_insn_for_speed_p ();
3795 r
= CONST_DOUBLE_REAL_VALUE (y
);
3797 if (targetm
.legitimate_constant_p (dstmode
, y
))
3798 oldcost
= set_src_cost (y
, orig_srcmode
, speed
);
3800 oldcost
= set_src_cost (force_const_mem (dstmode
, y
), dstmode
, speed
);
3802 FOR_EACH_MODE_UNTIL (srcmode
, orig_srcmode
)
3806 rtx_insn
*last_insn
;
3808 /* Skip if the target can't extend this way. */
3809 ic
= can_extend_p (dstmode
, srcmode
, 0);
3810 if (ic
== CODE_FOR_nothing
)
3813 /* Skip if the narrowed value isn't exact. */
3814 if (! exact_real_truncate (srcmode
, r
))
3817 trunc_y
= const_double_from_real_value (*r
, srcmode
);
3819 if (targetm
.legitimate_constant_p (srcmode
, trunc_y
))
3821 /* Skip if the target needs extra instructions to perform
3823 if (!insn_operand_matches (ic
, 1, trunc_y
))
3825 /* This is valid, but may not be cheaper than the original. */
3826 newcost
= set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode
, trunc_y
),
3828 if (oldcost
< newcost
)
3831 else if (float_extend_from_mem
[dstmode
][srcmode
])
3833 trunc_y
= force_const_mem (srcmode
, trunc_y
);
3834 /* This is valid, but may not be cheaper than the original. */
3835 newcost
= set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode
, trunc_y
),
3837 if (oldcost
< newcost
)
3839 trunc_y
= validize_mem (trunc_y
);
3844 /* For CSE's benefit, force the compressed constant pool entry
3845 into a new pseudo. This constant may be used in different modes,
3846 and if not, combine will put things back together for us. */
3847 trunc_y
= force_reg (srcmode
, trunc_y
);
3849 /* If x is a hard register, perform the extension into a pseudo,
3850 so that e.g. stack realignment code is aware of it. */
3852 if (REG_P (x
) && HARD_REGISTER_P (x
))
3853 target
= gen_reg_rtx (dstmode
);
3855 emit_unop_insn (ic
, target
, trunc_y
, UNKNOWN
);
3856 last_insn
= get_last_insn ();
3859 set_unique_reg_note (last_insn
, REG_EQUAL
, y
);
3862 return emit_move_insn (x
, target
);
3869 /* Pushing data onto the stack. */
3871 /* Push a block of length SIZE (perhaps variable)
3872 and return an rtx to address the beginning of the block.
3873 The value may be virtual_outgoing_args_rtx.
3875 EXTRA is the number of bytes of padding to push in addition to SIZE.
3876 BELOW nonzero means this padding comes at low addresses;
3877 otherwise, the padding comes at high addresses. */
3880 push_block (rtx size
, poly_int64 extra
, int below
)
3884 size
= convert_modes (Pmode
, ptr_mode
, size
, 1);
3885 if (CONSTANT_P (size
))
3886 anti_adjust_stack (plus_constant (Pmode
, size
, extra
));
3887 else if (REG_P (size
) && known_eq (extra
, 0))
3888 anti_adjust_stack (size
);
3891 temp
= copy_to_mode_reg (Pmode
, size
);
3892 if (maybe_ne (extra
, 0))
3893 temp
= expand_binop (Pmode
, add_optab
, temp
,
3894 gen_int_mode (extra
, Pmode
),
3895 temp
, 0, OPTAB_LIB_WIDEN
);
3896 anti_adjust_stack (temp
);
3899 if (STACK_GROWS_DOWNWARD
)
3901 temp
= virtual_outgoing_args_rtx
;
3902 if (maybe_ne (extra
, 0) && below
)
3903 temp
= plus_constant (Pmode
, temp
, extra
);
3907 if (CONST_INT_P (size
))
3908 temp
= plus_constant (Pmode
, virtual_outgoing_args_rtx
,
3909 -INTVAL (size
) - (below
? 0 : extra
));
3910 else if (maybe_ne (extra
, 0) && !below
)
3911 temp
= gen_rtx_PLUS (Pmode
, virtual_outgoing_args_rtx
,
3912 negate_rtx (Pmode
, plus_constant (Pmode
, size
,
3915 temp
= gen_rtx_PLUS (Pmode
, virtual_outgoing_args_rtx
,
3916 negate_rtx (Pmode
, size
));
3919 return memory_address (NARROWEST_INT_MODE
, temp
);
/* A utility routine that returns the base of an auto-inc memory, or NULL.  */

static rtx
mem_autoinc_base (rtx mem)
{
  if (MEM_P (mem))
    {
      rtx addr = XEXP (mem, 0);
      if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
        return XEXP (addr, 0);
    }
  return NULL;
}
, 0);
3936 /* A utility routine used here, in reload, and in try_split. The insns
3937 after PREV up to and including LAST are known to adjust the stack,
3938 with a final value of END_ARGS_SIZE. Iterate backward from LAST
3939 placing notes as appropriate. PREV may be NULL, indicating the
3940 entire insn sequence prior to LAST should be scanned.
3942 The set of allowed stack pointer modifications is small:
3943 (1) One or more auto-inc style memory references (aka pushes),
3944 (2) One or more addition/subtraction with the SP as destination,
3945 (3) A single move insn with the SP as destination,
3946 (4) A call_pop insn,
3947 (5) Noreturn call insns if !ACCUMULATE_OUTGOING_ARGS.
3949 Insns in the sequence that do not modify the SP are ignored,
3950 except for noreturn calls.
3952 The return value is the amount of adjustment that can be trivially
3953 verified, via immediate operand or auto-inc. If the adjustment
3954 cannot be trivially extracted, the return value is HOST_WIDE_INT_MIN. */
poly_int64
find_args_size_adjust (rtx_insn *insn)
{
  rtx dest, set, pat;
  int i;

  pat = PATTERN (insn);
  set = NULL;

  /* Look for a call_pop pattern.  */
  if (CALL_P (insn))
    {
      /* We have to allow non-call_pop patterns for the case
	 of emit_single_push_insn of a TLS address.  */
      if (GET_CODE (pat) != PARALLEL)
	return 0;

      /* All call_pop have a stack pointer adjust in the parallel.
	 The call itself is always first, and the stack adjust is
	 usually last, so search from the end.  */
      for (i = XVECLEN (pat, 0) - 1; i > 0; --i)
	{
	  set = XVECEXP (pat, 0, i);
	  if (GET_CODE (set) != SET)
	    continue;
	  dest = SET_DEST (set);
	  if (dest == stack_pointer_rtx)
	    break;
	}
      /* We'd better have found the stack pointer adjust.  */
      if (i == 0)
	return 0;
      /* Fall through to process the extracted SET and DEST
	 as if it was a standalone insn.  */
    }
  else if (GET_CODE (pat) == SET)
    set = pat;
  else if ((set = single_set (insn)) != NULL)
    ;
  else if (GET_CODE (pat) == PARALLEL)
    {
      /* ??? Some older ports use a parallel with a stack adjust
	 and a store for a PUSH_ROUNDING pattern, rather than a
	 PRE/POST_MODIFY rtx.  Don't force them to update yet...  */
      /* ??? See h8300 and m68k, pushqi1.  */
      for (i = XVECLEN (pat, 0) - 1; i >= 0; --i)
	{
	  set = XVECEXP (pat, 0, i);
	  if (GET_CODE (set) != SET)
	    continue;
	  dest = SET_DEST (set);
	  if (dest == stack_pointer_rtx)
	    break;

	  /* We do not expect an auto-inc of the sp in the parallel.  */
	  gcc_checking_assert (mem_autoinc_base (dest) != stack_pointer_rtx);
	  gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
			       != stack_pointer_rtx);
	}
      if (i < 0)
	return 0;
    }
  else
    return 0;

  dest = SET_DEST (set);

  /* Look for direct modifications of the stack pointer.  */
  if (REG_P (dest) && REGNO (dest) == STACK_POINTER_REGNUM)
    {
      /* Look for a trivial adjustment, otherwise assume nothing.  */
      /* Note that the SPU restore_stack_block pattern refers to
	 the stack pointer in V4SImode.  Consider that non-trivial.  */
      if (SCALAR_INT_MODE_P (GET_MODE (dest))
	  && GET_CODE (SET_SRC (set)) == PLUS
	  && XEXP (SET_SRC (set), 0) == stack_pointer_rtx
	  && CONST_INT_P (XEXP (SET_SRC (set), 1)))
	return INTVAL (XEXP (SET_SRC (set), 1));
      /* ??? Reload can generate no-op moves, which will be cleaned
	 up later.  Recognize it and continue searching.  */
      else if (rtx_equal_p (dest, SET_SRC (set)))
	return 0;
      else
	return HOST_WIDE_INT_MIN;
    }
  else
    {
      rtx mem, addr;

      /* Otherwise only think about autoinc patterns.  */
      if (mem_autoinc_base (dest) == stack_pointer_rtx)
	{
	  mem = dest;
	  gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
			       != stack_pointer_rtx);
	}
      else if (mem_autoinc_base (SET_SRC (set)) == stack_pointer_rtx)
	mem = SET_SRC (set);
      else
	return 0;

      addr = XEXP (mem, 0);
      switch (GET_CODE (addr))
	{
	case PRE_INC:
	case POST_INC:
	  return GET_MODE_SIZE (GET_MODE (mem));
	case PRE_DEC:
	case POST_DEC:
	  return -GET_MODE_SIZE (GET_MODE (mem));
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can only handle constant-offset modifications here.  */
	  addr = XEXP (addr, 1);
	  gcc_assert (GET_CODE (addr) == PLUS);
	  gcc_assert (XEXP (addr, 0) == stack_pointer_rtx);
	  gcc_assert (CONST_INT_P (XEXP (addr, 1)));
	  return INTVAL (XEXP (addr, 1));
	default:
	  gcc_unreachable ();
	}
    }
}
poly_int64
fixup_args_size_notes (rtx_insn *prev, rtx_insn *last,
		       poly_int64 end_args_size)
{
  poly_int64 args_size = end_args_size;
  bool saw_unknown = false;
  rtx_insn *insn;

  for (insn = last; insn != prev; insn = PREV_INSN (insn))
    {
      if (!NONDEBUG_INSN_P (insn))
	continue;

      poly_int64 this_delta = find_args_size_adjust (insn);
      if (known_eq (this_delta, 0))
	{
	  if (!CALL_P (insn)
	      || ACCUMULATE_OUTGOING_ARGS
	      || find_reg_note (insn, REG_NORETURN, NULL_RTX) == NULL_RTX)
	    continue;
	}

      gcc_assert (!saw_unknown);
      if (known_eq (this_delta, HOST_WIDE_INT_MIN))
	saw_unknown = true;

      add_args_size_note (insn, args_size);
      if (STACK_GROWS_DOWNWARD)
	this_delta = -poly_uint64 (this_delta);

      if (saw_unknown)
	args_size = HOST_WIDE_INT_MIN;
      else
	args_size -= this_delta;
    }

  return args_size;
}
#ifdef PUSH_ROUNDING
/* Emit single push insn.  */

static void
emit_single_push_insn_1 (machine_mode mode, rtx x, tree type)
{
  rtx dest_addr;
  unsigned rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
  rtx dest;
  enum insn_code icode;

  stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
  /* If there is push pattern, use it.  Otherwise try old way of throwing
     MEM representing push operation to move expander.  */
  icode = optab_handler (push_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[1];

      create_input_operand (&ops[0], x, mode);
      if (maybe_expand_insn (icode, 1, ops))
	return;
    }
  if (GET_MODE_SIZE (mode) == rounded_size)
    dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
  /* If we are to pad downward, adjust the stack pointer first and
     then store X into the stack location using an offset.  This is
     because emit_move_insn does not know how to pad; it does not have
     access to type.  */
  else if (targetm.calls.function_arg_padding (mode, type) == PAD_DOWNWARD)
    {
      unsigned padding_size = rounded_size - GET_MODE_SIZE (mode);
      HOST_WIDE_INT offset;

      emit_move_insn (stack_pointer_rtx,
		      expand_binop (Pmode,
				    STACK_GROWS_DOWNWARD ? sub_optab
				    : add_optab,
				    stack_pointer_rtx,
				    gen_int_mode (rounded_size, Pmode),
				    NULL_RTX, 0, OPTAB_LIB_WIDEN));

      offset = (HOST_WIDE_INT) padding_size;
      if (STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_DEC)
	/* We have already decremented the stack pointer, so get the
	   previous value.  */
	offset += (HOST_WIDE_INT) rounded_size;

      if (!STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_INC)
	/* We have already incremented the stack pointer, so get the
	   previous value.  */
	offset -= (HOST_WIDE_INT) rounded_size;

      dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
				gen_int_mode (offset, Pmode));
    }
  else
    {
      if (STACK_GROWS_DOWNWARD)
	/* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC.  */
	dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
				  gen_int_mode (-(HOST_WIDE_INT) rounded_size,
						Pmode));
      else
	/* ??? This seems wrong if STACK_PUSH_CODE == POST_INC.  */
	dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
				  gen_int_mode (rounded_size, Pmode));

      dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
    }

  dest = gen_rtx_MEM (mode, dest_addr);

  if (type != 0)
    {
      set_mem_attributes (dest, type, 1);

      if (cfun->tail_call_marked)
	/* Function incoming arguments may overlap with sibling call
	   outgoing arguments and we cannot allow reordering of reads
	   from function arguments with stores to outgoing arguments
	   of sibling calls.  */
	set_mem_alias_set (dest, 0);
    }
  emit_move_insn (dest, x);
}
/* Emit and annotate a single push insn.  */

static void
emit_single_push_insn (machine_mode mode, rtx x, tree type)
{
  poly_int64 delta, old_delta = stack_pointer_delta;
  rtx_insn *prev = get_last_insn ();
  rtx_insn *last;

  emit_single_push_insn_1 (mode, x, type);

  last = get_last_insn ();

  /* Notice the common case where we emitted exactly one insn.  */
  if (PREV_INSN (last) == prev)
    {
      add_args_size_note (last, stack_pointer_delta);
      return;
    }

  delta = fixup_args_size_notes (prev, last, stack_pointer_delta);
  gcc_assert (known_eq (delta, HOST_WIDE_INT_MIN)
	      || known_eq (delta, old_delta));
}
#endif
/* If reading SIZE bytes from X will end up reading from
   Y return the number of bytes that overlap.  Return -1
   if there is no overlap or -2 if we can't determine
   (for example when X and Y have different base registers).  */

static int
memory_load_overlap (rtx x, rtx y, HOST_WIDE_INT size)
{
  rtx tmp = plus_constant (Pmode, x, size);
  rtx sub = simplify_gen_binary (MINUS, Pmode, tmp, y);

  if (!CONST_INT_P (sub))
    return -2;

  HOST_WIDE_INT val = INTVAL (sub);

  return IN_RANGE (val, 1, size) ? val : -1;
}
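/* For example, if X is (plus (reg R) (const_int 8)), Y is
   (plus (reg R) (const_int 12)) and SIZE is 8, the read covers bytes
   [R+8, R+16) and TMP - Y simplifies to (const_int 4), so 4 overlapping
   bytes are reported.  With different base registers the subtraction does
   not simplify to a constant and -2 is returned.  */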
/* Generate code to push X onto the stack, assuming it has mode MODE and
   type TYPE.
   MODE is redundant except when X is a CONST_INT (since they don't
   carry mode info).
   SIZE is an rtx for the size of data to be copied (in bytes),
   needed only if X is BLKmode.
   Return true if successful.  May return false if asked to push a
   partial argument during a sibcall optimization (as specified by
   SIBCALL_P) and the incoming and outgoing pointers cannot be shown
   not to overlap.

   ALIGN (in bits) is maximum alignment we can assume.

   If PARTIAL and REG are both nonzero, then copy that many of the first
   bytes of X into registers starting with REG, and push the rest of X.
   The amount of space pushed is decreased by PARTIAL bytes.
   REG must be a hard register in this case.
   If REG is zero but PARTIAL is not, take all other actions for an
   argument partially in registers, but do not actually load any
   registers.

   EXTRA is the amount in bytes of extra space to leave next to this arg.
   This is ignored if an argument block has already been allocated.

   On a machine that lacks real push insns, ARGS_ADDR is the address of
   the bottom of the argument block for this call.  We use indexing off there
   to store the arg.  On machines with push insns, ARGS_ADDR is 0 when an
   argument block has not been preallocated.

   ARGS_SO_FAR is the size of args previously pushed for this call.

   REG_PARM_STACK_SPACE is nonzero if functions require stack space
   for arguments passed in registers.  If nonzero, it will be the number
   of bytes required.  */
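/* For instance, on a target with 4-byte words, PARTIAL == 8 with a nonzero
   hard register REG means the first two words of X are moved into REG and
   the next consecutive hard register, only the remaining bytes of X are
   pushed, and the stack space consumed shrinks by those 8 bytes.  */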
bool
emit_push_insn (rtx x, machine_mode mode, tree type, rtx size,
		unsigned int align, int partial, rtx reg, poly_int64 extra,
		rtx args_addr, rtx args_so_far, int reg_parm_stack_space,
		rtx alignment_pad, bool sibcall_p)
{
  rtx xinner;
  pad_direction stack_direction
    = STACK_GROWS_DOWNWARD ? PAD_DOWNWARD : PAD_UPWARD;

  /* Decide where to pad the argument: PAD_DOWNWARD for below,
     PAD_UPWARD for above, or PAD_NONE for don't pad it.
     Default is below for small data on big-endian machines; else above.  */
  pad_direction where_pad = targetm.calls.function_arg_padding (mode, type);

  /* Invert direction if stack is post-decrement.
     FIXME: why?  */
  if (STACK_PUSH_CODE == POST_DEC)
    if (where_pad != PAD_NONE)
      where_pad = (where_pad == PAD_DOWNWARD ? PAD_UPWARD : PAD_DOWNWARD);

  xinner = x;

  int nregs = partial / UNITS_PER_WORD;
  rtx *tmp_regs = NULL;
  int overlapping = 0;

  if (mode == BLKmode
      || (STRICT_ALIGNMENT && align < GET_MODE_ALIGNMENT (mode)))
    {
4315 /* Copy a block into the stack, entirely or partially. */
4322 offset
= partial
% (PARM_BOUNDARY
/ BITS_PER_UNIT
);
4323 used
= partial
- offset
;
4325 if (mode
!= BLKmode
)
4327 /* A value is to be stored in an insufficiently aligned
4328 stack slot; copy via a suitably aligned slot if
4330 size
= GEN_INT (GET_MODE_SIZE (mode
));
4331 if (!MEM_P (xinner
))
4333 temp
= assign_temp (type
, 1, 1);
4334 emit_move_insn (temp
, xinner
);
4341 /* USED is now the # of bytes we need not copy to the stack
4342 because registers will take care of them. */
4345 xinner
= adjust_address (xinner
, BLKmode
, used
);
4347 /* If the partial register-part of the arg counts in its stack size,
4348 skip the part of stack space corresponding to the registers.
4349 Otherwise, start copying to the beginning of the stack space,
4350 by setting SKIP to 0. */
4351 skip
= (reg_parm_stack_space
== 0) ? 0 : used
;
4353 #ifdef PUSH_ROUNDING
4354 /* Do it with several push insns if that doesn't take lots of insns
4355 and if there is no difficulty with push insns that skip bytes
4356 on the stack for alignment purposes. */
4359 && CONST_INT_P (size
)
4361 && MEM_ALIGN (xinner
) >= align
4362 && can_move_by_pieces ((unsigned) INTVAL (size
) - used
, align
)
4363 /* Here we avoid the case of a structure whose weak alignment
4364 forces many pushes of a small amount of data,
4365 and such small pushes do rounding that causes trouble. */
4366 && ((!targetm
.slow_unaligned_access (word_mode
, align
))
4367 || align
>= BIGGEST_ALIGNMENT
4368 || (PUSH_ROUNDING (align
/ BITS_PER_UNIT
)
4369 == (align
/ BITS_PER_UNIT
)))
4370 && (HOST_WIDE_INT
) PUSH_ROUNDING (INTVAL (size
)) == INTVAL (size
))
4372 /* Push padding now if padding above and stack grows down,
4373 or if padding below and stack grows up.
4374 But if space already allocated, this has already been done. */
4375 if (maybe_ne (extra
, 0)
4377 && where_pad
!= PAD_NONE
4378 && where_pad
!= stack_direction
)
4379 anti_adjust_stack (gen_int_mode (extra
, Pmode
));
4381 move_by_pieces (NULL
, xinner
, INTVAL (size
) - used
, align
, 0);
4384 #endif /* PUSH_ROUNDING */
4388 /* Otherwise make space on the stack and copy the data
4389 to the address of that space. */
4391 /* Deduct words put into registers from the size we must copy. */
4394 if (CONST_INT_P (size
))
4395 size
= GEN_INT (INTVAL (size
) - used
);
4397 size
= expand_binop (GET_MODE (size
), sub_optab
, size
,
4398 gen_int_mode (used
, GET_MODE (size
)),
4399 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4402 /* Get the address of the stack space.
4403 In this case, we do not deal with EXTRA separately.
4404 A single stack adjust will do. */
4407 temp
= push_block (size
, extra
, where_pad
== PAD_DOWNWARD
);
4410 else if (CONST_INT_P (args_so_far
))
4411 temp
= memory_address (BLKmode
,
4412 plus_constant (Pmode
, args_addr
,
4413 skip
+ INTVAL (args_so_far
)));
4415 temp
= memory_address (BLKmode
,
4416 plus_constant (Pmode
,
4417 gen_rtx_PLUS (Pmode
,
4422 if (!ACCUMULATE_OUTGOING_ARGS
)
4424 /* If the source is referenced relative to the stack pointer,
4425 copy it to another register to stabilize it. We do not need
4426 to do this if we know that we won't be changing sp. */
4428 if (reg_mentioned_p (virtual_stack_dynamic_rtx
, temp
)
4429 || reg_mentioned_p (virtual_outgoing_args_rtx
, temp
))
4430 temp
= copy_to_reg (temp
);
4433 target
= gen_rtx_MEM (BLKmode
, temp
);
4435 /* We do *not* set_mem_attributes here, because incoming arguments
4436 may overlap with sibling call outgoing arguments and we cannot
4437 allow reordering of reads from function arguments with stores
4438 to outgoing arguments of sibling calls. We do, however, want
4439 to record the alignment of the stack slot. */
4440 /* ALIGN may well be better aligned than TYPE, e.g. due to
4441 PARM_BOUNDARY. Assume the caller isn't lying. */
4442 set_mem_align (target
, align
);
4444 /* If part should go in registers and pushing to that part would
4445 overwrite some of the values that need to go into regs, load the
4446 overlapping values into temporary pseudos to be moved into the hard
4447 regs at the end after the stack pushing has completed.
4448 We cannot load them directly into the hard regs here because
4449 they can be clobbered by the block move expansions.
4452 if (partial
> 0 && reg
!= 0 && mode
== BLKmode
4453 && GET_CODE (reg
) != PARALLEL
)
4455 overlapping
= memory_load_overlap (XEXP (x
, 0), temp
, partial
);
4456 if (overlapping
> 0)
4458 gcc_assert (overlapping
% UNITS_PER_WORD
== 0);
4459 overlapping
/= UNITS_PER_WORD
;
4461 tmp_regs
= XALLOCAVEC (rtx
, overlapping
);
4463 for (int i
= 0; i
< overlapping
; i
++)
4464 tmp_regs
[i
] = gen_reg_rtx (word_mode
);
4466 for (int i
= 0; i
< overlapping
; i
++)
4467 emit_move_insn (tmp_regs
[i
],
4468 operand_subword_force (target
, i
, mode
));
4470 else if (overlapping
== -1)
4472 /* Could not determine whether there is overlap.
4473 Fail the sibcall. */
4481 emit_block_move (target
, xinner
, size
, BLOCK_OP_CALL_PARM
);
4484 else if (partial
> 0)
4486 /* Scalar partly in registers. */
4488 int size
= GET_MODE_SIZE (mode
) / UNITS_PER_WORD
;
4491 /* # bytes of start of argument
4492 that we must make space for but need not store. */
4493 int offset
= partial
% (PARM_BOUNDARY
/ BITS_PER_UNIT
);
4494 int args_offset
= INTVAL (args_so_far
);
4497 /* Push padding now if padding above and stack grows down,
4498 or if padding below and stack grows up.
4499 But if space already allocated, this has already been done. */
4500 if (maybe_ne (extra
, 0)
4502 && where_pad
!= PAD_NONE
4503 && where_pad
!= stack_direction
)
4504 anti_adjust_stack (gen_int_mode (extra
, Pmode
));
4506 /* If we make space by pushing it, we might as well push
4507 the real data. Otherwise, we can leave OFFSET nonzero
4508 and leave the space uninitialized. */
4512 /* Now NOT_STACK gets the number of words that we don't need to
4513 allocate on the stack. Convert OFFSET to words too. */
4514 not_stack
= (partial
- offset
) / UNITS_PER_WORD
;
4515 offset
/= UNITS_PER_WORD
;
4517 /* If the partial register-part of the arg counts in its stack size,
4518 skip the part of stack space corresponding to the registers.
4519 Otherwise, start copying to the beginning of the stack space,
4520 by setting SKIP to 0. */
4521 skip
= (reg_parm_stack_space
== 0) ? 0 : not_stack
;
4523 if (CONSTANT_P (x
) && !targetm
.legitimate_constant_p (mode
, x
))
4524 x
= validize_mem (force_const_mem (mode
, x
));
4526 /* If X is a hard register in a non-integer mode, copy it into a pseudo;
4527 SUBREGs of such registers are not allowed. */
4528 if ((REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
4529 && GET_MODE_CLASS (GET_MODE (x
)) != MODE_INT
))
4530 x
= copy_to_reg (x
);
4532 /* Loop over all the words allocated on the stack for this arg. */
4533 /* We can do it by words, because any scalar bigger than a word
4534 has a size a multiple of a word. */
4535 for (i
= size
- 1; i
>= not_stack
; i
--)
4536 if (i
>= not_stack
+ offset
)
4537 if (!emit_push_insn (operand_subword_force (x
, i
, mode
),
4538 word_mode
, NULL_TREE
, NULL_RTX
, align
, 0, NULL_RTX
,
4540 GEN_INT (args_offset
+ ((i
- not_stack
+ skip
)
4542 reg_parm_stack_space
, alignment_pad
, sibcall_p
))
4550 /* Push padding now if padding above and stack grows down,
4551 or if padding below and stack grows up.
4552 But if space already allocated, this has already been done. */
4553 if (maybe_ne (extra
, 0)
4555 && where_pad
!= PAD_NONE
4556 && where_pad
!= stack_direction
)
4557 anti_adjust_stack (gen_int_mode (extra
, Pmode
));
4559 #ifdef PUSH_ROUNDING
4560 if (args_addr
== 0 && PUSH_ARGS
)
4561 emit_single_push_insn (mode
, x
, type
);
4565 addr
= simplify_gen_binary (PLUS
, Pmode
, args_addr
, args_so_far
);
4566 dest
= gen_rtx_MEM (mode
, memory_address (mode
, addr
));
4568 /* We do *not* set_mem_attributes here, because incoming arguments
4569 may overlap with sibling call outgoing arguments and we cannot
4570 allow reordering of reads from function arguments with stores
4571 to outgoing arguments of sibling calls. We do, however, want
4572 to record the alignment of the stack slot. */
4573 /* ALIGN may well be better aligned than TYPE, e.g. due to
4574 PARM_BOUNDARY. Assume the caller isn't lying. */
4575 set_mem_align (dest
, align
);
4577 emit_move_insn (dest
, x
);
4581 /* Move the partial arguments into the registers and any overlapping
4582 values that we moved into the pseudos in tmp_regs. */
4583 if (partial
> 0 && reg
!= 0)
4585 /* Handle calls that pass values in multiple non-contiguous locations.
4586 The Irix 6 ABI has examples of this. */
4587 if (GET_CODE (reg
) == PARALLEL
)
4588 emit_group_load (reg
, x
, type
, -1);
4591 gcc_assert (partial
% UNITS_PER_WORD
== 0);
4592 move_block_to_reg (REGNO (reg
), x
, nregs
- overlapping
, mode
);
4594 for (int i
= 0; i
< overlapping
; i
++)
4595 emit_move_insn (gen_rtx_REG (word_mode
, REGNO (reg
)
4596 + nregs
- overlapping
+ i
),
4602 if (maybe_ne (extra
, 0) && args_addr
== 0 && where_pad
== stack_direction
)
4603 anti_adjust_stack (gen_int_mode (extra
, Pmode
));
4605 if (alignment_pad
&& args_addr
== 0)
4606 anti_adjust_stack (alignment_pad
);
/* Return X if X can be used as a subtarget in a sequence of arithmetic
   operations.  */

static rtx
get_subtarget (rtx x)
{
  return (optimize
	  || x == 0
	  /* Only registers can be subtargets.  */
	  || !REG_P (x)
	  /* Don't use hard regs to avoid extending their life.  */
	  || REGNO (x) < FIRST_PSEUDO_REGISTER
	  ? 0 : x);
}
/* A subroutine of expand_assignment.  Optimize FIELD op= VAL, where
   FIELD is a bitfield.  Returns true if the optimization was successful,
   and there's nothing else to do.  */
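/* For example, for

     struct S { unsigned f : 1; } s;
     ...
     s.f ^= 1;

   the constant 1 is masked and shifted into the field's position and the
   store becomes a single XOR on the word containing S.F, instead of the
   usual extract/modify/insert sequence.  */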
static bool
optimize_bitfield_assignment_op (poly_uint64 pbitsize,
				 poly_uint64 pbitpos,
				 poly_uint64 pbitregion_start,
				 poly_uint64 pbitregion_end,
				 machine_mode mode1, rtx str_rtx,
				 tree to, tree src, bool reverse)
{
  machine_mode str_mode = GET_MODE (str_rtx);
  unsigned int str_bitsize = GET_MODE_BITSIZE (str_mode);
  tree op0, op1;
  rtx value, result;
  optab binop;
  gimple *srcstmt;
  enum tree_code code;

  unsigned HOST_WIDE_INT bitsize, bitpos, bitregion_start, bitregion_end;
4647 if (mode1
!= VOIDmode
4648 || !pbitsize
.is_constant (&bitsize
)
4649 || !pbitpos
.is_constant (&bitpos
)
4650 || !pbitregion_start
.is_constant (&bitregion_start
)
4651 || !pbitregion_end
.is_constant (&bitregion_end
)
4652 || bitsize
>= BITS_PER_WORD
4653 || str_bitsize
> BITS_PER_WORD
4654 || TREE_SIDE_EFFECTS (to
)
4655 || TREE_THIS_VOLATILE (to
))
4659 if (TREE_CODE (src
) != SSA_NAME
)
4661 if (TREE_CODE (TREE_TYPE (src
)) != INTEGER_TYPE
)
4664 srcstmt
= get_gimple_for_ssa_name (src
);
4666 || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt
)) != tcc_binary
)
4669 code
= gimple_assign_rhs_code (srcstmt
);
4671 op0
= gimple_assign_rhs1 (srcstmt
);
4673 /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
4674 to find its initialization. Hopefully the initialization will
4675 be from a bitfield load. */
4676 if (TREE_CODE (op0
) == SSA_NAME
)
4678 gimple
*op0stmt
= get_gimple_for_ssa_name (op0
);
4680 /* We want to eventually have OP0 be the same as TO, which
4681 should be a bitfield. */
4683 || !is_gimple_assign (op0stmt
)
4684 || gimple_assign_rhs_code (op0stmt
) != TREE_CODE (to
))
4686 op0
= gimple_assign_rhs1 (op0stmt
);
4689 op1
= gimple_assign_rhs2 (srcstmt
);
4691 if (!operand_equal_p (to
, op0
, 0))
4694 if (MEM_P (str_rtx
))
4696 unsigned HOST_WIDE_INT offset1
;
4698 if (str_bitsize
== 0 || str_bitsize
> BITS_PER_WORD
)
4699 str_bitsize
= BITS_PER_WORD
;
4701 scalar_int_mode best_mode
;
4702 if (!get_best_mode (bitsize
, bitpos
, bitregion_start
, bitregion_end
,
4703 MEM_ALIGN (str_rtx
), str_bitsize
, false, &best_mode
))
4705 str_mode
= best_mode
;
4706 str_bitsize
= GET_MODE_BITSIZE (best_mode
);
4709 bitpos
%= str_bitsize
;
4710 offset1
= (offset1
- bitpos
) / BITS_PER_UNIT
;
4711 str_rtx
= adjust_address (str_rtx
, str_mode
, offset1
);
4713 else if (!REG_P (str_rtx
) && GET_CODE (str_rtx
) != SUBREG
)
4716 gcc_assert (!reverse
);
4718 /* If the bit field covers the whole REG/MEM, store_field
4719 will likely generate better code. */
4720 if (bitsize
>= str_bitsize
)
4723 /* We can't handle fields split across multiple entities. */
4724 if (bitpos
+ bitsize
> str_bitsize
)
4727 if (reverse
? !BYTES_BIG_ENDIAN
: BYTES_BIG_ENDIAN
)
4728 bitpos
= str_bitsize
- bitpos
- bitsize
;
4734 /* For now, just optimize the case of the topmost bitfield
4735 where we don't need to do any masking and also
4736 1 bit bitfields where xor can be used.
4737 We might win by one instruction for the other bitfields
4738 too if insv/extv instructions aren't used, so that
4739 can be added later. */
4740 if ((reverse
|| bitpos
+ bitsize
!= str_bitsize
)
4741 && (bitsize
!= 1 || TREE_CODE (op1
) != INTEGER_CST
))
4744 value
= expand_expr (op1
, NULL_RTX
, str_mode
, EXPAND_NORMAL
);
4745 value
= convert_modes (str_mode
,
4746 TYPE_MODE (TREE_TYPE (op1
)), value
,
4747 TYPE_UNSIGNED (TREE_TYPE (op1
)));
4749 /* We may be accessing data outside the field, which means
4750 we can alias adjacent data. */
4751 if (MEM_P (str_rtx
))
4753 str_rtx
= shallow_copy_rtx (str_rtx
);
4754 set_mem_alias_set (str_rtx
, 0);
4755 set_mem_expr (str_rtx
, 0);
4758 if (bitsize
== 1 && (reverse
|| bitpos
+ bitsize
!= str_bitsize
))
4760 value
= expand_and (str_mode
, value
, const1_rtx
, NULL
);
4764 binop
= code
== PLUS_EXPR
? add_optab
: sub_optab
;
4766 value
= expand_shift (LSHIFT_EXPR
, str_mode
, value
, bitpos
, NULL_RTX
, 1);
4768 value
= flip_storage_order (str_mode
, value
);
4769 result
= expand_binop (str_mode
, binop
, str_rtx
,
4770 value
, str_rtx
, 1, OPTAB_WIDEN
);
4771 if (result
!= str_rtx
)
4772 emit_move_insn (str_rtx
, result
);
4777 if (TREE_CODE (op1
) != INTEGER_CST
)
4779 value
= expand_expr (op1
, NULL_RTX
, str_mode
, EXPAND_NORMAL
);
4780 value
= convert_modes (str_mode
,
4781 TYPE_MODE (TREE_TYPE (op1
)), value
,
4782 TYPE_UNSIGNED (TREE_TYPE (op1
)));
4784 /* We may be accessing data outside the field, which means
4785 we can alias adjacent data. */
4786 if (MEM_P (str_rtx
))
4788 str_rtx
= shallow_copy_rtx (str_rtx
);
4789 set_mem_alias_set (str_rtx
, 0);
4790 set_mem_expr (str_rtx
, 0);
4793 binop
= code
== BIT_IOR_EXPR
? ior_optab
: xor_optab
;
4794 if (bitpos
+ bitsize
!= str_bitsize
)
4796 rtx mask
= gen_int_mode ((HOST_WIDE_INT_1U
<< bitsize
) - 1,
4798 value
= expand_and (str_mode
, value
, mask
, NULL_RTX
);
4800 value
= expand_shift (LSHIFT_EXPR
, str_mode
, value
, bitpos
, NULL_RTX
, 1);
4802 value
= flip_storage_order (str_mode
, value
);
4803 result
= expand_binop (str_mode
, binop
, str_rtx
,
4804 value
, str_rtx
, 1, OPTAB_WIDEN
);
4805 if (result
!= str_rtx
)
4806 emit_move_insn (str_rtx
, result
);
/* In the C++ memory model, consecutive bit fields in a structure are
   considered one memory location.

   Given a COMPONENT_REF EXP at position (BITPOS, OFFSET), this function
   returns the bit range of consecutive bits in which this COMPONENT_REF
   belongs.  The values are returned in *BITSTART and *BITEND.  *BITPOS
   and *OFFSET may be adjusted in the process.

   If the access does not need to be restricted, 0 is returned in both
   *BITSTART and *BITEND.  */
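/* For example, given

     struct S { char c; int a : 3; int b : 5; int d; };

   the bit-fields A and B form a single memory location in the C++ memory
   model: a store to either of them may rewrite the containing bits of
   both, but must not touch C or D.  The range returned here covers the
   DECL_BIT_FIELD_REPRESENTATIVE laid out over A and B.  */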
void
get_bit_range (poly_uint64_pod *bitstart, poly_uint64_pod *bitend, tree exp,
	       poly_int64_pod *bitpos, tree *offset)
{
  poly_int64 bitoffset;
  tree field, repr;

  gcc_assert (TREE_CODE (exp) == COMPONENT_REF);

  field = TREE_OPERAND (exp, 1);
  repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
  /* If we do not have a DECL_BIT_FIELD_REPRESENTATIVE there is no
     need to limit the range we can access.  */
  if (!repr)
    {
      *bitstart = *bitend = 0;
      return;
    }

  /* If we have a DECL_BIT_FIELD_REPRESENTATIVE but the enclosing record is
     part of a larger bit field, then the representative does not serve any
     useful purpose.  This can occur in Ada.  */
  if (handled_component_p (TREE_OPERAND (exp, 0)))
    {
      machine_mode rmode;
      poly_int64 rbitsize, rbitpos;
      tree roffset;
      int unsignedp, reversep, volatilep = 0;
      get_inner_reference (TREE_OPERAND (exp, 0), &rbitsize, &rbitpos,
			   &roffset, &rmode, &unsignedp, &reversep,
			   &volatilep);
      if (!multiple_p (rbitpos, BITS_PER_UNIT))
	{
	  *bitstart = *bitend = 0;
	  return;
	}
    }

  /* Compute the adjustment to bitpos from the offset of the field
     relative to the representative.  DECL_FIELD_OFFSET of field and
     repr are the same by construction if they are not constants,
     see finish_bitfield_layout.  */
  poly_uint64 field_offset, repr_offset;
  if (poly_int_tree_p (DECL_FIELD_OFFSET (field), &field_offset)
      && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
    bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
  else
    bitoffset = 0;
  bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
		- tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));

  /* If the adjustment is larger than bitpos, we would have a negative bit
     position for the lower bound and this may wreak havoc later.  Adjust
     offset and bitpos to make the lower bound non-negative in that case.  */
  if (maybe_gt (bitoffset, *bitpos))
    {
      poly_int64 adjust_bits = upper_bound (bitoffset, *bitpos) - *bitpos;
      poly_int64 adjust_bytes = exact_div (adjust_bits, BITS_PER_UNIT);

      *bitpos += adjust_bits;
      if (*offset == NULL_TREE)
	*offset = size_int (-adjust_bytes);
      else
	*offset = size_binop (MINUS_EXPR, *offset, size_int (adjust_bytes));
      *bitstart = 0;
    }
  else
    *bitstart = *bitpos - bitoffset;

  *bitend = *bitstart + tree_to_uhwi (DECL_SIZE (repr)) - 1;
}
/* Returns true if ADDR is an ADDR_EXPR of a DECL that does not reside
   in memory and has non-BLKmode.  DECL_RTL must not be a MEM; if
   DECL_RTL was not set yet, return NORTL.  */

static inline bool
addr_expr_of_non_mem_decl_p_1 (tree addr, bool nortl)
{
  if (TREE_CODE (addr) != ADDR_EXPR)
    return false;

  tree base = TREE_OPERAND (addr, 0);

  if (!DECL_P (base)
      || TREE_ADDRESSABLE (base)
      || DECL_MODE (base) == BLKmode)
    return false;

  if (!DECL_RTL_SET_P (base))
    return nortl;

  return (!MEM_P (DECL_RTL (base)));
}

/* Returns true if the MEM_REF REF refers to an object that does not
   reside in memory and has non-BLKmode.  */

static inline bool
mem_ref_refers_to_non_mem_p (tree ref)
{
  tree base = TREE_OPERAND (ref, 0);
  return addr_expr_of_non_mem_decl_p_1 (base, false);
}
/* Expand an assignment that stores the value of FROM into TO.  If NONTEMPORAL
   is true, try generating a nontemporal store.  */
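/* For example, for an assignment x.f = val where F is a bit-field member of
   a struct in memory, TO is the COMPONENT_REF x.f and FROM is val; the code
   below extracts the field's position with get_inner_reference and then
   stores through optimize_bitfield_assignment_op or store_field.  */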
4936 expand_assignment (tree to
, tree from
, bool nontemporal
)
4942 enum insn_code icode
;
4944 /* Don't crash if the lhs of the assignment was erroneous. */
4945 if (TREE_CODE (to
) == ERROR_MARK
)
4947 expand_normal (from
);
4951 /* Optimize away no-op moves without side-effects. */
4952 if (operand_equal_p (to
, from
, 0))
4955 /* Handle misaligned stores. */
4956 mode
= TYPE_MODE (TREE_TYPE (to
));
4957 if ((TREE_CODE (to
) == MEM_REF
4958 || TREE_CODE (to
) == TARGET_MEM_REF
)
4960 && !mem_ref_refers_to_non_mem_p (to
)
4961 && ((align
= get_object_alignment (to
))
4962 < GET_MODE_ALIGNMENT (mode
))
4963 && (((icode
= optab_handler (movmisalign_optab
, mode
))
4964 != CODE_FOR_nothing
)
4965 || targetm
.slow_unaligned_access (mode
, align
)))
4969 reg
= expand_expr (from
, NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
4970 reg
= force_not_mem (reg
);
4971 mem
= expand_expr (to
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
4972 if (TREE_CODE (to
) == MEM_REF
&& REF_REVERSE_STORAGE_ORDER (to
))
4973 reg
= flip_storage_order (mode
, reg
);
4975 if (icode
!= CODE_FOR_nothing
)
4977 struct expand_operand ops
[2];
4979 create_fixed_operand (&ops
[0], mem
);
4980 create_input_operand (&ops
[1], reg
, mode
);
4981 /* The movmisalign<mode> pattern cannot fail, else the assignment
4982 would silently be omitted. */
4983 expand_insn (icode
, 2, ops
);
4986 store_bit_field (mem
, GET_MODE_BITSIZE (mode
), 0, 0, 0, mode
, reg
,
4991 /* Assignment of a structure component needs special treatment
4992 if the structure component's rtx is not simply a MEM.
4993 Assignment of an array element at a constant index, and assignment of
4994 an array element in an unaligned packed structure field, has the same
4995 problem. Same for (partially) storing into a non-memory object. */
4996 if (handled_component_p (to
)
4997 || (TREE_CODE (to
) == MEM_REF
4998 && (REF_REVERSE_STORAGE_ORDER (to
)
4999 || mem_ref_refers_to_non_mem_p (to
)))
5000 || TREE_CODE (TREE_TYPE (to
)) == ARRAY_TYPE
)
5003 poly_int64 bitsize
, bitpos
;
5004 poly_uint64 bitregion_start
= 0;
5005 poly_uint64 bitregion_end
= 0;
5007 int unsignedp
, reversep
, volatilep
= 0;
5011 tem
= get_inner_reference (to
, &bitsize
, &bitpos
, &offset
, &mode1
,
5012 &unsignedp
, &reversep
, &volatilep
);
5014 /* Make sure bitpos is not negative, it can wreak havoc later. */
5015 if (maybe_lt (bitpos
, 0))
5017 gcc_assert (offset
== NULL_TREE
);
5018 offset
= size_int (bits_to_bytes_round_down (bitpos
));
5019 bitpos
= num_trailing_bits (bitpos
);
5022 if (TREE_CODE (to
) == COMPONENT_REF
5023 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (to
, 1)))
5024 get_bit_range (&bitregion_start
, &bitregion_end
, to
, &bitpos
, &offset
);
5025 /* The C++ memory model naturally applies to byte-aligned fields.
5026 However, if we do not have a DECL_BIT_FIELD_TYPE but BITPOS or
5027 BITSIZE are not byte-aligned, there is no need to limit the range
5028 we can access. This can occur with packed structures in Ada. */
5029 else if (maybe_gt (bitsize
, 0)
5030 && multiple_p (bitsize
, BITS_PER_UNIT
)
5031 && multiple_p (bitpos
, BITS_PER_UNIT
))
5033 bitregion_start
= bitpos
;
5034 bitregion_end
= bitpos
+ bitsize
- 1;
5037 to_rtx
= expand_expr (tem
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
5039 /* If the field has a mode, we want to access it in the
5040 field's mode, not the computed mode.
5041 If a MEM has VOIDmode (external with incomplete type),
5042 use BLKmode for it instead. */
5045 if (mode1
!= VOIDmode
)
5046 to_rtx
= adjust_address (to_rtx
, mode1
, 0);
5047 else if (GET_MODE (to_rtx
) == VOIDmode
)
5048 to_rtx
= adjust_address (to_rtx
, BLKmode
, 0);
5053 machine_mode address_mode
;
5056 if (!MEM_P (to_rtx
))
5058 /* We can get constant negative offsets into arrays with broken
5059 user code. Translate this to a trap instead of ICEing. */
5060 gcc_assert (TREE_CODE (offset
) == INTEGER_CST
);
5061 expand_builtin_trap ();
5062 to_rtx
= gen_rtx_MEM (BLKmode
, const0_rtx
);
5065 offset_rtx
= expand_expr (offset
, NULL_RTX
, VOIDmode
, EXPAND_SUM
);
5066 address_mode
= get_address_mode (to_rtx
);
5067 if (GET_MODE (offset_rtx
) != address_mode
)
5069 /* We cannot be sure that the RTL in offset_rtx is valid outside
5070 of a memory address context, so force it into a register
5071 before attempting to convert it to the desired mode. */
5072 offset_rtx
= force_operand (offset_rtx
, NULL_RTX
);
5073 offset_rtx
= convert_to_mode (address_mode
, offset_rtx
, 0);
	  /* If we have an expression in OFFSET_RTX and a non-zero
	     byte offset in BITPOS, adding the byte offset before the
	     OFFSET_RTX results in better intermediate code, which makes
	     later rtl optimization passes perform better.

	     We prefer intermediate code like this:

	     r124:DI=r123:DI+0x18
	     [r124:DI]=r121:DI

	     ... instead of ...

	     r124:DI=r123:DI+0x10
	     [r124:DI+0x8]=r121:DI

	     This is only done for aligned data values, as these can
	     be expected to result in single move instructions.  */
5094 if (mode1
!= VOIDmode
5095 && maybe_ne (bitpos
, 0)
5096 && maybe_gt (bitsize
, 0)
5097 && multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
5098 && multiple_p (bitpos
, bitsize
)
5099 && multiple_p (bitsize
, GET_MODE_ALIGNMENT (mode1
))
5100 && MEM_ALIGN (to_rtx
) >= GET_MODE_ALIGNMENT (mode1
))
5102 to_rtx
= adjust_address (to_rtx
, mode1
, bytepos
);
5103 bitregion_start
= 0;
5104 if (known_ge (bitregion_end
, poly_uint64 (bitpos
)))
5105 bitregion_end
-= bitpos
;
5109 to_rtx
= offset_address (to_rtx
, offset_rtx
,
5110 highest_pow2_factor_for_target (to
,
5114 /* No action is needed if the target is not a memory and the field
5115 lies completely outside that target. This can occur if the source
5116 code contains an out-of-bounds access to a small array. */
5118 && GET_MODE (to_rtx
) != BLKmode
5119 && known_ge (bitpos
, GET_MODE_PRECISION (GET_MODE (to_rtx
))))
5121 expand_normal (from
);
5124 /* Handle expand_expr of a complex value returning a CONCAT. */
5125 else if (GET_CODE (to_rtx
) == CONCAT
)
5127 unsigned short mode_bitsize
= GET_MODE_BITSIZE (GET_MODE (to_rtx
));
5128 if (TYPE_MODE (TREE_TYPE (from
)) == GET_MODE (to_rtx
)
5129 && COMPLEX_MODE_P (GET_MODE (to_rtx
))
5130 && known_eq (bitpos
, 0)
5131 && known_eq (bitsize
, mode_bitsize
))
5132 result
= store_expr (from
, to_rtx
, false, nontemporal
, reversep
);
5133 else if (known_eq (bitsize
, mode_bitsize
/ 2)
5134 && (known_eq (bitpos
, 0)
5135 || known_eq (bitpos
, mode_bitsize
/ 2)))
5136 result
= store_expr (from
, XEXP (to_rtx
, maybe_ne (bitpos
, 0)),
5137 false, nontemporal
, reversep
);
5138 else if (known_le (bitpos
+ bitsize
, mode_bitsize
/ 2))
5139 result
= store_field (XEXP (to_rtx
, 0), bitsize
, bitpos
,
5140 bitregion_start
, bitregion_end
,
5141 mode1
, from
, get_alias_set (to
),
5142 nontemporal
, reversep
);
5143 else if (known_ge (bitpos
, mode_bitsize
/ 2))
5144 result
= store_field (XEXP (to_rtx
, 1), bitsize
,
5145 bitpos
- mode_bitsize
/ 2,
5146 bitregion_start
, bitregion_end
,
5147 mode1
, from
, get_alias_set (to
),
5148 nontemporal
, reversep
);
5149 else if (known_eq (bitpos
, 0) && known_eq (bitsize
, mode_bitsize
))
5151 result
= expand_normal (from
);
5152 if (GET_CODE (result
) == CONCAT
)
5154 machine_mode to_mode
= GET_MODE_INNER (GET_MODE (to_rtx
));
5155 machine_mode from_mode
= GET_MODE_INNER (GET_MODE (result
));
5157 = simplify_gen_subreg (to_mode
, XEXP (result
, 0),
5160 = simplify_gen_subreg (to_mode
, XEXP (result
, 1),
5162 if (!from_real
|| !from_imag
)
5163 goto concat_store_slow
;
5164 emit_move_insn (XEXP (to_rtx
, 0), from_real
);
5165 emit_move_insn (XEXP (to_rtx
, 1), from_imag
);
5170 = simplify_gen_subreg (GET_MODE (to_rtx
), result
,
5171 TYPE_MODE (TREE_TYPE (from
)), 0);
5174 emit_move_insn (XEXP (to_rtx
, 0),
5175 read_complex_part (from_rtx
, false));
5176 emit_move_insn (XEXP (to_rtx
, 1),
5177 read_complex_part (from_rtx
, true));
5181 machine_mode to_mode
5182 = GET_MODE_INNER (GET_MODE (to_rtx
));
5184 = simplify_gen_subreg (to_mode
, result
,
5185 TYPE_MODE (TREE_TYPE (from
)),
5188 = simplify_gen_subreg (to_mode
, result
,
5189 TYPE_MODE (TREE_TYPE (from
)),
5190 GET_MODE_SIZE (to_mode
));
5191 if (!from_real
|| !from_imag
)
5192 goto concat_store_slow
;
5193 emit_move_insn (XEXP (to_rtx
, 0), from_real
);
5194 emit_move_insn (XEXP (to_rtx
, 1), from_imag
);
5201 rtx temp
= assign_stack_temp (GET_MODE (to_rtx
),
5202 GET_MODE_SIZE (GET_MODE (to_rtx
)));
5203 write_complex_part (temp
, XEXP (to_rtx
, 0), false);
5204 write_complex_part (temp
, XEXP (to_rtx
, 1), true);
5205 result
= store_field (temp
, bitsize
, bitpos
,
5206 bitregion_start
, bitregion_end
,
5207 mode1
, from
, get_alias_set (to
),
5208 nontemporal
, reversep
);
5209 emit_move_insn (XEXP (to_rtx
, 0), read_complex_part (temp
, false));
5210 emit_move_insn (XEXP (to_rtx
, 1), read_complex_part (temp
, true));
5217 /* If the field is at offset zero, we could have been given the
5218 DECL_RTX of the parent struct. Don't munge it. */
5219 to_rtx
= shallow_copy_rtx (to_rtx
);
5220 set_mem_attributes_minus_bitpos (to_rtx
, to
, 0, bitpos
);
5222 MEM_VOLATILE_P (to_rtx
) = 1;
5225 if (optimize_bitfield_assignment_op (bitsize
, bitpos
,
5226 bitregion_start
, bitregion_end
,
5227 mode1
, to_rtx
, to
, from
,
5231 result
= store_field (to_rtx
, bitsize
, bitpos
,
5232 bitregion_start
, bitregion_end
,
5233 mode1
, from
, get_alias_set (to
),
5234 nontemporal
, reversep
);
5238 preserve_temp_slots (result
);
5243 /* If the rhs is a function call and its value is not an aggregate,
5244 call the function before we start to compute the lhs.
5245 This is needed for correct code for cases such as
5246 val = setjmp (buf) on machines where reference to val
5247 requires loading up part of an address in a separate insn.
5249 Don't do this if TO is a VAR_DECL or PARM_DECL whose DECL_RTL is REG
5250 since it might be a promoted variable where the zero- or sign- extension
5251 needs to be done. Handling this in the normal way is safe because no
5252 computation is done before the call. The same is true for SSA names. */
5253 if (TREE_CODE (from
) == CALL_EXPR
&& ! aggregate_value_p (from
, from
)
5254 && COMPLETE_TYPE_P (TREE_TYPE (from
))
5255 && TREE_CODE (TYPE_SIZE (TREE_TYPE (from
))) == INTEGER_CST
5257 || TREE_CODE (to
) == PARM_DECL
5258 || TREE_CODE (to
) == RESULT_DECL
)
5259 && REG_P (DECL_RTL (to
)))
5260 || TREE_CODE (to
) == SSA_NAME
))
5266 value
= expand_normal (from
);
5268 /* Split value and bounds to store them separately. */
5269 chkp_split_slot (value
, &value
, &bounds
);
5272 to_rtx
= expand_expr (to
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
5274 /* Handle calls that return values in multiple non-contiguous locations.
5275 The Irix 6 ABI has examples of this. */
5276 if (GET_CODE (to_rtx
) == PARALLEL
)
5278 if (GET_CODE (value
) == PARALLEL
)
5279 emit_group_move (to_rtx
, value
);
5281 emit_group_load (to_rtx
, value
, TREE_TYPE (from
),
5282 int_size_in_bytes (TREE_TYPE (from
)));
5284 else if (GET_CODE (value
) == PARALLEL
)
5285 emit_group_store (to_rtx
, value
, TREE_TYPE (from
),
5286 int_size_in_bytes (TREE_TYPE (from
)));
5287 else if (GET_MODE (to_rtx
) == BLKmode
)
5289 /* Handle calls that return BLKmode values in registers. */
5291 copy_blkmode_from_reg (to_rtx
, value
, TREE_TYPE (from
));
5293 emit_block_move (to_rtx
, value
, expr_size (from
), BLOCK_OP_NORMAL
);
5297 if (POINTER_TYPE_P (TREE_TYPE (to
)))
5298 value
= convert_memory_address_addr_space
5299 (as_a
<scalar_int_mode
> (GET_MODE (to_rtx
)), value
,
5300 TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (to
))));
5302 emit_move_insn (to_rtx
, value
);
5305 /* Store bounds if required. */
5307 && (BOUNDED_P (to
) || chkp_type_has_pointer (TREE_TYPE (to
))))
5309 gcc_assert (MEM_P (to_rtx
));
5310 chkp_emit_bounds_store (bounds
, value
, to_rtx
);
5313 preserve_temp_slots (to_rtx
);
5318 /* Ordinary treatment. Expand TO to get a REG or MEM rtx. */
5319 to_rtx
= expand_expr (to
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
5321 /* Don't move directly into a return register. */
5322 if (TREE_CODE (to
) == RESULT_DECL
5323 && (REG_P (to_rtx
) || GET_CODE (to_rtx
) == PARALLEL
))
5329 /* If the source is itself a return value, it still is in a pseudo at
5330 this point so we can move it back to the return register directly. */
5332 && TYPE_MODE (TREE_TYPE (from
)) == BLKmode
5333 && TREE_CODE (from
) != CALL_EXPR
)
5334 temp
= copy_blkmode_to_reg (GET_MODE (to_rtx
), from
);
5336 temp
= expand_expr (from
, NULL_RTX
, GET_MODE (to_rtx
), EXPAND_NORMAL
);
5338 /* Handle calls that return values in multiple non-contiguous locations.
5339 The Irix 6 ABI has examples of this. */
5340 if (GET_CODE (to_rtx
) == PARALLEL
)
5342 if (GET_CODE (temp
) == PARALLEL
)
5343 emit_group_move (to_rtx
, temp
);
5345 emit_group_load (to_rtx
, temp
, TREE_TYPE (from
),
5346 int_size_in_bytes (TREE_TYPE (from
)));
5349 emit_move_insn (to_rtx
, temp
);
5351 preserve_temp_slots (to_rtx
);
5356 /* In case we are returning the contents of an object which overlaps
5357 the place the value is being stored, use a safe function when copying
5358 a value through a pointer into a structure value return block. */
5359 if (TREE_CODE (to
) == RESULT_DECL
5360 && TREE_CODE (from
) == INDIRECT_REF
5361 && ADDR_SPACE_GENERIC_P
5362 (TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (from
, 0)))))
5363 && refs_may_alias_p (to
, from
)
5364 && cfun
->returns_struct
5365 && !cfun
->returns_pcc_struct
)
5370 size
= expr_size (from
);
5371 from_rtx
= expand_normal (from
);
5373 emit_block_move_via_libcall (XEXP (to_rtx
, 0), XEXP (from_rtx
, 0), size
);
5375 preserve_temp_slots (to_rtx
);
5380 /* Compute FROM and store the value in the rtx we got. */
5383 result
= store_expr_with_bounds (from
, to_rtx
, 0, nontemporal
, false, to
);
5384 preserve_temp_slots (result
);
/* Emits nontemporal store insn that moves FROM to TO.  Returns true if this
   succeeded, false otherwise.  */

static bool
emit_storent_insn (rtx to, rtx from)
{
  struct expand_operand ops[2];
  machine_mode mode = GET_MODE (to);
  enum insn_code code = optab_handler (storent_optab, mode);

  if (code == CODE_FOR_nothing)
    return false;

  create_fixed_operand (&ops[0], to);
  create_input_operand (&ops[1], from, mode);
  return maybe_expand_insn (code, 2, ops);
}
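/* The storent_optab handler is entirely target-specific; on x86, for
   instance, the storent patterns typically expand to the MOVNT family of
   non-temporal store instructions.  When no handler exists, the caller
   simply falls back to an ordinary store.  */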
/* Generate code for computing expression EXP,
   and storing the value into TARGET.

   If the mode is BLKmode then we may return TARGET itself.
   It turns out that in BLKmode it doesn't cause a problem,
   because C has no operators that could combine two different
   assignments into the same BLKmode object with different values
   with no sequence point.  Will other languages need this to
   be more thorough?

   If CALL_PARAM_P is nonzero, this is a store into a call param on the
   stack, and block moves may need to be treated specially.

   If NONTEMPORAL is true, try using a nontemporal store instruction.

   If REVERSE is true, the store is to be done in reverse order.

   If BTARGET is not NULL then computed bounds of EXP are
   associated with BTARGET.  */
5428 store_expr_with_bounds (tree exp
, rtx target
, int call_param_p
,
5429 bool nontemporal
, bool reverse
, tree btarget
)
5432 rtx alt_rtl
= NULL_RTX
;
5433 location_t loc
= curr_insn_location ();
5435 if (VOID_TYPE_P (TREE_TYPE (exp
)))
5437 /* C++ can generate ?: expressions with a throw expression in one
5438 branch and an rvalue in the other. Here, we resolve attempts to
5439 store the throw expression's nonexistent result. */
5440 gcc_assert (!call_param_p
);
5441 expand_expr (exp
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5444 if (TREE_CODE (exp
) == COMPOUND_EXPR
)
5446 /* Perform first part of compound expression, then assign from second
5448 expand_expr (TREE_OPERAND (exp
, 0), const0_rtx
, VOIDmode
,
5449 call_param_p
? EXPAND_STACK_PARM
: EXPAND_NORMAL
);
5450 return store_expr_with_bounds (TREE_OPERAND (exp
, 1), target
,
5451 call_param_p
, nontemporal
, reverse
,
5454 else if (TREE_CODE (exp
) == COND_EXPR
&& GET_MODE (target
) == BLKmode
)
5456 /* For conditional expression, get safe form of the target. Then
5457 test the condition, doing the appropriate assignment on either
5458 side. This avoids the creation of unnecessary temporaries.
5459 For non-BLKmode, it is more efficient not to do this. */
5461 rtx_code_label
*lab1
= gen_label_rtx (), *lab2
= gen_label_rtx ();
5463 do_pending_stack_adjust ();
5465 jumpifnot (TREE_OPERAND (exp
, 0), lab1
,
5466 profile_probability::uninitialized ());
5467 store_expr_with_bounds (TREE_OPERAND (exp
, 1), target
, call_param_p
,
5468 nontemporal
, reverse
, btarget
);
5469 emit_jump_insn (targetm
.gen_jump (lab2
));
5472 store_expr_with_bounds (TREE_OPERAND (exp
, 2), target
, call_param_p
,
5473 nontemporal
, reverse
, btarget
);
5479 else if (GET_CODE (target
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (target
))
5480 /* If this is a scalar in a register that is stored in a wider mode
5481 than the declared mode, compute the result into its declared mode
5482 and then convert to the wider mode. Our value is the computed
5485 rtx inner_target
= 0;
5486 scalar_int_mode outer_mode
= subreg_unpromoted_mode (target
);
5487 scalar_int_mode inner_mode
= subreg_promoted_mode (target
);
5489 /* We can do the conversion inside EXP, which will often result
5490 in some optimizations. Do the conversion in two steps: first
5491 change the signedness, if needed, then the extend. But don't
5492 do this if the type of EXP is a subtype of something else
5493 since then the conversion might involve more than just
5494 converting modes. */
5495 if (INTEGRAL_TYPE_P (TREE_TYPE (exp
))
5496 && TREE_TYPE (TREE_TYPE (exp
)) == 0
5497 && GET_MODE_PRECISION (outer_mode
)
5498 == TYPE_PRECISION (TREE_TYPE (exp
)))
5500 if (!SUBREG_CHECK_PROMOTED_SIGN (target
,
5501 TYPE_UNSIGNED (TREE_TYPE (exp
))))
5503 /* Some types, e.g. Fortran's logical*4, won't have a signed
5504 version, so use the mode instead. */
5506 = (signed_or_unsigned_type_for
5507 (SUBREG_PROMOTED_SIGN (target
), TREE_TYPE (exp
)));
5509 ntype
= lang_hooks
.types
.type_for_mode
5510 (TYPE_MODE (TREE_TYPE (exp
)),
5511 SUBREG_PROMOTED_SIGN (target
));
5513 exp
= fold_convert_loc (loc
, ntype
, exp
);
5516 exp
= fold_convert_loc (loc
, lang_hooks
.types
.type_for_mode
5517 (inner_mode
, SUBREG_PROMOTED_SIGN (target
)),
5520 inner_target
= SUBREG_REG (target
);
5523 temp
= expand_expr (exp
, inner_target
, VOIDmode
,
5524 call_param_p
? EXPAND_STACK_PARM
: EXPAND_NORMAL
);
5526 /* Handle bounds returned by call. */
5527 if (TREE_CODE (exp
) == CALL_EXPR
)
5530 chkp_split_slot (temp
, &temp
, &bounds
);
5531 if (bounds
&& btarget
)
5533 gcc_assert (TREE_CODE (btarget
) == SSA_NAME
);
5534 rtx tmp
= targetm
.calls
.load_returned_bounds (bounds
);
5535 chkp_set_rtl_bounds (btarget
, tmp
);
5539 /* If TEMP is a VOIDmode constant, use convert_modes to make
5540 sure that we properly convert it. */
5541 if (CONSTANT_P (temp
) && GET_MODE (temp
) == VOIDmode
)
5543 temp
= convert_modes (outer_mode
, TYPE_MODE (TREE_TYPE (exp
)),
5544 temp
, SUBREG_PROMOTED_SIGN (target
));
5545 temp
= convert_modes (inner_mode
, outer_mode
, temp
,
5546 SUBREG_PROMOTED_SIGN (target
));
5549 convert_move (SUBREG_REG (target
), temp
,
5550 SUBREG_PROMOTED_SIGN (target
));
5554 else if ((TREE_CODE (exp
) == STRING_CST
5555 || (TREE_CODE (exp
) == MEM_REF
5556 && TREE_CODE (TREE_OPERAND (exp
, 0)) == ADDR_EXPR
5557 && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp
, 0), 0))
5559 && integer_zerop (TREE_OPERAND (exp
, 1))))
5560 && !nontemporal
&& !call_param_p
5563 /* Optimize initialization of an array with a STRING_CST. */
5564 HOST_WIDE_INT exp_len
, str_copy_len
;
5566 tree str
= TREE_CODE (exp
) == STRING_CST
5567 ? exp
: TREE_OPERAND (TREE_OPERAND (exp
, 0), 0);
5569 exp_len
= int_expr_size (exp
);
5573 if (TREE_STRING_LENGTH (str
) <= 0)
5576 str_copy_len
= strlen (TREE_STRING_POINTER (str
));
5577 if (str_copy_len
< TREE_STRING_LENGTH (str
) - 1)
5580 str_copy_len
= TREE_STRING_LENGTH (str
);
5581 if ((STORE_MAX_PIECES
& (STORE_MAX_PIECES
- 1)) == 0
5582 && TREE_STRING_POINTER (str
)[TREE_STRING_LENGTH (str
) - 1] == '\0')
5584 str_copy_len
+= STORE_MAX_PIECES
- 1;
5585 str_copy_len
&= ~(STORE_MAX_PIECES
- 1);
5587 str_copy_len
= MIN (str_copy_len
, exp_len
);
5588 if (!can_store_by_pieces (str_copy_len
, builtin_strncpy_read_str
,
5589 CONST_CAST (char *, TREE_STRING_POINTER (str
)),
5590 MEM_ALIGN (target
), false))
5595 dest_mem
= store_by_pieces (dest_mem
,
5596 str_copy_len
, builtin_strncpy_read_str
,
5598 TREE_STRING_POINTER (str
)),
5599 MEM_ALIGN (target
), false,
5600 exp_len
> str_copy_len
? 1 : 0);
5601 if (exp_len
> str_copy_len
)
5602 clear_storage (adjust_address (dest_mem
, BLKmode
, 0),
5603 GEN_INT (exp_len
- str_copy_len
),
5612 /* If we want to use a nontemporal or a reverse order store, force the
5613 value into a register first. */
5614 tmp_target
= nontemporal
|| reverse
? NULL_RTX
: target
;
5615 temp
= expand_expr_real (exp
, tmp_target
, GET_MODE (target
),
5617 ? EXPAND_STACK_PARM
: EXPAND_NORMAL
),
5620 /* Handle bounds returned by call. */
5621 if (TREE_CODE (exp
) == CALL_EXPR
)
5624 chkp_split_slot (temp
, &temp
, &bounds
);
5625 if (bounds
&& btarget
)
5627 gcc_assert (TREE_CODE (btarget
) == SSA_NAME
);
5628 rtx tmp
= targetm
.calls
.load_returned_bounds (bounds
);
5629 chkp_set_rtl_bounds (btarget
, tmp
);
5634 /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
5635 the same as that of TARGET, adjust the constant. This is needed, for
5636 example, in case it is a CONST_DOUBLE or CONST_WIDE_INT and we want
5637 only a word-sized value. */
5638 if (CONSTANT_P (temp
) && GET_MODE (temp
) == VOIDmode
5639 && TREE_CODE (exp
) != ERROR_MARK
5640 && GET_MODE (target
) != TYPE_MODE (TREE_TYPE (exp
)))
5642 if (GET_MODE_CLASS (GET_MODE (target
))
5643 != GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp
)))
5644 && GET_MODE_BITSIZE (GET_MODE (target
))
5645 == GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp
))))
5647 rtx t
= simplify_gen_subreg (GET_MODE (target
), temp
,
5648 TYPE_MODE (TREE_TYPE (exp
)), 0);
5652 if (GET_MODE (temp
) == VOIDmode
)
5653 temp
= convert_modes (GET_MODE (target
), TYPE_MODE (TREE_TYPE (exp
)),
5654 temp
, TYPE_UNSIGNED (TREE_TYPE (exp
)));
  /* If value was not generated in the target, store it there.
     Convert the value to TARGET's type first if necessary and emit the
     pending incrementations that have been queued when expanding EXP.
     Note that we cannot emit the whole queue blindly because this will
     effectively disable the POST_INC optimization later.

     If TEMP and TARGET compare equal according to rtx_equal_p, but
     one or both of them are volatile memory refs, we have to distinguish
     two cases:
     - expand_expr has used TARGET.  In this case, we must not generate
       another copy.  This can be detected by TARGET being equal according
       to == .
     - expand_expr has not used TARGET - that means that the source just
       happens to have the same RTX form.  Since temp will have been created
       by expand_expr, it will compare unequal according to == .
       We must generate a copy in this case, to reach the correct number
       of volatile memory references.  */
5675 if ((! rtx_equal_p (temp
, target
)
5676 || (temp
!= target
&& (side_effects_p (temp
)
5677 || side_effects_p (target
))))
5678 && TREE_CODE (exp
) != ERROR_MARK
5679 /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
5680 but TARGET is not valid memory reference, TEMP will differ
5681 from TARGET although it is really the same location. */
5683 && rtx_equal_p (alt_rtl
, target
)
5684 && !side_effects_p (alt_rtl
)
5685 && !side_effects_p (target
))
5686 /* If there's nothing to copy, don't bother. Don't call
5687 expr_size unless necessary, because some front-ends (C++)
5688 expr_size-hook must not be given objects that are not
5689 supposed to be bit-copied or bit-initialized. */
5690 && expr_size (exp
) != const0_rtx
)
5692 if (GET_MODE (temp
) != GET_MODE (target
) && GET_MODE (temp
) != VOIDmode
)
5694 if (GET_MODE (target
) == BLKmode
)
5696 /* Handle calls that return BLKmode values in registers. */
5697 if (REG_P (temp
) && TREE_CODE (exp
) == CALL_EXPR
)
5698 copy_blkmode_from_reg (target
, temp
, TREE_TYPE (exp
));
5700 store_bit_field (target
,
5701 INTVAL (expr_size (exp
)) * BITS_PER_UNIT
,
5702 0, 0, 0, GET_MODE (temp
), temp
, reverse
);
5705 convert_move (target
, temp
, TYPE_UNSIGNED (TREE_TYPE (exp
)));
5708 else if (GET_MODE (temp
) == BLKmode
&& TREE_CODE (exp
) == STRING_CST
)
5710 /* Handle copying a string constant into an array. The string
5711 constant may be shorter than the array. So copy just the string's
5712 actual length, and clear the rest. First get the size of the data
5713 type of the string, which is actually the size of the target. */
5714 rtx size
= expr_size (exp
);
5716 if (CONST_INT_P (size
)
5717 && INTVAL (size
) < TREE_STRING_LENGTH (exp
))
5718 emit_block_move (target
, temp
, size
,
5720 ? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL
));
5723 machine_mode pointer_mode
5724 = targetm
.addr_space
.pointer_mode (MEM_ADDR_SPACE (target
));
5725 machine_mode address_mode
= get_address_mode (target
);
5727 /* Compute the size of the data to copy from the string. */
5729 = size_binop_loc (loc
, MIN_EXPR
,
5730 make_tree (sizetype
, size
),
5731 size_int (TREE_STRING_LENGTH (exp
)));
5733 = expand_expr (copy_size
, NULL_RTX
, VOIDmode
,
5735 ? EXPAND_STACK_PARM
: EXPAND_NORMAL
));
5736 rtx_code_label
*label
= 0;
5738 /* Copy that much. */
5739 copy_size_rtx
= convert_to_mode (pointer_mode
, copy_size_rtx
,
5740 TYPE_UNSIGNED (sizetype
));
5741 emit_block_move (target
, temp
, copy_size_rtx
,
5743 ? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL
));
5745 /* Figure out how much is left in TARGET that we have to clear.
5746 Do all calculations in pointer_mode. */
5747 if (CONST_INT_P (copy_size_rtx
))
5749 size
= plus_constant (address_mode
, size
,
5750 -INTVAL (copy_size_rtx
));
5751 target
= adjust_address (target
, BLKmode
,
5752 INTVAL (copy_size_rtx
));
5756 size
= expand_binop (TYPE_MODE (sizetype
), sub_optab
, size
,
5757 copy_size_rtx
, NULL_RTX
, 0,
5760 if (GET_MODE (copy_size_rtx
) != address_mode
)
5761 copy_size_rtx
= convert_to_mode (address_mode
,
5763 TYPE_UNSIGNED (sizetype
));
5765 target
= offset_address (target
, copy_size_rtx
,
5766 highest_pow2_factor (copy_size
));
5767 label
= gen_label_rtx ();
5768 emit_cmp_and_jump_insns (size
, const0_rtx
, LT
, NULL_RTX
,
5769 GET_MODE (size
), 0, label
);
5772 if (size
!= const0_rtx
)
5773 clear_storage (target
, size
, BLOCK_OP_NORMAL
);
5779 /* Handle calls that return values in multiple non-contiguous locations.
5780 The Irix 6 ABI has examples of this. */
5781 else if (GET_CODE (target
) == PARALLEL
)
5783 if (GET_CODE (temp
) == PARALLEL
)
5784 emit_group_move (target
, temp
);
5786 emit_group_load (target
, temp
, TREE_TYPE (exp
),
5787 int_size_in_bytes (TREE_TYPE (exp
)));
5789 else if (GET_CODE (temp
) == PARALLEL
)
5790 emit_group_store (target
, temp
, TREE_TYPE (exp
),
5791 int_size_in_bytes (TREE_TYPE (exp
)));
5792 else if (GET_MODE (temp
) == BLKmode
)
5793 emit_block_move (target
, temp
, expr_size (exp
),
5795 ? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL
));
5796 /* If we emit a nontemporal store, there is nothing else to do. */
5797 else if (nontemporal
&& emit_storent_insn (target
, temp
))
5802 temp
= flip_storage_order (GET_MODE (target
), temp
);
5803 temp
= force_operand (temp
, target
);
5805 emit_move_insn (target
, temp
);
/* Same as store_expr_with_bounds but ignoring bounds of EXP.  */

rtx
store_expr (tree exp, rtx target, int call_param_p, bool nontemporal,
	    bool reverse)
{
  return store_expr_with_bounds (exp, target, call_param_p, nontemporal,
				 reverse, NULL);
}
/* Return true if field F of structure TYPE is a flexible array.  */

static bool
flexible_array_member_p (const_tree f, const_tree type)
{
  const_tree tf;

  tf = TREE_TYPE (f);
  return (DECL_CHAIN (f) == NULL
	  && TREE_CODE (tf) == ARRAY_TYPE
	  && TYPE_DOMAIN (tf)
	  && TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
	  && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf)))
	  && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf))
	  && int_size_in_bytes (type) >= 0);
}
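/* For example, in

     struct s { int n; char data[]; };

   DATA is a flexible array member: it is the last field, its type is an
   array with a zero lower bound and no upper bound, and the enclosing
   struct still has a known constant size.  */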
/* If FOR_CTOR_P, return the number of top-level elements that a constructor
   must have in order for it to completely initialize a value of type TYPE.
   Return -1 if the number isn't known.

   If !FOR_CTOR_P, return an estimate of the number of scalars in TYPE.  */
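/* For example, for

     struct P { int x; int y; };
     struct Q { struct P p; int z[2]; };

   the scalar estimate (!FOR_CTOR_P) for struct Q is 4, while FOR_CTOR_P
   asks only for the number of top-level elements a complete constructor
   needs, which is 2 (one for P and one for the array).  */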
static HOST_WIDE_INT
count_type_elements (const_tree type, bool for_ctor_p)
{
5847 switch (TREE_CODE (type
))
5853 nelts
= array_type_nelts (type
);
5854 if (nelts
&& tree_fits_uhwi_p (nelts
))
5856 unsigned HOST_WIDE_INT n
;
5858 n
= tree_to_uhwi (nelts
) + 1;
5859 if (n
== 0 || for_ctor_p
)
5862 return n
* count_type_elements (TREE_TYPE (type
), false);
5864 return for_ctor_p
? -1 : 1;
5869 unsigned HOST_WIDE_INT n
;
5873 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
5874 if (TREE_CODE (f
) == FIELD_DECL
)
5877 n
+= count_type_elements (TREE_TYPE (f
), false);
5878 else if (!flexible_array_member_p (f
, type
))
5879 /* Don't count flexible arrays, which are not supposed
5880 to be initialized. */
5888 case QUAL_UNION_TYPE
:
5893 gcc_assert (!for_ctor_p
);
5894 /* Estimate the number of scalars in each field and pick the
5895 maximum. Other estimates would do instead; the idea is simply
5896 to make sure that the estimate is not sensitive to the ordering
5899 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
5900 if (TREE_CODE (f
) == FIELD_DECL
)
5902 m
= count_type_elements (TREE_TYPE (f
), false);
5903 /* If the field doesn't span the whole union, add an extra
5904 scalar for the rest. */
5905 if (simple_cst_equal (TYPE_SIZE (TREE_TYPE (f
)),
5906 TYPE_SIZE (type
)) != 1)
5918 return TYPE_VECTOR_SUBPARTS (type
);
5922 case FIXED_POINT_TYPE
:
5927 case REFERENCE_TYPE
:
/* Helper for categorize_ctor_elements.  Identical interface.  */

static bool
categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
			    HOST_WIDE_INT *p_init_elts, bool *p_complete)
{
  unsigned HOST_WIDE_INT idx;
  HOST_WIDE_INT nz_elts, init_elts, num_fields;
  tree value, purpose, elt_type;

  /* Whether CTOR is a valid constant initializer, in accordance with what
     initializer_constant_valid_p does.  If inferred from the constructor
     elements, true until proven otherwise.  */
  bool const_from_elts_p = constructor_static_from_elts_p (ctor);
  bool const_p = const_from_elts_p ? true : TREE_STATIC (ctor);

  nz_elts = 0;
  init_elts = 0;
  num_fields = 0;
  elt_type = NULL_TREE;

  FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), idx, purpose, value)
    {
      HOST_WIDE_INT mult = 1;

5968 if (purpose
&& TREE_CODE (purpose
) == RANGE_EXPR
)
5970 tree lo_index
= TREE_OPERAND (purpose
, 0);
5971 tree hi_index
= TREE_OPERAND (purpose
, 1);
5973 if (tree_fits_uhwi_p (lo_index
) && tree_fits_uhwi_p (hi_index
))
5974 mult
= (tree_to_uhwi (hi_index
)
5975 - tree_to_uhwi (lo_index
) + 1);
5978 elt_type
= TREE_TYPE (value
);
5980 switch (TREE_CODE (value
))
5984 HOST_WIDE_INT nz
= 0, ic
= 0;
5986 bool const_elt_p
= categorize_ctor_elements_1 (value
, &nz
, &ic
,
5989 nz_elts
+= mult
* nz
;
5990 init_elts
+= mult
* ic
;
5992 if (const_from_elts_p
&& const_p
)
5993 const_p
= const_elt_p
;
6000 if (!initializer_zerop (value
))
6006 nz_elts
+= mult
* TREE_STRING_LENGTH (value
);
6007 init_elts
+= mult
* TREE_STRING_LENGTH (value
);
6011 if (!initializer_zerop (TREE_REALPART (value
)))
6013 if (!initializer_zerop (TREE_IMAGPART (value
)))
6021 for (i
= 0; i
< VECTOR_CST_NELTS (value
); ++i
)
6023 tree v
= VECTOR_CST_ELT (value
, i
);
6024 if (!initializer_zerop (v
))
6033 HOST_WIDE_INT tc
= count_type_elements (elt_type
, false);
6034 nz_elts
+= mult
* tc
;
6035 init_elts
+= mult
* tc
;
6037 if (const_from_elts_p
&& const_p
)
6039 = initializer_constant_valid_p (value
,
6041 TYPE_REVERSE_STORAGE_ORDER
6049 if (*p_complete
&& !complete_ctor_at_level_p (TREE_TYPE (ctor
),
6050 num_fields
, elt_type
))
6051 *p_complete
= false;
6053 *p_nz_elts
+= nz_elts
;
6054 *p_init_elts
+= init_elts
;
/* Examine CTOR to discover:
   * how many scalar fields are set to nonzero values,
     and place it in *P_NZ_ELTS;
   * how many scalar fields in total are in CTOR,
     and place it in *P_ELT_COUNT.
   * whether the constructor is complete -- in the sense that every
     meaningful byte is explicitly given a value --
     and place it in *P_COMPLETE.

   Return whether or not CTOR is a valid static constant initializer, the same
   as "initializer_constant_valid_p (CTOR, TREE_TYPE (CTOR)) != 0".  */

bool
categorize_ctor_elements (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
                          HOST_WIDE_INT *p_init_elts, bool *p_complete)
{
  *p_nz_elts = 0;
  *p_init_elts = 0;
  *p_complete = true;

  return categorize_ctor_elements_1 (ctor, p_nz_elts, p_init_elts, p_complete);
}
/* TYPE is initialized by a constructor with NUM_ELTS elements, the last
   of which had type LAST_TYPE.  Each element was itself a complete
   initializer, in the sense that every meaningful byte was explicitly
   given a value.  Return true if the same is true for the constructor
   as a whole.  */

bool
complete_ctor_at_level_p (const_tree type, HOST_WIDE_INT num_elts,
                          const_tree last_type)
{
  if (TREE_CODE (type) == UNION_TYPE
      || TREE_CODE (type) == QUAL_UNION_TYPE)
    {
      if (num_elts == 0)
        return false;

      gcc_assert (num_elts == 1 && last_type);

      /* ??? We could look at each element of the union, and find the
         largest element.  Which would avoid comparing the size of the
         initialized element against any tail padding in the union.
         Doesn't seem worth the effort...  */
      return simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (last_type)) == 1;
    }

  return count_type_elements (type, true) == num_elts;
}
/* Return 1 if EXP contains mostly (3/4) zeros.  */

static int
mostly_zeros_p (const_tree exp)
{
  if (TREE_CODE (exp) == CONSTRUCTOR)
    {
      HOST_WIDE_INT nz_elts, init_elts;
      bool complete_p;

      categorize_ctor_elements (exp, &nz_elts, &init_elts, &complete_p);
      return !complete_p || nz_elts < init_elts / 4;
    }

  return initializer_zerop (exp);
}
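/* Editorial example (not part of the original sources): for a complete
   four-element initializer such as "{ 0, 0, 0, 5 }", categorize_ctor_elements
   reports nz_elts == 1 and init_elts == 4, so nz_elts < init_elts / 4 is
   false and the constructor is not treated as mostly zero; one nonzero
   value out of eight elements would be.  */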
/* Return 1 if EXP contains all zeros.  */

static int
all_zeros_p (const_tree exp)
{
  if (TREE_CODE (exp) == CONSTRUCTOR)
    {
      HOST_WIDE_INT nz_elts, init_elts;
      bool complete_p;

      categorize_ctor_elements (exp, &nz_elts, &init_elts, &complete_p);
      return nz_elts == 0;
    }

  return initializer_zerop (exp);
}
/* Helper function for store_constructor.
   TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
   CLEARED is as for store_constructor.
   ALIAS_SET is the alias set to use for any stores.
   If REVERSE is true, the store is to be done in reverse order.

   This provides a recursive shortcut back to store_constructor when it isn't
   necessary to go through store_field.  This is so that we can pass through
   the cleared field to let store_constructor know that we may not have to
   clear a substructure if the outer structure has already been cleared.  */

static void
store_constructor_field (rtx target, poly_uint64 bitsize, poly_int64 bitpos,
                         poly_uint64 bitregion_start,
                         poly_uint64 bitregion_end,
                         machine_mode mode,
                         tree exp, int cleared,
                         alias_set_type alias_set, bool reverse)
{
  poly_int64 bytepos;
  poly_uint64 bytesize;
  if (TREE_CODE (exp) == CONSTRUCTOR
      /* We can only call store_constructor recursively if the size and
         bit position are on a byte boundary.  */
      && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
      && maybe_ne (bitsize, 0U)
      && multiple_p (bitsize, BITS_PER_UNIT, &bytesize)
      /* If we have a nonzero bitpos for a register target, then we just
         let store_field do the bitfield handling.  This is unlikely to
         generate unnecessary clear instructions anyways.  */
      && (known_eq (bitpos, 0) || MEM_P (target)))
    {
      if (MEM_P (target))
        {
          machine_mode target_mode = GET_MODE (target);
          if (target_mode != BLKmode
              && !multiple_p (bitpos, GET_MODE_ALIGNMENT (target_mode)))
            target_mode = BLKmode;
          target = adjust_address (target, target_mode, bytepos);
        }

      /* Update the alias set, if required.  */
      if (MEM_P (target) && ! MEM_KEEP_ALIAS_SET_P (target)
          && MEM_ALIAS_SET (target) != 0)
        {
          target = copy_rtx (target);
          set_mem_alias_set (target, alias_set);
        }

      store_constructor (exp, target, cleared, bytesize, reverse);
    }
  else
    store_field (target, bitsize, bitpos, bitregion_start, bitregion_end, mode,
                 exp, alias_set, false, reverse);
}
/* Returns the number of FIELD_DECLs in TYPE.  */

static int
fields_length (const_tree type)
{
  tree t = TYPE_FIELDS (type);
  int count = 0;

  for (; t; t = DECL_CHAIN (t))
    if (TREE_CODE (t) == FIELD_DECL)
      ++count;

  return count;
}
/* Store the value of constructor EXP into the rtx TARGET.
   TARGET is either a REG or a MEM; we know it cannot conflict, since
   safe_from_p has been called.
   CLEARED is true if TARGET is known to have been zero'd.
   SIZE is the number of bytes of TARGET we are allowed to modify: this
   may not be the same as the size of EXP if we are assigning to a field
   which has been packed to exclude padding bits.
   If REVERSE is true, the store is to be done in reverse order.  */

static void
store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
                   bool reverse)
{
  tree type = TREE_TYPE (exp);
  HOST_WIDE_INT exp_size = int_size_in_bytes (type);
  poly_int64 bitregion_end = known_gt (size, 0) ? size * BITS_PER_UNIT - 1 : 0;
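  /* Editorial note (not part of the original sources): BITREGION_END is the
     last bit position we are allowed to touch, e.g. a 4-byte SIZE with 8-bit
     units yields bitregion_end == 31; a non-positive SIZE leaves it at 0,
     meaning no usable bit region is recorded.  */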
6235 switch (TREE_CODE (type
))
6239 case QUAL_UNION_TYPE
:
6241 unsigned HOST_WIDE_INT idx
;
6244 /* The storage order is specified for every aggregate type. */
6245 reverse
= TYPE_REVERSE_STORAGE_ORDER (type
);
6247 /* If size is zero or the target is already cleared, do nothing. */
6248 if (known_eq (size
, 0) || cleared
)
6250 /* We either clear the aggregate or indicate the value is dead. */
6251 else if ((TREE_CODE (type
) == UNION_TYPE
6252 || TREE_CODE (type
) == QUAL_UNION_TYPE
)
6253 && ! CONSTRUCTOR_ELTS (exp
))
6254 /* If the constructor is empty, clear the union. */
6256 clear_storage (target
, expr_size (exp
), BLOCK_OP_NORMAL
);
6260 /* If we are building a static constructor into a register,
6261 set the initial value as zero so we can fold the value into
6262 a constant. But if more than one register is involved,
6263 this probably loses. */
6264 else if (REG_P (target
) && TREE_STATIC (exp
)
6265 && (GET_MODE_SIZE (GET_MODE (target
))
6266 <= REGMODE_NATURAL_SIZE (GET_MODE (target
))))
6268 emit_move_insn (target
, CONST0_RTX (GET_MODE (target
)));
6272 /* If the constructor has fewer fields than the structure or
6273 if we are initializing the structure to mostly zeros, clear
6274 the whole structure first. Don't do this if TARGET is a
6275 register whose mode size isn't equal to SIZE since
6276 clear_storage can't handle this case. */
6277 else if (known_size_p (size
)
6278 && (((int) CONSTRUCTOR_NELTS (exp
) != fields_length (type
))
6279 || mostly_zeros_p (exp
))
6281 || known_eq (GET_MODE_SIZE (GET_MODE (target
)), size
)))
6283 clear_storage (target
, gen_int_mode (size
, Pmode
),
6288 if (REG_P (target
) && !cleared
)
6289 emit_clobber (target
);
6291 /* Store each element of the constructor into the
6292 corresponding field of TARGET. */
6293 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp
), idx
, field
, value
)
6296 HOST_WIDE_INT bitsize
;
6297 HOST_WIDE_INT bitpos
= 0;
6299 rtx to_rtx
= target
;
6301 /* Just ignore missing fields. We cleared the whole
6302 structure, above, if any fields are missing. */
6306 if (cleared
&& initializer_zerop (value
))
6309 if (tree_fits_uhwi_p (DECL_SIZE (field
)))
6310 bitsize
= tree_to_uhwi (DECL_SIZE (field
));
6314 mode
= DECL_MODE (field
);
6315 if (DECL_BIT_FIELD (field
))
6318 offset
= DECL_FIELD_OFFSET (field
);
6319 if (tree_fits_shwi_p (offset
)
6320 && tree_fits_shwi_p (bit_position (field
)))
6322 bitpos
= int_bit_position (field
);
6328 /* If this initializes a field that is smaller than a
6329 word, at the start of a word, try to widen it to a full
6330 word. This special case allows us to output C++ member
6331 function initializations in a form that the optimizers
6333 if (WORD_REGISTER_OPERATIONS
6335 && bitsize
< BITS_PER_WORD
6336 && bitpos
% BITS_PER_WORD
== 0
6337 && GET_MODE_CLASS (mode
) == MODE_INT
6338 && TREE_CODE (value
) == INTEGER_CST
6340 && bitpos
+ BITS_PER_WORD
<= exp_size
* BITS_PER_UNIT
)
6342 tree type
= TREE_TYPE (value
);
6344 if (TYPE_PRECISION (type
) < BITS_PER_WORD
)
6346 type
= lang_hooks
.types
.type_for_mode
6347 (word_mode
, TYPE_UNSIGNED (type
));
6348 value
= fold_convert (type
, value
);
6349 /* Make sure the bits beyond the original bitsize are zero
6350 so that we can correctly avoid extra zeroing stores in
6351 later constructor elements. */
6353 = wide_int_to_tree (type
, wi::mask (bitsize
, false,
6355 value
= fold_build2 (BIT_AND_EXPR
, type
, value
, bitsize_mask
);
6358 if (BYTES_BIG_ENDIAN
)
6360 = fold_build2 (LSHIFT_EXPR
, type
, value
,
6361 build_int_cst (type
,
6362 BITS_PER_WORD
- bitsize
));
6363 bitsize
= BITS_PER_WORD
;
6367 if (MEM_P (to_rtx
) && !MEM_KEEP_ALIAS_SET_P (to_rtx
)
6368 && DECL_NONADDRESSABLE_P (field
))
6370 to_rtx
= copy_rtx (to_rtx
);
6371 MEM_KEEP_ALIAS_SET_P (to_rtx
) = 1;
6374 store_constructor_field (to_rtx
, bitsize
, bitpos
,
6375 0, bitregion_end
, mode
,
6377 get_alias_set (TREE_TYPE (field
)),
6385 unsigned HOST_WIDE_INT i
;
6388 tree elttype
= TREE_TYPE (type
);
6390 HOST_WIDE_INT minelt
= 0;
6391 HOST_WIDE_INT maxelt
= 0;
6393 /* The storage order is specified for every aggregate type. */
6394 reverse
= TYPE_REVERSE_STORAGE_ORDER (type
);
6396 domain
= TYPE_DOMAIN (type
);
6397 const_bounds_p
= (TYPE_MIN_VALUE (domain
)
6398 && TYPE_MAX_VALUE (domain
)
6399 && tree_fits_shwi_p (TYPE_MIN_VALUE (domain
))
6400 && tree_fits_shwi_p (TYPE_MAX_VALUE (domain
)));
6402 /* If we have constant bounds for the range of the type, get them. */
6405 minelt
= tree_to_shwi (TYPE_MIN_VALUE (domain
));
6406 maxelt
= tree_to_shwi (TYPE_MAX_VALUE (domain
));
6409 /* If the constructor has fewer elements than the array, clear
6410 the whole array first. Similarly if this is static
6411 constructor of a non-BLKmode object. */
6414 else if (REG_P (target
) && TREE_STATIC (exp
))
6418 unsigned HOST_WIDE_INT idx
;
6420 HOST_WIDE_INT count
= 0, zero_count
= 0;
6421 need_to_clear
= ! const_bounds_p
;
6423 /* This loop is a more accurate version of the loop in
6424 mostly_zeros_p (it handles RANGE_EXPR in an index). It
6425 is also needed to check for missing elements. */
6426 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp
), idx
, index
, value
)
6428 HOST_WIDE_INT this_node_count
;
6433 if (index
!= NULL_TREE
&& TREE_CODE (index
) == RANGE_EXPR
)
6435 tree lo_index
= TREE_OPERAND (index
, 0);
6436 tree hi_index
= TREE_OPERAND (index
, 1);
6438 if (! tree_fits_uhwi_p (lo_index
)
6439 || ! tree_fits_uhwi_p (hi_index
))
6445 this_node_count
= (tree_to_uhwi (hi_index
)
6446 - tree_to_uhwi (lo_index
) + 1);
6449 this_node_count
= 1;
6451 count
+= this_node_count
;
6452 if (mostly_zeros_p (value
))
6453 zero_count
+= this_node_count
;
6456 /* Clear the entire array first if there are any missing
6457 elements, or if the incidence of zero elements is >=
6460 && (count
< maxelt
- minelt
+ 1
6461 || 4 * zero_count
>= 3 * count
))
6465 if (need_to_clear
&& maybe_gt (size
, 0))
6468 emit_move_insn (target
, CONST0_RTX (GET_MODE (target
)));
6470 clear_storage (target
, gen_int_mode (size
, Pmode
),
6475 if (!cleared
&& REG_P (target
))
6476 /* Inform later passes that the old value is dead. */
6477 emit_clobber (target
);
6479 /* Store each element of the constructor into the
6480 corresponding element of TARGET, determined by counting the
6482 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp
), i
, index
, value
)
6486 HOST_WIDE_INT bitpos
;
6487 rtx xtarget
= target
;
6489 if (cleared
&& initializer_zerop (value
))
6492 mode
= TYPE_MODE (elttype
);
6493 if (mode
== BLKmode
)
6494 bitsize
= (tree_fits_uhwi_p (TYPE_SIZE (elttype
))
6495 ? tree_to_uhwi (TYPE_SIZE (elttype
))
6498 bitsize
= GET_MODE_BITSIZE (mode
);
6500 if (index
!= NULL_TREE
&& TREE_CODE (index
) == RANGE_EXPR
)
6502 tree lo_index
= TREE_OPERAND (index
, 0);
6503 tree hi_index
= TREE_OPERAND (index
, 1);
6504 rtx index_r
, pos_rtx
;
6505 HOST_WIDE_INT lo
, hi
, count
;
6508 /* If the range is constant and "small", unroll the loop. */
6510 && tree_fits_shwi_p (lo_index
)
6511 && tree_fits_shwi_p (hi_index
)
6512 && (lo
= tree_to_shwi (lo_index
),
6513 hi
= tree_to_shwi (hi_index
),
6514 count
= hi
- lo
+ 1,
6517 || (tree_fits_uhwi_p (TYPE_SIZE (elttype
))
6518 && (tree_to_uhwi (TYPE_SIZE (elttype
)) * count
6521 lo
-= minelt
; hi
-= minelt
;
6522 for (; lo
<= hi
; lo
++)
6524 bitpos
= lo
* tree_to_shwi (TYPE_SIZE (elttype
));
6527 && !MEM_KEEP_ALIAS_SET_P (target
)
6528 && TREE_CODE (type
) == ARRAY_TYPE
6529 && TYPE_NONALIASED_COMPONENT (type
))
6531 target
= copy_rtx (target
);
6532 MEM_KEEP_ALIAS_SET_P (target
) = 1;
6535 store_constructor_field
6536 (target
, bitsize
, bitpos
, 0, bitregion_end
,
6537 mode
, value
, cleared
,
6538 get_alias_set (elttype
), reverse
);
6543 rtx_code_label
*loop_start
= gen_label_rtx ();
6544 rtx_code_label
*loop_end
= gen_label_rtx ();
6547 expand_normal (hi_index
);
6549 index
= build_decl (EXPR_LOCATION (exp
),
6550 VAR_DECL
, NULL_TREE
, domain
);
6551 index_r
= gen_reg_rtx (promote_decl_mode (index
, NULL
));
6552 SET_DECL_RTL (index
, index_r
);
6553 store_expr (lo_index
, index_r
, 0, false, reverse
);
6555 /* Build the head of the loop. */
6556 do_pending_stack_adjust ();
6557 emit_label (loop_start
);
6559 /* Assign value to element index. */
6561 fold_convert (ssizetype
,
6562 fold_build2 (MINUS_EXPR
,
6565 TYPE_MIN_VALUE (domain
)));
6568 size_binop (MULT_EXPR
, position
,
6569 fold_convert (ssizetype
,
6570 TYPE_SIZE_UNIT (elttype
)));
6572 pos_rtx
= expand_normal (position
);
6573 xtarget
= offset_address (target
, pos_rtx
,
6574 highest_pow2_factor (position
));
6575 xtarget
= adjust_address (xtarget
, mode
, 0);
6576 if (TREE_CODE (value
) == CONSTRUCTOR
)
6577 store_constructor (value
, xtarget
, cleared
,
6578 exact_div (bitsize
, BITS_PER_UNIT
),
6581 store_expr (value
, xtarget
, 0, false, reverse
);
6583 /* Generate a conditional jump to exit the loop. */
6584 exit_cond
= build2 (LT_EXPR
, integer_type_node
,
6586 jumpif (exit_cond
, loop_end
,
6587 profile_probability::uninitialized ());
6589 /* Update the loop counter, and jump to the head of
6591 expand_assignment (index
,
6592 build2 (PLUS_EXPR
, TREE_TYPE (index
),
6593 index
, integer_one_node
),
6596 emit_jump (loop_start
);
6598 /* Build the end of the loop. */
6599 emit_label (loop_end
);
6602 else if ((index
!= 0 && ! tree_fits_shwi_p (index
))
6603 || ! tree_fits_uhwi_p (TYPE_SIZE (elttype
)))
6608 index
= ssize_int (1);
6611 index
= fold_convert (ssizetype
,
6612 fold_build2 (MINUS_EXPR
,
6615 TYPE_MIN_VALUE (domain
)));
6618 size_binop (MULT_EXPR
, index
,
6619 fold_convert (ssizetype
,
6620 TYPE_SIZE_UNIT (elttype
)));
6621 xtarget
= offset_address (target
,
6622 expand_normal (position
),
6623 highest_pow2_factor (position
));
6624 xtarget
= adjust_address (xtarget
, mode
, 0);
6625 store_expr (value
, xtarget
, 0, false, reverse
);
6630 bitpos
= ((tree_to_shwi (index
) - minelt
)
6631 * tree_to_uhwi (TYPE_SIZE (elttype
)));
6633 bitpos
= (i
* tree_to_uhwi (TYPE_SIZE (elttype
)));
6635 if (MEM_P (target
) && !MEM_KEEP_ALIAS_SET_P (target
)
6636 && TREE_CODE (type
) == ARRAY_TYPE
6637 && TYPE_NONALIASED_COMPONENT (type
))
6639 target
= copy_rtx (target
);
6640 MEM_KEEP_ALIAS_SET_P (target
) = 1;
6642 store_constructor_field (target
, bitsize
, bitpos
, 0,
6643 bitregion_end
, mode
, value
,
6644 cleared
, get_alias_set (elttype
),
6653 unsigned HOST_WIDE_INT idx
;
6654 constructor_elt
*ce
;
6657 insn_code icode
= CODE_FOR_nothing
;
6659 tree elttype
= TREE_TYPE (type
);
6660 int elt_size
= tree_to_uhwi (TYPE_SIZE (elttype
));
6661 machine_mode eltmode
= TYPE_MODE (elttype
);
6662 HOST_WIDE_INT bitsize
;
6663 HOST_WIDE_INT bitpos
;
6664 rtvec vector
= NULL
;
6666 alias_set_type alias
;
6667 bool vec_vec_init_p
= false;
6668 machine_mode mode
= GET_MODE (target
);
6670 gcc_assert (eltmode
!= BLKmode
);
6672 /* Try using vec_duplicate_optab for uniform vectors. */
6673 if (!TREE_SIDE_EFFECTS (exp
)
6674 && VECTOR_MODE_P (mode
)
6675 && eltmode
== GET_MODE_INNER (mode
)
6676 && ((icode
= optab_handler (vec_duplicate_optab
, mode
))
6677 != CODE_FOR_nothing
)
6678 && (elt
= uniform_vector_p (exp
)))
6680 struct expand_operand ops
[2];
6681 create_output_operand (&ops
[0], target
, mode
);
6682 create_input_operand (&ops
[1], expand_normal (elt
), eltmode
);
6683 expand_insn (icode
, 2, ops
);
6684 if (!rtx_equal_p (target
, ops
[0].value
))
6685 emit_move_insn (target
, ops
[0].value
);
6689 n_elts
= TYPE_VECTOR_SUBPARTS (type
);
6690 if (REG_P (target
) && VECTOR_MODE_P (mode
))
6692 machine_mode emode
= eltmode
;
6694 if (CONSTRUCTOR_NELTS (exp
)
6695 && (TREE_CODE (TREE_TYPE (CONSTRUCTOR_ELT (exp
, 0)->value
))
6698 tree etype
= TREE_TYPE (CONSTRUCTOR_ELT (exp
, 0)->value
);
6699 gcc_assert (CONSTRUCTOR_NELTS (exp
) * TYPE_VECTOR_SUBPARTS (etype
)
6701 emode
= TYPE_MODE (etype
);
6703 icode
= convert_optab_handler (vec_init_optab
, mode
, emode
);
6704 if (icode
!= CODE_FOR_nothing
)
6706 unsigned int i
, n
= n_elts
;
6708 if (emode
!= eltmode
)
6710 n
= CONSTRUCTOR_NELTS (exp
);
6711 vec_vec_init_p
= true;
6713 vector
= rtvec_alloc (n
);
6714 for (i
= 0; i
< n
; i
++)
6715 RTVEC_ELT (vector
, i
) = CONST0_RTX (emode
);
6719 /* If the constructor has fewer elements than the vector,
6720 clear the whole array first. Similarly if this is static
6721 constructor of a non-BLKmode object. */
6724 else if (REG_P (target
) && TREE_STATIC (exp
))
6728 unsigned HOST_WIDE_INT count
= 0, zero_count
= 0;
6731 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp
), idx
, value
)
6733 tree sz
= TYPE_SIZE (TREE_TYPE (value
));
6735 = tree_to_uhwi (int_const_binop (TRUNC_DIV_EXPR
, sz
,
6736 TYPE_SIZE (elttype
)));
6738 count
+= n_elts_here
;
6739 if (mostly_zeros_p (value
))
6740 zero_count
+= n_elts_here
;
6743 /* Clear the entire vector first if there are any missing elements,
6744 or if the incidence of zero elements is >= 75%. */
6745 need_to_clear
= (count
< n_elts
|| 4 * zero_count
>= 3 * count
);
6748 if (need_to_clear
&& maybe_gt (size
, 0) && !vector
)
6751 emit_move_insn (target
, CONST0_RTX (mode
));
6753 clear_storage (target
, gen_int_mode (size
, Pmode
),
6758 /* Inform later passes that the old value is dead. */
6759 if (!cleared
&& !vector
&& REG_P (target
))
6760 emit_move_insn (target
, CONST0_RTX (mode
));
6763 alias
= MEM_ALIAS_SET (target
);
6765 alias
= get_alias_set (elttype
);
6767 /* Store each element of the constructor into the corresponding
6768 element of TARGET, determined by counting the elements. */
6769 for (idx
= 0, i
= 0;
6770 vec_safe_iterate (CONSTRUCTOR_ELTS (exp
), idx
, &ce
);
6771 idx
++, i
+= bitsize
/ elt_size
)
6773 HOST_WIDE_INT eltpos
;
6774 tree value
= ce
->value
;
6776 bitsize
= tree_to_uhwi (TYPE_SIZE (TREE_TYPE (value
)));
6777 if (cleared
&& initializer_zerop (value
))
6781 eltpos
= tree_to_uhwi (ce
->index
);
6789 gcc_assert (ce
->index
== NULL_TREE
);
6790 gcc_assert (TREE_CODE (TREE_TYPE (value
)) == VECTOR_TYPE
);
6794 gcc_assert (TREE_CODE (TREE_TYPE (value
)) != VECTOR_TYPE
);
6795 RTVEC_ELT (vector
, eltpos
) = expand_normal (value
);
6799 machine_mode value_mode
6800 = (TREE_CODE (TREE_TYPE (value
)) == VECTOR_TYPE
6801 ? TYPE_MODE (TREE_TYPE (value
)) : eltmode
);
6802 bitpos
= eltpos
* elt_size
;
6803 store_constructor_field (target
, bitsize
, bitpos
, 0,
6804 bitregion_end
, value_mode
,
6805 value
, cleared
, alias
, reverse
);
6810 emit_insn (GEN_FCN (icode
) (target
,
6811 gen_rtx_PARALLEL (mode
, vector
)));
/* Store the value of EXP (an expression tree)
   into a subfield of TARGET which has mode MODE and occupies
   BITSIZE bits, starting BITPOS bits from the start of TARGET.
   If MODE is VOIDmode, it means that we are storing into a bit-field.

   BITREGION_START is bitpos of the first bitfield in this region.
   BITREGION_END is the bitpos of the ending bitfield in this region.
   These two fields are 0, if the C++ memory model does not apply,
   or we are not interested in keeping track of bitfield regions.

   Always return const0_rtx unless we have something particular to
   return.

   ALIAS_SET is the alias set for the destination.  This value will
   (in general) be different from that for TARGET, since TARGET is a
   reference to the containing structure.

   If NONTEMPORAL is true, try generating a nontemporal store.

   If REVERSE is true, the store is to be done in reverse order.  */

static rtx
store_field (rtx target, poly_int64 bitsize, poly_int64 bitpos,
             poly_uint64 bitregion_start, poly_uint64 bitregion_end,
             machine_mode mode, tree exp,
             alias_set_type alias_set, bool nontemporal, bool reverse)
{
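  /* Editorial note (not part of the original sources): broadly, the code
     below falls back to bit-field store techniques when the destination is
     in a register, MODE is VOIDmode, or BITPOS/BITSIZE are not suitably
     aligned (e.g. storing 5 bits at bit offset 3); otherwise it forms a byte
     address with adjust_address and defers to store_expr or
     store_constructor.  */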
6847 if (TREE_CODE (exp
) == ERROR_MARK
)
6850 /* If we have nothing to store, do nothing unless the expression has
6851 side-effects. Don't do that for zero sized addressable lhs of
6853 if (known_eq (bitsize
, 0)
6854 && (!TREE_ADDRESSABLE (TREE_TYPE (exp
))
6855 || TREE_CODE (exp
) != CALL_EXPR
))
6856 return expand_expr (exp
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6858 if (GET_CODE (target
) == CONCAT
)
6860 /* We're storing into a struct containing a single __complex. */
6862 gcc_assert (known_eq (bitpos
, 0));
6863 return store_expr (exp
, target
, 0, nontemporal
, reverse
);
6866 /* If the structure is in a register or if the component
6867 is a bit field, we cannot use addressing to access it.
6868 Use bit-field techniques or SUBREG to store in it. */
6870 poly_int64 decl_bitsize
;
6871 if (mode
== VOIDmode
6872 || (mode
!= BLKmode
&& ! direct_store
[(int) mode
]
6873 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_INT
6874 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_FLOAT
)
6876 || GET_CODE (target
) == SUBREG
6877 /* If the field isn't aligned enough to store as an ordinary memref,
6878 store it as a bit field. */
6880 && ((((MEM_ALIGN (target
) < GET_MODE_ALIGNMENT (mode
))
6881 || !multiple_p (bitpos
, GET_MODE_ALIGNMENT (mode
)))
6882 && targetm
.slow_unaligned_access (mode
, MEM_ALIGN (target
)))
6883 || !multiple_p (bitpos
, BITS_PER_UNIT
)))
6884 || (known_size_p (bitsize
)
6886 && maybe_gt (GET_MODE_BITSIZE (mode
), bitsize
))
6887 /* If the RHS and field are a constant size and the size of the
6888 RHS isn't the same size as the bitfield, we must use bitfield
6890 || (known_size_p (bitsize
)
6891 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp
)))
6892 && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp
))),
6894 /* Except for initialization of full bytes from a CONSTRUCTOR, which
6895 we will handle specially below. */
6896 && !(TREE_CODE (exp
) == CONSTRUCTOR
6897 && multiple_p (bitsize
, BITS_PER_UNIT
))
6898 /* And except for bitwise copying of TREE_ADDRESSABLE types,
6899 where the FIELD_DECL has the right bitsize, but TREE_TYPE (exp)
6900 includes some extra padding. store_expr / expand_expr will in
6901 that case call get_inner_reference that will have the bitsize
6902 we check here and thus the block move will not clobber the
6903 padding that shouldn't be clobbered. In the future we could
6904 replace the TREE_ADDRESSABLE check with a check that
6905 get_base_address needs to live in memory. */
6906 && (!TREE_ADDRESSABLE (TREE_TYPE (exp
))
6907 || TREE_CODE (exp
) != COMPONENT_REF
6908 || !multiple_p (bitsize
, BITS_PER_UNIT
)
6909 || !multiple_p (bitpos
, BITS_PER_UNIT
)
6910 || !poly_int_tree_p (DECL_SIZE (TREE_OPERAND (exp
, 1)),
6912 || maybe_ne (decl_bitsize
, bitsize
)))
6913 /* If we are expanding a MEM_REF of a non-BLKmode non-addressable
6914 decl we must use bitfield operations. */
6915 || (known_size_p (bitsize
)
6916 && TREE_CODE (exp
) == MEM_REF
6917 && TREE_CODE (TREE_OPERAND (exp
, 0)) == ADDR_EXPR
6918 && DECL_P (TREE_OPERAND (TREE_OPERAND (exp
, 0), 0))
6919 && !TREE_ADDRESSABLE (TREE_OPERAND (TREE_OPERAND (exp
, 0), 0))
6920 && DECL_MODE (TREE_OPERAND (TREE_OPERAND (exp
, 0), 0)) != BLKmode
))
6925 /* If EXP is a NOP_EXPR of precision less than its mode, then that
6926 implies a mask operation. If the precision is the same size as
6927 the field we're storing into, that mask is redundant. This is
6928 particularly common with bit field assignments generated by the
6930 nop_def
= get_def_for_expr (exp
, NOP_EXPR
);
6933 tree type
= TREE_TYPE (exp
);
6934 if (INTEGRAL_TYPE_P (type
)
6935 && TYPE_PRECISION (type
) < GET_MODE_BITSIZE (TYPE_MODE (type
))
6936 && known_eq (bitsize
, TYPE_PRECISION (type
)))
6938 tree op
= gimple_assign_rhs1 (nop_def
);
6939 type
= TREE_TYPE (op
);
6940 if (INTEGRAL_TYPE_P (type
)
6941 && known_ge (TYPE_PRECISION (type
), bitsize
))
6946 temp
= expand_normal (exp
);
6948 /* We don't support variable-sized BLKmode bitfields, since our
6949 handling of BLKmode is bound up with the ability to break
6950 things into words. */
6951 gcc_assert (mode
!= BLKmode
|| bitsize
.is_constant ());
6953 /* Handle calls that return values in multiple non-contiguous locations.
6954 The Irix 6 ABI has examples of this. */
6955 if (GET_CODE (temp
) == PARALLEL
)
6957 HOST_WIDE_INT size
= int_size_in_bytes (TREE_TYPE (exp
));
6958 scalar_int_mode temp_mode
6959 = smallest_int_mode_for_size (size
* BITS_PER_UNIT
);
6960 rtx temp_target
= gen_reg_rtx (temp_mode
);
6961 emit_group_store (temp_target
, temp
, TREE_TYPE (exp
), size
);
6965 /* Handle calls that return BLKmode values in registers. */
6966 else if (mode
== BLKmode
&& REG_P (temp
) && TREE_CODE (exp
) == CALL_EXPR
)
6968 rtx temp_target
= gen_reg_rtx (GET_MODE (temp
));
6969 copy_blkmode_from_reg (temp_target
, temp
, TREE_TYPE (exp
));
6973 /* If the value has aggregate type and an integral mode then, if BITSIZE
6974 is narrower than this mode and this is for big-endian data, we first
6975 need to put the value into the low-order bits for store_bit_field,
6976 except when MODE is BLKmode and BITSIZE larger than the word size
6977 (see the handling of fields larger than a word in store_bit_field).
6978 Moreover, the field may be not aligned on a byte boundary; in this
6979 case, if it has reverse storage order, it needs to be accessed as a
6980 scalar field with reverse storage order and we must first put the
6981 value into target order. */
6982 scalar_int_mode temp_mode
;
6983 if (AGGREGATE_TYPE_P (TREE_TYPE (exp
))
6984 && is_int_mode (GET_MODE (temp
), &temp_mode
))
6986 HOST_WIDE_INT size
= GET_MODE_BITSIZE (temp_mode
);
6988 reverse
= TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (exp
));
6991 temp
= flip_storage_order (temp_mode
, temp
);
6993 gcc_checking_assert (known_le (bitsize
, size
));
6994 if (maybe_lt (bitsize
, size
)
6995 && reverse
? !BYTES_BIG_ENDIAN
: BYTES_BIG_ENDIAN
6996 /* Use of to_constant for BLKmode was checked above. */
6997 && !(mode
== BLKmode
&& bitsize
.to_constant () > BITS_PER_WORD
))
6998 temp
= expand_shift (RSHIFT_EXPR
, temp_mode
, temp
,
6999 size
- bitsize
, NULL_RTX
, 1);
7002 /* Unless MODE is VOIDmode or BLKmode, convert TEMP to MODE. */
7003 if (mode
!= VOIDmode
&& mode
!= BLKmode
7004 && mode
!= TYPE_MODE (TREE_TYPE (exp
)))
7005 temp
= convert_modes (mode
, TYPE_MODE (TREE_TYPE (exp
)), temp
, 1);
7007 /* If the mode of TEMP and TARGET is BLKmode, both must be in memory
7008 and BITPOS must be aligned on a byte boundary. If so, we simply do
7009 a block copy. Likewise for a BLKmode-like TARGET. */
7010 if (GET_MODE (temp
) == BLKmode
7011 && (GET_MODE (target
) == BLKmode
7013 && GET_MODE_CLASS (GET_MODE (target
)) == MODE_INT
7014 && multiple_p (bitpos
, BITS_PER_UNIT
)
7015 && multiple_p (bitsize
, BITS_PER_UNIT
))))
7017 gcc_assert (MEM_P (target
) && MEM_P (temp
));
7018 poly_int64 bytepos
= exact_div (bitpos
, BITS_PER_UNIT
);
7019 poly_int64 bytesize
= bits_to_bytes_round_up (bitsize
);
7021 target
= adjust_address (target
, VOIDmode
, bytepos
);
7022 emit_block_move (target
, temp
,
7023 gen_int_mode (bytesize
, Pmode
),
7029 /* If the mode of TEMP is still BLKmode and BITSIZE not larger than the
7030 word size, we need to load the value (see again store_bit_field). */
7031 if (GET_MODE (temp
) == BLKmode
&& known_le (bitsize
, BITS_PER_WORD
))
7033 scalar_int_mode temp_mode
= smallest_int_mode_for_size (bitsize
);
7034 temp
= extract_bit_field (temp
, bitsize
, 0, 1, NULL_RTX
, temp_mode
,
7035 temp_mode
, false, NULL
);
7038 /* Store the value in the bitfield. */
7039 store_bit_field (target
, bitsize
, bitpos
,
7040 bitregion_start
, bitregion_end
,
7041 mode
, temp
, reverse
);
7047 /* Now build a reference to just the desired component. */
7048 rtx to_rtx
= adjust_address (target
, mode
,
7049 exact_div (bitpos
, BITS_PER_UNIT
));
7051 if (to_rtx
== target
)
7052 to_rtx
= copy_rtx (to_rtx
);
7054 if (!MEM_KEEP_ALIAS_SET_P (to_rtx
) && MEM_ALIAS_SET (to_rtx
) != 0)
7055 set_mem_alias_set (to_rtx
, alias_set
);
7057 /* Above we avoided using bitfield operations for storing a CONSTRUCTOR
7058 into a target smaller than its type; handle that case now. */
7059 if (TREE_CODE (exp
) == CONSTRUCTOR
&& known_size_p (bitsize
))
7061 poly_int64 bytesize
= exact_div (bitsize
, BITS_PER_UNIT
);
7062 store_constructor (exp
, to_rtx
, 0, bytesize
, reverse
);
7066 return store_expr (exp
, to_rtx
, 0, nontemporal
, reverse
);
/* Given an expression EXP that may be a COMPONENT_REF, a BIT_FIELD_REF,
   an ARRAY_REF, or an ARRAY_RANGE_REF, look for nested operations of these
   codes and find the ultimate containing object, which we return.

   We set *PBITSIZE to the size in bits that we want, *PBITPOS to the
   bit position, *PUNSIGNEDP to the signedness and *PREVERSEP to the
   storage order of the field.
   If the position of the field is variable, we store a tree
   giving the variable offset (in units) in *POFFSET.
   This offset is in addition to the bit position.
   If the position is not variable, we store 0 in *POFFSET.

   If any of the extraction expressions is volatile,
   we store 1 in *PVOLATILEP.  Otherwise we don't change that.

   If the field is a non-BLKmode bit-field, *PMODE is set to VOIDmode.
   Otherwise, it is a mode that can be used to access the field.

   If the field describes a variable-sized object, *PMODE is set to
   BLKmode and *PBITSIZE is set to -1.  An access cannot be made in
   this case, but the address of the object can be found.  */

tree
get_inner_reference (tree exp, poly_int64_pod *pbitsize,
                     poly_int64_pod *pbitpos, tree *poffset,
                     machine_mode *pmode, int *punsignedp,
                     int *preversep, int *pvolatilep)
{
  tree size_tree = 0;
  machine_mode mode = VOIDmode;
  bool blkmode_bitfield = false;
  tree offset = size_zero_node;
  poly_offset_int bit_offset = 0;
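  /* Editorial example (not part of the original sources): for a reference
     such as "s.a[i].b" this function peels the COMPONENT_REF / ARRAY_REF
     nest and returns the base object "s", accumulating the constant part of
     the displacement into *PBITPOS and the variable part (here a multiple
     of "i") into *POFFSET.  */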
7104 /* First get the mode, signedness, storage order and size. We do this from
7105 just the outermost expression. */
7107 if (TREE_CODE (exp
) == COMPONENT_REF
)
7109 tree field
= TREE_OPERAND (exp
, 1);
7110 size_tree
= DECL_SIZE (field
);
7111 if (flag_strict_volatile_bitfields
> 0
7112 && TREE_THIS_VOLATILE (exp
)
7113 && DECL_BIT_FIELD_TYPE (field
)
7114 && DECL_MODE (field
) != BLKmode
)
7115 /* Volatile bitfields should be accessed in the mode of the
7116 field's type, not the mode computed based on the bit
7118 mode
= TYPE_MODE (DECL_BIT_FIELD_TYPE (field
));
7119 else if (!DECL_BIT_FIELD (field
))
7121 mode
= DECL_MODE (field
);
7122 /* For vector fields re-check the target flags, as DECL_MODE
7123 could have been set with different target flags than
7124 the current function has. */
7126 && VECTOR_TYPE_P (TREE_TYPE (field
))
7127 && VECTOR_MODE_P (TYPE_MODE_RAW (TREE_TYPE (field
))))
7128 mode
= TYPE_MODE (TREE_TYPE (field
));
7130 else if (DECL_MODE (field
) == BLKmode
)
7131 blkmode_bitfield
= true;
7133 *punsignedp
= DECL_UNSIGNED (field
);
7135 else if (TREE_CODE (exp
) == BIT_FIELD_REF
)
7137 size_tree
= TREE_OPERAND (exp
, 1);
7138 *punsignedp
= (! INTEGRAL_TYPE_P (TREE_TYPE (exp
))
7139 || TYPE_UNSIGNED (TREE_TYPE (exp
)));
7141 /* For vector types, with the correct size of access, use the mode of
7143 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp
, 0))) == VECTOR_TYPE
7144 && TREE_TYPE (exp
) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp
, 0)))
7145 && tree_int_cst_equal (size_tree
, TYPE_SIZE (TREE_TYPE (exp
))))
7146 mode
= TYPE_MODE (TREE_TYPE (exp
));
7150 mode
= TYPE_MODE (TREE_TYPE (exp
));
7151 *punsignedp
= TYPE_UNSIGNED (TREE_TYPE (exp
));
7153 if (mode
== BLKmode
)
7154 size_tree
= TYPE_SIZE (TREE_TYPE (exp
));
7156 *pbitsize
= GET_MODE_BITSIZE (mode
);
7161 if (! tree_fits_uhwi_p (size_tree
))
7162 mode
= BLKmode
, *pbitsize
= -1;
7164 *pbitsize
= tree_to_uhwi (size_tree
);
7167 *preversep
= reverse_storage_order_for_component_p (exp
);
7169 /* Compute cumulative bit-offset for nested component-refs and array-refs,
7170 and find the ultimate containing object. */
7173 switch (TREE_CODE (exp
))
7176 bit_offset
+= wi::to_poly_offset (TREE_OPERAND (exp
, 2));
7181 tree field
= TREE_OPERAND (exp
, 1);
7182 tree this_offset
= component_ref_field_offset (exp
);
7184 /* If this field hasn't been filled in yet, don't go past it.
7185 This should only happen when folding expressions made during
7186 type construction. */
7187 if (this_offset
== 0)
7190 offset
= size_binop (PLUS_EXPR
, offset
, this_offset
);
7191 bit_offset
+= wi::to_poly_offset (DECL_FIELD_BIT_OFFSET (field
));
7193 /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
7198 case ARRAY_RANGE_REF
:
7200 tree index
= TREE_OPERAND (exp
, 1);
7201 tree low_bound
= array_ref_low_bound (exp
);
7202 tree unit_size
= array_ref_element_size (exp
);
7204 /* We assume all arrays have sizes that are a multiple of a byte.
7205 First subtract the lower bound, if any, in the type of the
7206 index, then convert to sizetype and multiply by the size of
7207 the array element. */
7208 if (! integer_zerop (low_bound
))
7209 index
= fold_build2 (MINUS_EXPR
, TREE_TYPE (index
),
7212 offset
= size_binop (PLUS_EXPR
, offset
,
7213 size_binop (MULT_EXPR
,
7214 fold_convert (sizetype
, index
),
7223 bit_offset
+= *pbitsize
;
7226 case VIEW_CONVERT_EXPR
:
7230 /* Hand back the decl for MEM[&decl, off]. */
7231 if (TREE_CODE (TREE_OPERAND (exp
, 0)) == ADDR_EXPR
)
7233 tree off
= TREE_OPERAND (exp
, 1);
7234 if (!integer_zerop (off
))
7236 poly_offset_int boff
= mem_ref_offset (exp
);
7237 boff
<<= LOG2_BITS_PER_UNIT
;
7240 exp
= TREE_OPERAND (TREE_OPERAND (exp
, 0), 0);
7248 /* If any reference in the chain is volatile, the effect is volatile. */
7249 if (TREE_THIS_VOLATILE (exp
))
7252 exp
= TREE_OPERAND (exp
, 0);
7256 /* If OFFSET is constant, see if we can return the whole thing as a
7257 constant bit position. Make sure to handle overflow during
7259 if (poly_int_tree_p (offset
))
7261 poly_offset_int tem
= wi::sext (wi::to_poly_offset (offset
),
7262 TYPE_PRECISION (sizetype
));
7263 tem
<<= LOG2_BITS_PER_UNIT
;
7265 if (tem
.to_shwi (pbitpos
))
7266 *poffset
= offset
= NULL_TREE
;
7269 /* Otherwise, split it up. */
7272 /* Avoid returning a negative bitpos as this may wreak havoc later. */
7273 if (!bit_offset
.to_shwi (pbitpos
) || maybe_lt (*pbitpos
, 0))
7275 *pbitpos
= num_trailing_bits (bit_offset
.force_shwi ());
7276 poly_offset_int bytes
= bits_to_bytes_round_down (bit_offset
);
7277 offset
= size_binop (PLUS_EXPR
, offset
,
7278 build_int_cst (sizetype
, bytes
.force_shwi ()));
7284 /* We can use BLKmode for a byte-aligned BLKmode bitfield. */
7285 if (mode
== VOIDmode
7287 && multiple_p (*pbitpos
, BITS_PER_UNIT
)
7288 && multiple_p (*pbitsize
, BITS_PER_UNIT
))
/* Alignment in bits the TARGET of an assignment may be assumed to have.  */

static unsigned HOST_WIDE_INT
target_align (const_tree target)
{
  /* We might have a chain of nested references with intermediate misaligning
     bitfields components, so need to recurse to find out.  */

  unsigned HOST_WIDE_INT this_align, outer_align;

  switch (TREE_CODE (target))
    {
    case BIT_FIELD_REF:
      return 1;

    case COMPONENT_REF:
      this_align = DECL_ALIGN (TREE_OPERAND (target, 1));
      outer_align = target_align (TREE_OPERAND (target, 0));
      return MIN (this_align, outer_align);

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      this_align = TYPE_ALIGN (TREE_TYPE (target));
      outer_align = target_align (TREE_OPERAND (target, 0));
      return MIN (this_align, outer_align);

    CASE_CONVERT:
    case NON_LVALUE_EXPR:
    case VIEW_CONVERT_EXPR:
      this_align = TYPE_ALIGN (TREE_TYPE (target));
      outer_align = target_align (TREE_OPERAND (target, 0));
      return MAX (this_align, outer_align);

    default:
      return TYPE_ALIGN (TREE_TYPE (target));
    }
}
/* Given an rtx VALUE that may contain additions and multiplications, return
   an equivalent value that just refers to a register, memory, or constant.
   This is done by generating instructions to perform the arithmetic and
   returning a pseudo-register containing the value.

   The returned value may be a REG, SUBREG, MEM or constant.  */

rtx
force_operand (rtx value, rtx target)
{
  rtx op1, op2;
  /* Use subtarget as the target for operand 0 of a binary operation.  */
  rtx subtarget = get_subtarget (target);
  enum rtx_code code = GET_CODE (value);
7350 /* Check for subreg applied to an expression produced by loop optimizer. */
7352 && !REG_P (SUBREG_REG (value
))
7353 && !MEM_P (SUBREG_REG (value
)))
7356 = simplify_gen_subreg (GET_MODE (value
),
7357 force_reg (GET_MODE (SUBREG_REG (value
)),
7358 force_operand (SUBREG_REG (value
),
7360 GET_MODE (SUBREG_REG (value
)),
7361 SUBREG_BYTE (value
));
7362 code
= GET_CODE (value
);
7365 /* Check for a PIC address load. */
7366 if ((code
== PLUS
|| code
== MINUS
)
7367 && XEXP (value
, 0) == pic_offset_table_rtx
7368 && (GET_CODE (XEXP (value
, 1)) == SYMBOL_REF
7369 || GET_CODE (XEXP (value
, 1)) == LABEL_REF
7370 || GET_CODE (XEXP (value
, 1)) == CONST
))
7373 subtarget
= gen_reg_rtx (GET_MODE (value
));
7374 emit_move_insn (subtarget
, value
);
7378 if (ARITHMETIC_P (value
))
7380 op2
= XEXP (value
, 1);
7381 if (!CONSTANT_P (op2
) && !(REG_P (op2
) && op2
!= subtarget
))
7383 if (code
== MINUS
&& CONST_INT_P (op2
))
7386 op2
= negate_rtx (GET_MODE (value
), op2
);
7389 /* Check for an addition with OP2 a constant integer and our first
7390 operand a PLUS of a virtual register and something else. In that
7391 case, we want to emit the sum of the virtual register and the
7392 constant first and then add the other value. This allows virtual
7393 register instantiation to simply modify the constant rather than
7394 creating another one around this addition. */
7395 if (code
== PLUS
&& CONST_INT_P (op2
)
7396 && GET_CODE (XEXP (value
, 0)) == PLUS
7397 && REG_P (XEXP (XEXP (value
, 0), 0))
7398 && REGNO (XEXP (XEXP (value
, 0), 0)) >= FIRST_VIRTUAL_REGISTER
7399 && REGNO (XEXP (XEXP (value
, 0), 0)) <= LAST_VIRTUAL_REGISTER
)
7401 rtx temp
= expand_simple_binop (GET_MODE (value
), code
,
7402 XEXP (XEXP (value
, 0), 0), op2
,
7403 subtarget
, 0, OPTAB_LIB_WIDEN
);
7404 return expand_simple_binop (GET_MODE (value
), code
, temp
,
7405 force_operand (XEXP (XEXP (value
,
7407 target
, 0, OPTAB_LIB_WIDEN
);
7410 op1
= force_operand (XEXP (value
, 0), subtarget
);
7411 op2
= force_operand (op2
, NULL_RTX
);
7415 return expand_mult (GET_MODE (value
), op1
, op2
, target
, 1);
7417 if (!INTEGRAL_MODE_P (GET_MODE (value
)))
7418 return expand_simple_binop (GET_MODE (value
), code
, op1
, op2
,
7419 target
, 1, OPTAB_LIB_WIDEN
);
7421 return expand_divmod (0,
7422 FLOAT_MODE_P (GET_MODE (value
))
7423 ? RDIV_EXPR
: TRUNC_DIV_EXPR
,
7424 GET_MODE (value
), op1
, op2
, target
, 0);
7426 return expand_divmod (1, TRUNC_MOD_EXPR
, GET_MODE (value
), op1
, op2
,
7429 return expand_divmod (0, TRUNC_DIV_EXPR
, GET_MODE (value
), op1
, op2
,
7432 return expand_divmod (1, TRUNC_MOD_EXPR
, GET_MODE (value
), op1
, op2
,
7435 return expand_simple_binop (GET_MODE (value
), code
, op1
, op2
,
7436 target
, 0, OPTAB_LIB_WIDEN
);
7438 return expand_simple_binop (GET_MODE (value
), code
, op1
, op2
,
7439 target
, 1, OPTAB_LIB_WIDEN
);
7442 if (UNARY_P (value
))
7445 target
= gen_reg_rtx (GET_MODE (value
));
7446 op1
= force_operand (XEXP (value
, 0), NULL_RTX
);
7453 case FLOAT_TRUNCATE
:
7454 convert_move (target
, op1
, code
== ZERO_EXTEND
);
7459 expand_fix (target
, op1
, code
== UNSIGNED_FIX
);
7463 case UNSIGNED_FLOAT
:
7464 expand_float (target
, op1
, code
== UNSIGNED_FLOAT
);
7468 return expand_simple_unop (GET_MODE (value
), code
, op1
, target
, 0);
7472 #ifdef INSN_SCHEDULING
7473 /* On machines that have insn scheduling, we want all memory reference to be
7474 explicit, so we need to deal with such paradoxical SUBREGs. */
7475 if (paradoxical_subreg_p (value
) && MEM_P (SUBREG_REG (value
)))
7477 = simplify_gen_subreg (GET_MODE (value
),
7478 force_reg (GET_MODE (SUBREG_REG (value
)),
7479 force_operand (SUBREG_REG (value
),
7481 GET_MODE (SUBREG_REG (value
)),
7482 SUBREG_BYTE (value
));
/* Subroutine of expand_expr: return nonzero iff there is no way that
   EXP can reference X, which is being modified.  TOP_P is nonzero if this
   call is going to be used to determine whether we need a temporary
   for EXP, as opposed to a recursive call to this function.

   It is always safe for this routine to return zero since it merely
   searches for optimization opportunities.  */

int
safe_from_p (const_rtx x, tree exp, int top_p)
{
  rtx exp_rtl = 0;
  int i, nops;

  if (x == 0
7503 /* If EXP has varying size, we MUST use a target since we currently
7504 have no way of allocating temporaries of variable size
7505 (except for arrays that have TYPE_ARRAY_MAX_SIZE set).
7506 So we assume here that something at a higher level has prevented a
7507 clash. This is somewhat bogus, but the best we can do. Only
7508 do this when X is BLKmode and when we are at the top level. */
7509 || (top_p
&& TREE_TYPE (exp
) != 0 && COMPLETE_TYPE_P (TREE_TYPE (exp
))
7510 && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp
))) != INTEGER_CST
7511 && (TREE_CODE (TREE_TYPE (exp
)) != ARRAY_TYPE
7512 || TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp
)) == NULL_TREE
7513 || TREE_CODE (TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp
)))
7515 && GET_MODE (x
) == BLKmode
)
7516 /* If X is in the outgoing argument area, it is always safe. */
7518 && (XEXP (x
, 0) == virtual_outgoing_args_rtx
7519 || (GET_CODE (XEXP (x
, 0)) == PLUS
7520 && XEXP (XEXP (x
, 0), 0) == virtual_outgoing_args_rtx
))))
7523 /* If this is a subreg of a hard register, declare it unsafe, otherwise,
7524 find the underlying pseudo. */
7525 if (GET_CODE (x
) == SUBREG
)
7528 if (REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
)
7532 /* Now look at our tree code and possibly recurse. */
7533 switch (TREE_CODE_CLASS (TREE_CODE (exp
)))
7535 case tcc_declaration
:
7536 exp_rtl
= DECL_RTL_IF_SET (exp
);
7542 case tcc_exceptional
:
7543 if (TREE_CODE (exp
) == TREE_LIST
)
7547 if (TREE_VALUE (exp
) && !safe_from_p (x
, TREE_VALUE (exp
), 0))
7549 exp
= TREE_CHAIN (exp
);
7552 if (TREE_CODE (exp
) != TREE_LIST
)
7553 return safe_from_p (x
, exp
, 0);
7556 else if (TREE_CODE (exp
) == CONSTRUCTOR
)
7558 constructor_elt
*ce
;
7559 unsigned HOST_WIDE_INT idx
;
7561 FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (exp
), idx
, ce
)
7562 if ((ce
->index
!= NULL_TREE
&& !safe_from_p (x
, ce
->index
, 0))
7563 || !safe_from_p (x
, ce
->value
, 0))
7567 else if (TREE_CODE (exp
) == ERROR_MARK
)
7568 return 1; /* An already-visited SAVE_EXPR? */
7573 /* The only case we look at here is the DECL_INITIAL inside a
7575 return (TREE_CODE (exp
) != DECL_EXPR
7576 || TREE_CODE (DECL_EXPR_DECL (exp
)) != VAR_DECL
7577 || !DECL_INITIAL (DECL_EXPR_DECL (exp
))
7578 || safe_from_p (x
, DECL_INITIAL (DECL_EXPR_DECL (exp
)), 0));
7581 case tcc_comparison
:
7582 if (!safe_from_p (x
, TREE_OPERAND (exp
, 1), 0))
7587 return safe_from_p (x
, TREE_OPERAND (exp
, 0), 0);
7589 case tcc_expression
:
7592 /* Now do code-specific tests. EXP_RTL is set to any rtx we find in
7593 the expression. If it is set, we conflict iff we are that rtx or
7594 both are in memory. Otherwise, we check all operands of the
7595 expression recursively. */
7597 switch (TREE_CODE (exp
))
7600 /* If the operand is static or we are static, we can't conflict.
7601 Likewise if we don't conflict with the operand at all. */
7602 if (staticp (TREE_OPERAND (exp
, 0))
7603 || TREE_STATIC (exp
)
7604 || safe_from_p (x
, TREE_OPERAND (exp
, 0), 0))
7607 /* Otherwise, the only way this can conflict is if we are taking
7608 the address of a DECL a that address if part of X, which is
7610 exp
= TREE_OPERAND (exp
, 0);
7613 if (!DECL_RTL_SET_P (exp
)
7614 || !MEM_P (DECL_RTL (exp
)))
7617 exp_rtl
= XEXP (DECL_RTL (exp
), 0);
7623 && alias_sets_conflict_p (MEM_ALIAS_SET (x
),
7624 get_alias_set (exp
)))
7629 /* Assume that the call will clobber all hard registers and
7631 if ((REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
)
7636 case WITH_CLEANUP_EXPR
:
7637 case CLEANUP_POINT_EXPR
:
7638 /* Lowered by gimplify.c. */
7642 return safe_from_p (x
, TREE_OPERAND (exp
, 0), 0);
7648 /* If we have an rtx, we do not need to scan our operands. */
7652 nops
= TREE_OPERAND_LENGTH (exp
);
7653 for (i
= 0; i
< nops
; i
++)
7654 if (TREE_OPERAND (exp
, i
) != 0
7655 && ! safe_from_p (x
, TREE_OPERAND (exp
, i
), 0))
7661 /* Should never get a type here. */
7665 /* If we have an rtl, find any enclosed object. Then see if we conflict
7669 if (GET_CODE (exp_rtl
) == SUBREG
)
7671 exp_rtl
= SUBREG_REG (exp_rtl
);
7673 && REGNO (exp_rtl
) < FIRST_PSEUDO_REGISTER
)
7677 /* If the rtl is X, then it is not safe. Otherwise, it is unless both
7678 are memory and they conflict. */
7679 return ! (rtx_equal_p (x
, exp_rtl
)
7680 || (MEM_P (x
) && MEM_P (exp_rtl
)
7681 && true_dependence (exp_rtl
, VOIDmode
, x
)));
7684 /* If we reach here, it is safe. */
/* Return the highest power of two that EXP is known to be a multiple of.
   This is used in updating alignment of MEMs in array references.  */

unsigned HOST_WIDE_INT
highest_pow2_factor (const_tree exp)
{
  unsigned HOST_WIDE_INT ret;
  int trailing_zeros = tree_ctz (exp);
  if (trailing_zeros >= HOST_BITS_PER_WIDE_INT)
    return BIGGEST_ALIGNMENT;
  ret = HOST_WIDE_INT_1U << trailing_zeros;
  if (ret > BIGGEST_ALIGNMENT)
    return BIGGEST_ALIGNMENT;
  return ret;
}

/* Similar, except that the alignment requirements of TARGET are
   taken into account.  Assume it is at least as aligned as its
   type, unless it is a COMPONENT_REF in which case the layout of
   the structure gives the alignment.  */

static unsigned HOST_WIDE_INT
highest_pow2_factor_for_target (const_tree target, const_tree exp)
{
  unsigned HOST_WIDE_INT talign = target_align (target) / BITS_PER_UNIT;
  unsigned HOST_WIDE_INT factor = highest_pow2_factor (exp);

  return MAX (factor, talign);
}
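/* Editorial example (not part of the original sources): if tree_ctz reports
   4 known trailing zero bits for EXP, highest_pow2_factor returns
   1 << 4 == 16, capped at BIGGEST_ALIGNMENT; highest_pow2_factor_for_target
   additionally never returns less than TARGET's own alignment in bytes.  */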
/* Convert the tree comparison code TCODE to the rtl one where the
   signedness is UNSIGNEDP.  */

static enum rtx_code
convert_tree_comp_to_rtx (enum tree_code tcode, int unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;
    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;

    default:
      gcc_unreachable ();
    }
  return code;
}
/* Subroutine of expand_expr.  Expand the two operands of a binary
   expression EXP0 and EXP1 placing the results in OP0 and OP1.
   The value may be stored in TARGET if TARGET is nonzero.  The
   MODIFIER argument is as documented by expand_expr.  */

void
expand_operands (tree exp0, tree exp1, rtx target, rtx *op0, rtx *op1,
                 enum expand_modifier modifier)
{
  if (! safe_from_p (target, exp1, 1))
    target = 0;
  if (operand_equal_p (exp0, exp1, 0))
    {
      *op0 = expand_expr (exp0, target, VOIDmode, modifier);
      *op1 = copy_rtx (*op0);
    }
  else
    {
      *op0 = expand_expr (exp0, target, VOIDmode, modifier);
      *op1 = expand_expr (exp1, NULL_RTX, VOIDmode, modifier);
    }
}
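/* Editorial note (not part of the original sources): the operand_equal_p
   shortcut above matters for expressions like "x * x"; the common operand
   is expanded once and the second operand is just a copy_rtx of the first,
   instead of emitting the same computation twice.  */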
/* Return a MEM that contains constant EXP.  DEFER is as for
   output_constant_def and MODIFIER is as for expand_expr.  */

static rtx
expand_expr_constant (tree exp, int defer, enum expand_modifier modifier)
{
  rtx mem;

  mem = output_constant_def (exp, defer);
  if (modifier != EXPAND_INITIALIZER)
    mem = use_anchored_address (mem);

  return mem;
}
/* A subroutine of expand_expr_addr_expr.  Evaluate the address of EXP.
   The TARGET, TMODE and MODIFIER arguments are as for expand_expr.  */

static rtx
expand_expr_addr_expr_1 (tree exp, rtx target, scalar_int_mode tmode,
                         enum expand_modifier modifier, addr_space_t as)
{
  rtx result, subtarget;
  tree inner, offset;
  poly_int64 bitsize, bitpos;
  int unsignedp, reversep, volatilep = 0;
  machine_mode mode1;
7828 /* If we are taking the address of a constant and are at the top level,
7829 we have to use output_constant_def since we can't call force_const_mem
7831 /* ??? This should be considered a front-end bug. We should not be
7832 generating ADDR_EXPR of something that isn't an LVALUE. The only
7833 exception here is STRING_CST. */
7834 if (CONSTANT_CLASS_P (exp
))
7836 result
= XEXP (expand_expr_constant (exp
, 0, modifier
), 0);
7837 if (modifier
< EXPAND_SUM
)
7838 result
= force_operand (result
, target
);
7842 /* Everything must be something allowed by is_gimple_addressable. */
7843 switch (TREE_CODE (exp
))
7846 /* This case will happen via recursion for &a->b. */
7847 return expand_expr (TREE_OPERAND (exp
, 0), target
, tmode
, modifier
);
7851 tree tem
= TREE_OPERAND (exp
, 0);
7852 if (!integer_zerop (TREE_OPERAND (exp
, 1)))
7853 tem
= fold_build_pointer_plus (tem
, TREE_OPERAND (exp
, 1));
7854 return expand_expr (tem
, target
, tmode
, modifier
);
7858 /* Expand the initializer like constants above. */
7859 result
= XEXP (expand_expr_constant (DECL_INITIAL (exp
),
7861 if (modifier
< EXPAND_SUM
)
7862 result
= force_operand (result
, target
);
7866 /* The real part of the complex number is always first, therefore
7867 the address is the same as the address of the parent object. */
7870 inner
= TREE_OPERAND (exp
, 0);
7874 /* The imaginary part of the complex number is always second.
7875 The expression is therefore always offset by the size of the
7878 bitpos
= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (exp
)));
7879 inner
= TREE_OPERAND (exp
, 0);
7882 case COMPOUND_LITERAL_EXPR
:
7883 /* Allow COMPOUND_LITERAL_EXPR in initializers or coming from
7884 initializers, if e.g. rtl_for_decl_init is called on DECL_INITIAL
7885 with COMPOUND_LITERAL_EXPRs in it, or ARRAY_REF on a const static
7886 array with address of COMPOUND_LITERAL_EXPR in DECL_INITIAL;
7887 the initializers aren't gimplified. */
7888 if (COMPOUND_LITERAL_EXPR_DECL (exp
)
7889 && TREE_STATIC (COMPOUND_LITERAL_EXPR_DECL (exp
)))
7890 return expand_expr_addr_expr_1 (COMPOUND_LITERAL_EXPR_DECL (exp
),
7891 target
, tmode
, modifier
, as
);
7894 /* If the object is a DECL, then expand it for its rtl. Don't bypass
7895 expand_expr, as that can have various side effects; LABEL_DECLs for
7896 example, may not have their DECL_RTL set yet. Expand the rtl of
7897 CONSTRUCTORs too, which should yield a memory reference for the
7898 constructor's contents. Assume language specific tree nodes can
7899 be expanded in some interesting way. */
7900 gcc_assert (TREE_CODE (exp
) < LAST_AND_UNUSED_TREE_CODE
);
7902 || TREE_CODE (exp
) == CONSTRUCTOR
7903 || TREE_CODE (exp
) == COMPOUND_LITERAL_EXPR
)
7905 result
= expand_expr (exp
, target
, tmode
,
7906 modifier
== EXPAND_INITIALIZER
7907 ? EXPAND_INITIALIZER
: EXPAND_CONST_ADDRESS
);
7909 /* If the DECL isn't in memory, then the DECL wasn't properly
7910 marked TREE_ADDRESSABLE, which will be either a front-end
7911 or a tree optimizer bug. */
7913 gcc_assert (MEM_P (result
));
7914 result
= XEXP (result
, 0);
7916 /* ??? Is this needed anymore? */
7918 TREE_USED (exp
) = 1;
7920 if (modifier
!= EXPAND_INITIALIZER
7921 && modifier
!= EXPAND_CONST_ADDRESS
7922 && modifier
!= EXPAND_SUM
)
7923 result
= force_operand (result
, target
);
7927 /* Pass FALSE as the last argument to get_inner_reference although
7928 we are expanding to RTL. The rationale is that we know how to
7929 handle "aligning nodes" here: we can just bypass them because
7930 they won't change the final object whose address will be returned
7931 (they actually exist only for that purpose). */
7932 inner
= get_inner_reference (exp
, &bitsize
, &bitpos
, &offset
, &mode1
,
7933 &unsignedp
, &reversep
, &volatilep
);
7937 /* We must have made progress. */
7938 gcc_assert (inner
!= exp
);
7940 subtarget
= offset
|| maybe_ne (bitpos
, 0) ? NULL_RTX
: target
;
7941 /* For VIEW_CONVERT_EXPR, where the outer alignment is bigger than
7942 inner alignment, force the inner to be sufficiently aligned. */
7943 if (CONSTANT_CLASS_P (inner
)
7944 && TYPE_ALIGN (TREE_TYPE (inner
)) < TYPE_ALIGN (TREE_TYPE (exp
)))
7946 inner
= copy_node (inner
);
7947 TREE_TYPE (inner
) = copy_node (TREE_TYPE (inner
));
7948 SET_TYPE_ALIGN (TREE_TYPE (inner
), TYPE_ALIGN (TREE_TYPE (exp
)));
7949 TYPE_USER_ALIGN (TREE_TYPE (inner
)) = 1;
7951 result
= expand_expr_addr_expr_1 (inner
, subtarget
, tmode
, modifier
, as
);
7957 if (modifier
!= EXPAND_NORMAL
)
7958 result
= force_operand (result
, NULL
);
7959 tmp
= expand_expr (offset
, NULL_RTX
, tmode
,
7960 modifier
== EXPAND_INITIALIZER
7961 ? EXPAND_INITIALIZER
: EXPAND_NORMAL
);
7963 /* expand_expr is allowed to return an object in a mode other
7964 than TMODE. If it did, we need to convert. */
7965 if (GET_MODE (tmp
) != VOIDmode
&& tmode
!= GET_MODE (tmp
))
7966 tmp
= convert_modes (tmode
, GET_MODE (tmp
),
7967 tmp
, TYPE_UNSIGNED (TREE_TYPE (offset
)));
7968 result
= convert_memory_address_addr_space (tmode
, result
, as
);
7969 tmp
= convert_memory_address_addr_space (tmode
, tmp
, as
);
7971 if (modifier
== EXPAND_SUM
|| modifier
== EXPAND_INITIALIZER
)
7972 result
= simplify_gen_binary (PLUS
, tmode
, result
, tmp
);
7975 subtarget
= maybe_ne (bitpos
, 0) ? NULL_RTX
: target
;
7976 result
= expand_simple_binop (tmode
, PLUS
, result
, tmp
, subtarget
,
7977 1, OPTAB_LIB_WIDEN
);
7981 if (maybe_ne (bitpos
, 0))
7983 /* Someone beforehand should have rejected taking the address
7984 of an object that isn't byte-aligned. */
7985 poly_int64 bytepos
= exact_div (bitpos
, BITS_PER_UNIT
);
7986 result
= convert_memory_address_addr_space (tmode
, result
, as
);
7987 result
= plus_constant (tmode
, result
, bytepos
);
7988 if (modifier
< EXPAND_SUM
)
7989 result
= force_operand (result
, target
);
/* A subroutine of expand_expr.  Evaluate EXP, which is an ADDR_EXPR.
   The TARGET, TMODE and MODIFIER arguments are as for expand_expr.  */

static rtx
expand_expr_addr_expr (tree exp, rtx target, machine_mode tmode,
                       enum expand_modifier modifier)
{
  addr_space_t as = ADDR_SPACE_GENERIC;
  scalar_int_mode address_mode = Pmode;
  scalar_int_mode pointer_mode = ptr_mode;
  machine_mode rmode;
  rtx result;

  /* Target mode of VOIDmode says "whatever's natural".  */
  if (tmode == VOIDmode)
    tmode = TYPE_MODE (TREE_TYPE (exp));

  if (POINTER_TYPE_P (TREE_TYPE (exp)))
    {
      as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)));
      address_mode = targetm.addr_space.address_mode (as);
      pointer_mode = targetm.addr_space.pointer_mode (as);
    }

  /* We can get called with some Weird Things if the user does silliness
     like "(short) &a".  In that case, convert_memory_address won't do
     the right thing, so ignore the given target mode.  */
  scalar_int_mode new_tmode = (tmode == pointer_mode
                               ? pointer_mode
                               : address_mode);

  result = expand_expr_addr_expr_1 (TREE_OPERAND (exp, 0), target,
                                    new_tmode, modifier, as);

  /* Despite expand_expr claims concerning ignoring TMODE when not
     strictly convenient, stuff breaks if we don't honor it.  Note
     that combined with the above, we only do this for pointer modes.  */
  rmode = GET_MODE (result);
  if (rmode == VOIDmode)
    rmode = new_tmode;
  if (rmode != new_tmode)
    result = convert_memory_address_addr_space (new_tmode, result, as);

  return result;
}
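/* An illustration of the mode choice above: for a plain "&x" the
   ADDR_EXPR has pointer type, TMODE is the pointer mode, and the address
   is expanded directly in pointer_mode.  For silliness like "(short) &a"
   the requested mode is the mode of "short" (HImode on typical targets;
   the mode is named here only as an illustration), which is not
   pointer_mode, so the expansion is done in address_mode instead of
   trying to honor the bogus mode.  */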
/* Generate code for computing CONSTRUCTOR EXP.
   An rtx for the computed value is returned.  If AVOID_TEMP_MEM
   is TRUE, instead of creating a temporary variable in memory
   NULL is returned and the caller needs to handle it differently.  */

static rtx
expand_constructor (tree exp, rtx target, enum expand_modifier modifier,
                    bool avoid_temp_mem)
{
  tree type = TREE_TYPE (exp);
  machine_mode mode = TYPE_MODE (type);

  /* Try to avoid creating a temporary at all.  This is possible
     if all of the initializer is zero.
     FIXME: try to handle all [0..255] initializers we can handle
     with memset.  */
  if (TREE_STATIC (exp)
      && !TREE_ADDRESSABLE (exp)
      && target != 0 && mode == BLKmode
      && all_zeros_p (exp))
    {
      clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
      return target;
    }

  /* All elts simple constants => refer to a constant in memory.  But
     if this is a non-BLKmode mode, let it store a field at a time
     since that should make a CONST_INT, CONST_WIDE_INT or
     CONST_DOUBLE when we fold.  Likewise, if we have a target we can
     use, it is best to store directly into the target unless the type
     is large enough that memcpy will be used.  If we are making an
     initializer and all operands are constant, put it in memory as
     well.

     FIXME: Avoid trying to fill vector constructors piece-meal.
     Output them with output_constant_def below unless we're sure
     they're zeros.  This should go away when vector initializers
     are treated like VECTOR_CST instead of arrays.  */
  if ((TREE_STATIC (exp)
       && ((mode == BLKmode
            && ! (target != 0 && safe_from_p (target, exp, 1)))
           || TREE_ADDRESSABLE (exp)
           || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
               && (! can_move_by_pieces
                   (tree_to_uhwi (TYPE_SIZE_UNIT (type)),
                    TYPE_ALIGN (type)))
               && ! mostly_zeros_p (exp))))
      || ((modifier == EXPAND_INITIALIZER || modifier == EXPAND_CONST_ADDRESS)
          && TREE_CONSTANT (exp)))
    {
      rtx constructor;

      if (avoid_temp_mem)
        return NULL_RTX;

      constructor = expand_expr_constant (exp, 1, modifier);

      if (modifier != EXPAND_CONST_ADDRESS
          && modifier != EXPAND_INITIALIZER
          && modifier != EXPAND_SUM)
        constructor = validize_mem (constructor);

      return constructor;
    }

  /* Handle calls that pass values in multiple non-contiguous
     locations.  The Irix 6 ABI has examples of this.  */
  if (target == 0 || ! safe_from_p (target, exp, 1)
      || GET_CODE (target) == PARALLEL || modifier == EXPAND_STACK_PARM)
    {
      if (avoid_temp_mem)
        return NULL_RTX;

      target = assign_temp (type, TREE_ADDRESSABLE (exp), 1);
    }

  store_constructor (exp, target, 0, int_expr_size (exp), false);
  return target;
}
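/* An example of the fast path above: a local aggregate initialized
   entirely with zeros, e.g. "struct S s = { 0 };" for a BLKmode struct,
   satisfies the all_zeros_p test, so (provided the other conditions hold)
   the whole initialization collapses into a single clear_storage of the
   target block instead of building the constructor in memory and copying
   it over.  */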
/* expand_expr: generate code for computing expression EXP.
   An rtx for the computed value is returned.  The value is never null.
   In the case of a void EXP, const0_rtx is returned.

   The value may be stored in TARGET if TARGET is nonzero.
   TARGET is just a suggestion; callers must assume that
   the rtx returned may not be the same as TARGET.

   If TARGET is CONST0_RTX, it means that the value will be ignored.

   If TMODE is not VOIDmode, it suggests generating the
   result in mode TMODE.  But this is done only when convenient.
   Otherwise, TMODE is ignored and the value generated in its natural mode.
   TMODE is just a suggestion; callers must assume that
   the rtx returned may not have mode TMODE.

   Note that TARGET may have neither TMODE nor MODE.  In that case, it
   probably will not be used.

   If MODIFIER is EXPAND_SUM then when EXP is an addition
   we can return an rtx of the form (MULT (REG ...) (CONST_INT ...))
   or a nest of (PLUS ...) and (MINUS ...) where the terms are
   products as above, or REG or MEM, or constant.
   Ordinarily in such cases we would output mul or add instructions
   and then return a pseudo reg containing the sum.

   EXPAND_INITIALIZER is much like EXPAND_SUM except that
   it also marks a label as absolutely required (it can't be dead).
   It also makes a ZERO_EXTEND or SIGN_EXTEND instead of emitting extend insns.
   This is used for outputting expressions used in initializers.

   EXPAND_CONST_ADDRESS says that it is okay to return a MEM
   with a constant address even if that address is not normally legitimate.
   EXPAND_INITIALIZER and EXPAND_SUM also have this effect.

   EXPAND_STACK_PARM is used when expanding to a TARGET on the stack for
   a call parameter.  Such targets require special care as we haven't yet
   marked TARGET so that it's safe from being trashed by libcalls.  We
   don't want to use TARGET for anything but the final result;
   Intermediate values must go elsewhere.  Additionally, calls to
   emit_block_move will be flagged with BLOCK_OP_CALL_PARM.

   If EXP is a VAR_DECL whose DECL_RTL was a MEM with an invalid
   address, and ALT_RTL is non-NULL, then *ALT_RTL is set to the
   DECL_RTL of the VAR_DECL.  *ALT_RTL is also set if EXP is a
   COMPOUND_EXPR whose second argument is such a VAR_DECL, and so on
   recursively.

   If INNER_REFERENCE_P is true, we are expanding an inner reference.
   In this case, we don't adjust a returned MEM rtx that wouldn't be
   sufficiently aligned for its mode; instead, it's up to the caller
   to deal with it afterwards.  This is used to make sure that unaligned
   base objects for which out-of-bounds accesses are supported, for
   example record types with trailing arrays, aren't realigned behind
   the back of the caller.
   The normal operating mode is to pass FALSE for this parameter.  */
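/* An illustration of the EXPAND_SUM convention described above: asked to
   expand something like "&a[i]" with EXPAND_SUM, expand_expr may hand back
   an un-emitted form such as

       (plus (mult (reg) (const_int 4)) (symbol_ref "a"))

   and leave it to the caller to decide whether the sum must be forced
   into a register; the modes and the scale factor in this sketch are
   purely illustrative.  */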
rtx
expand_expr_real (tree exp, rtx target, machine_mode tmode,
                  enum expand_modifier modifier, rtx *alt_rtl,
                  bool inner_reference_p)
{
  rtx ret;

  /* Handle ERROR_MARK before anybody tries to access its type.  */
  if (TREE_CODE (exp) == ERROR_MARK
      || (TREE_CODE (TREE_TYPE (exp)) == ERROR_MARK))
    {
      ret = CONST0_RTX (tmode);
      return ret ? ret : const0_rtx;
    }

  ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl,
                            inner_reference_p);
  return ret;
}
/* Try to expand the conditional expression which is represented by
   TREEOP0 ? TREEOP1 : TREEOP2 using conditional moves.  If it succeeds
   return the rtl reg which represents the result.  Otherwise return
   NULL_RTX.  */

static rtx
expand_cond_expr_using_cmove (tree treeop0 ATTRIBUTE_UNUSED,
                              tree treeop1 ATTRIBUTE_UNUSED,
                              tree treeop2 ATTRIBUTE_UNUSED)
{
  rtx op00, op01, op1, op2;
  enum rtx_code comparison_code;
  machine_mode comparison_mode;
  gimple *srcstmt;
  rtx temp, insn;
  tree type = TREE_TYPE (treeop1);
  int unsignedp = TYPE_UNSIGNED (type);
  machine_mode mode = TYPE_MODE (type);
  machine_mode orig_mode = mode;
  static bool expanding_cond_expr_using_cmove = false;

  /* Conditional move expansion can end up TERing two operands which,
     when recursively hitting conditional expressions can result in
     exponential behavior if the cmove expansion ultimately fails.
     It's hardly profitable to TER a cmove into a cmove so avoid doing
     that by failing early if we end up recursing.  */
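  /* For example, with nested conditionals such as

         x = a ? (b ? c : d) : e;

     TER can forward the inner COND_EXPR into the outer one, and if every
     nesting level first attempted (and then abandoned) a conditional-move
     expansion, the amount of work would double per level.  The early
     return below cuts the recursion off after the first level.  */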
  if (expanding_cond_expr_using_cmove)
    return NULL_RTX;

  /* If we cannot do a conditional move on the mode, try doing it
     with the promoted mode.  */
  if (!can_conditionally_move_p (mode))
    {
      mode = promote_mode (type, mode, &unsignedp);
      if (!can_conditionally_move_p (mode))
        return NULL_RTX;
      temp = assign_temp (type, 0, 0); /* Use promoted mode for temp.  */
    }
  else
    temp = assign_temp (type, 0, 1);

  expanding_cond_expr_using_cmove = true;
  start_sequence ();
  expand_operands (treeop1, treeop2,
                   temp, &op1, &op2, EXPAND_NORMAL);

  if (TREE_CODE (treeop0) == SSA_NAME
      && (srcstmt = get_def_for_expr_class (treeop0, tcc_comparison)))
    {
      tree type = TREE_TYPE (gimple_assign_rhs1 (srcstmt));
      enum tree_code cmpcode = gimple_assign_rhs_code (srcstmt);
      op00 = expand_normal (gimple_assign_rhs1 (srcstmt));
      op01 = expand_normal (gimple_assign_rhs2 (srcstmt));
      comparison_mode = TYPE_MODE (type);
      unsignedp = TYPE_UNSIGNED (type);
      comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
    }
  else if (COMPARISON_CLASS_P (treeop0))
    {
      tree type = TREE_TYPE (TREE_OPERAND (treeop0, 0));
      enum tree_code cmpcode = TREE_CODE (treeop0);
      op00 = expand_normal (TREE_OPERAND (treeop0, 0));
      op01 = expand_normal (TREE_OPERAND (treeop0, 1));
      unsignedp = TYPE_UNSIGNED (type);
      comparison_mode = TYPE_MODE (type);
      comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
    }
  else
    {
      op00 = expand_normal (treeop0);
      op01 = const0_rtx;
      comparison_code = NE;
      comparison_mode = GET_MODE (op00);
      if (comparison_mode == VOIDmode)
        comparison_mode = TYPE_MODE (TREE_TYPE (treeop0));
    }
  expanding_cond_expr_using_cmove = false;

  if (GET_MODE (op1) != mode)
    op1 = gen_lowpart (mode, op1);

  if (GET_MODE (op2) != mode)
    op2 = gen_lowpart (mode, op2);

  /* Try to emit the conditional move.  */
  insn = emit_conditional_move (temp, comparison_code,
                                op00, op01, comparison_mode,
                                op1, op2, mode,
                                unsignedp);

  /* If we could do the conditional move, emit the sequence,
     and return.  */
  if (insn)
    {
      rtx_insn *seq = get_insns ();
      end_sequence ();
      emit_insn (seq);
      return convert_modes (orig_mode, mode, temp, 0);
    }

  /* Otherwise discard the sequence and fall back to code with
     branches.  */
  end_sequence ();
  return NULL_RTX;
}
rtx
expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
                    enum expand_modifier modifier)
{
  rtx op0, op1, op2, temp;
  rtx_code_label *lab;
  tree type;
  int unsignedp;
  machine_mode mode;
  scalar_int_mode int_mode;
  enum tree_code code = ops->code;
  optab this_optab;
  rtx subtarget, original_target;
  int ignore;
  bool reduce_bit_field;
  location_t loc = ops->location;
  tree treeop0, treeop1, treeop2;
#define REDUCE_BIT_FIELD(expr)	(reduce_bit_field			  \
				 ? reduce_to_bit_field_precision ((expr), \
								  target, \
								  type)	  \
				 : (expr))

  type = ops->type;
  mode = TYPE_MODE (type);
  unsignedp = TYPE_UNSIGNED (type);

  treeop0 = ops->op0;
  treeop1 = ops->op1;
  treeop2 = ops->op2;

  /* We should be called only on simple (binary or unary) expressions,
     exactly those that are valid in gimple expressions that aren't
     GIMPLE_SINGLE_RHS (or invalid).  */
  gcc_assert (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS
              || get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS
              || get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS);

  ignore = (target == const0_rtx
            || ((CONVERT_EXPR_CODE_P (code)
                 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
                && TREE_CODE (type) == VOID_TYPE));

  /* We should be called only if we need the result.  */
  gcc_assert (!ignore);

  /* An operation in what may be a bit-field type needs the
     result to be reduced to the precision of the bit-field type,
     which is narrower than that of the type's mode.  */
  reduce_bit_field = (INTEGRAL_TYPE_P (type)
                      && !type_has_mode_precision_p (type));

  if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
    target = 0;

  /* Use subtarget as the target for operand 0 of a binary operation.  */
  subtarget = get_subtarget (target);
  original_target = target;

  switch (code)
    {
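    /* A small example of why the REDUCE_BIT_FIELD wrapper above exists:
       if TYPE is a 3-bit unsigned bit-field type living in QImode, adding
       6 and 3 in QImode produces 9, and REDUCE_BIT_FIELD narrows that back
       to 9 & 7 == 1, so the cases below return a value with the precision
       of the bit-field type rather than of the machine mode.  */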
8367 case NON_LVALUE_EXPR
:
8370 if (treeop0
== error_mark_node
)
8373 if (TREE_CODE (type
) == UNION_TYPE
)
8375 tree valtype
= TREE_TYPE (treeop0
);
8377 /* If both input and output are BLKmode, this conversion isn't doing
8378 anything except possibly changing memory attribute. */
8379 if (mode
== BLKmode
&& TYPE_MODE (valtype
) == BLKmode
)
8381 rtx result
= expand_expr (treeop0
, target
, tmode
,
8384 result
= copy_rtx (result
);
8385 set_mem_attributes (result
, type
, 0);
8391 if (TYPE_MODE (type
) != BLKmode
)
8392 target
= gen_reg_rtx (TYPE_MODE (type
));
8394 target
= assign_temp (type
, 1, 1);
8398 /* Store data into beginning of memory target. */
8399 store_expr (treeop0
,
8400 adjust_address (target
, TYPE_MODE (valtype
), 0),
8401 modifier
== EXPAND_STACK_PARM
,
8402 false, TYPE_REVERSE_STORAGE_ORDER (type
));
8406 gcc_assert (REG_P (target
)
8407 && !TYPE_REVERSE_STORAGE_ORDER (type
));
8409 /* Store this field into a union of the proper type. */
8410 store_field (target
,
8411 MIN ((int_size_in_bytes (TREE_TYPE
8414 (HOST_WIDE_INT
) GET_MODE_BITSIZE (mode
)),
8415 0, 0, 0, TYPE_MODE (valtype
), treeop0
, 0,
8419 /* Return the entire union. */
8423 if (mode
== TYPE_MODE (TREE_TYPE (treeop0
)))
8425 op0
= expand_expr (treeop0
, target
, VOIDmode
,
8428 /* If the signedness of the conversion differs and OP0 is
8429 a promoted SUBREG, clear that indication since we now
8430 have to do the proper extension. */
8431 if (TYPE_UNSIGNED (TREE_TYPE (treeop0
)) != unsignedp
8432 && GET_CODE (op0
) == SUBREG
)
8433 SUBREG_PROMOTED_VAR_P (op0
) = 0;
8435 return REDUCE_BIT_FIELD (op0
);
8438 op0
= expand_expr (treeop0
, NULL_RTX
, mode
,
8439 modifier
== EXPAND_SUM
? EXPAND_NORMAL
: modifier
);
8440 if (GET_MODE (op0
) == mode
)
8443 /* If OP0 is a constant, just convert it into the proper mode. */
8444 else if (CONSTANT_P (op0
))
8446 tree inner_type
= TREE_TYPE (treeop0
);
8447 machine_mode inner_mode
= GET_MODE (op0
);
8449 if (inner_mode
== VOIDmode
)
8450 inner_mode
= TYPE_MODE (inner_type
);
8452 if (modifier
== EXPAND_INITIALIZER
)
8453 op0
= lowpart_subreg (mode
, op0
, inner_mode
);
8455 op0
= convert_modes (mode
, inner_mode
, op0
,
8456 TYPE_UNSIGNED (inner_type
));
8459 else if (modifier
== EXPAND_INITIALIZER
)
8460 op0
= gen_rtx_fmt_e (TYPE_UNSIGNED (TREE_TYPE (treeop0
))
8461 ? ZERO_EXTEND
: SIGN_EXTEND
, mode
, op0
);
8463 else if (target
== 0)
8464 op0
= convert_to_mode (mode
, op0
,
8465 TYPE_UNSIGNED (TREE_TYPE
8469 convert_move (target
, op0
,
8470 TYPE_UNSIGNED (TREE_TYPE (treeop0
)));
8474 return REDUCE_BIT_FIELD (op0
);
8476 case ADDR_SPACE_CONVERT_EXPR
:
8478 tree treeop0_type
= TREE_TYPE (treeop0
);
8480 gcc_assert (POINTER_TYPE_P (type
));
8481 gcc_assert (POINTER_TYPE_P (treeop0_type
));
8483 addr_space_t as_to
= TYPE_ADDR_SPACE (TREE_TYPE (type
));
8484 addr_space_t as_from
= TYPE_ADDR_SPACE (TREE_TYPE (treeop0_type
));
8486 /* Conversions between pointers to the same address space should
8487 have been implemented via CONVERT_EXPR / NOP_EXPR. */
8488 gcc_assert (as_to
!= as_from
);
8490 op0
= expand_expr (treeop0
, NULL_RTX
, VOIDmode
, modifier
);
8492 /* Ask target code to handle conversion between pointers
8493 to overlapping address spaces. */
8494 if (targetm
.addr_space
.subset_p (as_to
, as_from
)
8495 || targetm
.addr_space
.subset_p (as_from
, as_to
))
8497 op0
= targetm
.addr_space
.convert (op0
, treeop0_type
, type
);
8501 /* For disjoint address spaces, converting anything but a null
8502 pointer invokes undefined behavior. We truncate or extend the
8503 value as if we'd converted via integers, which handles 0 as
8504 required, and all others as the programmer likely expects. */
8505 #ifndef POINTERS_EXTEND_UNSIGNED
8506 const int POINTERS_EXTEND_UNSIGNED
= 1;
8508 op0
= convert_modes (mode
, TYPE_MODE (treeop0_type
),
8509 op0
, POINTERS_EXTEND_UNSIGNED
);
    case POINTER_PLUS_EXPR:
      /* Even though the sizetype mode and the pointer's mode can be different
         expand is able to handle this correctly and get the correct result out
         of the PLUS_EXPR code.  */
      /* Make sure to sign-extend the sizetype offset in a POINTER_PLUS_EXPR
         if sizetype precision is smaller than pointer precision.  */
      if (TYPE_PRECISION (sizetype) < TYPE_PRECISION (type))
        treeop1 = fold_convert_loc (loc, type,
                                    fold_convert_loc (loc, ssizetype,
                                                      treeop1));
      /* If sizetype precision is larger than pointer precision, truncate the
         offset to have matching modes.  */
      else if (TYPE_PRECISION (sizetype) > TYPE_PRECISION (type))
        treeop1 = fold_convert_loc (loc, type, treeop1);
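      /* For example, on a hypothetical target with 64-bit pointers but a
         32-bit sizetype, the offset in "p + n" where n is (size_t) -4 must
         be widened through ssizetype so that it stays -4 in the pointer's
         precision; reinterpreting it as 0xfffffffc would move the pointer
         forward by about 4GB instead of back by 4 bytes.  */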
8532 /* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
8533 something else, make sure we add the register to the constant and
8534 then to the other thing. This case can occur during strength
8535 reduction and doing it this way will produce better code if the
8536 frame pointer or argument pointer is eliminated.
8538 fold-const.c will ensure that the constant is always in the inner
8539 PLUS_EXPR, so the only case we need to do anything about is if
8540 sp, ap, or fp is our second argument, in which case we must swap
8541 the innermost first argument and our second argument. */
8543 if (TREE_CODE (treeop0
) == PLUS_EXPR
8544 && TREE_CODE (TREE_OPERAND (treeop0
, 1)) == INTEGER_CST
8546 && (DECL_RTL (treeop1
) == frame_pointer_rtx
8547 || DECL_RTL (treeop1
) == stack_pointer_rtx
8548 || DECL_RTL (treeop1
) == arg_pointer_rtx
))
8553 /* If the result is to be ptr_mode and we are adding an integer to
8554 something, we might be forming a constant. So try to use
8555 plus_constant. If it produces a sum and we can't accept it,
8556 use force_operand. This allows P = &ARR[const] to generate
8557 efficient code on machines where a SYMBOL_REF is not a valid
8560 If this is an EXPAND_SUM call, always return the sum. */
8561 if (modifier
== EXPAND_SUM
|| modifier
== EXPAND_INITIALIZER
8562 || (mode
== ptr_mode
&& (unsignedp
|| ! flag_trapv
)))
8564 if (modifier
== EXPAND_STACK_PARM
)
8566 if (TREE_CODE (treeop0
) == INTEGER_CST
8567 && HWI_COMPUTABLE_MODE_P (mode
)
8568 && TREE_CONSTANT (treeop1
))
8572 machine_mode wmode
= TYPE_MODE (TREE_TYPE (treeop1
));
8574 op1
= expand_expr (treeop1
, subtarget
, VOIDmode
,
8576 /* Use wi::shwi to ensure that the constant is
8577 truncated according to the mode of OP1, then sign extended
8578 to a HOST_WIDE_INT. Using the constant directly can result
8579 in non-canonical RTL in a 64x32 cross compile. */
8580 wc
= TREE_INT_CST_LOW (treeop0
);
8582 immed_wide_int_const (wi::shwi (wc
, wmode
), wmode
);
8583 op1
= plus_constant (mode
, op1
, INTVAL (constant_part
));
8584 if (modifier
!= EXPAND_SUM
&& modifier
!= EXPAND_INITIALIZER
)
8585 op1
= force_operand (op1
, target
);
8586 return REDUCE_BIT_FIELD (op1
);
8589 else if (TREE_CODE (treeop1
) == INTEGER_CST
8590 && HWI_COMPUTABLE_MODE_P (mode
)
8591 && TREE_CONSTANT (treeop0
))
8595 machine_mode wmode
= TYPE_MODE (TREE_TYPE (treeop0
));
8597 op0
= expand_expr (treeop0
, subtarget
, VOIDmode
,
8598 (modifier
== EXPAND_INITIALIZER
8599 ? EXPAND_INITIALIZER
: EXPAND_SUM
));
8600 if (! CONSTANT_P (op0
))
8602 op1
= expand_expr (treeop1
, NULL_RTX
,
8603 VOIDmode
, modifier
);
8604 /* Return a PLUS if modifier says it's OK. */
8605 if (modifier
== EXPAND_SUM
8606 || modifier
== EXPAND_INITIALIZER
)
8607 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
8610 /* Use wi::shwi to ensure that the constant is
8611 truncated according to the mode of OP1, then sign extended
8612 to a HOST_WIDE_INT. Using the constant directly can result
8613 in non-canonical RTL in a 64x32 cross compile. */
8614 wc
= TREE_INT_CST_LOW (treeop1
);
8616 = immed_wide_int_const (wi::shwi (wc
, wmode
), wmode
);
8617 op0
= plus_constant (mode
, op0
, INTVAL (constant_part
));
8618 if (modifier
!= EXPAND_SUM
&& modifier
!= EXPAND_INITIALIZER
)
8619 op0
= force_operand (op0
, target
);
8620 return REDUCE_BIT_FIELD (op0
);
8624 /* Use TER to expand pointer addition of a negated value
8625 as pointer subtraction. */
8626 if ((POINTER_TYPE_P (TREE_TYPE (treeop0
))
8627 || (TREE_CODE (TREE_TYPE (treeop0
)) == VECTOR_TYPE
8628 && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (treeop0
)))))
8629 && TREE_CODE (treeop1
) == SSA_NAME
8630 && TYPE_MODE (TREE_TYPE (treeop0
))
8631 == TYPE_MODE (TREE_TYPE (treeop1
)))
8633 gimple
*def
= get_def_for_expr (treeop1
, NEGATE_EXPR
);
8636 treeop1
= gimple_assign_rhs1 (def
);
8642 /* No sense saving up arithmetic to be done
8643 if it's all in the wrong mode to form part of an address.
8644 And force_operand won't know whether to sign-extend or
8646 if (modifier
!= EXPAND_INITIALIZER
8647 && (modifier
!= EXPAND_SUM
|| mode
!= ptr_mode
))
8649 expand_operands (treeop0
, treeop1
,
8650 subtarget
, &op0
, &op1
, modifier
);
8651 if (op0
== const0_rtx
)
8653 if (op1
== const0_rtx
)
8658 expand_operands (treeop0
, treeop1
,
8659 subtarget
, &op0
, &op1
, modifier
);
8660 return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS
, mode
, op0
, op1
));
8663 case POINTER_DIFF_EXPR
:
8665 /* For initializers, we are allowed to return a MINUS of two
8666 symbolic constants. Here we handle all cases when both operands
8668 /* Handle difference of two symbolic constants,
8669 for the sake of an initializer. */
8670 if ((modifier
== EXPAND_SUM
|| modifier
== EXPAND_INITIALIZER
)
8671 && really_constant_p (treeop0
)
8672 && really_constant_p (treeop1
))
8674 expand_operands (treeop0
, treeop1
,
8675 NULL_RTX
, &op0
, &op1
, modifier
);
8676 return simplify_gen_binary (MINUS
, mode
, op0
, op1
);
8679 /* No sense saving up arithmetic to be done
8680 if it's all in the wrong mode to form part of an address.
8681 And force_operand won't know whether to sign-extend or
8683 if (modifier
!= EXPAND_INITIALIZER
8684 && (modifier
!= EXPAND_SUM
|| mode
!= ptr_mode
))
8687 expand_operands (treeop0
, treeop1
,
8688 subtarget
, &op0
, &op1
, modifier
);
8690 /* Convert A - const to A + (-const). */
8691 if (CONST_INT_P (op1
))
8693 op1
= negate_rtx (mode
, op1
);
8694 return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS
, mode
, op0
, op1
));
8699 case WIDEN_MULT_PLUS_EXPR
:
8700 case WIDEN_MULT_MINUS_EXPR
:
8701 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
8702 op2
= expand_normal (treeop2
);
8703 target
= expand_widen_pattern_expr (ops
, op0
, op1
, op2
,
8707 case WIDEN_MULT_EXPR
:
8708 /* If first operand is constant, swap them.
8709 Thus the following special case checks need only
8710 check the second operand. */
8711 if (TREE_CODE (treeop0
) == INTEGER_CST
)
8712 std::swap (treeop0
, treeop1
);
8714 /* First, check if we have a multiplication of one signed and one
8715 unsigned operand. */
8716 if (TREE_CODE (treeop1
) != INTEGER_CST
8717 && (TYPE_UNSIGNED (TREE_TYPE (treeop0
))
8718 != TYPE_UNSIGNED (TREE_TYPE (treeop1
))))
8720 machine_mode innermode
= TYPE_MODE (TREE_TYPE (treeop0
));
8721 this_optab
= usmul_widen_optab
;
8722 if (find_widening_optab_handler (this_optab
, mode
, innermode
)
8723 != CODE_FOR_nothing
)
8725 if (TYPE_UNSIGNED (TREE_TYPE (treeop0
)))
8726 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
,
8729 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op1
, &op0
,
8731 /* op0 and op1 might still be constant, despite the above
8732 != INTEGER_CST check. Handle it. */
8733 if (GET_MODE (op0
) == VOIDmode
&& GET_MODE (op1
) == VOIDmode
)
8735 op0
= convert_modes (innermode
, mode
, op0
, true);
8736 op1
= convert_modes (innermode
, mode
, op1
, false);
8737 return REDUCE_BIT_FIELD (expand_mult (mode
, op0
, op1
,
8738 target
, unsignedp
));
8743 /* Check for a multiplication with matching signedness. */
8744 else if ((TREE_CODE (treeop1
) == INTEGER_CST
8745 && int_fits_type_p (treeop1
, TREE_TYPE (treeop0
)))
8746 || (TYPE_UNSIGNED (TREE_TYPE (treeop1
))
8747 == TYPE_UNSIGNED (TREE_TYPE (treeop0
))))
8749 tree op0type
= TREE_TYPE (treeop0
);
8750 machine_mode innermode
= TYPE_MODE (op0type
);
8751 bool zextend_p
= TYPE_UNSIGNED (op0type
);
8752 optab other_optab
= zextend_p
? smul_widen_optab
: umul_widen_optab
;
8753 this_optab
= zextend_p
? umul_widen_optab
: smul_widen_optab
;
8755 if (TREE_CODE (treeop0
) != INTEGER_CST
)
8757 if (find_widening_optab_handler (this_optab
, mode
, innermode
)
8758 != CODE_FOR_nothing
)
8760 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
,
8762 /* op0 and op1 might still be constant, despite the above
8763 != INTEGER_CST check. Handle it. */
8764 if (GET_MODE (op0
) == VOIDmode
&& GET_MODE (op1
) == VOIDmode
)
8767 op0
= convert_modes (innermode
, mode
, op0
, zextend_p
);
8769 = convert_modes (innermode
, mode
, op1
,
8770 TYPE_UNSIGNED (TREE_TYPE (treeop1
)));
8771 return REDUCE_BIT_FIELD (expand_mult (mode
, op0
, op1
,
8775 temp
= expand_widening_mult (mode
, op0
, op1
, target
,
8776 unsignedp
, this_optab
);
8777 return REDUCE_BIT_FIELD (temp
);
8779 if (find_widening_optab_handler (other_optab
, mode
, innermode
)
8781 && innermode
== word_mode
)
8784 op0
= expand_normal (treeop0
);
8785 if (TREE_CODE (treeop1
) == INTEGER_CST
)
8786 op1
= convert_modes (word_mode
, mode
,
8787 expand_normal (treeop1
),
8788 TYPE_UNSIGNED (TREE_TYPE (treeop1
)));
8790 op1
= expand_normal (treeop1
);
8791 /* op0 and op1 might still be constant, despite the above
8792 != INTEGER_CST check. Handle it. */
8793 if (GET_MODE (op0
) == VOIDmode
&& GET_MODE (op1
) == VOIDmode
)
8794 goto widen_mult_const
;
8795 temp
= expand_binop (mode
, other_optab
, op0
, op1
, target
,
8796 unsignedp
, OPTAB_LIB_WIDEN
);
8797 hipart
= gen_highpart (word_mode
, temp
);
8798 htem
= expand_mult_highpart_adjust (word_mode
, hipart
,
8802 emit_move_insn (hipart
, htem
);
8803 return REDUCE_BIT_FIELD (temp
);
8807 treeop0
= fold_build1 (CONVERT_EXPR
, type
, treeop0
);
8808 treeop1
= fold_build1 (CONVERT_EXPR
, type
, treeop1
);
8809 expand_operands (treeop0
, treeop1
, subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
8810 return REDUCE_BIT_FIELD (expand_mult (mode
, op0
, op1
, target
, unsignedp
));
8814 optab opt
= fma_optab
;
8815 gimple
*def0
, *def2
;
8817 /* If there is no insn for FMA, emit it as __builtin_fma{,f,l}
8819 if (optab_handler (fma_optab
, mode
) == CODE_FOR_nothing
)
8821 tree fn
= mathfn_built_in (TREE_TYPE (treeop0
), BUILT_IN_FMA
);
8824 gcc_assert (fn
!= NULL_TREE
);
8825 call_expr
= build_call_expr (fn
, 3, treeop0
, treeop1
, treeop2
);
8826 return expand_builtin (call_expr
, target
, subtarget
, mode
, false);
8829 def0
= get_def_for_expr (treeop0
, NEGATE_EXPR
);
8830 /* The multiplication is commutative - look at its 2nd operand
8831 if the first isn't fed by a negate. */
8834 def0
= get_def_for_expr (treeop1
, NEGATE_EXPR
);
8835 /* Swap operands if the 2nd operand is fed by a negate. */
8837 std::swap (treeop0
, treeop1
);
8839 def2
= get_def_for_expr (treeop2
, NEGATE_EXPR
);
8844 && optab_handler (fnms_optab
, mode
) != CODE_FOR_nothing
)
8847 op0
= expand_normal (gimple_assign_rhs1 (def0
));
8848 op2
= expand_normal (gimple_assign_rhs1 (def2
));
8851 && optab_handler (fnma_optab
, mode
) != CODE_FOR_nothing
)
8854 op0
= expand_normal (gimple_assign_rhs1 (def0
));
8857 && optab_handler (fms_optab
, mode
) != CODE_FOR_nothing
)
8860 op2
= expand_normal (gimple_assign_rhs1 (def2
));
8864 op0
= expand_expr (treeop0
, subtarget
, VOIDmode
, EXPAND_NORMAL
);
8866 op2
= expand_normal (treeop2
);
8867 op1
= expand_normal (treeop1
);
8869 return expand_ternary_op (TYPE_MODE (type
), opt
,
8870 op0
, op1
, op2
, target
, 0);
8874 /* If this is a fixed-point operation, then we cannot use the code
8875 below because "expand_mult" doesn't support sat/no-sat fixed-point
8877 if (ALL_FIXED_POINT_MODE_P (mode
))
8880 /* If first operand is constant, swap them.
8881 Thus the following special case checks need only
8882 check the second operand. */
8883 if (TREE_CODE (treeop0
) == INTEGER_CST
)
8884 std::swap (treeop0
, treeop1
);
8886 /* Attempt to return something suitable for generating an
8887 indexed address, for machines that support that. */
8889 if (modifier
== EXPAND_SUM
&& mode
== ptr_mode
8890 && tree_fits_shwi_p (treeop1
))
8892 tree exp1
= treeop1
;
8894 op0
= expand_expr (treeop0
, subtarget
, VOIDmode
,
8898 op0
= force_operand (op0
, NULL_RTX
);
8900 op0
= copy_to_mode_reg (mode
, op0
);
8902 return REDUCE_BIT_FIELD (gen_rtx_MULT (mode
, op0
,
8903 gen_int_mode (tree_to_shwi (exp1
),
8904 TYPE_MODE (TREE_TYPE (exp1
)))));
8907 if (modifier
== EXPAND_STACK_PARM
)
8910 expand_operands (treeop0
, treeop1
, subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
8911 return REDUCE_BIT_FIELD (expand_mult (mode
, op0
, op1
, target
, unsignedp
));
8913 case TRUNC_MOD_EXPR
:
8914 case FLOOR_MOD_EXPR
:
8916 case ROUND_MOD_EXPR
:
8918 case TRUNC_DIV_EXPR
:
8919 case FLOOR_DIV_EXPR
:
8921 case ROUND_DIV_EXPR
:
8922 case EXACT_DIV_EXPR
:
8924 /* If this is a fixed-point operation, then we cannot use the code
8925 below because "expand_divmod" doesn't support sat/no-sat fixed-point
8927 if (ALL_FIXED_POINT_MODE_P (mode
))
8930 if (modifier
== EXPAND_STACK_PARM
)
      /* Possible optimization: compute the dividend with EXPAND_SUM,
         then if the divisor is constant we can optimize the case
         where some terms of the dividend have coefficients divisible
         by it.  */
      expand_operands (treeop0, treeop1,
                       subtarget, &op0, &op1, EXPAND_NORMAL);
      bool mod_p = code == TRUNC_MOD_EXPR || code == FLOOR_MOD_EXPR
                   || code == CEIL_MOD_EXPR || code == ROUND_MOD_EXPR;
      if (SCALAR_INT_MODE_P (mode)
          && get_range_pos_neg (treeop0) == 1
          && get_range_pos_neg (treeop1) == 1)
        {
          /* If both arguments are known to be positive when interpreted
             as signed, we can expand it as both signed and unsigned
             division or modulo.  Choose the cheaper sequence in that case.  */
          bool speed_p = optimize_insn_for_speed_p ();
          do_pending_stack_adjust ();
          start_sequence ();
          rtx uns_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 1);
          rtx_insn *uns_insns = get_insns ();
          end_sequence ();
          start_sequence ();
          rtx sgn_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 0);
          rtx_insn *sgn_insns = get_insns ();
          end_sequence ();
          unsigned uns_cost = seq_cost (uns_insns, speed_p);
          unsigned sgn_cost = seq_cost (sgn_insns, speed_p);

          /* If the costs are the same then use the other cost metric
             (seq_cost with !speed_p) as a tie breaker.  */
          if (uns_cost == sgn_cost)
            {
              uns_cost = seq_cost (uns_insns, !speed_p);
              sgn_cost = seq_cost (sgn_insns, !speed_p);
            }

          if (uns_cost < sgn_cost || (uns_cost == sgn_cost && unsignedp))
            {
              emit_insn (uns_insns);
              return uns_ret;
            }
          emit_insn (sgn_insns);
          return sgn_ret;
        }
      return expand_divmod (mod_p, code, mode, op0, op1, target, unsignedp);
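      /* As an example of what the cost comparison above can win: division
         by a constant like 8 is a single logical shift in the unsigned
         sequence, while the signed sequence needs an extra adjustment of
         the dividend to round toward zero.  When both operands are known
         to be non-negative the two sequences compute the same value, so
         the cheaper one (often the unsigned one) is the one emitted.  */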
8981 case MULT_HIGHPART_EXPR
:
8982 expand_operands (treeop0
, treeop1
, subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
8983 temp
= expand_mult_highpart (mode
, op0
, op1
, target
, unsignedp
);
8987 case FIXED_CONVERT_EXPR
:
8988 op0
= expand_normal (treeop0
);
8989 if (target
== 0 || modifier
== EXPAND_STACK_PARM
)
8990 target
= gen_reg_rtx (mode
);
8992 if ((TREE_CODE (TREE_TYPE (treeop0
)) == INTEGER_TYPE
8993 && TYPE_UNSIGNED (TREE_TYPE (treeop0
)))
8994 || (TREE_CODE (type
) == INTEGER_TYPE
&& TYPE_UNSIGNED (type
)))
8995 expand_fixed_convert (target
, op0
, 1, TYPE_SATURATING (type
));
8997 expand_fixed_convert (target
, op0
, 0, TYPE_SATURATING (type
));
9000 case FIX_TRUNC_EXPR
:
9001 op0
= expand_normal (treeop0
);
9002 if (target
== 0 || modifier
== EXPAND_STACK_PARM
)
9003 target
= gen_reg_rtx (mode
);
9004 expand_fix (target
, op0
, unsignedp
);
9008 op0
= expand_normal (treeop0
);
9009 if (target
== 0 || modifier
== EXPAND_STACK_PARM
)
9010 target
= gen_reg_rtx (mode
);
9011 /* expand_float can't figure out what to do if FROM has VOIDmode.
9012 So give it the correct mode. With -O, cse will optimize this. */
9013 if (GET_MODE (op0
) == VOIDmode
)
9014 op0
= copy_to_mode_reg (TYPE_MODE (TREE_TYPE (treeop0
)),
9016 expand_float (target
, op0
,
9017 TYPE_UNSIGNED (TREE_TYPE (treeop0
)));
9021 op0
= expand_expr (treeop0
, subtarget
,
9022 VOIDmode
, EXPAND_NORMAL
);
9023 if (modifier
== EXPAND_STACK_PARM
)
9025 temp
= expand_unop (mode
,
9026 optab_for_tree_code (NEGATE_EXPR
, type
,
9030 return REDUCE_BIT_FIELD (temp
);
9033 op0
= expand_expr (treeop0
, subtarget
,
9034 VOIDmode
, EXPAND_NORMAL
);
9035 if (modifier
== EXPAND_STACK_PARM
)
9038 /* ABS_EXPR is not valid for complex arguments. */
9039 gcc_assert (GET_MODE_CLASS (mode
) != MODE_COMPLEX_INT
9040 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_FLOAT
);
9042 /* Unsigned abs is simply the operand. Testing here means we don't
9043 risk generating incorrect code below. */
9044 if (TYPE_UNSIGNED (type
))
9047 return expand_abs (mode
, op0
, target
, unsignedp
,
9048 safe_from_p (target
, treeop0
, 1));
9052 target
= original_target
;
9054 || modifier
== EXPAND_STACK_PARM
9055 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
9056 || GET_MODE (target
) != mode
9058 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
9059 target
= gen_reg_rtx (mode
);
9060 expand_operands (treeop0
, treeop1
,
9061 target
, &op0
, &op1
, EXPAND_NORMAL
);
9063 /* First try to do it with a special MIN or MAX instruction.
9064 If that does not win, use a conditional jump to select the proper
9066 this_optab
= optab_for_tree_code (code
, type
, optab_default
);
9067 temp
= expand_binop (mode
, this_optab
, op0
, op1
, target
, unsignedp
,
9072 /* For vector MIN <x, y>, expand it a VEC_COND_EXPR <x <= y, x, y>
9073 and similarly for MAX <x, y>. */
9074 if (VECTOR_TYPE_P (type
))
9076 tree t0
= make_tree (type
, op0
);
9077 tree t1
= make_tree (type
, op1
);
9078 tree comparison
= build2 (code
== MIN_EXPR
? LE_EXPR
: GE_EXPR
,
9080 return expand_vec_cond_expr (type
, comparison
, t0
, t1
,
9084 /* At this point, a MEM target is no longer useful; we will get better
9087 if (! REG_P (target
))
9088 target
= gen_reg_rtx (mode
);
9090 /* If op1 was placed in target, swap op0 and op1. */
9091 if (target
!= op0
&& target
== op1
)
9092 std::swap (op0
, op1
);
9094 /* We generate better code and avoid problems with op1 mentioning
9095 target by forcing op1 into a pseudo if it isn't a constant. */
9096 if (! CONSTANT_P (op1
))
9097 op1
= force_reg (mode
, op1
);
9100 enum rtx_code comparison_code
;
9103 if (code
== MAX_EXPR
)
9104 comparison_code
= unsignedp
? GEU
: GE
;
9106 comparison_code
= unsignedp
? LEU
: LE
;
9108 /* Canonicalize to comparisons against 0. */
9109 if (op1
== const1_rtx
)
9111 /* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
9112 or (a != 0 ? a : 1) for unsigned.
9113 For MIN we are safe converting (a <= 1 ? a : 1)
9114 into (a <= 0 ? a : 1) */
9115 cmpop1
= const0_rtx
;
9116 if (code
== MAX_EXPR
)
9117 comparison_code
= unsignedp
? NE
: GT
;
9119 if (op1
== constm1_rtx
&& !unsignedp
)
9121 /* Converting (a >= -1 ? a : -1) into (a >= 0 ? a : -1)
9122 and (a <= -1 ? a : -1) into (a < 0 ? a : -1) */
9123 cmpop1
= const0_rtx
;
9124 if (code
== MIN_EXPR
)
9125 comparison_code
= LT
;
9128 /* Use a conditional move if possible. */
9129 if (can_conditionally_move_p (mode
))
9135 /* Try to emit the conditional move. */
9136 insn
= emit_conditional_move (target
, comparison_code
,
9141 /* If we could do the conditional move, emit the sequence,
9145 rtx_insn
*seq
= get_insns ();
9151 /* Otherwise discard the sequence and fall back to code with
9157 emit_move_insn (target
, op0
);
9159 lab
= gen_label_rtx ();
9160 do_compare_rtx_and_jump (target
, cmpop1
, comparison_code
,
9161 unsignedp
, mode
, NULL_RTX
, NULL
, lab
,
9162 profile_probability::uninitialized ());
9164 emit_move_insn (target
, op1
);
9169 op0
= expand_expr (treeop0
, subtarget
,
9170 VOIDmode
, EXPAND_NORMAL
);
9171 if (modifier
== EXPAND_STACK_PARM
)
9173 /* In case we have to reduce the result to bitfield precision
9174 for unsigned bitfield expand this as XOR with a proper constant
9176 if (reduce_bit_field
&& TYPE_UNSIGNED (type
))
9178 int_mode
= SCALAR_INT_TYPE_MODE (type
);
9179 wide_int mask
= wi::mask (TYPE_PRECISION (type
),
9180 false, GET_MODE_PRECISION (int_mode
));
9182 temp
= expand_binop (int_mode
, xor_optab
, op0
,
9183 immed_wide_int_const (mask
, int_mode
),
9184 target
, 1, OPTAB_LIB_WIDEN
);
9187 temp
= expand_unop (mode
, one_cmpl_optab
, op0
, target
, 1);
9191 /* ??? Can optimize bitwise operations with one arg constant.
9192 Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b)
9193 and (a bitwise1 b) bitwise2 b (etc)
9194 but that is probably not worth while. */
9203 gcc_assert (VECTOR_MODE_P (TYPE_MODE (type
))
9204 || type_has_mode_precision_p (type
));
9210 /* If this is a fixed-point operation, then we cannot use the code
9211 below because "expand_shift" doesn't support sat/no-sat fixed-point
9213 if (ALL_FIXED_POINT_MODE_P (mode
))
9216 if (! safe_from_p (subtarget
, treeop1
, 1))
9218 if (modifier
== EXPAND_STACK_PARM
)
9220 op0
= expand_expr (treeop0
, subtarget
,
9221 VOIDmode
, EXPAND_NORMAL
);
      /* Left shift optimization when shifting across word_size boundary.

         If mode == GET_MODE_WIDER_MODE (word_mode), then normally
         there isn't a native instruction to support this wide mode
         left shift.  Given the scenario below:

          Type A = (Type) B  << C

           | dest_high  |  dest_low |

         If the shift amount C causes part of B to be shifted across the
         word size boundary, i.e. part of B is shifted into the high half
         of the destination register while part of B remains in the low
         half, then GCC uses the following left shift expand logic:

         1. Initialize dest_low to B.
         2. Initialize every bit of dest_high to the sign bit of B.
         3. Logical left shift dest_low by C bits to finalize dest_low.
            The value of dest_low before this shift is kept in a temp D.
         4. Logical left shift dest_high by C.
         5. Logical right shift D by (word_size - C).
         6. Or the results of 4 and 5 to finalize dest_high.

         However, by checking the gimple statements, if operand B is
         coming from a sign extension, then we can simplify the above
         expand logic into:

              1. dest_high = src_low >> (word_size - C).
              2. dest_low = src_low << C.

         A single arithmetic right shift serves the purpose of steps
         2, 4, 5 and 6, so the steps needed drop from 6 to 2.

         The case is similar for zero extension, except that we
         initialize dest_high to zero rather than to copies of the sign
         bit of B.  Furthermore, we need to use a logical right shift
         in this case.

         The choice of sign-extension versus zero-extension is
         determined entirely by whether or not B is signed and is
         independent of the current setting of unsignedp.  */
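      /* A concrete instance of the simplification above, with made-up but
         self-consistent numbers: for a 64-bit word_mode, a 64-bit B
         sign-extended into a 128-bit A, and C == 8, bits 56..63 of B land
         in dest_high.  The simplified expansion is just

             dest_high = src_low >> 56;   (arithmetic right shift)
             dest_low  = src_low << 8;

         matching steps 1 and 2 above and avoiding both the temporary D and
         the final OR of the generic six-step sequence.  */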
9271 if (code
== LSHIFT_EXPR
9274 && GET_MODE_2XWIDER_MODE (word_mode
).exists (&int_mode
)
9276 && TREE_CONSTANT (treeop1
)
9277 && TREE_CODE (treeop0
) == SSA_NAME
)
9279 gimple
*def
= SSA_NAME_DEF_STMT (treeop0
);
9280 if (is_gimple_assign (def
)
9281 && gimple_assign_rhs_code (def
) == NOP_EXPR
)
9283 scalar_int_mode rmode
= SCALAR_INT_TYPE_MODE
9284 (TREE_TYPE (gimple_assign_rhs1 (def
)));
9286 if (GET_MODE_SIZE (rmode
) < GET_MODE_SIZE (int_mode
)
9287 && TREE_INT_CST_LOW (treeop1
) < GET_MODE_BITSIZE (word_mode
)
9288 && ((TREE_INT_CST_LOW (treeop1
) + GET_MODE_BITSIZE (rmode
))
9289 >= GET_MODE_BITSIZE (word_mode
)))
9291 rtx_insn
*seq
, *seq_old
;
9292 poly_uint64 high_off
= subreg_highpart_offset (word_mode
,
9294 bool extend_unsigned
9295 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def
)));
9296 rtx low
= lowpart_subreg (word_mode
, op0
, int_mode
);
9297 rtx dest_low
= lowpart_subreg (word_mode
, target
, int_mode
);
9298 rtx dest_high
= simplify_gen_subreg (word_mode
, target
,
9299 int_mode
, high_off
);
9300 HOST_WIDE_INT ramount
= (BITS_PER_WORD
9301 - TREE_INT_CST_LOW (treeop1
));
9302 tree rshift
= build_int_cst (TREE_TYPE (treeop1
), ramount
);
9305 /* dest_high = src_low >> (word_size - C). */
9306 temp
= expand_variable_shift (RSHIFT_EXPR
, word_mode
, low
,
9309 if (temp
!= dest_high
)
9310 emit_move_insn (dest_high
, temp
);
9312 /* dest_low = src_low << C. */
9313 temp
= expand_variable_shift (LSHIFT_EXPR
, word_mode
, low
,
9314 treeop1
, dest_low
, unsignedp
);
9315 if (temp
!= dest_low
)
9316 emit_move_insn (dest_low
, temp
);
9322 if (have_insn_for (ASHIFT
, int_mode
))
9324 bool speed_p
= optimize_insn_for_speed_p ();
9326 rtx ret_old
= expand_variable_shift (code
, int_mode
,
9331 seq_old
= get_insns ();
9333 if (seq_cost (seq
, speed_p
)
9334 >= seq_cost (seq_old
, speed_p
))
9345 if (temp
== NULL_RTX
)
9346 temp
= expand_variable_shift (code
, mode
, op0
, treeop1
, target
,
9348 if (code
== LSHIFT_EXPR
)
9349 temp
= REDUCE_BIT_FIELD (temp
);
9353 /* Could determine the answer when only additive constants differ. Also,
9354 the addition of one can be handled by changing the condition. */
9361 case UNORDERED_EXPR
:
9370 temp
= do_store_flag (ops
,
9371 modifier
!= EXPAND_STACK_PARM
? target
: NULL_RTX
,
9372 tmode
!= VOIDmode
? tmode
: mode
);
9376 /* Use a compare and a jump for BLKmode comparisons, or for function
9377 type comparisons is have_canonicalize_funcptr_for_compare. */
9380 || modifier
== EXPAND_STACK_PARM
9381 || ! safe_from_p (target
, treeop0
, 1)
9382 || ! safe_from_p (target
, treeop1
, 1)
9383 /* Make sure we don't have a hard reg (such as function's return
9384 value) live across basic blocks, if not optimizing. */
9385 || (!optimize
&& REG_P (target
)
9386 && REGNO (target
) < FIRST_PSEUDO_REGISTER
)))
9387 target
= gen_reg_rtx (tmode
!= VOIDmode
? tmode
: mode
);
9389 emit_move_insn (target
, const0_rtx
);
9391 rtx_code_label
*lab1
= gen_label_rtx ();
9392 jumpifnot_1 (code
, treeop0
, treeop1
, lab1
,
9393 profile_probability::uninitialized ());
9395 if (TYPE_PRECISION (type
) == 1 && !TYPE_UNSIGNED (type
))
9396 emit_move_insn (target
, constm1_rtx
);
9398 emit_move_insn (target
, const1_rtx
);
9404 /* Get the rtx code of the operands. */
9405 op0
= expand_normal (treeop0
);
9406 op1
= expand_normal (treeop1
);
9409 target
= gen_reg_rtx (TYPE_MODE (type
));
9411 /* If target overlaps with op1, then either we need to force
9412 op1 into a pseudo (if target also overlaps with op0),
9413 or write the complex parts in reverse order. */
9414 switch (GET_CODE (target
))
9417 if (reg_overlap_mentioned_p (XEXP (target
, 0), op1
))
9419 if (reg_overlap_mentioned_p (XEXP (target
, 1), op0
))
9421 complex_expr_force_op1
:
9422 temp
= gen_reg_rtx (GET_MODE_INNER (GET_MODE (target
)));
9423 emit_move_insn (temp
, op1
);
9427 complex_expr_swap_order
:
9428 /* Move the imaginary (op1) and real (op0) parts to their
9430 write_complex_part (target
, op1
, true);
9431 write_complex_part (target
, op0
, false);
9437 temp
= adjust_address_nv (target
,
9438 GET_MODE_INNER (GET_MODE (target
)), 0);
9439 if (reg_overlap_mentioned_p (temp
, op1
))
9441 scalar_mode imode
= GET_MODE_INNER (GET_MODE (target
));
9442 temp
= adjust_address_nv (target
, imode
,
9443 GET_MODE_SIZE (imode
));
9444 if (reg_overlap_mentioned_p (temp
, op0
))
9445 goto complex_expr_force_op1
;
9446 goto complex_expr_swap_order
;
9450 if (reg_overlap_mentioned_p (target
, op1
))
9452 if (reg_overlap_mentioned_p (target
, op0
))
9453 goto complex_expr_force_op1
;
9454 goto complex_expr_swap_order
;
9459 /* Move the real (op0) and imaginary (op1) parts to their location. */
9460 write_complex_part (target
, op0
, false);
9461 write_complex_part (target
, op1
, true);
9465 case WIDEN_SUM_EXPR
:
9467 tree oprnd0
= treeop0
;
9468 tree oprnd1
= treeop1
;
9470 expand_operands (oprnd0
, oprnd1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
9471 target
= expand_widen_pattern_expr (ops
, op0
, NULL_RTX
, op1
,
9476 case VEC_UNPACK_HI_EXPR
:
9477 case VEC_UNPACK_LO_EXPR
:
9479 op0
= expand_normal (treeop0
);
9480 temp
= expand_widen_pattern_expr (ops
, op0
, NULL_RTX
, NULL_RTX
,
9486 case VEC_UNPACK_FLOAT_HI_EXPR
:
9487 case VEC_UNPACK_FLOAT_LO_EXPR
:
9489 op0
= expand_normal (treeop0
);
9490 /* The signedness is determined from input operand. */
9491 temp
= expand_widen_pattern_expr
9492 (ops
, op0
, NULL_RTX
, NULL_RTX
,
9493 target
, TYPE_UNSIGNED (TREE_TYPE (treeop0
)));
9499 case VEC_WIDEN_MULT_HI_EXPR
:
9500 case VEC_WIDEN_MULT_LO_EXPR
:
9501 case VEC_WIDEN_MULT_EVEN_EXPR
:
9502 case VEC_WIDEN_MULT_ODD_EXPR
:
9503 case VEC_WIDEN_LSHIFT_HI_EXPR
:
9504 case VEC_WIDEN_LSHIFT_LO_EXPR
:
9505 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
9506 target
= expand_widen_pattern_expr (ops
, op0
, op1
, NULL_RTX
,
9508 gcc_assert (target
);
9511 case VEC_PACK_TRUNC_EXPR
:
9512 case VEC_PACK_SAT_EXPR
:
9513 case VEC_PACK_FIX_TRUNC_EXPR
:
9514 mode
= TYPE_MODE (TREE_TYPE (treeop0
));
9519 expand_operands (treeop0
, treeop1
, target
, &op0
, &op1
, EXPAND_NORMAL
);
9520 vec_perm_builder sel
;
9521 if (TREE_CODE (treeop2
) == VECTOR_CST
9522 && tree_to_vec_perm_builder (&sel
, treeop2
))
9524 machine_mode sel_mode
= TYPE_MODE (TREE_TYPE (treeop2
));
9525 temp
= expand_vec_perm_const (mode
, op0
, op1
, sel
,
9530 op2
= expand_normal (treeop2
);
9531 temp
= expand_vec_perm_var (mode
, op0
, op1
, op2
, target
);
9539 tree oprnd0
= treeop0
;
9540 tree oprnd1
= treeop1
;
9541 tree oprnd2
= treeop2
;
9544 expand_operands (oprnd0
, oprnd1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
9545 op2
= expand_normal (oprnd2
);
9546 target
= expand_widen_pattern_expr (ops
, op0
, op1
, op2
,
9553 tree oprnd0
= treeop0
;
9554 tree oprnd1
= treeop1
;
9555 tree oprnd2
= treeop2
;
9558 expand_operands (oprnd0
, oprnd1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
9559 op2
= expand_normal (oprnd2
);
9560 target
= expand_widen_pattern_expr (ops
, op0
, op1
, op2
,
9565 case REALIGN_LOAD_EXPR
:
9567 tree oprnd0
= treeop0
;
9568 tree oprnd1
= treeop1
;
9569 tree oprnd2
= treeop2
;
9572 this_optab
= optab_for_tree_code (code
, type
, optab_default
);
9573 expand_operands (oprnd0
, oprnd1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
9574 op2
= expand_normal (oprnd2
);
9575 temp
= expand_ternary_op (mode
, this_optab
, op0
, op1
, op2
,
9583 /* A COND_EXPR with its type being VOID_TYPE represents a
9584 conditional jump and is handled in
9585 expand_gimple_cond_expr. */
9586 gcc_assert (!VOID_TYPE_P (type
));
9588 /* Note that COND_EXPRs whose type is a structure or union
9589 are required to be constructed to contain assignments of
9590 a temporary variable, so that we can evaluate them here
9591 for side effect only. If type is void, we must do likewise. */
9593 gcc_assert (!TREE_ADDRESSABLE (type
)
9595 && TREE_TYPE (treeop1
) != void_type_node
9596 && TREE_TYPE (treeop2
) != void_type_node
);
9598 temp
= expand_cond_expr_using_cmove (treeop0
, treeop1
, treeop2
);
9602 /* If we are not to produce a result, we have no target. Otherwise,
9603 if a target was specified use it; it will not be used as an
9604 intermediate target unless it is safe. If no target, use a
9607 if (modifier
!= EXPAND_STACK_PARM
9609 && safe_from_p (original_target
, treeop0
, 1)
9610 && GET_MODE (original_target
) == mode
9611 && !MEM_P (original_target
))
9612 temp
= original_target
;
9614 temp
= assign_temp (type
, 0, 1);
9616 do_pending_stack_adjust ();
9618 rtx_code_label
*lab0
= gen_label_rtx ();
9619 rtx_code_label
*lab1
= gen_label_rtx ();
9620 jumpifnot (treeop0
, lab0
,
9621 profile_probability::uninitialized ());
9622 store_expr (treeop1
, temp
,
9623 modifier
== EXPAND_STACK_PARM
,
9626 emit_jump_insn (targetm
.gen_jump (lab1
));
9629 store_expr (treeop2
, temp
,
9630 modifier
== EXPAND_STACK_PARM
,
9639 target
= expand_vec_cond_expr (type
, treeop0
, treeop1
, treeop2
, target
);
9642 case VEC_DUPLICATE_EXPR
:
9643 op0
= expand_expr (treeop0
, NULL_RTX
, VOIDmode
, modifier
);
9644 target
= expand_vector_broadcast (mode
, op0
);
9645 gcc_assert (target
);
9648 case VEC_SERIES_EXPR
:
9649 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
, modifier
);
9650 return expand_vec_series_expr (mode
, op0
, op1
, target
);
9652 case BIT_INSERT_EXPR
:
9654 unsigned bitpos
= tree_to_uhwi (treeop2
);
9656 if (INTEGRAL_TYPE_P (TREE_TYPE (treeop1
)))
9657 bitsize
= TYPE_PRECISION (TREE_TYPE (treeop1
));
9659 bitsize
= tree_to_uhwi (TYPE_SIZE (TREE_TYPE (treeop1
)));
9660 rtx op0
= expand_normal (treeop0
);
9661 rtx op1
= expand_normal (treeop1
);
9662 rtx dst
= gen_reg_rtx (mode
);
9663 emit_move_insn (dst
, op0
);
9664 store_bit_field (dst
, bitsize
, bitpos
, 0, 0,
9665 TYPE_MODE (TREE_TYPE (treeop1
)), op1
, false);
9673 /* Here to do an ordinary binary operator. */
9675 expand_operands (treeop0
, treeop1
,
9676 subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
9678 this_optab
= optab_for_tree_code (code
, type
, optab_default
);
9680 if (modifier
== EXPAND_STACK_PARM
)
9682 temp
= expand_binop (mode
, this_optab
, op0
, op1
, target
,
9683 unsignedp
, OPTAB_LIB_WIDEN
);
9685 /* Bitwise operations do not need bitfield reduction as we expect their
9686 operands being properly truncated. */
9687 if (code
== BIT_XOR_EXPR
9688 || code
== BIT_AND_EXPR
9689 || code
== BIT_IOR_EXPR
)
9691 return REDUCE_BIT_FIELD (temp
);
9693 #undef REDUCE_BIT_FIELD
/* Return TRUE if expression STMT is suitable for replacement.
   Never consider memory loads as replaceable, because those don't ever lead
   into constant expressions.  */

static bool
stmt_is_replaceable_p (gimple *stmt)
{
  if (ssa_is_replaceable_p (stmt))
    {
      /* Don't move around loads.  */
      if (!gimple_assign_single_p (stmt)
          || is_gimple_val (gimple_assign_rhs1 (stmt)))
        return true;
    }

  return false;
}
9714 expand_expr_real_1 (tree exp
, rtx target
, machine_mode tmode
,
9715 enum expand_modifier modifier
, rtx
*alt_rtl
,
9716 bool inner_reference_p
)
9718 rtx op0
, op1
, temp
, decl_rtl
;
9721 machine_mode mode
, dmode
;
9722 enum tree_code code
= TREE_CODE (exp
);
9723 rtx subtarget
, original_target
;
9726 bool reduce_bit_field
;
9727 location_t loc
= EXPR_LOCATION (exp
);
9728 struct separate_ops ops
;
9729 tree treeop0
, treeop1
, treeop2
;
9730 tree ssa_name
= NULL_TREE
;
9733 type
= TREE_TYPE (exp
);
9734 mode
= TYPE_MODE (type
);
9735 unsignedp
= TYPE_UNSIGNED (type
);
9737 treeop0
= treeop1
= treeop2
= NULL_TREE
;
9738 if (!VL_EXP_CLASS_P (exp
))
9739 switch (TREE_CODE_LENGTH (code
))
9742 case 3: treeop2
= TREE_OPERAND (exp
, 2); /* FALLTHRU */
9743 case 2: treeop1
= TREE_OPERAND (exp
, 1); /* FALLTHRU */
9744 case 1: treeop0
= TREE_OPERAND (exp
, 0); /* FALLTHRU */
9754 ignore
= (target
== const0_rtx
9755 || ((CONVERT_EXPR_CODE_P (code
)
9756 || code
== COND_EXPR
|| code
== VIEW_CONVERT_EXPR
)
9757 && TREE_CODE (type
) == VOID_TYPE
));
9759 /* An operation in what may be a bit-field type needs the
9760 result to be reduced to the precision of the bit-field type,
9761 which is narrower than that of the type's mode. */
9762 reduce_bit_field
= (!ignore
9763 && INTEGRAL_TYPE_P (type
)
9764 && !type_has_mode_precision_p (type
));
9766 /* If we are going to ignore this result, we need only do something
9767 if there is a side-effect somewhere in the expression. If there
9768 is, short-circuit the most common cases here. Note that we must
9769 not call expand_expr with anything but const0_rtx in case this
9770 is an initial expansion of a size that contains a PLACEHOLDER_EXPR. */
9774 if (! TREE_SIDE_EFFECTS (exp
))
9777 /* Ensure we reference a volatile object even if value is ignored, but
9778 don't do this if all we are doing is taking its address. */
9779 if (TREE_THIS_VOLATILE (exp
)
9780 && TREE_CODE (exp
) != FUNCTION_DECL
9781 && mode
!= VOIDmode
&& mode
!= BLKmode
9782 && modifier
!= EXPAND_CONST_ADDRESS
)
9784 temp
= expand_expr (exp
, NULL_RTX
, VOIDmode
, modifier
);
9790 if (TREE_CODE_CLASS (code
) == tcc_unary
9791 || code
== BIT_FIELD_REF
9792 || code
== COMPONENT_REF
9793 || code
== INDIRECT_REF
)
9794 return expand_expr (treeop0
, const0_rtx
, VOIDmode
,
9797 else if (TREE_CODE_CLASS (code
) == tcc_binary
9798 || TREE_CODE_CLASS (code
) == tcc_comparison
9799 || code
== ARRAY_REF
|| code
== ARRAY_RANGE_REF
)
9801 expand_expr (treeop0
, const0_rtx
, VOIDmode
, modifier
);
9802 expand_expr (treeop1
, const0_rtx
, VOIDmode
, modifier
);
9809 if (reduce_bit_field
&& modifier
== EXPAND_STACK_PARM
)
9812 /* Use subtarget as the target for operand 0 of a binary operation. */
9813 subtarget
= get_subtarget (target
);
9814 original_target
= target
;
9820 tree function
= decl_function_context (exp
);
9822 temp
= label_rtx (exp
);
9823 temp
= gen_rtx_LABEL_REF (Pmode
, temp
);
9825 if (function
!= current_function_decl
9827 LABEL_REF_NONLOCAL_P (temp
) = 1;
9829 temp
= gen_rtx_MEM (FUNCTION_MODE
, temp
);
9834 /* ??? ivopts calls expander, without any preparation from
9835 out-of-ssa. So fake instructions as if this was an access to the
9836 base variable. This unnecessarily allocates a pseudo, see how we can
9837 reuse it, if partition base vars have it set already. */
9838 if (!currently_expanding_to_rtl
)
9840 tree var
= SSA_NAME_VAR (exp
);
9841 if (var
&& DECL_RTL_SET_P (var
))
9842 return DECL_RTL (var
);
9843 return gen_raw_REG (TYPE_MODE (TREE_TYPE (exp
)),
9844 LAST_VIRTUAL_REGISTER
+ 1);
9847 g
= get_gimple_for_ssa_name (exp
);
9848 /* For EXPAND_INITIALIZER try harder to get something simpler. */
9850 && modifier
== EXPAND_INITIALIZER
9851 && !SSA_NAME_IS_DEFAULT_DEF (exp
)
9852 && (optimize
|| !SSA_NAME_VAR (exp
)
9853 || DECL_IGNORED_P (SSA_NAME_VAR (exp
)))
9854 && stmt_is_replaceable_p (SSA_NAME_DEF_STMT (exp
)))
9855 g
= SSA_NAME_DEF_STMT (exp
);
9859 location_t saved_loc
= curr_insn_location ();
9860 location_t loc
= gimple_location (g
);
9861 if (loc
!= UNKNOWN_LOCATION
)
9862 set_curr_insn_location (loc
);
9863 ops
.code
= gimple_assign_rhs_code (g
);
9864 switch (get_gimple_rhs_class (ops
.code
))
9866 case GIMPLE_TERNARY_RHS
:
9867 ops
.op2
= gimple_assign_rhs3 (g
);
9869 case GIMPLE_BINARY_RHS
:
9870 ops
.op1
= gimple_assign_rhs2 (g
);
9872 /* Try to expand conditonal compare. */
9873 if (targetm
.gen_ccmp_first
)
9875 gcc_checking_assert (targetm
.gen_ccmp_next
!= NULL
);
9876 r
= expand_ccmp_expr (g
, mode
);
9881 case GIMPLE_UNARY_RHS
:
9882 ops
.op0
= gimple_assign_rhs1 (g
);
9883 ops
.type
= TREE_TYPE (gimple_assign_lhs (g
));
9885 r
= expand_expr_real_2 (&ops
, target
, tmode
, modifier
);
9887 case GIMPLE_SINGLE_RHS
:
9889 r
= expand_expr_real (gimple_assign_rhs1 (g
), target
,
9890 tmode
, modifier
, alt_rtl
,
9897 set_curr_insn_location (saved_loc
);
9898 if (REG_P (r
) && !REG_EXPR (r
))
9899 set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (exp
), r
);
9904 decl_rtl
= get_rtx_for_ssa_name (ssa_name
);
9905 exp
= SSA_NAME_VAR (ssa_name
);
9906 goto expand_decl_rtl
;
9910 /* If a static var's type was incomplete when the decl was written,
9911 but the type is complete now, lay out the decl now. */
9912 if (DECL_SIZE (exp
) == 0
9913 && COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (TREE_TYPE (exp
))
9914 && (TREE_STATIC (exp
) || DECL_EXTERNAL (exp
)))
9915 layout_decl (exp
, 0);
9921 decl_rtl
= DECL_RTL (exp
);
9923 gcc_assert (decl_rtl
);
9925 /* DECL_MODE might change when TYPE_MODE depends on attribute target
9926 settings for VECTOR_TYPE_P that might switch for the function. */
9927 if (currently_expanding_to_rtl
9928 && code
== VAR_DECL
&& MEM_P (decl_rtl
)
9929 && VECTOR_TYPE_P (type
) && exp
&& DECL_MODE (exp
) != mode
)
9930 decl_rtl
= change_address (decl_rtl
, TYPE_MODE (type
), 0);
9932 decl_rtl
= copy_rtx (decl_rtl
);
9934 /* Record writes to register variables. */
9935 if (modifier
== EXPAND_WRITE
9937 && HARD_REGISTER_P (decl_rtl
))
9938 add_to_hard_reg_set (&crtl
->asm_clobbers
,
9939 GET_MODE (decl_rtl
), REGNO (decl_rtl
));
9941 /* Ensure variable marked as used even if it doesn't go through
9942 a parser. If it hasn't be used yet, write out an external
9945 TREE_USED (exp
) = 1;
9947 /* Show we haven't gotten RTL for this yet. */
9950 /* Variables inherited from containing functions should have
9951 been lowered by this point. */
9953 context
= decl_function_context (exp
);
9955 || SCOPE_FILE_SCOPE_P (context
)
9956 || context
== current_function_decl
9957 || TREE_STATIC (exp
)
9958 || DECL_EXTERNAL (exp
)
9959 /* ??? C++ creates functions that are not TREE_STATIC. */
9960 || TREE_CODE (exp
) == FUNCTION_DECL
);
9962 /* This is the case of an array whose size is to be determined
9963 from its initializer, while the initializer is still being parsed.
9964 ??? We aren't parsing while expanding anymore. */
9966 if (MEM_P (decl_rtl
) && REG_P (XEXP (decl_rtl
, 0)))
9967 temp
= validize_mem (decl_rtl
);
9969 /* If DECL_RTL is memory, we are in the normal case and the
9970 address is not valid, get the address into a register. */
9972 else if (MEM_P (decl_rtl
) && modifier
!= EXPAND_INITIALIZER
)
9975 *alt_rtl
= decl_rtl
;
9976 decl_rtl
= use_anchored_address (decl_rtl
);
9977 if (modifier
!= EXPAND_CONST_ADDRESS
9978 && modifier
!= EXPAND_SUM
9979 && !memory_address_addr_space_p (exp
? DECL_MODE (exp
)
9980 : GET_MODE (decl_rtl
),
9982 MEM_ADDR_SPACE (decl_rtl
)))
9983 temp
= replace_equiv_address (decl_rtl
,
9984 copy_rtx (XEXP (decl_rtl
, 0)));
9987 /* If we got something, return it. But first, set the alignment
9988 if the address is a register. */
9991 if (exp
&& MEM_P (temp
) && REG_P (XEXP (temp
, 0)))
9992 mark_reg_pointer (XEXP (temp
, 0), DECL_ALIGN (exp
));
9998 dmode
= DECL_MODE (exp
);
10000 dmode
= TYPE_MODE (TREE_TYPE (ssa_name
));
10002 /* If the mode of DECL_RTL does not match that of the decl,
10003 there are two cases: we are dealing with a BLKmode value
10004 that is returned in a register, or we are dealing with
10005 a promoted value. In the latter case, return a SUBREG
10006 of the wanted mode, but mark it so that we know that it
10007 was already extended. */
10008 if (REG_P (decl_rtl
)
10009 && dmode
!= BLKmode
10010 && GET_MODE (decl_rtl
) != dmode
)
10012 machine_mode pmode
;
10014 /* Get the signedness to be used for this variable. Ensure we get
10015 the same mode we got when the variable was declared. */
10016 if (code
!= SSA_NAME
)
10017 pmode
= promote_decl_mode (exp
, &unsignedp
);
10018 else if ((g
= SSA_NAME_DEF_STMT (ssa_name
))
10019 && gimple_code (g
) == GIMPLE_CALL
10020 && !gimple_call_internal_p (g
))
10021 pmode
= promote_function_mode (type
, mode
, &unsignedp
,
10022 gimple_call_fntype (g
),
10025 pmode
= promote_ssa_mode (ssa_name
, &unsignedp
);
10026 gcc_assert (GET_MODE (decl_rtl
) == pmode
);
10028 temp
= gen_lowpart_SUBREG (mode
, decl_rtl
);
10029 SUBREG_PROMOTED_VAR_P (temp
) = 1;
10030 SUBREG_PROMOTED_SET (temp
, unsignedp
);
      /* Given that TYPE_PRECISION (type) is not always equal to
	 GET_MODE_PRECISION (TYPE_MODE (type)), we need to extend from
	 the former to the latter according to the signedness of the
	 type.  */
10042 scalar_int_mode mode
= SCALAR_INT_TYPE_MODE (type
);
10043 temp
= immed_wide_int_const
10044 (wi::to_wide (exp
, GET_MODE_PRECISION (mode
)), mode
);
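      /* Worked example of the widening above: a boolean constant 1 whose
	 type has TYPE_PRECISION 1 but TYPE_MODE QImode (8-bit precision on
	 most targets) is extended to the full 8 bits according to the
	 type's signedness before the CONST_INT is created.  Illustration
	 only; the exact modes are target-dependent.  */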
10050 tree tmp
= NULL_TREE
;
10051 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
10052 || GET_MODE_CLASS (mode
) == MODE_VECTOR_FLOAT
10053 || GET_MODE_CLASS (mode
) == MODE_VECTOR_FRACT
10054 || GET_MODE_CLASS (mode
) == MODE_VECTOR_UFRACT
10055 || GET_MODE_CLASS (mode
) == MODE_VECTOR_ACCUM
10056 || GET_MODE_CLASS (mode
) == MODE_VECTOR_UACCUM
)
10057 return const_vector_from_tree (exp
);
10058 scalar_int_mode int_mode
;
10059 if (is_int_mode (mode
, &int_mode
))
10061 if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp
)))
10062 return const_scalar_mask_from_tree (int_mode
, exp
);
10066 = lang_hooks
.types
.type_for_mode (int_mode
, 1);
10068 tmp
= fold_unary_loc (loc
, VIEW_CONVERT_EXPR
,
10069 type_for_mode
, exp
);
10074 vec
<constructor_elt
, va_gc
> *v
;
10076 vec_alloc (v
, VECTOR_CST_NELTS (exp
));
10077 for (i
= 0; i
< VECTOR_CST_NELTS (exp
); ++i
)
10078 CONSTRUCTOR_APPEND_ELT (v
, NULL_TREE
, VECTOR_CST_ELT (exp
, i
));
10079 tmp
= build_constructor (type
, v
);
10081 return expand_expr (tmp
, ignore
? const0_rtx
: target
,
10086 if (modifier
== EXPAND_WRITE
)
10088 /* Writing into CONST_DECL is always invalid, but handle it
10090 addr_space_t as
= TYPE_ADDR_SPACE (TREE_TYPE (exp
));
10091 scalar_int_mode address_mode
= targetm
.addr_space
.address_mode (as
);
10092 op0
= expand_expr_addr_expr_1 (exp
, NULL_RTX
, address_mode
,
10093 EXPAND_NORMAL
, as
);
10094 op0
= memory_address_addr_space (mode
, op0
, as
);
10095 temp
= gen_rtx_MEM (mode
, op0
);
10096 set_mem_addr_space (temp
, as
);
10099 return expand_expr (DECL_INITIAL (exp
), target
, VOIDmode
, modifier
);
10102 /* If optimized, generate immediate CONST_DOUBLE
10103 which will be turned into memory by reload if necessary.
10105 We used to force a register so that loop.c could see it. But
10106 this does not allow gen_* patterns to perform optimizations with
10107 the constants. It also produces two insns in cases like "x = 1.0;".
10108 On most machines, floating-point constants are not permitted in
10109 many insns, so we'd end up copying it to a register in any case.
10111 Now, we do the copying in expand_binop, if appropriate. */
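      /* For example, "x = 1.0;" becomes a single move of
	 (const_double:DF 1.0) into x's register at this point; any copy
	 through a register or the constant pool that the target may need
	 is left to expand_binop and later passes (illustrative note).  */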
10112 return const_double_from_real_value (TREE_REAL_CST (exp
),
10113 TYPE_MODE (TREE_TYPE (exp
)));
10116 return CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (exp
),
10117 TYPE_MODE (TREE_TYPE (exp
)));
10120 /* Handle evaluating a complex constant in a CONCAT target. */
10121 if (original_target
&& GET_CODE (original_target
) == CONCAT
)
10123 machine_mode mode
= TYPE_MODE (TREE_TYPE (TREE_TYPE (exp
)));
10126 rtarg
= XEXP (original_target
, 0);
10127 itarg
= XEXP (original_target
, 1);
10129 /* Move the real and imaginary parts separately. */
10130 op0
= expand_expr (TREE_REALPART (exp
), rtarg
, mode
, EXPAND_NORMAL
);
10131 op1
= expand_expr (TREE_IMAGPART (exp
), itarg
, mode
, EXPAND_NORMAL
);
10134 emit_move_insn (rtarg
, op0
);
10136 emit_move_insn (itarg
, op1
);
10138 return original_target
;
10144 temp
= expand_expr_constant (exp
, 1, modifier
);
10146 /* temp contains a constant address.
10147 On RISC machines where a constant address isn't valid,
10148 make some insns to get that address into a register. */
10149 if (modifier
!= EXPAND_CONST_ADDRESS
10150 && modifier
!= EXPAND_INITIALIZER
10151 && modifier
!= EXPAND_SUM
10152 && ! memory_address_addr_space_p (mode
, XEXP (temp
, 0),
10153 MEM_ADDR_SPACE (temp
)))
10154 return replace_equiv_address (temp
,
10155 copy_rtx (XEXP (temp
, 0)));
10159 return immed_wide_int_const (poly_int_cst_value (exp
), mode
);
10163 tree val
= treeop0
;
10164 rtx ret
= expand_expr_real_1 (val
, target
, tmode
, modifier
, alt_rtl
,
10165 inner_reference_p
);
10167 if (!SAVE_EXPR_RESOLVED_P (exp
))
10169 /* We can indeed still hit this case, typically via builtin
10170 expanders calling save_expr immediately before expanding
10171 something. Assume this means that we only have to deal
10172 with non-BLKmode values. */
10173 gcc_assert (GET_MODE (ret
) != BLKmode
);
10175 val
= build_decl (curr_insn_location (),
10176 VAR_DECL
, NULL
, TREE_TYPE (exp
));
10177 DECL_ARTIFICIAL (val
) = 1;
10178 DECL_IGNORED_P (val
) = 1;
10180 TREE_OPERAND (exp
, 0) = treeop0
;
10181 SAVE_EXPR_RESOLVED_P (exp
) = 1;
10183 if (!CONSTANT_P (ret
))
10184 ret
= copy_to_reg (ret
);
10185 SET_DECL_RTL (val
, ret
);
10193 /* If we don't need the result, just ensure we evaluate any
10197 unsigned HOST_WIDE_INT idx
;
10200 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp
), idx
, value
)
10201 expand_expr (value
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
10206 return expand_constructor (exp
, target
, modifier
, false);
10208 case TARGET_MEM_REF
:
10211 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp
, 0))));
10212 enum insn_code icode
;
10213 unsigned int align
;
10215 op0
= addr_for_mem_ref (exp
, as
, true);
10216 op0
= memory_address_addr_space (mode
, op0
, as
);
10217 temp
= gen_rtx_MEM (mode
, op0
);
10218 set_mem_attributes (temp
, exp
, 0);
10219 set_mem_addr_space (temp
, as
);
10220 align
= get_object_alignment (exp
);
10221 if (modifier
!= EXPAND_WRITE
10222 && modifier
!= EXPAND_MEMORY
10224 && align
< GET_MODE_ALIGNMENT (mode
)
10225 /* If the target does not have special handling for unaligned
10226 loads of mode then it can use regular moves for them. */
10227 && ((icode
= optab_handler (movmisalign_optab
, mode
))
10228 != CODE_FOR_nothing
))
10230 struct expand_operand ops
[2];
10232 /* We've already validated the memory, and we're creating a
10233 new pseudo destination. The predicates really can't fail,
10234 nor can the generator. */
10235 create_output_operand (&ops
[0], NULL_RTX
, mode
);
10236 create_fixed_operand (&ops
[1], temp
);
10237 expand_insn (icode
, 2, ops
);
10238 temp
= ops
[0].value
;
10245 const bool reverse
= REF_REVERSE_STORAGE_ORDER (exp
);
10247 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp
, 0))));
10248 machine_mode address_mode
;
10249 tree base
= TREE_OPERAND (exp
, 0);
10251 enum insn_code icode
;
10253 /* Handle expansion of non-aliased memory with non-BLKmode. That
10254 might end up in a register. */
10255 if (mem_ref_refers_to_non_mem_p (exp
))
10257 poly_int64 offset
= mem_ref_offset (exp
).force_shwi ();
10258 base
= TREE_OPERAND (base
, 0);
10259 if (known_eq (offset
, 0)
10261 && tree_fits_uhwi_p (TYPE_SIZE (type
))
10262 && (GET_MODE_BITSIZE (DECL_MODE (base
))
10263 == tree_to_uhwi (TYPE_SIZE (type
))))
10264 return expand_expr (build1 (VIEW_CONVERT_EXPR
, type
, base
),
10265 target
, tmode
, modifier
);
10266 if (TYPE_MODE (type
) == BLKmode
)
10268 temp
= assign_stack_temp (DECL_MODE (base
),
10269 GET_MODE_SIZE (DECL_MODE (base
)));
10270 store_expr (base
, temp
, 0, false, false);
10271 temp
= adjust_address (temp
, BLKmode
, offset
);
10272 set_mem_size (temp
, int_size_in_bytes (type
));
10275 exp
= build3 (BIT_FIELD_REF
, type
, base
, TYPE_SIZE (type
),
10276 bitsize_int (offset
* BITS_PER_UNIT
));
10277 REF_REVERSE_STORAGE_ORDER (exp
) = reverse
;
10278 return expand_expr (exp
, target
, tmode
, modifier
);
10280 address_mode
= targetm
.addr_space
.address_mode (as
);
10281 base
= TREE_OPERAND (exp
, 0);
10282 if ((def_stmt
= get_def_for_expr (base
, BIT_AND_EXPR
)))
10284 tree mask
= gimple_assign_rhs2 (def_stmt
);
10285 base
= build2 (BIT_AND_EXPR
, TREE_TYPE (base
),
10286 gimple_assign_rhs1 (def_stmt
), mask
);
10287 TREE_OPERAND (exp
, 0) = base
;
10289 align
= get_object_alignment (exp
);
10290 op0
= expand_expr (base
, NULL_RTX
, VOIDmode
, EXPAND_SUM
);
10291 op0
= memory_address_addr_space (mode
, op0
, as
);
10292 if (!integer_zerop (TREE_OPERAND (exp
, 1)))
10294 rtx off
= immed_wide_int_const (mem_ref_offset (exp
), address_mode
);
10295 op0
= simplify_gen_binary (PLUS
, address_mode
, op0
, off
);
10296 op0
= memory_address_addr_space (mode
, op0
, as
);
10298 temp
= gen_rtx_MEM (mode
, op0
);
10299 set_mem_attributes (temp
, exp
, 0);
10300 set_mem_addr_space (temp
, as
);
10301 if (TREE_THIS_VOLATILE (exp
))
10302 MEM_VOLATILE_P (temp
) = 1;
10303 if (modifier
!= EXPAND_WRITE
10304 && modifier
!= EXPAND_MEMORY
10305 && !inner_reference_p
10307 && align
< GET_MODE_ALIGNMENT (mode
))
10309 if ((icode
= optab_handler (movmisalign_optab
, mode
))
10310 != CODE_FOR_nothing
)
10312 struct expand_operand ops
[2];
10314 /* We've already validated the memory, and we're creating a
10315 new pseudo destination. The predicates really can't fail,
10316 nor can the generator. */
10317 create_output_operand (&ops
[0], NULL_RTX
, mode
);
10318 create_fixed_operand (&ops
[1], temp
);
10319 expand_insn (icode
, 2, ops
);
10320 temp
= ops
[0].value
;
10322 else if (targetm
.slow_unaligned_access (mode
, align
))
10323 temp
= extract_bit_field (temp
, GET_MODE_BITSIZE (mode
),
10324 0, TYPE_UNSIGNED (TREE_TYPE (exp
)),
10325 (modifier
== EXPAND_STACK_PARM
10326 ? NULL_RTX
: target
),
10327 mode
, mode
, false, alt_rtl
);
10330 && modifier
!= EXPAND_MEMORY
10331 && modifier
!= EXPAND_WRITE
)
10332 temp
= flip_storage_order (mode
, temp
);
10339 tree array
= treeop0
;
10340 tree index
= treeop1
;
10343 /* Fold an expression like: "foo"[2].
10344 This is not done in fold so it won't happen inside &.
10345 Don't fold if this is for wide characters since it's too
10346 difficult to do correctly and this is a very rare case. */
10348 if (modifier
!= EXPAND_CONST_ADDRESS
10349 && modifier
!= EXPAND_INITIALIZER
10350 && modifier
!= EXPAND_MEMORY
)
10352 tree t
= fold_read_from_constant_string (exp
);
10355 return expand_expr (t
, target
, tmode
, modifier
);
10358 /* If this is a constant index into a constant array,
10359 just get the value from the array. Handle both the cases when
10360 we have an explicit constructor and when our operand is a variable
10361 that was declared const. */
10363 if (modifier
!= EXPAND_CONST_ADDRESS
10364 && modifier
!= EXPAND_INITIALIZER
10365 && modifier
!= EXPAND_MEMORY
10366 && TREE_CODE (array
) == CONSTRUCTOR
10367 && ! TREE_SIDE_EFFECTS (array
)
10368 && TREE_CODE (index
) == INTEGER_CST
)
10370 unsigned HOST_WIDE_INT ix
;
10373 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (array
), ix
,
10375 if (tree_int_cst_equal (field
, index
))
10377 if (!TREE_SIDE_EFFECTS (value
))
10378 return expand_expr (fold (value
), target
, tmode
, modifier
);
10383 else if (optimize
>= 1
10384 && modifier
!= EXPAND_CONST_ADDRESS
10385 && modifier
!= EXPAND_INITIALIZER
10386 && modifier
!= EXPAND_MEMORY
10387 && TREE_READONLY (array
) && ! TREE_SIDE_EFFECTS (array
)
10388 && TREE_CODE (index
) == INTEGER_CST
10389 && (VAR_P (array
) || TREE_CODE (array
) == CONST_DECL
)
10390 && (init
= ctor_for_folding (array
)) != error_mark_node
)
10392 if (init
== NULL_TREE
)
10394 tree value
= build_zero_cst (type
);
10395 if (TREE_CODE (value
) == CONSTRUCTOR
)
10397 /* If VALUE is a CONSTRUCTOR, this optimization is only
10398 useful if this doesn't store the CONSTRUCTOR into
10399 memory. If it does, it is more efficient to just
10400 load the data from the array directly. */
10401 rtx ret
= expand_constructor (value
, target
,
10403 if (ret
== NULL_RTX
)
10408 return expand_expr (value
, target
, tmode
, modifier
);
10410 else if (TREE_CODE (init
) == CONSTRUCTOR
)
10412 unsigned HOST_WIDE_INT ix
;
10415 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init
), ix
,
10417 if (tree_int_cst_equal (field
, index
))
10419 if (TREE_SIDE_EFFECTS (value
))
10422 if (TREE_CODE (value
) == CONSTRUCTOR
)
10424 /* If VALUE is a CONSTRUCTOR, this
10425 optimization is only useful if
10426 this doesn't store the CONSTRUCTOR
10427 into memory. If it does, it is more
10428 efficient to just load the data from
10429 the array directly. */
10430 rtx ret
= expand_constructor (value
, target
,
10432 if (ret
== NULL_RTX
)
10437 expand_expr (fold (value
), target
, tmode
, modifier
);
10440 else if (TREE_CODE (init
) == STRING_CST
)
10442 tree low_bound
= array_ref_low_bound (exp
);
10443 tree index1
= fold_convert_loc (loc
, sizetype
, treeop1
);
10445 /* Optimize the special case of a zero lower bound.
10447 We convert the lower bound to sizetype to avoid problems
10448 with constant folding. E.g. suppose the lower bound is
10449 1 and its mode is QI. Without the conversion
10450 (ARRAY + (INDEX - (unsigned char)1))
10452 (ARRAY + (-(unsigned char)1) + INDEX)
10454 (ARRAY + 255 + INDEX). Oops! */
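	    /* With the conversion to sizetype, the same example instead
	       yields (ARRAY + ((sizetype) INDEX - (sizetype) 1)), where the
	       subtraction is done in the pointer-sized type and cannot wrap
	       in the narrow type (worked illustration of the comment above;
	       no additional transformation is performed here).  */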
10455 if (!integer_zerop (low_bound
))
10456 index1
= size_diffop_loc (loc
, index1
,
10457 fold_convert_loc (loc
, sizetype
,
10460 if (tree_fits_uhwi_p (index1
)
10461 && compare_tree_int (index1
, TREE_STRING_LENGTH (init
)) < 0)
10463 tree type
= TREE_TYPE (TREE_TYPE (init
));
10464 scalar_int_mode mode
;
10466 if (is_int_mode (TYPE_MODE (type
), &mode
)
10467 && GET_MODE_SIZE (mode
) == 1)
10468 return gen_int_mode (TREE_STRING_POINTER (init
)
10469 [TREE_INT_CST_LOW (index1
)],
10475 goto normal_inner_ref
;
10477 case COMPONENT_REF
:
10478 /* If the operand is a CONSTRUCTOR, we can just extract the
10479 appropriate field if it is present. */
10480 if (TREE_CODE (treeop0
) == CONSTRUCTOR
)
10482 unsigned HOST_WIDE_INT idx
;
10484 scalar_int_mode field_mode
;
10486 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (treeop0
),
10488 if (field
== treeop1
10489 /* We can normally use the value of the field in the
10490 CONSTRUCTOR. However, if this is a bitfield in
10491 an integral mode that we can fit in a HOST_WIDE_INT,
10492 we must mask only the number of bits in the bitfield,
10493 since this is done implicitly by the constructor. If
10494 the bitfield does not meet either of those conditions,
10495 we can't do this optimization. */
10496 && (! DECL_BIT_FIELD (field
)
10497 || (is_int_mode (DECL_MODE (field
), &field_mode
)
10498 && (GET_MODE_PRECISION (field_mode
)
10499 <= HOST_BITS_PER_WIDE_INT
))))
10501 if (DECL_BIT_FIELD (field
)
10502 && modifier
== EXPAND_STACK_PARM
)
10504 op0
= expand_expr (value
, target
, tmode
, modifier
);
10505 if (DECL_BIT_FIELD (field
))
10507 HOST_WIDE_INT bitsize
= TREE_INT_CST_LOW (DECL_SIZE (field
));
10508 scalar_int_mode imode
10509 = SCALAR_INT_TYPE_MODE (TREE_TYPE (field
));
10511 if (TYPE_UNSIGNED (TREE_TYPE (field
)))
10513 op1
= gen_int_mode ((HOST_WIDE_INT_1
<< bitsize
) - 1,
10515 op0
= expand_and (imode
, op0
, op1
, target
);
10519 int count
= GET_MODE_PRECISION (imode
) - bitsize
;
10521 op0
= expand_shift (LSHIFT_EXPR
, imode
, op0
, count
,
10523 op0
= expand_shift (RSHIFT_EXPR
, imode
, op0
, count
,
10531 goto normal_inner_ref
;
10533 case BIT_FIELD_REF
:
10534 case ARRAY_RANGE_REF
:
10537 machine_mode mode1
, mode2
;
10538 poly_int64 bitsize
, bitpos
, bytepos
;
10540 int reversep
, volatilep
= 0, must_force_mem
;
10542 = get_inner_reference (exp
, &bitsize
, &bitpos
, &offset
, &mode1
,
10543 &unsignedp
, &reversep
, &volatilep
);
10544 rtx orig_op0
, memloc
;
10545 bool clear_mem_expr
= false;
10547 /* If we got back the original object, something is wrong. Perhaps
10548 we are evaluating an expression too early. In any event, don't
10549 infinitely recurse. */
10550 gcc_assert (tem
!= exp
);
10552 /* If TEM's type is a union of variable size, pass TARGET to the inner
10553 computation, since it will need a temporary and TARGET is known
10554 to have to do. This occurs in unchecked conversion in Ada. */
10556 = expand_expr_real (tem
,
10557 (TREE_CODE (TREE_TYPE (tem
)) == UNION_TYPE
10558 && COMPLETE_TYPE_P (TREE_TYPE (tem
))
10559 && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem
)))
10561 && modifier
!= EXPAND_STACK_PARM
10562 ? target
: NULL_RTX
),
10564 modifier
== EXPAND_SUM
? EXPAND_NORMAL
: modifier
,
10567 /* If the field has a mode, we want to access it in the
10568 field's mode, not the computed mode.
10569 If a MEM has VOIDmode (external with incomplete type),
10570 use BLKmode for it instead. */
10573 if (mode1
!= VOIDmode
)
10574 op0
= adjust_address (op0
, mode1
, 0);
10575 else if (GET_MODE (op0
) == VOIDmode
)
10576 op0
= adjust_address (op0
, BLKmode
, 0);
10580 = CONSTANT_P (op0
) ? TYPE_MODE (TREE_TYPE (tem
)) : GET_MODE (op0
);
10582 /* If we have either an offset, a BLKmode result, or a reference
10583 outside the underlying object, we must force it to memory.
10584 Such a case can occur in Ada if we have unchecked conversion
10585 of an expression from a scalar type to an aggregate type or
10586 for an ARRAY_RANGE_REF whose type is BLKmode, or if we were
10587 passed a partially uninitialized object or a view-conversion
10588 to a larger size. */
10589 must_force_mem
= (offset
10590 || mode1
== BLKmode
10591 || maybe_gt (bitpos
+ bitsize
,
10592 GET_MODE_BITSIZE (mode2
)));
10594 /* Handle CONCAT first. */
10595 if (GET_CODE (op0
) == CONCAT
&& !must_force_mem
)
10597 if (known_eq (bitpos
, 0)
10598 && known_eq (bitsize
, GET_MODE_BITSIZE (GET_MODE (op0
)))
10599 && COMPLEX_MODE_P (mode1
)
10600 && COMPLEX_MODE_P (GET_MODE (op0
))
10601 && (GET_MODE_PRECISION (GET_MODE_INNER (mode1
))
10602 == GET_MODE_PRECISION (GET_MODE_INNER (GET_MODE (op0
)))))
10605 op0
= flip_storage_order (GET_MODE (op0
), op0
);
10606 if (mode1
!= GET_MODE (op0
))
10609 for (int i
= 0; i
< 2; i
++)
10611 rtx op
= read_complex_part (op0
, i
!= 0);
10612 if (GET_CODE (op
) == SUBREG
)
10613 op
= force_reg (GET_MODE (op
), op
);
10614 rtx temp
= gen_lowpart_common (GET_MODE_INNER (mode1
),
10620 if (!REG_P (op
) && !MEM_P (op
))
10621 op
= force_reg (GET_MODE (op
), op
);
10622 op
= gen_lowpart (GET_MODE_INNER (mode1
), op
);
10626 op0
= gen_rtx_CONCAT (mode1
, parts
[0], parts
[1]);
10630 if (known_eq (bitpos
, 0)
10631 && known_eq (bitsize
,
10632 GET_MODE_BITSIZE (GET_MODE (XEXP (op0
, 0))))
10633 && maybe_ne (bitsize
, 0))
10635 op0
= XEXP (op0
, 0);
10636 mode2
= GET_MODE (op0
);
10638 else if (known_eq (bitpos
,
10639 GET_MODE_BITSIZE (GET_MODE (XEXP (op0
, 0))))
10640 && known_eq (bitsize
,
10641 GET_MODE_BITSIZE (GET_MODE (XEXP (op0
, 1))))
10642 && maybe_ne (bitpos
, 0)
10643 && maybe_ne (bitsize
, 0))
10645 op0
= XEXP (op0
, 1);
10647 mode2
= GET_MODE (op0
);
10650 /* Otherwise force into memory. */
10651 must_force_mem
= 1;
10654 /* If this is a constant, put it in a register if it is a legitimate
10655 constant and we don't need a memory reference. */
10656 if (CONSTANT_P (op0
)
10657 && mode2
!= BLKmode
10658 && targetm
.legitimate_constant_p (mode2
, op0
)
10659 && !must_force_mem
)
10660 op0
= force_reg (mode2
, op0
);
10662 /* Otherwise, if this is a constant, try to force it to the constant
10663 pool. Note that back-ends, e.g. MIPS, may refuse to do so if it
10664 is a legitimate constant. */
10665 else if (CONSTANT_P (op0
) && (memloc
= force_const_mem (mode2
, op0
)))
10666 op0
= validize_mem (memloc
);
10668 /* Otherwise, if this is a constant or the object is not in memory
10669 and need be, put it there. */
10670 else if (CONSTANT_P (op0
) || (!MEM_P (op0
) && must_force_mem
))
10672 memloc
= assign_temp (TREE_TYPE (tem
), 1, 1);
10673 emit_move_insn (memloc
, op0
);
10675 clear_mem_expr
= true;
10680 machine_mode address_mode
;
10681 rtx offset_rtx
= expand_expr (offset
, NULL_RTX
, VOIDmode
,
10684 gcc_assert (MEM_P (op0
));
10686 address_mode
= get_address_mode (op0
);
10687 if (GET_MODE (offset_rtx
) != address_mode
)
10689 /* We cannot be sure that the RTL in offset_rtx is valid outside
10690 of a memory address context, so force it into a register
10691 before attempting to convert it to the desired mode. */
10692 offset_rtx
= force_operand (offset_rtx
, NULL_RTX
);
10693 offset_rtx
= convert_to_mode (address_mode
, offset_rtx
, 0);
10696 /* See the comment in expand_assignment for the rationale. */
10697 if (mode1
!= VOIDmode
10698 && maybe_ne (bitpos
, 0)
10699 && maybe_gt (bitsize
, 0)
10700 && multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
10701 && multiple_p (bitpos
, bitsize
)
10702 && multiple_p (bitsize
, GET_MODE_ALIGNMENT (mode1
))
10703 && MEM_ALIGN (op0
) >= GET_MODE_ALIGNMENT (mode1
))
10705 op0
= adjust_address (op0
, mode1
, bytepos
);
10709 op0
= offset_address (op0
, offset_rtx
,
10710 highest_pow2_factor (offset
));
10713 /* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
10714 record its alignment as BIGGEST_ALIGNMENT. */
10716 && known_eq (bitpos
, 0)
10718 && is_aligning_offset (offset
, tem
))
10719 set_mem_align (op0
, BIGGEST_ALIGNMENT
);
10721 /* Don't forget about volatility even if this is a bitfield. */
10722 if (MEM_P (op0
) && volatilep
&& ! MEM_VOLATILE_P (op0
))
10724 if (op0
== orig_op0
)
10725 op0
= copy_rtx (op0
);
10727 MEM_VOLATILE_P (op0
) = 1;
10730 /* In cases where an aligned union has an unaligned object
10731 as a field, we might be extracting a BLKmode value from
10732 an integer-mode (e.g., SImode) object. Handle this case
10733 by doing the extract into an object as wide as the field
10734 (which we know to be the width of a basic mode), then
10735 storing into memory, and changing the mode to BLKmode. */
10736 if (mode1
== VOIDmode
10737 || REG_P (op0
) || GET_CODE (op0
) == SUBREG
10738 || (mode1
!= BLKmode
&& ! direct_load
[(int) mode1
]
10739 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_INT
10740 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_FLOAT
10741 && modifier
!= EXPAND_CONST_ADDRESS
10742 && modifier
!= EXPAND_INITIALIZER
10743 && modifier
!= EXPAND_MEMORY
)
10744 /* If the bitfield is volatile and the bitsize
10745 is narrower than the access size of the bitfield,
10746 we need to extract bitfields from the access. */
10747 || (volatilep
&& TREE_CODE (exp
) == COMPONENT_REF
10748 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (exp
, 1))
10749 && mode1
!= BLKmode
10750 && maybe_lt (bitsize
, GET_MODE_SIZE (mode1
) * BITS_PER_UNIT
))
10751 /* If the field isn't aligned enough to fetch as a memref,
10752 fetch it as a bit field. */
10753 || (mode1
!= BLKmode
10755 ? MEM_ALIGN (op0
) < GET_MODE_ALIGNMENT (mode1
)
10756 || !multiple_p (bitpos
, GET_MODE_ALIGNMENT (mode1
))
10757 : TYPE_ALIGN (TREE_TYPE (tem
)) < GET_MODE_ALIGNMENT (mode
)
10758 || !multiple_p (bitpos
, GET_MODE_ALIGNMENT (mode
)))
10759 && modifier
!= EXPAND_MEMORY
10760 && ((modifier
== EXPAND_CONST_ADDRESS
10761 || modifier
== EXPAND_INITIALIZER
)
10763 : targetm
.slow_unaligned_access (mode1
,
10765 || !multiple_p (bitpos
, BITS_PER_UNIT
)))
10766 /* If the type and the field are a constant size and the
10767 size of the type isn't the same size as the bitfield,
10768 we must use bitfield operations. */
10769 || (known_size_p (bitsize
)
10770 && TYPE_SIZE (TREE_TYPE (exp
))
10771 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp
)))
10772 && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp
))),
10775 machine_mode ext_mode
= mode
;
10777 if (ext_mode
== BLKmode
10778 && ! (target
!= 0 && MEM_P (op0
)
10780 && multiple_p (bitpos
, BITS_PER_UNIT
)))
10781 ext_mode
= int_mode_for_size (bitsize
, 1).else_blk ();
10783 if (ext_mode
== BLKmode
)
10786 target
= assign_temp (type
, 1, 1);
10788 /* ??? Unlike the similar test a few lines below, this one is
10789 very likely obsolete. */
10790 if (known_eq (bitsize
, 0))
10793 /* In this case, BITPOS must start at a byte boundary and
10794 TARGET, if specified, must be a MEM. */
10795 gcc_assert (MEM_P (op0
)
10796 && (!target
|| MEM_P (target
)));
10798 bytepos
= exact_div (bitpos
, BITS_PER_UNIT
);
10799 poly_int64 bytesize
= bits_to_bytes_round_up (bitsize
);
10800 emit_block_move (target
,
10801 adjust_address (op0
, VOIDmode
, bytepos
),
10802 gen_int_mode (bytesize
, Pmode
),
10803 (modifier
== EXPAND_STACK_PARM
10804 ? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL
));
10809 /* If we have nothing to extract, the result will be 0 for targets
10810 with SHIFT_COUNT_TRUNCATED == 0 and garbage otherwise. Always
10811 return 0 for the sake of consistency, as reading a zero-sized
10812 bitfield is valid in Ada and the value is fully specified. */
10813 if (known_eq (bitsize
, 0))
10816 op0
= validize_mem (op0
);
10818 if (MEM_P (op0
) && REG_P (XEXP (op0
, 0)))
10819 mark_reg_pointer (XEXP (op0
, 0), MEM_ALIGN (op0
));
10821 /* If the result has a record type and the extraction is done in
10822 an integral mode, then the field may be not aligned on a byte
10823 boundary; in this case, if it has reverse storage order, it
10824 needs to be extracted as a scalar field with reverse storage
10825 order and put back into memory order afterwards. */
10826 if (TREE_CODE (type
) == RECORD_TYPE
10827 && GET_MODE_CLASS (ext_mode
) == MODE_INT
)
10828 reversep
= TYPE_REVERSE_STORAGE_ORDER (type
);
10830 op0
= extract_bit_field (op0
, bitsize
, bitpos
, unsignedp
,
10831 (modifier
== EXPAND_STACK_PARM
10832 ? NULL_RTX
: target
),
10833 ext_mode
, ext_mode
, reversep
, alt_rtl
);
10835 /* If the result has a record type and the mode of OP0 is an
10836 integral mode then, if BITSIZE is narrower than this mode
10837 and this is for big-endian data, we must put the field
10838 into the high-order bits. And we must also put it back
10839 into memory order if it has been previously reversed. */
10840 scalar_int_mode op0_mode
;
10841 if (TREE_CODE (type
) == RECORD_TYPE
10842 && is_int_mode (GET_MODE (op0
), &op0_mode
))
10844 HOST_WIDE_INT size
= GET_MODE_BITSIZE (op0_mode
);
10846 gcc_checking_assert (known_le (bitsize
, size
));
10847 if (maybe_lt (bitsize
, size
)
10848 && reversep
? !BYTES_BIG_ENDIAN
: BYTES_BIG_ENDIAN
)
10849 op0
= expand_shift (LSHIFT_EXPR
, op0_mode
, op0
,
10850 size
- bitsize
, op0
, 1);
10853 op0
= flip_storage_order (op0_mode
, op0
);
10856 /* If the result type is BLKmode, store the data into a temporary
10857 of the appropriate type, but with the mode corresponding to the
10858 mode for the data we have (op0's mode). */
10859 if (mode
== BLKmode
)
10862 = assign_stack_temp_for_type (ext_mode
,
10863 GET_MODE_BITSIZE (ext_mode
),
10865 emit_move_insn (new_rtx
, op0
);
10866 op0
= copy_rtx (new_rtx
);
10867 PUT_MODE (op0
, BLKmode
);
10873 /* If the result is BLKmode, use that to access the object
10875 if (mode
== BLKmode
)
10878 /* Get a reference to just this component. */
10879 bytepos
= bits_to_bytes_round_down (bitpos
);
10880 if (modifier
== EXPAND_CONST_ADDRESS
10881 || modifier
== EXPAND_SUM
|| modifier
== EXPAND_INITIALIZER
)
10882 op0
= adjust_address_nv (op0
, mode1
, bytepos
);
10884 op0
= adjust_address (op0
, mode1
, bytepos
);
10886 if (op0
== orig_op0
)
10887 op0
= copy_rtx (op0
);
10889 /* Don't set memory attributes if the base expression is
10890 SSA_NAME that got expanded as a MEM. In that case, we should
10891 just honor its original memory attributes. */
10892 if (TREE_CODE (tem
) != SSA_NAME
|| !MEM_P (orig_op0
))
10893 set_mem_attributes (op0
, exp
, 0);
10895 if (REG_P (XEXP (op0
, 0)))
10896 mark_reg_pointer (XEXP (op0
, 0), MEM_ALIGN (op0
));
10898 /* If op0 is a temporary because the original expressions was forced
10899 to memory, clear MEM_EXPR so that the original expression cannot
10900 be marked as addressable through MEM_EXPR of the temporary. */
10901 if (clear_mem_expr
)
10902 set_mem_expr (op0
, NULL_TREE
);
10904 MEM_VOLATILE_P (op0
) |= volatilep
;
10907 && modifier
!= EXPAND_MEMORY
10908 && modifier
!= EXPAND_WRITE
)
10909 op0
= flip_storage_order (mode1
, op0
);
10911 if (mode
== mode1
|| mode1
== BLKmode
|| mode1
== tmode
10912 || modifier
== EXPAND_CONST_ADDRESS
10913 || modifier
== EXPAND_INITIALIZER
)
10917 target
= gen_reg_rtx (tmode
!= VOIDmode
? tmode
: mode
);
10919 convert_move (target
, op0
, unsignedp
);
10924 return expand_expr (OBJ_TYPE_REF_EXPR (exp
), target
, tmode
, modifier
);
      /* All valid uses of __builtin_va_arg_pack () are removed during
	 inlining.  */
10929 if (CALL_EXPR_VA_ARG_PACK (exp
))
10930 error ("%Kinvalid use of %<__builtin_va_arg_pack ()%>", exp
);
10932 tree fndecl
= get_callee_fndecl (exp
), attr
;
10935 && (attr
= lookup_attribute ("error",
10936 DECL_ATTRIBUTES (fndecl
))) != NULL
)
10937 error ("%Kcall to %qs declared with attribute error: %s",
10938 exp
, identifier_to_locale (lang_hooks
.decl_printable_name (fndecl
, 1)),
10939 TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr
))));
10941 && (attr
= lookup_attribute ("warning",
10942 DECL_ATTRIBUTES (fndecl
))) != NULL
)
10943 warning_at (tree_nonartificial_location (exp
),
10944 0, "%Kcall to %qs declared with attribute warning: %s",
10945 exp
, identifier_to_locale (lang_hooks
.decl_printable_name (fndecl
, 1)),
10946 TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr
))));
10948 /* Check for a built-in function. */
10949 if (fndecl
&& DECL_BUILT_IN (fndecl
))
10951 gcc_assert (DECL_BUILT_IN_CLASS (fndecl
) != BUILT_IN_FRONTEND
);
10952 if (CALL_WITH_BOUNDS_P (exp
))
10953 return expand_builtin_with_bounds (exp
, target
, subtarget
,
10956 return expand_builtin (exp
, target
, subtarget
, tmode
, ignore
);
10959 return expand_call (exp
, target
, ignore
);
10961 case VIEW_CONVERT_EXPR
:
10964 /* If we are converting to BLKmode, try to avoid an intermediate
10965 temporary by fetching an inner memory reference. */
10966 if (mode
== BLKmode
10967 && poly_int_tree_p (TYPE_SIZE (type
))
10968 && TYPE_MODE (TREE_TYPE (treeop0
)) != BLKmode
10969 && handled_component_p (treeop0
))
10971 machine_mode mode1
;
10972 poly_int64 bitsize
, bitpos
, bytepos
;
10974 int unsignedp
, reversep
, volatilep
= 0;
10976 = get_inner_reference (treeop0
, &bitsize
, &bitpos
, &offset
, &mode1
,
10977 &unsignedp
, &reversep
, &volatilep
);
10980 /* ??? We should work harder and deal with non-zero offsets. */
10982 && multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
10984 && known_size_p (bitsize
)
10985 && known_eq (wi::to_poly_offset (TYPE_SIZE (type
)), bitsize
))
10987 /* See the normal_inner_ref case for the rationale. */
10989 = expand_expr_real (tem
,
10990 (TREE_CODE (TREE_TYPE (tem
)) == UNION_TYPE
10991 && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem
)))
10993 && modifier
!= EXPAND_STACK_PARM
10994 ? target
: NULL_RTX
),
10996 modifier
== EXPAND_SUM
? EXPAND_NORMAL
: modifier
,
10999 if (MEM_P (orig_op0
))
11003 /* Get a reference to just this component. */
11004 if (modifier
== EXPAND_CONST_ADDRESS
11005 || modifier
== EXPAND_SUM
11006 || modifier
== EXPAND_INITIALIZER
)
11007 op0
= adjust_address_nv (op0
, mode
, bytepos
);
11009 op0
= adjust_address (op0
, mode
, bytepos
);
11011 if (op0
== orig_op0
)
11012 op0
= copy_rtx (op0
);
11014 set_mem_attributes (op0
, treeop0
, 0);
11015 if (REG_P (XEXP (op0
, 0)))
11016 mark_reg_pointer (XEXP (op0
, 0), MEM_ALIGN (op0
));
11018 MEM_VOLATILE_P (op0
) |= volatilep
;
11024 op0
= expand_expr_real (treeop0
, NULL_RTX
, VOIDmode
, modifier
,
11025 NULL
, inner_reference_p
);
11027 /* If the input and output modes are both the same, we are done. */
11028 if (mode
== GET_MODE (op0
))
11030 /* If neither mode is BLKmode, and both modes are the same size
11031 then we can use gen_lowpart. */
11032 else if (mode
!= BLKmode
&& GET_MODE (op0
) != BLKmode
11033 && (GET_MODE_PRECISION (mode
)
11034 == GET_MODE_PRECISION (GET_MODE (op0
)))
11035 && !COMPLEX_MODE_P (GET_MODE (op0
)))
11037 if (GET_CODE (op0
) == SUBREG
)
11038 op0
= force_reg (GET_MODE (op0
), op0
);
11039 temp
= gen_lowpart_common (mode
, op0
);
11044 if (!REG_P (op0
) && !MEM_P (op0
))
11045 op0
= force_reg (GET_MODE (op0
), op0
);
11046 op0
= gen_lowpart (mode
, op0
);
11049 /* If both types are integral, convert from one mode to the other. */
11050 else if (INTEGRAL_TYPE_P (type
) && INTEGRAL_TYPE_P (TREE_TYPE (treeop0
)))
11051 op0
= convert_modes (mode
, GET_MODE (op0
), op0
,
11052 TYPE_UNSIGNED (TREE_TYPE (treeop0
)));
11053 /* If the output type is a bit-field type, do an extraction. */
11054 else if (reduce_bit_field
)
11055 return extract_bit_field (op0
, TYPE_PRECISION (type
), 0,
11056 TYPE_UNSIGNED (type
), NULL_RTX
,
11057 mode
, mode
, false, NULL
);
11058 /* As a last resort, spill op0 to memory, and reload it in a
11060 else if (!MEM_P (op0
))
11062 /* If the operand is not a MEM, force it into memory. Since we
11063 are going to be changing the mode of the MEM, don't call
11064 force_const_mem for constants because we don't allow pool
11065 constants to change mode. */
11066 tree inner_type
= TREE_TYPE (treeop0
);
11068 gcc_assert (!TREE_ADDRESSABLE (exp
));
11070 if (target
== 0 || GET_MODE (target
) != TYPE_MODE (inner_type
))
11072 = assign_stack_temp_for_type
11073 (TYPE_MODE (inner_type
),
11074 GET_MODE_SIZE (TYPE_MODE (inner_type
)), inner_type
);
11076 emit_move_insn (target
, op0
);
11080 /* If OP0 is (now) a MEM, we need to deal with alignment issues. If the
11081 output type is such that the operand is known to be aligned, indicate
11082 that it is. Otherwise, we need only be concerned about alignment for
11083 non-BLKmode results. */
11086 enum insn_code icode
;
11088 if (modifier
!= EXPAND_WRITE
11089 && modifier
!= EXPAND_MEMORY
11090 && !inner_reference_p
11092 && MEM_ALIGN (op0
) < GET_MODE_ALIGNMENT (mode
))
11094 /* If the target does have special handling for unaligned
11095 loads of mode then use them. */
11096 if ((icode
= optab_handler (movmisalign_optab
, mode
))
11097 != CODE_FOR_nothing
)
11101 op0
= adjust_address (op0
, mode
, 0);
11102 /* We've already validated the memory, and we're creating a
11103 new pseudo destination. The predicates really can't
11105 reg
= gen_reg_rtx (mode
);
11107 /* Nor can the insn generator. */
11108 rtx_insn
*insn
= GEN_FCN (icode
) (reg
, op0
);
11112 else if (STRICT_ALIGNMENT
)
11114 tree inner_type
= TREE_TYPE (treeop0
);
11115 HOST_WIDE_INT temp_size
11116 = MAX (int_size_in_bytes (inner_type
),
11117 (HOST_WIDE_INT
) GET_MODE_SIZE (mode
));
11119 = assign_stack_temp_for_type (mode
, temp_size
, type
);
11120 rtx new_with_op0_mode
11121 = adjust_address (new_rtx
, GET_MODE (op0
), 0);
11123 gcc_assert (!TREE_ADDRESSABLE (exp
));
11125 if (GET_MODE (op0
) == BLKmode
)
11126 emit_block_move (new_with_op0_mode
, op0
,
11127 GEN_INT (GET_MODE_SIZE (mode
)),
11128 (modifier
== EXPAND_STACK_PARM
11129 ? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL
));
11131 emit_move_insn (new_with_op0_mode
, op0
);
11137 op0
= adjust_address (op0
, mode
, 0);
11144 tree lhs
= treeop0
;
11145 tree rhs
= treeop1
;
11146 gcc_assert (ignore
);
      /* Check for |= or &= of a bitfield of size one into another bitfield
	 of size 1.  In this case, (unless we need the result of the
	 assignment) we can do this more efficiently with a
	 test followed by an assignment, if necessary.

	 ??? At this point, we can't get a BIT_FIELD_REF here.  But if
	 things change so we do, this code should be enhanced to
	 handle it.  */
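      /* Concretely, for "s.a |= s.b" with one-bit fields a and b this emits
	 the equivalent of "if (s.b) s.a = 1;", and for "s.a &= s.b" the
	 equivalent of "if (!s.b) s.a = 0;", i.e. a test and a conditional
	 store instead of a full read-modify-write (illustrative sketch of
	 the transformation below).  */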
11156 if (TREE_CODE (lhs
) == COMPONENT_REF
11157 && (TREE_CODE (rhs
) == BIT_IOR_EXPR
11158 || TREE_CODE (rhs
) == BIT_AND_EXPR
)
11159 && TREE_OPERAND (rhs
, 0) == lhs
11160 && TREE_CODE (TREE_OPERAND (rhs
, 1)) == COMPONENT_REF
11161 && integer_onep (DECL_SIZE (TREE_OPERAND (lhs
, 1)))
11162 && integer_onep (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs
, 1), 1))))
11164 rtx_code_label
*label
= gen_label_rtx ();
11165 int value
= TREE_CODE (rhs
) == BIT_IOR_EXPR
;
11166 do_jump (TREE_OPERAND (rhs
, 1),
11169 profile_probability::uninitialized ());
11170 expand_assignment (lhs
, build_int_cst (TREE_TYPE (rhs
), value
),
11172 do_pending_stack_adjust ();
11173 emit_label (label
);
11177 expand_assignment (lhs
, rhs
, false);
11182 return expand_expr_addr_expr (exp
, target
, tmode
, modifier
);
11184 case REALPART_EXPR
:
11185 op0
= expand_normal (treeop0
);
11186 return read_complex_part (op0
, false);
11188 case IMAGPART_EXPR
:
11189 op0
= expand_normal (treeop0
);
11190 return read_complex_part (op0
, true);
11197 /* Expanded in cfgexpand.c. */
11198 gcc_unreachable ();
11200 case TRY_CATCH_EXPR
:
11202 case EH_FILTER_EXPR
:
11203 case TRY_FINALLY_EXPR
:
11204 /* Lowered by tree-eh.c. */
11205 gcc_unreachable ();
11207 case WITH_CLEANUP_EXPR
:
11208 case CLEANUP_POINT_EXPR
:
11210 case CASE_LABEL_EXPR
:
11215 case COMPOUND_EXPR
:
11216 case PREINCREMENT_EXPR
:
11217 case PREDECREMENT_EXPR
:
11218 case POSTINCREMENT_EXPR
:
11219 case POSTDECREMENT_EXPR
:
11222 case COMPOUND_LITERAL_EXPR
:
11223 /* Lowered by gimplify.c. */
11224 gcc_unreachable ();
11227 /* Function descriptors are not valid except for as
11228 initialization constants, and should not be expanded. */
11229 gcc_unreachable ();
11231 case WITH_SIZE_EXPR
:
11232 /* WITH_SIZE_EXPR expands to its first argument. The caller should
11233 have pulled out the size to use in whatever context it needed. */
11234 return expand_expr_real (treeop0
, original_target
, tmode
,
11235 modifier
, alt_rtl
, inner_reference_p
);
11238 return expand_expr_real_2 (&ops
, target
, tmode
, modifier
);
/* Subroutine of above: reduce EXP to the precision of TYPE (in the
   signedness of TYPE), possibly returning the result in TARGET.
   TYPE is known to be a partial integer type.  */

static rtx
reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
{
  HOST_WIDE_INT prec = TYPE_PRECISION (type);
  if (target && GET_MODE (target) != GET_MODE (exp))
    target = 0;
  /* For constant values, reduce using build_int_cst_type.  */
  if (CONST_INT_P (exp))
    {
      HOST_WIDE_INT value = INTVAL (exp);
      tree t = build_int_cst_type (type, value);
      return expand_expr (t, target, VOIDmode, EXPAND_NORMAL);
    }
  else if (TYPE_UNSIGNED (type))
    {
      scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (exp));
      rtx mask = immed_wide_int_const
	(wi::mask (prec, false, GET_MODE_PRECISION (mode)), mode);
      return expand_and (mode, exp, mask, target);
    }
  else
    {
      scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (exp));
      int count = GET_MODE_PRECISION (mode) - prec;
      exp = expand_shift (LSHIFT_EXPR, mode, exp, count, target, 0);
      return expand_shift (RSHIFT_EXPR, mode, exp, count, target, 0);
    }
}
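/* Worked example of reduce_to_bit_field_precision: reducing an SImode value
   to a bit-field type of precision 12 masks with 0xfff when the type is
   unsigned, and shifts left then arithmetically right by 20 bits when it is
   signed, so the bits above the precision hold the proper zero or sign
   extension (illustration only).  */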
/* Subroutine of above: returns 1 if OFFSET corresponds to an offset that
   when applied to the address of EXP produces an address known to be
   aligned more than BIGGEST_ALIGNMENT.  */

static int
is_aligning_offset (const_tree offset, const_tree exp)
{
  /* Strip off any conversions.  */
  while (CONVERT_EXPR_P (offset))
    offset = TREE_OPERAND (offset, 0);

  /* We must now have a BIT_AND_EXPR with a constant that is one less than
     power of 2 and which is larger than BIGGEST_ALIGNMENT.  */
  if (TREE_CODE (offset) != BIT_AND_EXPR
      || !tree_fits_uhwi_p (TREE_OPERAND (offset, 1))
      || compare_tree_int (TREE_OPERAND (offset, 1),
			   BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0
      || !pow2p_hwi (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1))
    return 0;

  /* Look at the first operand of BIT_AND_EXPR and strip any conversion.
     It must be NEGATE_EXPR.  Then strip any more conversions.  */
  offset = TREE_OPERAND (offset, 0);
  while (CONVERT_EXPR_P (offset))
    offset = TREE_OPERAND (offset, 0);

  if (TREE_CODE (offset) != NEGATE_EXPR)
    return 0;

  offset = TREE_OPERAND (offset, 0);
  while (CONVERT_EXPR_P (offset))
    offset = TREE_OPERAND (offset, 0);

  /* This must now be the address of EXP.  */
  return TREE_CODE (offset) == ADDR_EXPR && TREE_OPERAND (offset, 0) == exp;
}
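/* The shape accepted above corresponds to offsets of the form
   (- (uintptr_t) &EXP) & (ALIGN - 1), where ALIGN is a power of two whose
   byte value exceeds BIGGEST_ALIGNMENT / BITS_PER_UNIT: adding such an
   offset to &EXP rounds the address up to an ALIGN-byte boundary
   (illustrative restatement, not additional logic).  */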
11311 /* Return the tree node if an ARG corresponds to a string constant or zero
11312 if it doesn't. If we return nonzero, set *PTR_OFFSET to the offset
11313 in bytes within the string that ARG is accessing. The type of the
11314 offset will be `sizetype'. */
11317 string_constant (tree arg
, tree
*ptr_offset
)
11319 tree array
, offset
, lower_bound
;
11322 if (TREE_CODE (arg
) == ADDR_EXPR
)
11324 if (TREE_CODE (TREE_OPERAND (arg
, 0)) == STRING_CST
)
11326 *ptr_offset
= size_zero_node
;
11327 return TREE_OPERAND (arg
, 0);
11329 else if (TREE_CODE (TREE_OPERAND (arg
, 0)) == VAR_DECL
)
11331 array
= TREE_OPERAND (arg
, 0);
11332 offset
= size_zero_node
;
11334 else if (TREE_CODE (TREE_OPERAND (arg
, 0)) == ARRAY_REF
)
11336 array
= TREE_OPERAND (TREE_OPERAND (arg
, 0), 0);
11337 offset
= TREE_OPERAND (TREE_OPERAND (arg
, 0), 1);
11338 if (TREE_CODE (array
) != STRING_CST
&& !VAR_P (array
))
11341 /* Check if the array has a nonzero lower bound. */
11342 lower_bound
= array_ref_low_bound (TREE_OPERAND (arg
, 0));
11343 if (!integer_zerop (lower_bound
))
11345 /* If the offset and base aren't both constants, return 0. */
11346 if (TREE_CODE (lower_bound
) != INTEGER_CST
)
11348 if (TREE_CODE (offset
) != INTEGER_CST
)
11350 /* Adjust offset by the lower bound. */
11351 offset
= size_diffop (fold_convert (sizetype
, offset
),
11352 fold_convert (sizetype
, lower_bound
));
11355 else if (TREE_CODE (TREE_OPERAND (arg
, 0)) == MEM_REF
)
11357 array
= TREE_OPERAND (TREE_OPERAND (arg
, 0), 0);
11358 offset
= TREE_OPERAND (TREE_OPERAND (arg
, 0), 1);
11359 if (TREE_CODE (array
) != ADDR_EXPR
)
11361 array
= TREE_OPERAND (array
, 0);
11362 if (TREE_CODE (array
) != STRING_CST
&& !VAR_P (array
))
11368 else if (TREE_CODE (arg
) == PLUS_EXPR
|| TREE_CODE (arg
) == POINTER_PLUS_EXPR
)
11370 tree arg0
= TREE_OPERAND (arg
, 0);
11371 tree arg1
= TREE_OPERAND (arg
, 1);
11376 if (TREE_CODE (arg0
) == ADDR_EXPR
11377 && (TREE_CODE (TREE_OPERAND (arg0
, 0)) == STRING_CST
11378 || TREE_CODE (TREE_OPERAND (arg0
, 0)) == VAR_DECL
))
11380 array
= TREE_OPERAND (arg0
, 0);
11383 else if (TREE_CODE (arg1
) == ADDR_EXPR
11384 && (TREE_CODE (TREE_OPERAND (arg1
, 0)) == STRING_CST
11385 || TREE_CODE (TREE_OPERAND (arg1
, 0)) == VAR_DECL
))
11387 array
= TREE_OPERAND (arg1
, 0);
11396 if (TREE_CODE (array
) == STRING_CST
)
11398 *ptr_offset
= fold_convert (sizetype
, offset
);
11401 else if (VAR_P (array
) || TREE_CODE (array
) == CONST_DECL
)
11404 tree init
= ctor_for_folding (array
);
11406 /* Variables initialized to string literals can be handled too. */
11407 if (init
== error_mark_node
11409 || TREE_CODE (init
) != STRING_CST
)
11412 /* Avoid const char foo[4] = "abcde"; */
11413 if (DECL_SIZE_UNIT (array
) == NULL_TREE
11414 || TREE_CODE (DECL_SIZE_UNIT (array
)) != INTEGER_CST
11415 || (length
= TREE_STRING_LENGTH (init
)) <= 0
11416 || compare_tree_int (DECL_SIZE_UNIT (array
), length
) < 0)
11419 /* If variable is bigger than the string literal, OFFSET must be constant
11420 and inside of the bounds of the string literal. */
11421 offset
= fold_convert (sizetype
, offset
);
11422 if (compare_tree_int (DECL_SIZE_UNIT (array
), length
) > 0
11423 && (! tree_fits_uhwi_p (offset
)
11424 || compare_tree_int (offset
, length
) >= 0))
11427 *ptr_offset
= offset
;
/* Generate code to calculate the expression OPS (a comparison, supplied in
   exploded form) using a store-flag instruction, and return an rtx for the
   result.

   If TARGET is nonzero, store the result there if convenient.

   Return zero if there is no suitable set-flag instruction
   available on this machine.

   Once expand_expr has been called on the arguments of the comparison,
   we are committed to doing the store flag, since it is not safe to
   re-evaluate the expression.  We emit the store-flag insn by calling
   emit_store_flag, but only expand the arguments if we have a reason
   to believe that emit_store_flag will be successful.  If we think that
   it will, but it isn't, we have to simulate the store-flag with a
   set/jump/set sequence.  */
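/* For example, for the statement _1 = a_2 > b_3 on a target with a usable
   store-flag (cstore) pattern, the operands are expanded once and a single
   insn materializes 0 or 1 directly in the result register, avoiding a
   compare-and-branch sequence (illustrative sketch; the insn chosen is
   target-specific).  */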
11452 do_store_flag (sepops ops
, rtx target
, machine_mode mode
)
11454 enum rtx_code code
;
11455 tree arg0
, arg1
, type
;
11456 machine_mode operand_mode
;
11459 rtx subtarget
= target
;
11460 location_t loc
= ops
->location
;
11465 /* Don't crash if the comparison was erroneous. */
11466 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
11469 type
= TREE_TYPE (arg0
);
11470 operand_mode
= TYPE_MODE (type
);
11471 unsignedp
= TYPE_UNSIGNED (type
);
11473 /* We won't bother with BLKmode store-flag operations because it would mean
11474 passing a lot of information to emit_store_flag. */
11475 if (operand_mode
== BLKmode
)
11478 /* We won't bother with store-flag operations involving function pointers
11479 when function pointers must be canonicalized before comparisons. */
11480 if (targetm
.have_canonicalize_funcptr_for_compare ()
11481 && ((TREE_CODE (TREE_TYPE (arg0
)) == POINTER_TYPE
11482 && (TREE_CODE (TREE_TYPE (TREE_TYPE (arg0
)))
11484 || (TREE_CODE (TREE_TYPE (arg1
)) == POINTER_TYPE
11485 && (TREE_CODE (TREE_TYPE (TREE_TYPE (arg1
)))
11486 == FUNCTION_TYPE
))))
11492 /* For vector typed comparisons emit code to generate the desired
11493 all-ones or all-zeros mask. Conveniently use the VEC_COND_EXPR
11494 expander for this. */
11495 if (TREE_CODE (ops
->type
) == VECTOR_TYPE
)
11497 tree ifexp
= build2 (ops
->code
, ops
->type
, arg0
, arg1
);
11498 if (VECTOR_BOOLEAN_TYPE_P (ops
->type
)
11499 && expand_vec_cmp_expr_p (TREE_TYPE (arg0
), ops
->type
, ops
->code
))
11500 return expand_vec_cmp_expr (ops
->type
, ifexp
, target
);
11503 tree if_true
= constant_boolean_node (true, ops
->type
);
11504 tree if_false
= constant_boolean_node (false, ops
->type
);
11505 return expand_vec_cond_expr (ops
->type
, ifexp
, if_true
,
11510 /* Get the rtx comparison code to use. We know that EXP is a comparison
11511 operation of some type. Some comparisons against 1 and -1 can be
11512 converted to comparisons with zero. Do so here so that the tests
11513 below will be aware that we have a comparison with zero. These
11514 tests will not catch constants in the first operand, but constants
11515 are rarely passed as the first operand. */
11526 if (integer_onep (arg1
))
11527 arg1
= integer_zero_node
, code
= unsignedp
? LEU
: LE
;
11529 code
= unsignedp
? LTU
: LT
;
11532 if (! unsignedp
&& integer_all_onesp (arg1
))
11533 arg1
= integer_zero_node
, code
= LT
;
11535 code
= unsignedp
? LEU
: LE
;
11538 if (! unsignedp
&& integer_all_onesp (arg1
))
11539 arg1
= integer_zero_node
, code
= GE
;
11541 code
= unsignedp
? GTU
: GT
;
11544 if (integer_onep (arg1
))
11545 arg1
= integer_zero_node
, code
= unsignedp
? GTU
: GT
;
11547 code
= unsignedp
? GEU
: GE
;
11550 case UNORDERED_EXPR
:
11576 gcc_unreachable ();
11579 /* Put a constant second. */
11580 if (TREE_CODE (arg0
) == REAL_CST
|| TREE_CODE (arg0
) == INTEGER_CST
11581 || TREE_CODE (arg0
) == FIXED_CST
)
11583 std::swap (arg0
, arg1
);
11584 code
= swap_condition (code
);
11587 /* If this is an equality or inequality test of a single bit, we can
11588 do this by shifting the bit being tested to the low-order bit and
11589 masking the result with the constant 1. If the condition was EQ,
11590 we xor it with 1. This does not require an scc insn and is faster
11591 than an scc insn even if we have it.
11593 The code to make this transformation was moved into fold_single_bit_test,
11594 so we just call into the folder and expand its result. */
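  /* For example, (x & 8) != 0 folds to (x >> 3) & 1, and (x & 8) == 0 to
     ((x >> 3) & 1) ^ 1; both forms are branch-free and need no scc
     instruction (illustration of the transformation performed by
     fold_single_bit_test).  */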
11596 if ((code
== NE
|| code
== EQ
)
11597 && integer_zerop (arg1
)
11598 && (TYPE_PRECISION (ops
->type
) != 1 || TYPE_UNSIGNED (ops
->type
)))
11600 gimple
*srcstmt
= get_def_for_expr (arg0
, BIT_AND_EXPR
);
11602 && integer_pow2p (gimple_assign_rhs2 (srcstmt
)))
11604 enum tree_code tcode
= code
== NE
? NE_EXPR
: EQ_EXPR
;
11605 tree type
= lang_hooks
.types
.type_for_mode (mode
, unsignedp
);
11606 tree temp
= fold_build2_loc (loc
, BIT_AND_EXPR
, TREE_TYPE (arg1
),
11607 gimple_assign_rhs1 (srcstmt
),
11608 gimple_assign_rhs2 (srcstmt
));
11609 temp
= fold_single_bit_test (loc
, tcode
, temp
, arg1
, type
);
11611 return expand_expr (temp
, target
, VOIDmode
, EXPAND_NORMAL
);
11615 if (! get_subtarget (target
)
11616 || GET_MODE (subtarget
) != operand_mode
)
11619 expand_operands (arg0
, arg1
, subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
11622 target
= gen_reg_rtx (mode
);
11624 /* Try a cstore if possible. */
11625 return emit_store_flag_force (target
, code
, op0
, op1
,
11626 operand_mode
, unsignedp
,
11627 (TYPE_PRECISION (ops
->type
) == 1
11628 && !TYPE_UNSIGNED (ops
->type
)) ? -1 : 1);
/* Attempt to generate a casesi instruction.  Returns 1 if successful,
   0 otherwise (i.e. if there is no casesi instruction).

   DEFAULT_PROBABILITY is the probability of jumping to the default
   label.  */

int
try_casesi (tree index_type, tree index_expr, tree minval, tree range,
	    rtx table_label, rtx default_label, rtx fallback_label,
	    profile_probability default_probability)
{
  struct expand_operand ops[5];
  scalar_int_mode index_mode = SImode;
  rtx op1, op2, index;

  if (! targetm.have_casesi ())
    return 0;

  /* The index must be some form of integer.  Convert it to SImode.  */
  scalar_int_mode omode = SCALAR_INT_TYPE_MODE (index_type);
  if (GET_MODE_BITSIZE (omode) > GET_MODE_BITSIZE (index_mode))
    {
      rtx rangertx = expand_normal (range);

      /* We must handle the endpoints in the original mode.  */
      index_expr = build2 (MINUS_EXPR, index_type,
			   index_expr, minval);
      minval = integer_zero_node;
      index = expand_normal (index_expr);
      if (default_label)
	emit_cmp_and_jump_insns (rangertx, index, LTU, NULL_RTX,
				 omode, 1, default_label,
				 default_probability);
      /* Now we can safely truncate.  */
      index = convert_to_mode (index_mode, index, 0);
    }
  else
    {
      if (omode != index_mode)
	{
	  index_type = lang_hooks.types.type_for_mode (index_mode, 0);
	  index_expr = fold_convert (index_type, index_expr);
	}

      index = expand_normal (index_expr);
    }

  do_pending_stack_adjust ();

  op1 = expand_normal (minval);
  op2 = expand_normal (range);

  create_input_operand (&ops[0], index, index_mode);
  create_convert_operand_from_type (&ops[1], op1, TREE_TYPE (minval));
  create_convert_operand_from_type (&ops[2], op2, TREE_TYPE (range));
  create_fixed_operand (&ops[3], table_label);
  create_fixed_operand (&ops[4], (default_label
				  ? default_label
				  : fallback_label));
  expand_jump_insn (targetm.code_for_casesi, 5, ops);
  return 1;
}
/* Attempt to generate a tablejump instruction; same concept.  */
/* Subroutine of the next function.

   INDEX is the value being switched on, with the lowest value
   in the table already subtracted.
   MODE is its expected mode (needed if INDEX is constant).
   RANGE is the length of the jump table.
   TABLE_LABEL is a CODE_LABEL rtx for the table itself.

   DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the
   index value is out of range.
   DEFAULT_PROBABILITY is the probability of jumping to
   the default label.  */

static void
do_tablejump (rtx index, machine_mode mode, rtx range, rtx table_label,
	      rtx default_label, profile_probability default_probability)
{
  rtx temp, vector;

  if (INTVAL (range) > cfun->cfg->max_jumptable_ents)
    cfun->cfg->max_jumptable_ents = INTVAL (range);

  /* Do an unsigned comparison (in the proper mode) between the index
     expression and the value which represents the length of the range.
     Since we just finished subtracting the lower bound of the range
     from the index expression, this comparison allows us to simultaneously
     check that the original index expression value is both greater than
     or equal to the minimum value of the range and less than or equal to
     the maximum value of the range.  */

  if (default_label)
    emit_cmp_and_jump_insns (index, range, GTU, NULL_RTX, mode, 1,
			     default_label, default_probability);

  /* If index is in range, it must fit in Pmode.
     Convert to Pmode so we can index with it.  */
  if (mode != Pmode)
    index = convert_to_mode (Pmode, index, 1);

  /* Don't let a MEM slip through, because then INDEX that comes
     out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
     and break_out_memory_refs will go to work on it and mess it up.  */
#ifdef PIC_CASE_VECTOR_ADDRESS
  if (flag_pic && !REG_P (index))
    index = copy_to_mode_reg (Pmode, index);
#endif

  /* ??? The only correct use of CASE_VECTOR_MODE is the one inside the
     GET_MODE_SIZE, because this indicates how large insns are.  The other
     uses should all be Pmode, because they are addresses.  This code
     could fail if addresses and insns are not the same size.  */
  index = simplify_gen_binary (MULT, Pmode, index,
			       gen_int_mode (GET_MODE_SIZE (CASE_VECTOR_MODE),
					     Pmode));
  index = simplify_gen_binary (PLUS, Pmode, index,
			       gen_rtx_LABEL_REF (Pmode, table_label));

#ifdef PIC_CASE_VECTOR_ADDRESS
  if (flag_pic)
    index = PIC_CASE_VECTOR_ADDRESS (index);
  else
#endif
    index = memory_address (CASE_VECTOR_MODE, index);
  temp = gen_reg_rtx (CASE_VECTOR_MODE);
  vector = gen_const_mem (CASE_VECTOR_MODE, index);
  convert_move (temp, vector, 0);

  emit_jump_insn (targetm.gen_tablejump (temp, table_label));

  /* If we are generating PIC code or if the table is PC-relative, the
     table and JUMP_INSN must be adjacent, so don't output a BARRIER.  */
  if (! CASE_VECTOR_PC_RELATIVE && ! flag_pic)
    emit_barrier ();
}

int
try_tablejump (tree index_type, tree index_expr, tree minval, tree range,
	       rtx table_label, rtx default_label,
	       profile_probability default_probability)
{
  rtx index;

  if (! targetm.have_tablejump ())
    return 0;

  index_expr = fold_build2 (MINUS_EXPR, index_type,
			    fold_convert (index_type, index_expr),
			    fold_convert (index_type, minval));
  index = expand_normal (index_expr);
  do_pending_stack_adjust ();

  do_tablejump (index, TYPE_MODE (index_type),
		convert_modes (TYPE_MODE (index_type),
			       TYPE_MODE (TREE_TYPE (range)),
			       expand_normal (range),
			       TYPE_UNSIGNED (TREE_TYPE (range))),
		table_label, default_label, default_probability);
  return 1;
}
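/* Worked example of the two functions above: for a switch whose case values
   span 3..10, the index has 3 subtracted first, so valid indices fall in
   0..7; the single unsigned GTU comparison against 7 in do_tablejump then
   rejects both original values below 3 (which wrapped to large unsigned
   numbers) and above 10, before the scaled index is used to load the label
   address from the dispatch table (illustration; scaling and modes depend
   on CASE_VECTOR_MODE).  */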
/* Return a CONST_VECTOR rtx representing vector mask for
   a VECTOR_CST of booleans.  */
static rtx
const_vector_mask_from_tree (tree exp)
{
  rtvec v;
  unsigned i, units;
  tree elt;
  machine_mode inner, mode;

  mode = TYPE_MODE (TREE_TYPE (exp));
  units = VECTOR_CST_NELTS (exp);
  inner = GET_MODE_INNER (mode);

  v = rtvec_alloc (units);

  for (i = 0; i < units; ++i)
    {
      elt = VECTOR_CST_ELT (exp, i);

      gcc_assert (TREE_CODE (elt) == INTEGER_CST);
      if (integer_zerop (elt))
	RTVEC_ELT (v, i) = CONST0_RTX (inner);
      else if (integer_onep (elt)
	       || integer_minus_onep (elt))
	RTVEC_ELT (v, i) = CONSTM1_RTX (inner);
      else
	gcc_unreachable ();
    }

  return gen_rtx_CONST_VECTOR (mode, v);
}

/* EXP is a VECTOR_CST in which each element is either all-zeros or all-ones.
   Return a constant scalar rtx of mode MODE in which bit X is set if element
   X of EXP is nonzero.  */
static rtx
const_scalar_mask_from_tree (scalar_int_mode mode, tree exp)
{
  wide_int res = wi::zero (GET_MODE_PRECISION (mode));
  tree elt;
  unsigned i;

  for (i = 0; i < VECTOR_CST_NELTS (exp); ++i)
    {
      elt = VECTOR_CST_ELT (exp, i);
      gcc_assert (TREE_CODE (elt) == INTEGER_CST);
      if (integer_all_onesp (elt))
	res = wi::set_bit (res, i);
      else
	gcc_assert (integer_zerop (elt));
    }

  return immed_wide_int_const (res, mode);
}

/* Return a CONST_VECTOR rtx for a VECTOR_CST tree.  */

static rtx
const_vector_from_tree (tree exp)
{
  rtvec v;
  unsigned i, units;
  tree elt;
  machine_mode inner, mode;

  mode = TYPE_MODE (TREE_TYPE (exp));

  if (initializer_zerop (exp))
    return CONST0_RTX (mode);

  if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
    return const_vector_mask_from_tree (exp);

  units = VECTOR_CST_NELTS (exp);
  inner = GET_MODE_INNER (mode);

  v = rtvec_alloc (units);

  for (i = 0; i < units; ++i)
    {
      elt = VECTOR_CST_ELT (exp, i);

      if (TREE_CODE (elt) == REAL_CST)
	RTVEC_ELT (v, i) = const_double_from_real_value (TREE_REAL_CST (elt),
							 inner);
      else if (TREE_CODE (elt) == FIXED_CST)
	RTVEC_ELT (v, i) = CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
							 inner);
      else
	RTVEC_ELT (v, i) = immed_wide_int_const (wi::to_poly_wide (elt),
						 inner);
    }

  return gen_rtx_CONST_VECTOR (mode, v);
}
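/* Example of the two boolean-vector encodings above: a 4-element mask with
   the value {0, -1, -1, 0} becomes a CONST_VECTOR of 0 and -1 elements via
   const_vector_mask_from_tree, or the scalar constant 0b0110 (bits 1 and 2
   set) via const_scalar_mask_from_tree on targets that represent masks in
   scalar integer modes (illustration; the caller picks the representation
   based on the mask type's mode).  */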
/* Build a decl for a personality function given a language prefix.  */

tree
build_personality_function (const char *lang)
{
  const char *unwind_and_version;
  tree decl, type;
  char *name;

  switch (targetm_common.except_unwind_info (&global_options))
    {
    case UI_NONE:
      return NULL;
    case UI_SJLJ:
      unwind_and_version = "_sj0";
      break;
    case UI_DWARF2:
    case UI_TARGET:
      unwind_and_version = "_v0";
      break;
    case UI_SEH:
      unwind_and_version = "_seh0";
      break;
    default:
      gcc_unreachable ();
    }

  name = ACONCAT (("__", lang, "_personality", unwind_and_version, NULL));

  type = build_function_type_list (integer_type_node, integer_type_node,
				   long_long_unsigned_type_node,
				   ptr_type_node, ptr_type_node, NULL_TREE);
  decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
		     get_identifier (name), type);
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);

  return decl;
}

/* Extracts the personality function of DECL and returns the corresponding
   libfunc.  */

rtx
get_personality_function (tree decl)
{
  tree personality = DECL_FUNCTION_PERSONALITY (decl);
  enum eh_personality_kind pk;

  pk = function_needs_eh_personality (DECL_STRUCT_FUNCTION (decl));
  if (pk == eh_personality_none)
    return NULL;

  if (!personality
      && pk == eh_personality_any)
    personality = lang_hooks.eh_personality ();

  if (pk == eh_personality_lang)
    gcc_assert (personality != NULL_TREE);

  return XEXP (DECL_RTL (personality), 0);
}
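/* For example, with DWARF2 unwind info build_personality_function ("gxx")
   declares __gxx_personality_v0, while an SJLJ configuration yields
   __gxx_personality_sj0 (illustration of the naming scheme above).  */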
/* Returns a tree for the size of EXP in bytes.  */

static tree
tree_expr_size (const_tree exp)
{
  if (DECL_P (exp)
      && DECL_SIZE_UNIT (exp) != 0)
    return DECL_SIZE_UNIT (exp);
  else
    return size_in_bytes (TREE_TYPE (exp));
}

/* Return an rtx for the size in bytes of the value of EXP.  */

rtx
expr_size (tree exp)
{
  tree size;

  if (TREE_CODE (exp) == WITH_SIZE_EXPR)
    size = TREE_OPERAND (exp, 1);
  else
    {
      size = tree_expr_size (exp);
      gcc_assert (size == SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, exp));
    }

  return expand_expr (size, NULL_RTX, TYPE_MODE (sizetype), EXPAND_NORMAL);
}

/* Return a wide integer for the size in bytes of the value of EXP, or -1
   if the size can vary or is larger than an integer.  */

static HOST_WIDE_INT
int_expr_size (tree exp)
{
  tree size;

  if (TREE_CODE (exp) == WITH_SIZE_EXPR)
    size = TREE_OPERAND (exp, 1);
  else
    size = tree_expr_size (exp);

  if (size == 0 || !tree_fits_shwi_p (size))
    return -1;

  return tree_to_shwi (size);
}